2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Beladys spillalgorithm version 2.
23 * @author Daniel Grund, Matthias Braun, Sebastian Hack
34 #include "irnodeset.h"
36 #include "irprintf_t.h"
42 #include "iredges_t.h"
50 #include "bespillbelady.h"
52 #include "besched_t.h"
56 #include "bechordal_t.h"
57 #include "bespilloptions.h"
58 #include "beloopana.h"
/* Bit masks selecting this module's debug channels (further DBG_* bits
 * appear to be defined on lines elided from this listing). */
69 #define DBG_WORKSET 128
70 #define DBG_GLOBAL 256
/* Module-wide debug handle; registered in be_init_spillbelady2(). */
71 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
75 * An association between a node and a point in time.
77 typedef struct _loc_t {
78 ir_node *irn; /**< A node. */
79 unsigned time; /**< A use time (see beuses.h). */
80 unsigned version; /**< That is used in the global pass below.
81 For usage see the comments below.
82 In the local belady pass, this is not
/* NOTE(review): the end of the version comment and the closing of loc_t
 * are elided from this listing. */
/* A working set: a bounded set of live values with their next-use times.
 * Allocated with room for n_regs trailing loc_t entries (see new_workset). */
86 typedef struct _workset_t {
87 int len; /**< current length */
88 loc_t vals[0]; /**< inlined array of the values/distances in this working set */
/* Environment threaded through the whole spill pass.  Besides the members
 * visible here, other code in this file references members ob (an obstack),
 * irg, lv (liveness) and ef (execution frequencies) whose declarations are
 * elided from this listing. */
91 typedef struct _belady_env_t {
94 const arch_env_t *arch;
95 const arch_register_class_t *cls;
99 ir_node **blocks; /**< Array of all blocks. */
100 int n_blocks; /**< Number of blocks in the graph. */
101 int n_regs; /**< number of regs in this reg-class */
102 workset_t *ws; /**< the main workset used while processing a block. ob-allocated */
103 be_uses_t *uses; /**< env for the next-use magic */
104 ir_node *instr; /**< current instruction */
105 unsigned instr_nr; /**< current instruction number (relative to block start) */
107 spill_env_t *senv; /**< see bespill.h */
/* qsort tie-breaker: order two loc_t entries by their node index so the
 * overall sort is deterministic.  The declarations of p and q (loc_t
 * pointers derived from a and b) are on lines elided from this listing. */
111 static int loc_compare_idx(const void *a, const void *b)
115 return (int) get_irn_idx(p->irn) - (int) get_irn_idx(q->irn);
/* qsort comparator for loc_t: primary key is increasing next-use time,
 * ties broken by node index via loc_compare_idx() for determinism.
 * The declarations of p and q are on lines elided from this listing. */
117 static int loc_compare(const void *a, const void *b)
121 int diff = (int) p->time - (int) q->time;
122 return diff != 0 ? diff : loc_compare_idx(a, b);
/* Debug helper: dump every (node, next-use time) pair of @p w to stderr. */
125 static INLINE void workset_print(const workset_t *w)
129 for(i = 0; i < w->len; ++i) {
130 ir_fprintf(stderr, "%+F %d\n", w->vals[i].irn, w->vals[i].time);
135 * Alloc a new workset on obstack @p ob with maximum size @p max
/* The allocation is sized for the struct header plus n_regs loc_t slots
 * (the vals[0] trailing array) and is zero-initialized.  The return
 * statement is elided from this listing. */
137 static INLINE workset_t *new_workset(belady_env_t *env, struct obstack *ob) {
139 size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
140 res = obstack_alloc(ob, size);
141 memset(res, 0, size);
146 * Alloc a new instance on obstack and make it equal to @param ws
/* Deep-copies header and all n_regs slots; the source workset is not
 * modified.  The return statement is elided from this listing. */
148 static INLINE workset_t *workset_clone(belady_env_t *env, struct obstack *ob, workset_t *ws) {
150 size_t size = sizeof(*res) + (env->n_regs)*sizeof(res->vals[0]);
151 res = obstack_alloc(ob, size);
152 memcpy(res, ws, size);
157 * Do NOT alloc anything. Make @param tgt equal to @param src.
158 * returns @param tgt for convenience
/* In-place copy into a pre-allocated workset; both must have been created
 * with capacity env->n_regs.  The return statement is elided here. */
160 static INLINE workset_t *workset_copy(belady_env_t *env, workset_t *tgt, workset_t *src) {
161 size_t size = sizeof(*src) + (env->n_regs)*sizeof(src->vals[0]);
162 memcpy(tgt, src, size);
167 * Overwrites the current content array of @param ws with the
168 * @param count locations given at memory @param locs.
169 * Set the length of @param ws to count.
/* Caller must guarantee count <= capacity (n_regs); no bounds check here. */
171 static INLINE void workset_bulk_fill(workset_t *workset, int count, const loc_t *locs) {
172 workset->len = count;
173 memcpy(&(workset->vals[0]), locs, count * sizeof(locs[0]));
177 * Inserts the value @p val into the workset, iff it is not
178 * already contained. The workset must not be full.
180 static INLINE void workset_insert(belady_env_t *env, workset_t *ws, ir_node *val) {
/* Values not belonging to the register class being spilled are ignored. */
182 /* check for current regclass */
183 if (!arch_irn_consider_in_reg_alloc(env->arch, env->cls, val)) {
184 DBG((dbg, DBG_WORKSET, "Skipped %+F\n", val));
/* NOTE(review): the early return for the skip case is elided here. */
188 /* check if val is already contained */
189 for(i=0; i<ws->len; ++i)
190 if (ws->vals[i].irn == val)
/* Appending leaves the .time field of the new slot unset; callers set
 * times explicitly (see displace()). */
194 assert(ws->len < env->n_regs && "Workset already full!");
195 ws->vals[ws->len++].irn = val;
199 * Removes all entries from this workset
/* NOTE(review): the body (presumably ws->len = 0) is elided from this
 * listing. */
201 static INLINE void workset_clear(workset_t *ws) {
206 * Removes the value @p val from the workset if present.
/* Removal is O(n) and order-destroying: the last entry is moved into the
 * freed slot.  A break after the hit is presumably on an elided line. */
208 static INLINE void workset_remove(workset_t *ws, ir_node *val) {
210 for(i=0; i<ws->len; ++i) {
211 if (ws->vals[i].irn == val) {
212 ws->vals[i] = ws->vals[--ws->len];
/* Linear search for @p val; returns its slot index.  The "found" and
 * "not found" (-1, judging by the workset_contains() macro below) return
 * statements are elided from this listing. */
218 static INLINE int workset_get_index(const workset_t *ws, const ir_node *val) {
220 for(i=0; i<ws->len; ++i) {
221 if (ws->vals[i].irn == val)
229 * Iterates over all values in the working set.
230 * @p ws The workset to iterate
231 * @p v A variable to put the current value in
232 * @p i An integer for internal use
/* NOTE(review): the increment clause of this for-macro is on an elided
 * line.  v is NULL once i reaches ws->len, terminating the loop. */
234 #define workset_foreach(ws, v, i) for(i=0; \
235 v=(i < ws->len) ? ws->vals[i].irn : NULL, i < ws->len; \
/* Accessors for the workset; all evaluate their arguments more than once,
 * so do not pass expressions with side effects. */
238 #define workset_set_time(ws, i, t) (ws)->vals[i].time=t
239 #define workset_get_time(ws, i) (ws)->vals[i].time
240 #define workset_set_length(ws, length) (ws)->len = length
241 #define workset_get_length(ws) ((ws)->len)
242 #define workset_get_val(ws, i) ((ws)->vals[i].irn)
/* NOTE(review): trailing semicolon inside this macro body is a wart; it
 * would break use in an expression context (harmless as a statement). */
243 #define workset_sort(ws) qsort((ws)->vals, (ws)->len, sizeof((ws)->vals[0]), loc_compare);
244 #define workset_contains(ws, n) (workset_get_index(ws, n) >= 0)
/* Singly linked list node for block uses; remaining members elided. */
246 typedef struct _block_use_t {
247 struct _block_use_t *next;
/* Per-block bookkeeping attached to each block via its link field
 * (see new_block_info / get_block_info).  Members bel and bl are
 * referenced elsewhere in this file but elided from this listing. */
252 typedef struct _block_info_t {
255 ir_node *first_non_in; /**< First node in block which is not a phi. */
256 workset_t *ws_start, *ws_end;
258 workset_t *entrance_reg; /**< That set will contain all values
259 transported into the block which
260 are used before they are displaced.
261 That means, we later have to care to
262 bring them into the block in a register
263 or reload them at the entry of the block. */
265 workset_t * entrance_mem; /**< That set will contain all variables
266 (transported into the block)
267 which are in the memory upon entering
269 entrance_reg U entrance_mem = live-in + local phis. */
271 int pressure; /**< The amount of registers which remain free
272 in this block. This capacity can be used to let
273 global variables, transported into other blocks,
274 live through this block. */
276 double exec_freq; /**< The execution frequency of this block. */
/* Allocate and zero a block_info_t for block @p bl on the pass obstack,
 * create its entrance worksets, record the block's execution frequency and
 * stash the info in the block's link field.  The return statement is
 * elided from this listing. */
280 static INLINE void *new_block_info(belady_env_t *bel, ir_node *bl) {
281 block_info_t *res = obstack_alloc(&bel->ob, sizeof(*res));
282 memset(res, 0, sizeof(res[0]));
285 res->entrance_reg = new_workset(bel, &bel->ob);
286 res->entrance_mem = new_workset(bel, &bel->ob);
287 res->exec_freq = get_block_execfreq(bel->ef, bl);
288 set_irn_link(bl, res);
/* The block's irn link field doubles as the block_info_t pointer. */
292 #define get_block_info(block) ((block_info_t *)get_irn_link(block))
293 #define set_block_info(block, info) set_irn_link(block, info)
296 * @return The distance to the next use or 0 if irn has dont_spill flag set
/* Wraps be_get_next_use(): infinite uses map to USES_INFINITY; values
 * flagged dont_spill must stay in the workset, so (per the doc comment
 * above) they presumably return 0 on a line elided from this listing,
 * making them the cheapest to keep. */
298 static INLINE unsigned get_distance(belady_env_t *env, ir_node *from, unsigned from_step, const ir_node *def, int skip_from_uses)
301 int flags = arch_irn_get_flags(env->arch, def);
303 assert(! (flags & arch_irn_flags_ignore));
305 use = be_get_next_use(env->uses, from, from_step, def, skip_from_uses);
306 if(USES_IS_INFINITE(use.time))
307 return USES_INFINITY;
309 /* We have to keep nonspillable nodes in the workingset */
310 if(flags & arch_irn_flags_dont_spill)
317 * Check, if the value is something that is transported into a block.
318 * That is, the value is live in or defined by a Phi in the block.
319 * @param env The belady environment.
320 * @param bl The block in question.
321 * @param irn The node in question.
322 * @return 1, if node is something transported into @p bl, 0 if not.
324 static INLINE int is_transport_in(belady_env_t *env, const ir_node *bl, const ir_node *irn)
326 return (is_Phi(irn) && get_nodes_block(irn) == bl) || be_is_live_in(env->lv, bl, irn);
330 * Performs the actions necessary to grant the request that:
331 * - new_vals can be held in registers
332 * - as few as possible other values are disposed
333 * - the worst values get disposed
336 * Actually, we should displace a value immediately after it was used.
337 * If we don't, the cardinality of the workset does not reflect the register pressure.
338 * That might be necessary to determine the capacity left in the block.
340 * @p is_usage indicates that the values in new_vals are used (not defined)
341 * In this case reloads must be performed
/* NOTE(review): numerous lines of this function are elided from this
 * listing (loop closers, some branches); comments below annotate only the
 * visible control flow. */
343 static void displace(block_info_t *bi, workset_t *new_vals, int is_usage) {
344 belady_env_t *env = bi->bel;
346 int i, len, max_allowed, demand, iter;
348 workset_t *ws = env->ws;
349 ir_node **to_insert = alloca(env->n_regs * sizeof(*to_insert));
352 1. Identify the number of needed slots and the values to reload
/* Phase 1: for each requested value not already in the workset, count it
 * toward 'demand' and, on usages, decide between a plain reload here and
 * marking it as a block-entrance value (handled by the global pass). */
355 workset_foreach(new_vals, val, iter) {
356 /* mark value as used */
358 if (! workset_contains(ws, val)) {
359 DBG((dbg, DBG_DECIDE, " insert %+F\n", val));
360 to_insert[demand++] = val;
362 int insert_reload = 1;
365 * if we use a value which is transported in this block,
366 * i.e. a phi defined here or a live in, we check if there
367 * is room for that guy to survive from the block's entrance
370 if (is_transport_in(env, bi->bl, val)) {
371 DBG((dbg, DBG_SPILL, "entrance node %+F, capacity %d:\n", val, bi->pressure));
372 if (bi->pressure < env->n_regs) {
374 workset_insert(env, bi->entrance_reg, val);
376 DBG((dbg, DBG_SPILL, "... no reload. must be considered at block start\n"));
380 * if the value on't survive in a register, note that
381 * it will be in the memory so that we can spill phis
385 workset_insert(env, bi->entrance_mem, val);
389 DBG((dbg, DBG_SPILL, "Reload %+F before %+F\n", val, env->instr));
390 be_add_reload(env->senv, val, env->instr, env->cls, 1);
395 assert(is_usage || "Defined value already in workset?!?");
396 DBG((dbg, DBG_DECIDE, " skip %+F\n", val));
399 DBG((dbg, DBG_DECIDE, " demand = %d\n", demand));
402 2. Make room for at least 'demand' slots
/* Phase 2: if the workset would overflow, recompute every member's
 * next-use distance, sort by it (farthest last) and truncate — the
 * classic Belady eviction of the values used farthest in the future. */
404 len = workset_get_length(ws);
405 max_allowed = env->n_regs - demand;
407 DBG((dbg, DBG_DECIDE, " disposing %d values\n", ws->len - max_allowed));
409 /* Only make more free room if we do not have enough */
410 if (len > max_allowed) {
411 /* get current next-use distance */
412 for (i = 0; i < ws->len; ++i) {
413 unsigned dist = get_distance(env, env->instr, env->instr_nr, workset_get_val(ws, i), !is_usage);
414 workset_set_time(ws, i, dist);
417 /* sort entries by increasing nextuse-distance*/
420 /* kill the last 'demand' entries in the array */
421 workset_set_length(ws, max_allowed);
425 3. Insert the new values into the workset
/* Phase 3: the freed slots now hold the requested values. */
427 for (i = 0; i < demand; ++i)
428 workset_insert(env, env->ws, to_insert[i]);
432 * For the given block @p block, decide for each values
433 * whether it is used from a register or is reloaded
/* Local Belady pass over one block: walk the schedule, keep the workset
 * consistent with the values used/defined by each instruction (via
 * displace()), track the maximal pressure, and record the block's end
 * workset for the later global pass.  Several lines (loop closers, the
 * env->instr / instr_nr updates near original line 465) are elided from
 * this listing. */
436 static void belady(ir_node *block, void *data) {
437 belady_env_t *env = data;
438 block_info_t *block_info = new_block_info(env, block);
443 /* process the block from start to end */
444 DBG((dbg, DBG_WSETS, "Processing...\n"));
446 new_vals = new_workset(env, &env->ob);
447 block_info->first_non_in = NULL;
449 sched_foreach(block, irn) {
451 assert(workset_get_length(env->ws) <= env->n_regs && "Too much values in workset!");
453 /* projs are handled with the tuple value.
454 * Phis are no real instr (see insert_starters())
455 * instr_nr does not increase */
456 if (is_Proj(irn) || is_Phi(irn)) {
457 DBG((dbg, DBG_DECIDE, " ...%+F skipped\n", irn));
460 DBG((dbg, DBG_DECIDE, " ...%+F\n", irn));
462 if (!block_info->first_non_in)
463 block_info->first_non_in = irn;
465 /* set instruction in the workset */
468 /* allocate all values _used_ by this instruction */
469 workset_clear(new_vals);
470 for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
471 workset_insert(env, new_vals, get_irn_n(irn, i));
473 displace(block_info, new_vals, 1);
475 /* allocate all values _defined_ by this instruction */
476 workset_clear(new_vals);
477 if (get_irn_mode(irn) == mode_T) { /* special handling for tuples and projs */
478 const ir_edge_t *edge;
480 foreach_out_edge(irn, edge) {
481 ir_node *proj = get_edge_src_irn(edge);
482 workset_insert(env, new_vals, proj);
485 workset_insert(env, new_vals, irn);
487 displace(block_info, new_vals, 0);
/* Pressure is the maximum workset size seen anywhere in the block. */
489 block_info->pressure = MAX(block_info->pressure, workset_get_length(env->ws));
493 /* Remember end-workset for this block */
494 block_info->ws_end = workset_clone(env, &env->ob, env->ws);
495 DBG((dbg, DBG_WSETS, "End workset for %+F:\n", block));
496 workset_foreach(block_info->ws_end, irn, iter)
497 DBG((dbg, DBG_WSETS, " %+F (%u)\n", irn, workset_get_time(block_info->ws_end, iter)));
502 |_ _| |__ ___ __ _| | ___ | |__ __ _| | | _ \ __ _ _ __| |_
503 | | | '_ \ / _ \ / _` | |/ _ \| '_ \ / _` | | | |_) / _` | '__| __|
504 | | | | | | __/ | (_| | | (_) | |_) | (_| | | | __/ (_| | | | |_
505 |_| |_| |_|\___| \__, |_|\___/|_.__/ \__,_|_| |_| \__,_|_| \__|
510 static int block_freq_gt(const void *a, const void *b)
512 const ir_node * const *p = a;
513 const ir_node * const *q = b;
514 block_info_t *pi = get_block_info(*p);
515 block_info_t *qi = get_block_info(*q);
516 return qi->exec_freq - pi->exec_freq;
/* Per-(block, value) record used by the global pass while it searches for
 * the cheapest way to make a value available at a block's end.  Members
 * bl, irn and costs are referenced by get_block_end_state() /
 * can_make_available_at_end() below but their declarations are elided
 * from this listing. */
519 typedef struct _block_end_state_t {
523 workset_t *end_state;
524 unsigned reload_at_end : 1;
525 unsigned live_through : 1;
/* Global search state: version counter for fixing workset entries, a
 * gauge (number of end_info records in use), an obstack and the belady
 * env — several member declarations are elided from this listing. */
528 typedef struct _global_end_state_t {
532 block_end_state_t *end_info;
535 } global_end_state_t;
/* Find the block_end_state_t for (bl, irn) among the records allocated so
 * far in this search (state->gauge many); if none exists, allocate a
 * fresh one whose end_state is a clone of the block's local Belady end
 * workset.  Returns on elided lines; the gauge increment and the bl/irn/
 * costs initialization are also elided from this listing. */
537 static block_end_state_t *get_block_end_state(global_end_state_t *state, ir_node *bl, ir_node *irn)
541 for (i = 0; i < state->gauge; ++i) {
542 block_end_state_t *bei = &state->end_info[i];
543 if (bei->bl == bl && bei->irn == irn)
548 block_info_t *bi = get_block_info(bl);
549 block_end_state_t *curr;
551 /* make sure we have room in the array */
552 ARR_EXTO(block_end_state_t, state->end_info, (int) state->gauge);
554 curr = &state->end_info[state->gauge];
556 memset(curr, 0, sizeof(curr[0]));
559 curr->end_state = workset_clone(state->env, &state->obst, bi->ws_end);
/* Forward declaration: can_bring_in and can_make_available_at_end recurse
 * into each other across the CFG. */
566 static double can_bring_in(global_end_state_t *ges, ir_node *bl, ir_node *irn, int level);
/* Compute the cost (in execution-frequency units) of having @p irn in a
 * register at the end of block @p bl, either by reloading it at the
 * block's end or by letting it live through the block (via can_bring_in).
 * Memoized through the block_end_state_t records; entries in the end
 * workset whose version is >= ges->version are "fixed" and may not be
 * evicted.  Many lines of this function are elided from this listing. */
568 static double can_make_available_at_end(global_end_state_t *ges, ir_node *bl, ir_node *irn, int level)
570 block_end_state_t *bes = get_block_end_state(ges, bl, irn);
571 workset_t *end = bes->end_state;
572 block_info_t *bi = get_block_info(bl);
573 int n_regs = bi->bel->n_regs;
576 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}can make avail %+F at end of %+F (pressure %d)\n",
577 level, irn, bl, bi->pressure));
580 * to make the value available at end,
581 * we have several cases here.
583 * - we already visited that block.
584 * - If the value is in the final end set, return 0.
585 * somebody else already allocated it there.
586 * - If not and the final end set is already full,
587 * we cannot make the value available at the end
588 * of this block. return INFINITY.
589 * - Else (value not in final end set and there is room):
590 * 1) The value is in a register at the end of the local Belady pass.
591 * Allocate a slot in the final end set and return 0.
592 * 2) The value is not in the Belady end set:
593 * If the block's capacity is < k then check what it costs
594 * to transport the value from upper blocks to this block.
595 * Compare that against the reload cost in this block. If
596 * cheaper, do the other thing. If not, reload it here.
600 * we have been here before and already figured out some costs.
601 * so we can exit safely.
/* Memoization hit: a non-negative cost was computed on a prior visit. */
603 if (bes->costs >= 0.0) {
604 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}we\'ve been here before\n", level));
608 /* if the end set contains it already, it is in a reg and it costs nothing
609 * to load it to one. */
610 index = workset_get_index(end, irn);
612 unsigned ver = end->vals[index].version;
613 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}node is in the end set and is %s fixed\n",
614 level, ver > ges->version ? "" : "not"));
617 * if the version is older, the value is already fixed
618 * and cannot be removed from the end set. If not,
619 * we fix it here by giving it our version.
621 if (ver < ges->version)
622 end->vals[index].version = ges->version;
629 * Now we have two options:
630 * 1) Reload the value at the end of the block.
631 * Therefore, perhaps, we have to erase another one from the workset.
632 * This may only be done if it has not been fixed.
633 * Since fixed means that a previous pass has decided that that value
634 * *has* to stay in the end set.
635 * 2) we can try, if the capacity of the block allows it, to let
636 * the value live through the block and make it available at
639 * First, we test the local (reload in this block) alternative
640 * and compare against the other alternative.
641 * Of course, we chose the cheaper one.
645 int len = workset_get_length(end);
/* Default: not available at all unless a slot can be found below. */
649 bes->costs = HUGE_VAL;
652 * look if there is room in the end array
653 * for the variable. Note that this does not
654 * means that the var is living through the block.
655 * There is just room at the *end*
658 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}the end set has %d free slots\n",
659 level, n_regs - len));
/* Otherwise try to evict a non-fixed entry (version older than the
 * current search version). */
663 for (i = 0; i < len; ++i)
664 if (end->vals[i].version < ges->version)
668 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}%+F (slot %d) can be erased from the end set\n",
669 level, end->vals[i].irn, i));
/* A slot was secured: compare a reload at this block's end against
 * bringing the value in from the predecessors (only possible if the
 * block has spare pressure capacity). */
675 int gauge = ges->gauge;
676 double reload_here = bi->exec_freq;
677 double bring_in = bi->pressure < n_regs ? can_bring_in(ges, bl, irn, level + 1) : HUGE_VAL;
679 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}there is a free slot. capacity=%d, reload here=%f, bring in=%f\n",
680 level, n_regs - bi->pressure, reload_here, bring_in));
683 * reloading here pays off; bringing the value in from elsewhere
684 * is too expensive, hence we drop that search by resetting
687 if (reload_here <= bring_in) {
689 bes->costs = reload_here;
690 bes->reload_at_end = 1;
694 bes->live_through = 1;
695 bes->costs = bring_in;
/* Claim the slot and fix it for this search version. */
698 end->vals[slot].irn = irn;
699 end->vals[slot].version = ges->version;
704 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}-> %f\n", level, bes->costs));
/* Cost of transporting @p irn into block @p bl through all its control
 * flow predecessors: for a Phi defined here the corresponding operand
 * must be available at each predecessor's end; for a live-in value the
 * value itself must be.  A HUGE_VAL from any predecessor aborts the
 * search (the gauge is rolled back to discard the speculative records).
 * The cost accumulation across predecessors is on lines elided from this
 * listing. */
708 static double can_bring_in(global_end_state_t *ges, ir_node *bl, ir_node *irn, int level)
710 double glob_costs = HUGE_VAL;
711 int def_block = bl == get_nodes_block(irn);
712 int phi = is_Phi(irn);
714 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}can bring in for %+F at block %+F\n", level, irn, bl));
716 if (phi || !def_block) {
717 int i, n = get_irn_arity(bl);
718 ir_node **nodes = alloca(get_irn_arity(bl) * sizeof(nodes[0]));
720 int gauge_begin = ges->gauge;
723 for (i = 0; i < n; ++i) {
724 ir_node *pr = get_Block_cfgpred_block(bl, i);
725 ir_node *op = phi && def_block ? get_irn_n(irn, i) : irn;
726 double c = can_make_available_at_end(ges, pr, op, level + 1);
729 ges->gauge = gauge_begin;
730 glob_costs = HUGE_VAL;
739 DBG((dbg, DBG_GLOBAL, "\t%{firm:indent}-> %f\n", level, glob_costs));
/* After a successful bring-in search, walk the speculative
 * block_end_state_t records (ges->gauge many) and make them real:
 * insert end-of-block reloads, bump the pressure of live-through blocks,
 * fix the moved value's version, and copy the modified end workset back
 * into the block's committed ws_end. */
743 static void materialize_and_commit_end_state(global_end_state_t *ges)
745 belady_env_t *env = ges->env;
748 DBG((dbg, DBG_GLOBAL, "\tmaterializing\n"));
749 for (i = 0; i < ges->gauge; ++i) {
750 block_end_state_t *bes = &ges->end_info[i];
751 block_info_t *bi = get_block_info(bes->bl);
754 /* insert the reload if the val was reloaded at the block's end */
755 if (bes->reload_at_end) {
756 be_add_reload(env->senv, bes->irn, bes->bl, env->cls, 1);
757 DBG((dbg, DBG_GLOBAL, "\t\tadding reload of %+F at end of %+F\n", bes->irn, bes->bl));
761 * if the variable is live through the block,
762 * update the pressure indicator.
/* live_through is a 1-bit flag, so this adds at most 1 to the pressure. */
764 bi->pressure = MAX(bi->pressure + bes->live_through, workset_get_length(bes->end_state));
766 idx = workset_get_index(bes->end_state, bes->irn);
769 * set the version number in the workset.
770 * That will mark this value as fixed in the end set
771 * and will prevent further investigations from removing
773 * Also "commit" the workset;
774 * by copying it back to the block's end workset.
777 DBG((dbg, DBG_GLOBAL, "\t\tcommiting workset of %+F with version %x\n", bes->bl, ges->version));
778 bes->end_state->vals[idx].version = ges->version;
779 workset_copy(env, bi->ws_end, bes->end_state);
785 * Examine all irns which shall be in regs at the beginning of the
/* For each value in the block's entrance_reg set, run the global search:
 * if bringing it in from predecessors is cheaper than a reload at the
 * block's start, commit the search results; otherwise insert a local
 * reload before the first non-Phi instruction.  Phis that survived
 * without an entrance reload are recorded in succ_phis so global_assign()
 * knows not to spill them.  Several lines (version bump, gauge reset,
 * else-branch braces) are elided from this listing. */
788 static void fix_block_borders(global_end_state_t *ges, ir_node *block) {
789 block_info_t *bi = get_block_info(block);
790 belady_env_t *env = ges->env;
795 DBG((dbg, DBG_GLOBAL, "fixing block borders at %+F (%fHz)\n", block, bi->exec_freq));
797 /* process all variables which shall be in a reg at
798 * the beginning of the block in the order of the next use. */
799 workset_foreach(bi->entrance_reg, irn, i) {
800 int is_entrance_phi = is_Phi(irn) && get_nodes_block(irn) == block;
801 double bring_in_costs;
803 /* reset the gauge and begin the search */
807 DBG((dbg, DBG_GLOBAL, "\ttrans in var %+F, version %x\n", irn, ges->version));
809 bring_in_costs = can_bring_in(ges, block, irn, 0);
811 /* we were not able to let the value arrive
812 * in a register at the entrance of the block
813 * so we have to do the reload locally */
814 if (bring_in_costs > bi->exec_freq) {
815 DBG((dbg, DBG_GLOBAL, "\tbring in: %f, local: %f -> doing reload at beginning\n",
816 bring_in_costs, bi->exec_freq));
818 be_add_reload(env->senv, irn, bi->first_non_in, env->cls, 1);
823 * Mark this phi as succeeded.
824 * It was not replaced by a reload at the block's entrance
825 * and thus is not phi_spilled.
828 bitset_add_irn(ges->succ_phis, irn);
830 materialize_and_commit_end_state(ges);
/* Global pass: process blocks hottest-first, fixing each block's entrance
 * values via fix_block_borders(), then spill every Phi that had to be
 * replaced by a reload at its block's entrance (i.e. is absent from
 * succ_phis).  Initialization of several ges members and the Phi filter
 * inside the scheduling loop are on lines elided from this listing. */
835 static void global_assign(belady_env_t *env)
837 global_end_state_t ges;
840 obstack_init(&ges.obst);
844 ges.end_info = NEW_ARR_F(block_end_state_t, env->n_blocks);
845 ges.succ_phis = bitset_irg_obstack_alloc(&env->ob, env->irg);
848 * sort the blocks according to execution frequency.
849 * That's not necessary for belady() but for the global pass later on.
851 qsort(env->blocks, env->n_blocks, sizeof(env->blocks[0]), block_freq_gt);
853 for (i = 0; i < env->n_blocks; ++i)
854 fix_block_borders(&ges, env->blocks[i]);
857 * Now we spill phis which cannot be kept since they were replaced
858 * by reloads at the block entrances.
860 for (i = 0; i < env->n_blocks; ++i) {
861 ir_node *bl = env->blocks[i];
864 sched_foreach(bl, irn) {
/* NOTE(review): the sense of this test looks inverted relative to the
 * comment above — presumably an elided line negates it or continues on
 * contained phis; verify against the full source. */
868 if (bitset_contains_irn(ges.succ_phis, irn))
869 be_spill_phi(env->senv, irn);
/* Block-walker callback: append each block to the pointer array being
 * grown on the env obstack (finished in be_spill_belady_spill_env2).
 * An n_blocks increment is presumably on a line elided here. */
875 static void collect_blocks(ir_node *bl, void *data)
877 belady_env_t *env = data;
879 obstack_ptr_grow(&env->ob, bl);
/* Entry point of the spiller: set up the belady_env_t, collect all blocks
 * (NULL-terminated pointer array on the obstack), run the local Belady
 * pass per block, (per the structure of this file, presumably
 * global_assign() on an elided line), materialize spills/reloads and tear
 * everything down.  If @p spill_env is NULL a fresh spill env is created
 * and destroyed locally; otherwise the caller's env is used and kept. */
882 void be_spill_belady_spill_env2(be_irg_t *birg, const arch_register_class_t *cls, spill_env_t *spill_env) {
884 ir_graph *irg = be_get_birg_irg(birg);
887 /* some special classes contain only ignore regs, nothing to do then */
888 n_regs = cls->n_regs - be_put_ignore_regs(birg, cls, NULL);
894 /* init belady env */
895 obstack_init(&env.ob);
897 env.arch = birg->main_env->arch_env;
899 env.lv = be_get_birg_liveness(birg);
901 env.ws = new_workset(&env, &env.ob);
902 env.uses = be_begin_uses(irg, env.lv);
903 env.senv = spill_env ? spill_env : be_new_spill_env(birg);
904 env.ef = be_get_birg_exec_freq(birg);
907 irg_block_walk_graph(irg, NULL, collect_blocks, &env);
908 obstack_ptr_grow(&env.ob, NULL);
909 env.blocks = obstack_finish(&env.ob);
911 /* Fix high register pressure with belady algorithm */
912 for (i = 0; i < env.n_blocks; ++i)
913 belady(env.blocks[i], &env);
917 /* Insert spill/reload nodes into the graph and fix usages */
918 be_insert_spills_reloads(env.senv);
921 if(spill_env == NULL)
922 be_delete_spill_env(env.senv);
923 be_end_uses(env.uses);
924 obstack_free(&env.ob, NULL);
929 * Do spilling for a register class on a graph using the belady heuristic.
930 * In the transformed graph, the register pressure never exceeds the number
931 * of available registers.
933 * @param birg The backend graph
934 * @param cls The register class to spill
/* Thin wrapper: run the spiller with a freshly created (and destroyed)
 * spill environment. */
936 static void be_spill_belady(be_irg_t *birg, const arch_register_class_t *cls) {
937 be_spill_belady_spill_env2(birg, cls, NULL);
/* Module constructor: register this spiller as "belady2" with the backend
 * and hook up the debug channel.  The belady_spiller initializer body
 * (presumably pointing at be_spill_belady) is elided from this listing. */
941 void be_init_spillbelady2(void)
943 static be_spiller_t belady_spiller = {
947 be_register_spiller("belady2", &belady_spiller);
948 FIRM_DBG_REGISTER(dbg, "firm.be.spill.belady2");
951 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillbelady2);