2 * Author: Daniel Grund, Sebastian Hack
4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
16 #include "iredges_t.h"
26 #include "besched_t.h"
29 #include "bechordal_t.h"
32 /* This enables re-computation of values. Current state: Unfinished and buggy. */
/* Forward typedefs for the reload list node and the per-value spill record. */
35 typedef struct _reloader_t reloader_t;
36 typedef struct _spill_info_t spill_info_t;
/* Per-value spill bookkeeping: the node to be spilled plus the linked list
 * of all places (reloaders) where a reload of it was requested.
 * NOTE(review): struct body elided below (closing brace not visible). */
43 struct _spill_info_t {
44 ir_node *spilled_node;
45 reloader_t *reloaders;
/* A spill context ties a spilled value to one concrete user, so distinct
 * users can get distinct spill nodes while identical (value, user) pairs
 * share one; see be_get_spill_ctx(). */
48 typedef struct _spill_ctx_t {
49 ir_node *spilled; /**< The spilled node. */
50 ir_node *user; /**< The node this spill is for. */
51 ir_node *spill; /**< The spill itself. */
/* Fields of the spill environment (presumably struct _spill_env_t; the
 * opening line and some fields, e.g. spill_ctxs/obst, are elided here). */
55 const arch_register_class_t *cls;
56 const be_chordal_env_t *chordal_env;
59 set *spills; /**< all spill_info_t's, which must be placed */
60 pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
61 decide_irn_t is_mem_phi; /**< callback func to decide if a phi needs special spilling */
62 void *data; /**< data passed to all callbacks */
63 DEBUG_ONLY(firm_dbg_module_t *dbg;)
66 /* associated Phi -> Spill*/
/* NOTE(review): the member list of this struct (presumably the phi and its
 * spill node, keyed by phi) is elided in this view — confirm against the
 * full file. */
67 typedef struct _phi_spill_assoc_t {
73 * Compare two Phi->Spill associations.
75 static int cmp_phi_spill_assoc(const void *a, const void *b, size_t n) {
76 const phi_spill_assoc_t *p1 = a;
77 const phi_spill_assoc_t *p2 = b;
78 return p1->phi != p2->phi;
82 * compare two spill contexts.
84 static int cmp_spillctx(const void *a, const void *b, size_t n) {
85 const spill_ctx_t *p = a;
86 const spill_ctx_t *q = b;
87 return p->user != q->user || p->spilled != q->spilled;
91 * Compare two spill infos.
93 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
94 const spill_info_t *xx = x;
95 const spill_info_t *yy = y;
96 return xx->spilled_node != yy->spilled_node;
100 /* Sets the debug module of a spill environment. */
/* NOTE(review): function body elided in this view; presumably just assigns
 * env->dbg = dbg — confirm against the full file. */
101 void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
106 /* Creates a new spill environment. */
107 spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env, decide_irn_t is_mem_phi, void *data) {
/* xmalloc presumably aborts on OOM, so no NULL check here — confirm. */
108 spill_env_t *env = xmalloc(sizeof(env[0]));
/* Two hash sets with initial capacity 1024: spill contexts and spill infos. */
109 env->spill_ctxs = new_set(cmp_spillctx, 1024);
110 env->spills = new_set(cmp_spillinfo, 1024);
111 env->cls = chordal_env->cls;
112 env->is_mem_phi = is_mem_phi;
/* NOTE(review): assignment of env->data (line 113) and the trailing
 * "return env;" are elided in this view. */
114 env->chordal_env = chordal_env;
/* Obstack backs the reloader_t list allocations (see be_add_reload). */
115 obstack_init(&env->obst);
119 /* Deletes a spill environment. */
/* Frees the two sets and the obstack created in be_new_spill_env;
 * the free(senv) itself is presumably on an elided trailing line. */
120 void be_delete_spill_env(spill_env_t *senv) {
121 del_set(senv->spill_ctxs);
122 del_set(senv->spills);
123 obstack_free(&senv->obst, NULL);
128 * Returns a spill context. If the context did not exists, create one.
130 * @param sc the set containing all spill contexts
131 * @param to_spill the node that should be spilled
132 * @param ctx_irn a user of the spilled node
134 * @return a spill context.
/* NOTE(review): the local "spill_ctx_t templ;" declaration (and presumably
 * templ.spill = NULL) is elided between the signature and the assignments. */
136 static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
139 templ.spilled = to_spill;
140 templ.user = ctx_irn;
/* set_insert returns the existing entry for an equal key (per cmp_spillctx),
 * otherwise inserts templ — so this is a find-or-create in one call. */
143 return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
149 * @param senv the spill environment
150 * @param irn the node that should be spilled
151 * @param ctx_irn a user of the spilled node
153 * @return a be_Spill node
155 static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
/* NOTE(review): the local "spill_ctx_t *ctx;" declaration and the early
 * return when ctx->spill already exists are elided in this view. */
157 const be_main_env_t *env = senv->chordal_env->birg->main_env;
158 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
160 // Has the value already been spilled?
161 ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
165 /* Trying to spill an already spilled value, no need for a new spill
166 * node then, we can simply connect to the same one for this reload
/* Reloads carry their originating spill as the be_pos_Reload_mem input. */
168 if(be_is_Reload(irn)) {
169 return get_irn_n(irn, be_pos_Reload_mem);
/* Create the actual be_Spill node and remember it in the context. */
172 ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
177 * If the first usage of a Phi result would be out of memory
178 * there is no sense in allocating a register for it.
179 * Thus we spill it and all its operands to the same spill slot.
180 * Therefore the phi/dataB becomes a phi/Memory
182 * @param senv the spill environment
183 * @param phi the Phi node that should be spilled
184 * @param ctx_irn a user of the spilled node
186 * @return a be_Spill node
188 static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, unsigned visited_nr, set *already_visited_phis) {
189 int i, n = get_irn_arity(phi);
190 ir_graph *irg = senv->chordal_env->irg;
191 ir_node *bl = get_nodes_block(phi);
192 ir_node **ins, *phi_spill;
193 phi_spill_assoc_t key;
197 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
/* Build a memory Phi with all inputs Bad for now; real spill values for
 * the operands are patched in via set_irn_n below. */
199 /* build a new PhiM */
200 NEW_ARR_A(ir_node *, ins, n);
201 for (i = 0; i < n; ++i) {
202 ins[i] = new_r_Bad(irg);
204 phi_spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
/* Record phi -> phi_spill so mutually-recursive phi cycles terminate
 * (key.phi assignment presumably on the elided line 205 — confirm). */
206 key.spill = phi_spill;
207 set_insert(already_visited_phis, &key, sizeof(key), HASH_PTR(phi));
209 /* search an existing spill for this context */
210 ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
212 /* if not found spill the phi */
/* Mark visited so a recursive call on a phi cycle does not recurse again. */
214 set_irn_visited(phi, visited_nr);
216 /* collect all arguments of the phi */
217 for (i = 0; i < n; ++i) {
218 ir_node *arg = get_irn_n(phi, i);
220 phi_spill_assoc_t *entry;
/* Mem-phi argument: spill it recursively, or look up its already-created
 * spill when the cycle has brought us here before. */
222 if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) {
223 if (get_irn_visited(arg) < visited_nr)
224 sub_res = be_spill_phi(senv, arg, ctx_irn, visited_nr, already_visited_phis);
226 /* we already visited the argument phi: get its spill */
229 entry = set_find(already_visited_phis, &key, sizeof(key), HASH_PTR(arg));
230 assert(entry && "argument phi already visited, but no spill found?!?");
231 sub_res = entry->spill;
232 assert(sub_res && "spill missing?!?");
/* Ordinary argument: plain spill. */
236 sub_res = be_spill_irn(senv, arg, ctx_irn);
/* Patch the spill of this operand into the PhiM input i. */
238 set_irn_n(phi_spill, i, sub_res);
/* The PhiM acts as the spill of the whole phi in this context. */
241 ctx->spill = phi_spill;
249 * @param senv the spill environment
250 * @param to_spill the node that should be spilled
252 * @return a be_Spill node
/* Dispatches to be_spill_phi for registered mem-phis, else be_spill_irn,
 * with IR optimizations temporarily disabled around the construction. */
254 static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill, unsigned visited_nr) {
255 ir_graph *irg = get_irn_irg(to_spill);
/* Save optimizer state so it can be restored afterwards. */
256 int save_optimize = get_optimize();
257 int save_normalize = get_opt_normalize();
258 set *already_visited_phis = new_set(cmp_phi_spill_assoc, 10);
262 * Disable optimization so that the phi functions do not
/* (the set_optimize(0) call is presumably on an elided line). */
266 set_opt_normalize(0);
268 if (pset_find_ptr(senv->mem_phis, to_spill))
269 res = be_spill_phi(senv, to_spill, to_spill, visited_nr, already_visited_phis);
271 res = be_spill_irn(senv, to_spill, to_spill);
273 del_set(already_visited_phis);
275 /* reset the optimizations */
276 set_optimize(save_optimize);
277 set_opt_normalize(save_normalize);
287 * Check if a spilled node could be rematerialized.
289 * @param senv the spill environment
290 * @param spill the Spill node
291 * @param spilled the node that was spilled
292 * @param reloader an irn that requires a reload
/* This is the BUGGY_REMAT variant (see the #else below); it additionally
 * checks that the spilled node's operands are still live at the remat
 * point. Per the file header comment this path is unfinished and buggy. */
294 static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
297 /* check for 'normal' spill and general remat condition */
298 if (!be_is_Spill(spill) || !arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable))
301 /* check availability of original arguments */
302 if (is_Block(reloader)) {
304 /* we want to remat at the end of a block.
305 * thus all arguments must be alive at the end of the block
307 for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
308 ir_node *arg = get_irn_n(spilled, pos);
309 if (!is_live_end(reloader, arg))
315 /* we want to remat before the insn reloader
316 * thus an argument is alive if
317 * - it interferes with the reloaders result
319 * - or it is (last-) used by reloader itself
321 for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
322 ir_node *arg = get_irn_n(spilled, pos);
325 if (values_interfere(reloader, arg))
/* Otherwise, arg is alive only if the reloader itself uses it. */
328 for (i=0, m=get_irn_arity(reloader); i<m; ++i) {
329 ir_node *rel_arg = get_irn_n(reloader, i);
334 /* arg is not alive before reloader */
346 #else /* BUGGY_REMAT */
349 * A very simple rematerialization checker.
351 * @param senv the spill environment
352 * @param spill the Spill node
353 * @param spilled the node that was spilled
354 * @param reloader an irn that requires a reload
/* Conservative variant: only zero-arity, architecturally rematerializable
 * values behind a real be_Spill qualify — no liveness analysis needed. */
356 static int check_remat_conditions(spill_env_t *senv, ir_node *spill, ir_node *spilled, ir_node *reloader) {
357 const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
359 return get_irn_arity(spilled) == 0 &&
360 be_is_Spill(spill) &&
361 arch_irn_is(aenv, spilled, rematerializable);
364 #endif /* BUGGY_REMAT */
369 * Re-materialize a node.
371 * @param senv the spill environment
372 * @param spilled the node that was spilled
373 * @param reloader an irn that requires a reload
/* Clones the spilled node and schedules the clone at the reload point:
 * at the end of a block (skipping trailing control flow) when reloader
 * is a block, otherwise immediately before the reloader instruction. */
375 static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
377 ir_node *bl = (is_Block(reloader)) ? reloader : get_nodes_block(reloader);
379 /* recompute the value */
380 res = new_ir_node(get_irn_dbg_info(spilled), senv->chordal_env->irg, bl,
382 get_irn_mode(spilled),
383 get_irn_arity(spilled),
/* +1 skips the block reference stored at index 0 of the in-array. */
384 get_irn_in(spilled) + 1);
385 copy_node_attr(spilled, res);
387 DBG((senv->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader));
389 /* insert in schedule */
390 if (is_Block(reloader)) {
/* Skip backwards over control-flow nodes so the remat lands before them. */
391 ir_node *insert = sched_skip(reloader, 0, sched_skip_cf_predicator, (void *) senv->chordal_env->birg->main_env->arch_env);
392 sched_add_after(insert, res);
394 sched_add_before(reloader, res);
401 * Walker: fills the mem_phis set by evaluating Phi nodes
402 * using the is_mem_phi() callback.
404 static void phi_walker(ir_node *irn, void *env) {
405 spill_env_t *senv = env;
/* (an is_Phi(irn) guard is presumably on the elided lines 406-407). */
408 const arch_env_t *arch = senv->chordal_env->birg->main_env->arch_env;
/* Only phis of the register class being spilled, and only those the
 * client callback classifies as memory phis, are collected. */
409 if (arch_irn_has_reg_class(arch, irn, 0, senv->cls) &&
410 senv->is_mem_phi(irn, senv->data)) {
411 DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
412 pset_insert_ptr(senv->mem_phis, irn);
/* Materialize all requested spills and reloads (or remats) in the graph,
 * rewire uses via SSA reconstruction, and clean up nodes that became dead.
 * Newly created reloads are additionally collected into @p reload_set
 * (when non-NULL — confirm; the guard is partly elided below). */
417 void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
418 const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
419 ir_graph *irg = senv->chordal_env->irg;
425 /* get all special spilled phis */
426 DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
427 senv->mem_phis = pset_new_ptr_default();
428 irg_walk_graph(senv->chordal_env->irg, phi_walker, NULL, senv);
430 /* Add reloads for mem_phis */
431 /* BETTER: These reloads (1) should only be inserted, if they are really needed */
432 DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
433 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
435 DBG((senv->dbg, LEVEL_1, " Mem-phi %+F\n", irn));
436 foreach_out_edge(irn, e) {
437 ir_node *user = e->src;
/* Non-mem-phi users of a mem-phi need the value back in a register:
 * request a reload on the corresponding control-flow edge. */
438 if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
439 ir_node *use_bl = get_nodes_block(user);
440 DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
441 be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */
/* Fresh visited number for the spill-phi recursion in be_spill_node. */
446 visited_nr = get_irg_visited(irg) + 1;
447 set_irg_visited(irg, visited_nr);
449 /* process each spilled node */
450 DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
451 possibly_dead = new_pdeq();
452 for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
455 ir_mode *mode = get_irn_mode(si->spilled_node);
457 pset *values = pset_new_ptr(16);
459 /* go through all reloads for this spill */
460 for(rld = si->reloaders; rld; rld = rld->next) {
463 /* the spill for this reloader */
464 ir_node *spill = be_spill_node(senv, si->spilled_node, visited_nr);
/* Prefer rematerialization over a reload when it is legal; the spill
 * may then end up unused and is queued for dead-node cleanup. */
467 if (check_remat_conditions(senv, spill, si->spilled_node, rld->reloader)) {
468 new_val = do_remat(senv, si->spilled_node, rld->reloader);
469 pdeq_putl(possibly_dead, spill);
474 new_val = be_reload(aenv, senv->cls, rld->reloader, mode, spill);
476 DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
477 pset_insert_ptr(values, new_val);
479 pset_insert_ptr(reload_set, new_val);
482 /* introduce copies, rewire the uses */
483 assert(pset_count(values) > 0 && "???");
484 pset_insert_ptr(values, si->spilled_node);
485 be_ssa_constr_set_ignore(senv->chordal_env->dom_front, values, senv->mem_phis);
487 /* Remove reloads which are not used by anyone */
488 /* TODO: Better call a general garbage collection routine here... this here gets clunky
489 * and doesn't handle all cases (like memphis)
491 foreach_pset(values, value) {
492 if(get_irn_n_edges(value) == 0) {
494 // remove the node from preds
495 if(be_is_Reload(value)) {
496 ir_node* spill = get_irn_n(value, be_pos_Reload_mem);
497 if(be_is_Spill(spill)) {
498 assert(be_is_Spill(spill));
/* Detach the dead reload from its spill and frame inputs. */
500 set_irn_n(value, be_pos_Reload_mem, new_r_Bad(irg));
501 set_irn_n(value, be_pos_Reload_frame, new_r_Bad(irg));
503 // maybe the spill is not used anymore too now?
504 if(get_irn_n_edges(spill) == 0) {
506 set_irn_n(spill, be_pos_Spill_val, new_r_Bad(irg));
507 set_irn_n(spill, be_pos_Spill_frame, new_r_Bad(irg));
509 } else if(is_Phi(spill)) {
512 assert(0 && "Only spill or mem-phi expected here");
514 } else if(is_Phi(value)) {
515 for(i = 0; i < get_Phi_n_preds(value); ++i)
516 set_irn_n(value, i, new_r_Bad(irg));
/* Detach mem-phi operands; the operands themselves may now be dead. */
526 foreach_pset(senv->mem_phis, irn) {
528 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
529 pdeq_putl(possibly_dead, get_irn_n(irn, i));
530 set_irn_n(irn, i, new_r_Bad(senv->chordal_env->irg));
535 /* check if possibly dead nodes are really dead yet */
536 while (! pdeq_empty(possibly_dead)) {
537 ir_node *irn = pdeq_getr(possibly_dead);
538 const ir_edge_t *edge = get_irn_out_edge_first(irn);
/* (the "edge == NULL means no users left" check is presumably on the
 * elided lines 539-541 — confirm.) */
542 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
543 pdeq_putl(possibly_dead, get_irn_n(irn, i));
544 set_irn_n(irn, i, new_r_Bad(senv->chordal_env->irg));
549 del_pdeq(possibly_dead);
550 del_pset(senv->mem_phis);
552 // reloads are placed now, but we might reuse the spill environment for further spilling decisions
553 del_set(senv->spills);
554 senv->spills = new_set(cmp_spillinfo, 1024);
/* Request that @p to_spill be reloaded directly before node @p before.
 * The actual spill/reload placement happens in be_insert_spills_reloads. */
557 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
558 spill_info_t templ, *res;
/* NOTE(review): the "reloader_t *rel;" declaration is elided here. */
/* Find-or-create the spill_info for this value (set_insert returns the
 * existing entry when one matches per cmp_spillinfo). */
561 templ.spilled_node = to_spill;
562 templ.reloaders = NULL;
563 res = set_insert(senv->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
/* Prepend a new reloader entry; obstack memory lives until env deletion. */
565 rel = obstack_alloc(&senv->obst, sizeof(rel[0]));
566 rel->reloader = before;
567 rel->next = res->reloaders;
568 res->reloaders = rel;
571 void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
572 ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
573 be_add_reload(senv, to_spill, insert_bl);
578 /****************************************
580 SPILL SLOT MANAGEMENT AND OPTS
582 ****************************************/
/* A spill slot groups all spills (members) that may share one stack
 * location. NOTE(review): the size/align fields referenced elsewhere are
 * elided from this struct's visible lines. */
584 typedef struct _spill_slot_t {
588 ir_mode *largest_mode; /* the mode of all members with largest size */
/* Environment for the spill-slot computation/optimization passes.
 * (the obstack member "ob" used below is on an elided line). */
591 typedef struct _ss_env_t {
593 be_chordal_env_t *cenv;
594 pmap *slots; /* maps spill_contexts to spill_slots */
595 pmap *types; /* maps modes to types */
596 DEBUG_ONLY(firm_dbg_module_t *dbg;)
601 * Walker: compute the spill slots
/* Creates a spill slot per spill context and collects all be_Spill nodes
 * of the same context into the same slot. */
603 static void compute_spill_slots_walker(ir_node *spill, void *env) {
604 ss_env_t *ssenv = env;
/* Only be_Spill nodes are of interest; skip everything else. */
609 if (!be_is_Spill(spill))
612 /* check, if this spill is for a context already known */
613 ctx = be_get_Spill_context(spill);
614 entry = pmap_find(ssenv->slots, ctx);
617 struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
618 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
619 ir_mode *largest_mode = arch_register_class_mode(cls);
621 /* this is a new spill context */
622 ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
623 ss->members = pset_new_ptr(8);
624 ss->largest_mode = largest_mode;
625 ss->size = get_mode_size_bytes(ss->largest_mode);
626 ss->align = arch_isa_get_reg_class_alignment(arch_env->isa, cls);
627 pmap_insert(ssenv->slots, ctx, ss);
629 /* values with the same spill_ctx must go into the same spill slot */
633 /* ugly mega assert :-) */
/* Debug-only sanity check: same-slot spills must have one size and must
 * not interfere (otherwise they could not share a stack location). */
636 struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
637 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
638 int size = get_mode_size_bytes(arch_register_class_mode(cls));
639 assert(ss->size == size && "Different sizes for the same spill slot are not allowed.");
640 for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
641 /* use values_interfere here, because it uses the dominance check,
642 which does work for values in memory */
643 assert(!values_interfere(spill, irn) && "Spills for the same spill slot must not interfere!");
/* Finally register this spill as a member of its slot. */
649 pset_insert_ptr(ss->members, spill);
653 * qsort compare function, sort spill slots by size.
655 static int ss_sorter(const void *v1, const void *v2) {
656 const spill_slot_t **ss1 = (const spill_slot_t **)v1;
657 const spill_slot_t **ss2 = (const spill_slot_t **)v2;
658 return ((int) (*ss2)->size) - ((int) (*ss1)->size);
663 * This function should optimize the spill slots.
664 * - Coalescing of multiple slots
665 * - Ordering the slots
667 * Input slots are in @p ssenv->slots
668 * @p size The count of initial spill slots in @p ssenv->slots
669 * This also is the size of the preallocated array @p ass
671 * @return An array of spill slots @p ass in specific order
673 static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t *ass[]) {
674 int i, o, used_slots;
/* Flatten the ctx->slot map into the output array (i presumably
 * initialized to 0 on an elided line). */
678 pmap_foreach(ssenv->slots, entr)
679 ass[i++] = entr->value;
681 /* Sort the array to minimize fragmentation and cache footprint.
682 Large slots come first */
683 qsort(ass, size, sizeof(ass[0]), ss_sorter);
685 /* For each spill slot:
686 - assign a new offset to this slot
687 - xor find another slot to coalesce with */
689 for (i=0; i<size; ++i) { /* for each spill slot */
693 DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
694 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
695 DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));
/* Try to coalesce slot i into the first already-placed slot whose
 * members do not interfere with any member of slot i. */
698 for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
699 /* check inter-slot-pairs for interference */
701 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
702 for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
703 if(values_interfere(n1, n2)) {
/* pset_break ends the interrupted iterations cleanly before the goto. */
704 pset_break(ass[i]->members);
705 pset_break(ass[o]->members);
706 DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
707 goto interf_detected;
710 /* if we are here, there is no interference between ass[i] and ass[o] */
713 interf_detected: /*nothing*/ ;
716 /* now the members of ass[i] join the members of ass[tgt_slot] */
718 /* do we need a new slot? */
719 if (tgt_slot == -1) {
720 tgt_slot = used_slots;
/* Reuse slot i's storage as the new target: take over its size and
 * start with a fresh member set (filled by the copy loop below). */
725 ass[tgt_slot]->size = ass[i]->size;
726 del_pset(ass[tgt_slot]->members);
727 ass[tgt_slot]->members = pset_new_ptr(8);
731 /* copy the members to the target pset */
732 /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
734 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
735 pset_insert_ptr(ass[tgt_slot]->members, n1);
/* Alignment (bytes) of the whole spill area within the stack frame. */
739 #define ALIGN_SPILL_AREA 16
/* Convenience iterator over a pset. */
740 #define pset_foreach(pset, elm) for(elm=pset_first(pset); elm; elm=pset_next(pset))
743 * Returns a spill type for a mode. Keep them in a map to reduce
744 * the number of types.
746 * @param types a map containing all created types
747 * @param ss the spill slot
749 * Note that the created types are identical for every mode.
750 * This rule might break if two different register classes return the same
751 * mode but different alignments.
753 static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) {
/* Lookup by mode first; only build a new primitive type on a miss. */
754 pmap_entry *e = pmap_find(types, ss->largest_mode);
759 snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
760 res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
761 set_type_alignment_bytes(res, ss->align);
762 pmap_insert(types, ss->largest_mode, res);
/* Cache hit path: the cached type must agree with this slot's alignment. */
766 assert(get_type_alignment_bytes(res) == (int)ss->align);
772 * Create spill slot entities on the frame type.
774 * @param ssenv the spill environment
775 * @param n_slots number of spill slots
776 * @param ss array of spill slots
778 static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
779 int i, offset, frame_align;
780 ir_type *frame = get_irg_frame_type(ssenv->cenv->irg);
782 /* aligning by increasing frame size */
783 offset = get_type_size_bits(frame) / 8;
784 offset = round_up2(offset, ALIGN_SPILL_AREA);
/* Mark the frame size as unknown while entities are being added;
 * the final size is set at the end of this function. */
785 set_type_size_bytes(frame, -1);
787 /* create entities and assign offsets according to size and alignment*/
788 for (i = 0; i < n_slots; ++i) {
795 snprintf(buf, sizeof(buf), "spill_slot_%d", i);
796 name = new_id_from_str(buf);
798 spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));
/* Align this slot's start, then advance past its (aligned) size. */
801 offset = round_up2(offset, ss[i]->align);
803 set_entity_offset_bytes(spill_ent, offset);
804 /* next possible offset */
805 offset += round_up2(ss[i]->size, ss[i]->align);
/* Every spill sharing this slot points at the same frame entity. */
807 pset_foreach(ss[i]->members, irn)
808 be_set_Spill_entity(irn, spill_ent);
811 /* set final size of stack frame */
812 frame_align = get_type_alignment_bytes(frame);
813 set_type_size_bytes(frame, round_up2(offset, frame_align));
/* Driver: collect spill slots from the graph, coalesce/order them, and
 * materialize them as entities with offsets on the stack frame type.
 * (local declarations of ssenv, ss, ss_size, pme are on elided lines). */
816 void be_compute_spill_offsets(be_chordal_env_t *cenv) {
822 obstack_init(&ssenv.ob);
824 ssenv.slots = pmap_create();
825 ssenv.types = pmap_create();
826 FIRM_DBG_REGISTER(ssenv.dbg, "ir.be.spillslots");
828 /* Get initial spill slots */
829 irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
831 /* Build an empty array for optimized spill slots */
832 ss_size = pmap_count(ssenv.slots);
833 ss = obstack_alloc(&ssenv.ob, ss_size * sizeof(*ss));
834 optimize_slots(&ssenv, ss_size, ss);
836 /* Integrate slots into the stack frame entity */
837 assign_entities(&ssenv, ss_size, ss);
/* Tear down: free all member psets, the maps, and the obstack. */
840 pmap_foreach(ssenv.slots, pme)
841 del_pset(((spill_slot_t *)pme->value)->members);
842 pmap_destroy(ssenv.slots);
843 pmap_destroy(ssenv.types);
844 obstack_free(&ssenv.ob, NULL);
/* Propagate the spill entities to the corresponding reload nodes. */
846 be_copy_entities_to_reloads(cenv->irg);