2 * Author: Daniel Grund, Sebastian Hack
4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
16 #include "iredges_t.h"
26 #include "besched_t.h"
29 #include "bechordal_t.h"
31 /* This enables re-computation of values. Current state: Unfinished and buggy. */
/* Bookkeeping types for the spiller.
 * NOTE(review): this excerpt is missing lines inside these declarations
 * (closing braces, possibly further fields); comments below describe only
 * what is visible here. */
34 typedef struct _reloader_t reloader_t;
35 typedef struct _spill_info_t spill_info_t;
/* One spilled value together with all the places that need a reload of it. */
42 struct _spill_info_t {
43 ir_node *spilled_node;
/* head of a singly linked list of reload points (filled by be_add_reload) */
44 reloader_t *reloaders;
/* A (spilled value, user) pair mapped to the spill node created for it;
 * keyed by (spilled, user), see cmp_spillctx. */
47 typedef struct _spill_ctx_t {
48 ir_node *spilled; /**< The spilled node. */
49 ir_node *user; /**< The node this spill is for. */
50 ir_node *spill; /**< The spill itself. */
/* Visible fields of the spill environment (spill_env_t). */
54 const arch_register_class_t *cls;
55 const be_chordal_env_t *chordal_env;
58 set *spills; /**< all spill_info_t's, which must be placed */
59 pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
60 ir_node **copies; /**< set of copies placed because of phi spills */
61 DEBUG_ONLY(firm_dbg_module_t *dbg;)
64 /* associated Phi -> Spill*/
65 typedef struct _phi_spill_assoc_t {
71 * Compare two Phi->Spill associations.
/* set-compare callback: returns 0 iff both entries refer to the same Phi.
 * Only equality matters, not ordering; the size parameter is unused. */
73 static int cmp_phi_spill_assoc(const void *a, const void *b, size_t n) {
74 const phi_spill_assoc_t *p1 = a;
75 const phi_spill_assoc_t *p2 = b;
76 return p1->phi != p2->phi;
80 * compare two spill contexts.
/* set-compare callback: two contexts are equal iff both the user and the
 * spilled node match (0 == equal). */
82 static int cmp_spillctx(const void *a, const void *b, size_t n) {
83 const spill_ctx_t *p = a;
84 const spill_ctx_t *q = b;
85 return p->user != q->user || p->spilled != q->spilled;
89 * Compare two spill infos.
/* set-compare callback: spill infos are keyed solely by the spilled node. */
91 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
92 const spill_info_t *xx = x;
93 const spill_info_t *yy = y;
94 return xx->spilled_node != yy->spilled_node;
98 /* Sets the debug module of a spill environment. */
/* NOTE(review): function body not visible in this excerpt; presumably just
 * assigns env->dbg = dbg — confirm against the full file. */
99 void be_set_spill_env_dbg_module(spill_env_t *env, firm_dbg_module_t *dbg) {
104 /* Creates a new spill environment. */
/* Allocates the environment and all its containers; the caller releases it
 * with be_delete_spill_env. NOTE(review): the trailing `return env;` and
 * closing brace are missing from this excerpt. */
105 spill_env_t *be_new_spill_env(const be_chordal_env_t *chordal_env) {
106 spill_env_t *env = xmalloc(sizeof(env[0]));
/* hash sets for spill contexts and spill infos, initial size 1024 */
107 env->spill_ctxs = new_set(cmp_spillctx, 1024);
108 env->spills = new_set(cmp_spillinfo, 1024);
109 env->cls = chordal_env->cls;
110 env->chordal_env = chordal_env;
111 env->mem_phis = pset_new_ptr_default();
/* flexible array of Copy nodes created for phi spills (see be_place_copies) */
112 env->copies = NEW_ARR_F(ir_node*, 0);
113 obstack_init(&env->obst);
117 /* Deletes a spill environment. */
/* Frees every container created in be_new_spill_env (mirror order).
 * NOTE(review): the free of env itself and the closing brace are not
 * visible in this excerpt. */
118 void be_delete_spill_env(spill_env_t *env) {
119 del_set(env->spill_ctxs);
120 del_set(env->spills);
121 del_pset(env->mem_phis);
122 DEL_ARR_F(env->copies);
123 obstack_free(&env->obst, NULL);
128 * Returns a spill context. If the context did not exist, create one.
130 * @param sc the set containing all spill contexts
131 * @param to_spill the node that should be spilled
132 * @param ctx_irn an user of the spilled node
134 * @return a spill context.
136 static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
/* build the lookup key; set_insert returns the existing entry if one with
 * the same (spilled, user) pair is already present (see cmp_spillctx) */
139 templ.spilled = to_spill;
140 templ.user = ctx_irn;
143 return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
147 * Schedules a node after an instruction. (That is the place after all projs and phis
148 * that are scheduled after the instruction)
150 static void sched_add_after_insn(ir_node *sched_after, ir_node *node) {
151 ir_node *next = sched_next(sched_after);
/* skip over all Proj/Phi nodes immediately following sched_after;
 * NOTE(review): the loop body's break on a non-Proj/non-Phi node is
 * implied but its line is missing from this excerpt */
152 while(!sched_is_end(next)) {
153 if(!is_Proj(next) && !is_Phi(next))
155 next = sched_next(next);
/* reached the end of the block: append after the last scheduled node */
158 if(sched_is_end(next)) {
159 next = sched_last(get_nodes_block(sched_after));
160 sched_add_after(next, node);
/* otherwise insert right before the first real instruction found */
162 sched_add_before(next, node);
169 * @param senv the spill environment
170 * @param irn the node that should be spilled
171 * @param ctx_irn an user of the spilled node
173 * @return a be_Spill node
175 static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
177 const be_main_env_t *env = senv->chordal_env->birg->main_env;
178 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
180 // Has the value already been spilled?
/* the spill context caches the spill node per (value, user) pair;
 * NOTE(review): the early return on ctx->spill != NULL is implied but
 * not visible in this excerpt */
181 ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
185 /* Trying to spill an already spilled value, no need for a new spill
186 * node then, we can simply connect to the same one for this reload
/* spilling a Reload: reuse the memory value the Reload reads from */
188 if(be_is_Reload(irn)) {
189 return get_irn_n(irn, be_pos_Reload_mem);
/* create the actual be_Spill node and schedule it directly after irn
 * (after irn's Projs/Phis, see sched_add_after_insn) */
192 ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
193 sched_add_after_insn(irn, ctx->spill);
199 * Removes all copies introduced for phi-spills
/* Rewires every user of each recorded Copy node back to the Copy's operand,
 * then empties the copies array. The Copy nodes themselves become unused.
 * NOTE(review): any explicit removal/kill of the Copy node is not visible
 * in this excerpt. */
201 static void remove_copies(spill_env_t *env) {
204 for(i = 0; i < ARR_LEN(env->copies); ++i) {
205 ir_node *node = env->copies[i];
207 const ir_edge_t *edge, *ne;
209 assert(be_is_Copy(node));
211 src = be_get_Copy_op(node);
/* safe edge iteration: set_irn_n below mutates the out edges */
212 foreach_out_edge_safe(node, edge, ne) {
213 ir_node *user = get_edge_src_irn(edge);
214 int user_pos = get_edge_src_pos(edge);
216 set_irn_n(user, user_pos, src);
/* reset the array without freeing it, so it can be reused */
220 ARR_SETLEN(ir_node*, env->copies, 0);
/* Advances forward in the schedule past any Proj nodes and returns the
 * first non-Proj node. NOTE(review): the `return node;` line is missing
 * from this excerpt. */
223 static INLINE ir_node *skip_projs(ir_node *node) {
224 while(is_Proj(node)) {
225 node = sched_next(node);
226 assert(!sched_is_end(node));
233 * Searches the schedule backwards until we reach the first use or def of a
235 * Returns the node after this node (so that you can do sched_add_before)
237 static ir_node *find_last_use_def(spill_env_t *env, ir_node *block, ir_node *value) {
238 ir_node *node, *last;
/* walk the block's schedule from last to first instruction;
 * NOTE(review): the checks comparing `node` against `value` (def case)
 * and the update of `last` are partly missing from this excerpt */
241 sched_foreach_reverse(block, node) {
/* def of value found: insert point is after it (skipping its Projs) */
248 return skip_projs(last);
/* check all arguments of node for a use of value */
250 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
251 ir_node *arg = get_irn_n(node, i);
253 return skip_projs(last);
259 // simply return first node if no def or use found
260 return sched_first(block);
264 * If the first usage of a Phi result would be out of memory
265 * there is no sense in allocating a register for it.
266 * Thus we spill it and all its operands to the same spill slot.
267 * Therefore the phi/dataB becomes a phi/Memory
269 * @param senv the spill environment
270 * @param phi the Phi node that should be spilled
271 * @param ctx_irn an user of the spilled node
273 * @return a be_Spill node
275 static ir_node *spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn, set *already_visited_phis, bitset_t *bs) {
277 int arity = get_irn_arity(phi);
278 ir_graph *irg = senv->chordal_env->irg;
279 ir_node *bl = get_nodes_block(phi);
280 ir_node **ins, *phi_spill;
281 phi_spill_assoc_t key;
285 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
287 /* build a new PhiM */
/* start with Bad inputs; they are replaced below once the spills of the
 * arguments are known (allows cycles among mem-phis) */
288 NEW_ARR_A(ir_node *, ins, arity);
289 for (i = 0; i < arity; ++i) {
290 ins[i] = new_r_Bad(irg);
292 phi_spill = new_r_Phi(senv->chordal_env->irg, bl, arity, ins, mode_M);
/* remember phi -> phi_spill so recursive calls can find it, and mark the
 * phi as visited in the bitset (cycle protection);
 * NOTE(review): the line setting key.phi is missing from this excerpt */
294 key.spill = phi_spill;
295 set_insert(already_visited_phis, &key, sizeof(key), HASH_PTR(phi));
296 bitset_set(bs, get_irn_idx(phi));
298 /* search an existing spill for this context */
299 ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
301 /* if not found spill the phi */
303 /* collect all arguments of the phi */
304 for (i = 0; i < arity; ++i) {
305 ir_node *arg = get_irn_n(phi, i);
307 phi_spill_assoc_t *entry;
/* argument is itself a mem-phi: spill it recursively (or reuse its
 * already-created PhiM if it was visited before) */
309 if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg)) {
313 } else if (! bitset_is_set(bs, get_irn_idx(arg))) {
314 sub_res = spill_phi(senv, arg, ctx_irn, already_visited_phis, bs);
316 /* we already visited the argument phi: get its spill */
319 entry = set_find(already_visited_phis, &key, sizeof(key), HASH_PTR(arg));
320 assert(entry && "argument phi already visited, but no spill found?!?");
321 sub_res = entry->spill;
322 assert(sub_res && "spill missing?!?");
/* ordinary (non-phi) argument: spill it normally */
325 sub_res = be_spill_irn(senv, arg, ctx_irn);
/* wire the argument's spill into the PhiM */
328 set_irn_n(phi_spill, i, sub_res);
331 ctx->spill = phi_spill;
339 * @param senv the spill environment
340 * @param to_spill the node that should be spilled
342 * @return a be_Spill node
/* Dispatches to spill_phi for registered mem-phis and to be_spill_irn for
 * everything else. The to_spill node acts as its own spill context. */
344 static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
345 ir_graph *irg = get_irn_irg(to_spill);
348 if (pset_find_ptr(senv->mem_phis, to_spill)) {
/* helper structures for the recursive phi spill: visited-map and a
 * bitset over all node indices of the graph (stack-allocated) */
349 set *already_visited_phis = new_set(cmp_phi_spill_assoc, 10);
350 bitset_t *bs = bitset_alloca(get_irg_last_idx(irg));
351 res = spill_phi(senv, to_spill, to_spill, already_visited_phis, bs);
352 del_set(already_visited_phis);
354 res = be_spill_irn(senv, to_spill, to_spill);
363 * Check if a spilled node could be rematerialized.
365 * @param senv the spill environment
366 * @param spill the Spill node
367 * @param spilled the node that was spilled
368 * @param reloader a irn that requires a reload
/* BUGGY_REMAT variant (see #else below): full availability analysis of the
 * spilled node's arguments at the reload point. */
370 static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
373 /* check for 'normal' spill and general remat condition */
374 if (!arch_irn_is(senv->chordal_env->birg->main_env->arch_env, spilled, rematerializable))
377 /* check availability of original arguments */
378 if (is_Block(reloader)) {
380 /* we want to remat at the end of a block.
381 * thus all arguments must be alive at the end of the block
383 for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
384 ir_node *arg = get_irn_n(spilled, pos);
385 if (!is_live_end(reloader, arg))
391 /* we want to remat before the insn reloader
392 * thus an argument is alive if
393 * - it interferes with the reloaders result
395 * - or it is (last-) used by reloader itself
397 for (pos=0, max=get_irn_arity(spilled); pos<max; ++pos) {
398 ir_node *arg = get_irn_n(spilled, pos);
401 if (values_interfere(reloader, arg))
/* check whether reloader itself uses arg */
404 for (i=0, m=get_irn_arity(reloader); i<m; ++i) {
405 ir_node *rel_arg = get_irn_n(reloader, i);
410 /* arg is not alive before reloader */
422 #else /* BUGGY_REMAT */
425 * A very simple rematerialization checker.
427 * @param senv the spill environment
428 * @param spill the Spill node
429 * @param spilled the node that was spilled
430 * @param reloader a irn that requires a reload
/* Conservative default: only nodes without arguments (constants etc.) that
 * the architecture marks rematerializable can be rematerialized. */
432 static int check_remat_conditions(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
433 const arch_env_t *aenv = senv->chordal_env->birg->main_env->arch_env;
435 return get_irn_arity(spilled) == 0 &&
436 arch_irn_is(aenv, spilled, rematerializable);
439 #endif /* BUGGY_REMAT */
442 * Re-materialize a node.
444 * @param senv the spill environment
445 * @param spilled the node that was spilled
446 * @param reloader a irn that requires a reload
/* Clones the spilled node (same opcode/mode/arity/attributes) and schedules
 * the clone right before the reload point instead of emitting a reload. */
448 static ir_node *do_remat(spill_env_t *senv, ir_node *spilled, ir_node *reloader) {
450 ir_node *bl = (is_Block(reloader)) ? reloader : get_nodes_block(reloader);
452 /* recompute the value */
453 res = new_ir_node(get_irn_dbg_info(spilled), senv->chordal_env->irg, bl,
455 get_irn_mode(spilled),
456 get_irn_arity(spilled),
/* +1 skips the block entry of the in-array (firm convention) */
457 get_irn_in(spilled) + 1);
458 copy_node_attr(spilled, res);
460 DBG((senv->dbg, LEVEL_1, "Insert remat %+F before reloader %+F\n", res, reloader));
462 /* insert in schedule */
463 if (is_Block(reloader)) {
/* remat at block end: place after the last non-control-flow node */
464 ir_node *insert = sched_skip(reloader, 0, sched_skip_cf_predicator, (void *) senv->chordal_env->birg->main_env->arch_env);
465 sched_add_after(insert, res);
467 sched_add_before(reloader, res);
/* Places a Copy node in each predecessor block of a spilled phi so every
 * phi input gets its own value (and therefore its own spill slot).
 * NOTE(review): the declaration below lacks a return type (implicit int,
 * invalid since C99) — it should be `static void place_copies_for_phi`;
 * fix in the full file. */
473 static place_copies_for_phi(spill_env_t *env, ir_node* node) {
476 assert(is_Phi(node));
478 /* We have to place copy nodes in the predecessor blocks to temporarily
479 * produce new values that get separate spill slots
481 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
482 ir_node *pred_block, *arg, *copy, *insert_point;
484 /* Don't do anything for looping edges (there's no need
485 * and placing copies here breaks stuff as it suddenly
486 * generates new living values through the whole loop)
488 arg = get_irn_n(node, i);
/* create the Copy in the predecessor block of input i */
492 pred_block = get_Block_cfgpred_block(get_nodes_block(node), i);
493 copy = be_new_Copy(env->cls, get_irn_irg(arg), pred_block, arg);
/* remember the copy so remove_copies can undo this later */
495 ARR_APP1(ir_node*, env->copies, copy);
/* schedule the copy after the last use/def of arg in pred_block */
496 insert_point = find_last_use_def(env, pred_block, arg);
497 sched_add_before(insert_point, copy);
/* the phi now reads the copy instead of the original value */
499 set_irn_n(node, i, copy);
/* Runs place_copies_for_phi on every registered mem-phi. */
503 void be_place_copies(spill_env_t *env) {
506 foreach_pset(env->mem_phis, node) {
507 place_copies_for_phi(env, node);
/* Marks a Phi for spilling to memory (a "mem-phi"): registers it in the
 * mem_phis set and invalidates any spill already created for it, so the
 * phi spill path (spill_phi) is taken later. */
511 void be_spill_phi(spill_env_t *env, ir_node *node) {
512 spill_ctx_t *spill_ctx;
514 assert(is_Phi(node));
516 pset_insert_ptr(env->mem_phis, node);
518 // remove spill context for this phi (if there was one)
519 spill_ctx = be_get_spill_ctx(env->spill_ctxs, node, node);
520 if(spill_ctx != NULL) {
521 spill_ctx->spill = NULL;
/* Main driver: materializes all recorded spill/reload decisions. For each
 * spilled value it either rematerializes or creates Spill/Reload nodes,
 * then reconstructs SSA over the new values. Finally the spill set is
 * reset so the environment can be reused. */
525 void be_insert_spills_reloads(spill_env_t *env) {
526 const arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
531 // Matze: This should be pointless as beladies fix_block_borders
532 // should result in the same
533 DBG((env->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
534 foreach_pset(env->mem_phis, node) {
537 assert(is_Phi(node));
539 /* Add reloads for mem_phis */
540 /* BETTER: These reloads (1) should only be inserted, if they are really needed */
541 DBG((env->dbg, LEVEL_1, " Mem-phi %+F\n", node));
/* every non-mem-phi Phi using this mem-phi needs the value reloaded on
 * the corresponding control-flow edge */
542 foreach_out_edge(node, e) {
543 ir_node *user = e->src;
544 if (is_Phi(user) && !pset_find_ptr(env->mem_phis, user)) {
545 ir_node *use_bl = get_nodes_block(user);
546 DBG((env->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
547 be_add_reload_on_edge(env, node, use_bl, e->pos); /* (1) */
553 /* process each spilled node */
554 DBG((env->dbg, LEVEL_1, "Insert spills and reloads:\n"));
555 for(si = set_first(env->spills); si; si = set_next(env->spills)) {
557 ir_mode *mode = get_irn_mode(si->spilled_node);
/* collects the new values (remats/reloads) for SSA reconstruction */
558 pset *values = pset_new_ptr(16);
560 /* go through all reloads for this spill */
561 for(rld = si->reloaders; rld; rld = rld->next) {
/* prefer rematerialization over a spill+reload pair when legal */
564 if (check_remat_conditions(env, si->spilled_node, rld->reloader)) {
565 new_val = do_remat(env, si->spilled_node, rld->reloader);
567 /* the spill for this reloader */
568 ir_node *spill = be_spill_node(env, si->spilled_node);
571 new_val = be_reload(arch_env, env->cls, rld->reloader, mode, spill);
574 DBG((env->dbg, LEVEL_1, " %+F of %+F before %+F\n", new_val, si->spilled_node, rld->reloader));
575 pset_insert_ptr(values, new_val);
578 /* introduce copies, rewire the uses */
579 assert(pset_count(values) > 0 && "???");
580 pset_insert_ptr(values, si->spilled_node);
/* SSA reconstruction over original value + all remats/reloads,
 * ignoring the mem-phis */
581 be_ssa_constr_set_ignore(env->chordal_env->dom_front, env->chordal_env->lv, values, env->mem_phis);
588 // reloads are placed now, but we might reuse the spill environment for further spilling decisions
589 del_set(env->spills);
590 env->spills = new_set(cmp_spillinfo, 1024);
/* Records that to_spill must be available (reloaded) right before `before`.
 * Creates/reuses the spill_info for the value and prepends a reloader entry
 * (allocated on the environment's obstack) to its list. */
593 void be_add_reload(spill_env_t *env, ir_node *to_spill, ir_node *before) {
594 spill_info_t templ, *res;
597 assert(sched_is_scheduled(before));
598 assert(arch_irn_consider_in_reg_alloc(env->chordal_env->birg->main_env->arch_env, env->cls, to_spill));
/* set_insert returns the existing info if the value was seen before */
600 templ.spilled_node = to_spill;
601 templ.reloaders = NULL;
602 res = set_insert(env->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
/* push the new reload point onto the front of the list */
604 rel = obstack_alloc(&env->obst, sizeof(rel[0]));
605 rel->reloader = before;
606 rel->next = res->reloaders;
607 res->reloaders = rel;
/* keep liveness information up to date */
609 be_liveness_add_missing(env->chordal_env->lv);
/* Like be_add_reload, but the reload is requested on the control-flow edge
 * (block, pos): for single-predecessor blocks it is placed at the block
 * start, otherwise at the end of the predecessor block before its jump. */
612 void be_add_reload_on_edge(spill_env_t *env, ir_node *to_spill, ir_node *block, int pos) {
613 ir_node *predblock, *last;
615 /* simply add the reload to the beginning of the block if we only have 1 predecessor
616 * (we don't need to check for phis as there can't be any in a block with only 1 pred)
618 if(get_Block_n_cfgpreds(block) == 1) {
619 assert(!is_Phi(sched_first(block)));
620 be_add_reload(env, to_spill, sched_first(block));
624 /* We have to reload the value in pred-block */
625 predblock = get_Block_cfgpred_block(block, pos);
626 last = sched_last(predblock);
628 /* we might have projs and keepanys behind the jump... */
629 while(is_Proj(last) || be_is_Keep(last)) {
630 last = sched_prev(last);
631 assert(!sched_is_end(last));
633 assert(is_cfop(last));
635 // add the reload before the (cond-)jump
636 be_add_reload(env, to_spill, last);
639 /****************************************
641 SPILL SLOT MANAGEMENT AND OPTS
643 ****************************************/
/* One spill slot: a set of Spill nodes sharing the same stack location.
 * NOTE(review): some fields (members pset, size, align) are declared on
 * lines missing from this excerpt. */
645 typedef struct _spill_slot_t {
649 ir_mode *largest_mode; /* the mode of all members with largest size */
/* Walker/driver environment for spill-slot assignment. */
652 typedef struct _ss_env_t {
654 be_chordal_env_t *cenv;
655 pmap *slots; /* maps spill_contexts to spill_slots */
656 pmap *types; /* maps modes to types */
657 DEBUG_ONLY(firm_dbg_module_t *dbg;)
662 * Walker: compute the spill slots
/* Visits every node; for each be_Spill of the current register class it
 * assigns the spill to the slot of its spill context, creating the slot
 * on first sight. Spills sharing a context share a slot. */
664 static void compute_spill_slots_walker(ir_node *spill, void *env) {
665 ss_env_t *ssenv = env;
666 arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
670 const arch_register_class_t *cls;
/* only be_Spill nodes are of interest */
672 if (! be_is_Spill(spill))
675 cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
/* skip spills belonging to another register class */
677 if (cls != ssenv->cenv->cls)
680 /* check, if this spill is for a context already known */
681 ctx = be_get_Spill_context(spill);
682 entry = pmap_find(ssenv->slots, ctx);
685 ir_mode *largest_mode = arch_register_class_mode(cls);
687 /* this is a new spill context */
688 ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
689 ss->members = pset_new_ptr(8);
690 ss->largest_mode = largest_mode;
691 ss->size = get_mode_size_bytes(ss->largest_mode);
692 ss->align = arch_isa_get_reg_class_alignment(arch_env->isa, cls);
693 pmap_insert(ssenv->slots, ctx, ss);
695 /* values with the same spill_ctx must go into the same spill slot */
699 /* ugly mega assert :-) */
702 struct _arch_env_t *arch_env = ssenv->cenv->birg->main_env->arch_env;
703 const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, spill, be_pos_Spill_val);
704 int size = get_mode_size_bytes(arch_register_class_mode(cls));
705 assert((int) ss->size == size && "Different sizes for the same spill slot are not allowed.");
706 for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
707 /* use values_interfere here, because it uses the dominance check,
708 which does work for values in memory */
709 assert(!values_interfere(ssenv->cenv->lv, spill, irn) && "Spills for the same spill slot must not interfere!");
/* finally record the spill as a member of its slot */
715 pset_insert_ptr(ss->members, spill);
719 * qsort compare function, sort spill slots by size.
/* Descending order: larger slots first (ss2 - ss1). */
721 static int ss_sorter(const void *v1, const void *v2) {
722 const spill_slot_t **ss1 = (const spill_slot_t **)v1;
723 const spill_slot_t **ss2 = (const spill_slot_t **)v2;
724 return ((int) (*ss2)->size) - ((int) (*ss1)->size);
729 * This function should optimize the spill slots.
730 * - Coalescing of multiple slots
731 * - Ordering the slots
733 * Input slots are in @p ssenv->slots
734 * @p size The count of initial spill slots in @p ssenv->slots
735 * This also is the size of the preallocated array @p ass
737 * @return An array of spill slots @p ass in specific order
/* Greedy slot coalescing: two slots may share a stack location iff no pair
 * of their member spills interferes. NOTE(review): several lines of the
 * merge logic are missing from this excerpt; comments describe the visible
 * control flow only. */
739 static void optimize_slots(ss_env_t *ssenv, int size, spill_slot_t *ass[]) {
740 int i, o, used_slots;
/* flatten the pmap values into the preallocated array */
744 pmap_foreach(ssenv->slots, entr)
745 ass[i++] = entr->value;
747 /* Sort the array to minimize fragmentation and cache footprint.
748 Large slots come first */
749 qsort(ass, size, sizeof(ass[0]), ss_sorter);
751 /* For each spill slot:
752 - assign a new offset to this slot
753 - xor find another slot to coalesce with */
755 for (i=0; i<size; ++i) {
756 /* for each spill slot */
760 DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
761 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
762 DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));
/* try to find an already-assigned slot that ass[i] can merge into */
765 for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
766 /* check inter-slot-pairs for interference */
768 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
769 for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
770 if(values_interfere(ssenv->cenv->lv, n1, n2)) {
/* abandon both iterations before jumping out of the psets */
771 pset_break(ass[i]->members);
772 pset_break(ass[o]->members);
773 DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
774 goto interf_detected;
777 /* if we are here, there is no interference between ass[i] and ass[o] */
780 interf_detected: /*nothing*/ ;
783 /* now the members of ass[i] join the members of ass[tgt_slot] */
785 /* do we need a new slot? */
786 if (tgt_slot == -1) {
787 tgt_slot = used_slots;
/* reuse slot i itself as the new target: take over its size and give
 * it a fresh member set */
792 ass[tgt_slot]->size = ass[i]->size;
793 del_pset(ass[tgt_slot]->members);
794 ass[tgt_slot]->members = pset_new_ptr(8);
798 /* copy the members to the target pset */
799 /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
801 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
802 pset_insert_ptr(ass[tgt_slot]->members, n1);
/* Alignment (bytes) of the whole spill area inside the stack frame. */
806 #define ALIGN_SPILL_AREA 16
/* Convenience iterator over a pset (local helper macro). */
807 #define pset_foreach(pset, elm) for(elm=pset_first(pset); elm; elm=pset_next(pset))
810 * Returns a spill type for a mode. Keep them in a map to reduce
811 * the number of types.
813 * @param types a map containing all created types
814 * @param ss the spill slot
816 * Note that the created types are identical for every mode.
817 * This rule might break if two different register classes return the same
818 * mode but different alignments.
820 static ir_type *get_spill_type(pmap *types, spill_slot_t *ss) {
/* look up a cached type for this mode first */
821 pmap_entry *e = pmap_find(types, ss->largest_mode);
/* not cached: create a primitive type named after the mode */
826 snprintf(buf, sizeof(buf), "spill_slot_type_%s", get_mode_name(ss->largest_mode));
827 buf[sizeof(buf) - 1] = '\0';
828 res = new_type_primitive(new_id_from_str(buf), ss->largest_mode);
829 set_type_alignment_bytes(res, ss->align);
830 pmap_insert(types, ss->largest_mode, res);
/* cached type must agree with the slot's alignment (see note above) */
833 assert(get_type_alignment_bytes(res) == (int)ss->align);
840 * Create spill slot entities on the frame type.
842 * @param ssenv the spill environment
843 * @param n number of spill slots
844 * @param ss array of spill slots
846 static void assign_entities(ss_env_t *ssenv, int n_slots, spill_slot_t *ss[]) {
847 int i, offset, frame_align;
850 /* do not align the frame if no spill slots are needed */
854 frame = get_irg_frame_type(ssenv->cenv->irg);
856 /* aligning by increasing frame size */
857 offset = get_type_size_bytes(frame);
858 offset = round_up2(offset, ALIGN_SPILL_AREA);
860 /* create entities and assign offsets according to size and alignment*/
861 for (i = 0; i < n_slots; ++i) {
/* entity name "spill_slot_<i>" on the frame type */
868 snprintf(buf, sizeof(buf), "spill_slot_%d", i);
869 buf[sizeof(buf) - 1] = '\0';
870 name = new_id_from_str(buf);
872 spill_ent = new_entity(frame, name, get_spill_type(ssenv->types, ss[i]));
/* align this slot's offset, then advance past it */
875 offset = round_up2(offset, ss[i]->align);
877 set_entity_offset_bytes(spill_ent, offset);
878 /* next possible offset */
879 offset += round_up2(ss[i]->size, ss[i]->align);
/* every member spill of the slot gets the same frame entity */
881 pset_foreach(ss[i]->members, irn)
882 be_set_Spill_entity(irn, spill_ent);
886 /* set final size of stack frame */
887 frame_align = get_type_alignment_bytes(frame);
888 set_type_size_bytes(frame, round_up2(offset, frame_align));
/* Entry point of spill slot assignment: collect slots per spill context,
 * coalesce them, assign frame entities/offsets, then clean up. */
891 void be_compute_spill_offsets(be_chordal_env_t *cenv) {
897 obstack_init(&ssenv.ob);
899 ssenv.slots = pmap_create();
900 ssenv.types = pmap_create();
901 FIRM_DBG_REGISTER(ssenv.dbg, "firm.be.spillslots");
903 /* Get initial spill slots */
904 irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
906 /* Build an empty array for optimized spill slots */
907 ss_size = pmap_count(ssenv.slots);
908 ss = obstack_alloc(&ssenv.ob, ss_size * sizeof(*ss));
909 optimize_slots(&ssenv, ss_size, ss);
911 /* Integrate slots into the stack frame entity */
912 assign_entities(&ssenv, ss_size, ss);
/* free all member psets, the maps and the obstack */
915 pmap_foreach(ssenv.slots, pme)
916 del_pset(((spill_slot_t *)pme->value)->members);
917 pmap_destroy(ssenv.slots);
918 pmap_destroy(ssenv.types);
919 obstack_free(&ssenv.ob, NULL);
/* propagate the spill entities to the matching Reload nodes */
921 be_copy_entities_to_reloads(cenv->irg);