 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Spillslot coalescer.
23 * @author Matthias Braun
39 #include "unionfind.h"
44 #include "bespillslots.h"
45 #include "bechordal_t.h"
46 #include "bejavacoal.h"
47 #include "bestatevent.h"
48 #include "bespilloptions.h"
50 #include "beintlive_t.h"
/* Debug-mask bits for the "firm.be.spillslots" debug module (see be_init_spillslots). */
#define DBG_COALESCING 1
#define DBG_INTERFERENCES 2

/* module debug handle; only present in debug builds */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Per-spilled-value record kept in env->spills (keyed by the spill node, see cmp_spill). */
typedef struct _spill_t {
	const ir_mode *mode;     /**< mode of the spilled value */
	int alignment;           /**< alignment for the spilled value */
	int spillslot;           /**< index into spillslot_unionfind structure */
/* Edge between two spillslots that would profit from sharing one stack entity
 * (created between a mem-phi's slot and its arguments' slots, see collect_memphi). */
typedef struct _affinity_edge_t {
/* Environment of the frame-entity coalescer (opaque be_fec_env_t). */
struct _be_fec_env_t {
	const arch_env_t *arch_env;        /* backend architecture environment */
	affinity_edge_t **affinity_edges;  /* flexible array (ARR_APP1) of affinity edges */
81 /** Compare 2 affinity edges (used in quicksort) */
82 static int cmp_affinity(const void *d1, const void *d2)
84 const affinity_edge_t * const *e1 = d1;
85 const affinity_edge_t * const *e2 = d2;
87 /* sort in descending order */
88 return (*e1)->affinity < (*e2)->affinity ? 1 : -1;
91 static int cmp_spill(const void* d1, const void* d2, size_t size)
93 const spill_t* s1 = d1;
94 const spill_t* s2 = d2;
97 return s1->spill != s2->spill;
/* Look up the spill_t record for @p node in env->spills; per set_find
 * semantics the result is NULL when no record exists. */
static spill_t *get_spill(be_fec_env_t *env, ir_node *node)
	int hash = hash_irn(node);
	/* lookup is keyed by the node pointer (see cmp_spill) */
	res = set_find(env->spills, &spill, sizeof(spill), hash);
/* Return the memory (mode_M) operand of @p node; scans the operands
 * back to front and returns the first one with mode_M. */
static INLINE ir_node *get_memory_edge(const ir_node *node)
	arity = get_irn_arity(node);
	for(i = arity - 1; i >= 0; --i) {
		ir_node *arg = get_irn_n(node, i);
		if(get_irn_mode(arg) == mode_M)
/* Record @p node as a spilled value with the given mode/alignment; creates a
 * new spill_t with a fresh spillslot id if the node is not in env->spills yet,
 * otherwise asserts that mode/alignment are consistent with the earlier entry. */
static spill_t *collect_spill(be_fec_env_t *env, ir_node *node,
                              const ir_mode *mode, int align)
	/* beware: although untypical there might be store nodes WITH Proj's,
	   vfisttp in ia32 for instance */
	node = skip_Proj(node);
	hash = hash_irn(node);

	/* insert into set of spills if not already there */
	res = set_find(env->spills, &spill, sizeof(spill), hash);
	/* fresh entry: the next free slot id equals the current set size */
	spill.spillslot = set_count(env->spills);
	spill.alignment = align;
	res = set_insert(env->spills, &spill, sizeof(spill), hash);
	/* already known: the requested mode/alignment must not conflict */
	assert(res->mode == mode);
	assert(res->alignment == align);
/* Record a memory Phi: give it its own spillslot, recurse into its arguments
 * (spills or further mem-phis) and add an affinity edge between the phi's slot
 * and each argument's slot, weighted by the argument block's execution frequency. */
static spill_t *collect_memphi(be_fec_env_t *env, ir_node *node,
                               const ir_mode *mode, int align)
	int hash = hash_irn(node);
	const ir_exec_freq *exec_freq = be_get_birg_exec_freq(env->birg);

	assert(is_Phi(node));

	res = set_find(env->spills, &spill, sizeof(spill), hash);
	/* already visited: just check consistency (terminates the recursion) */
	assert(res->mode == mode);
	assert(res->alignment == align);

	/* fresh entry: allocate the next free slot id */
	spill.spillslot = set_count(env->spills);
	spill.alignment = align;
	res = set_insert(env->spills, &spill, sizeof(spill), hash);

	/* collect attached spills and mem-phis */
	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		affinity_edge_t *affinty_edge;
		ir_node *arg = get_irn_n(node, i);
		/* mem-phi argument -> recurse; plain spill -> record directly */
		arg_spill = collect_memphi(env, arg, mode, align);
		arg_spill = collect_spill(env, arg, mode, align);

		/* add an affinity edge */
		affinty_edge = obstack_alloc(&env->obst, sizeof(affinty_edge[0]));
		affinty_edge->affinity = get_block_execfreq(exec_freq, get_nodes_block(arg));
		affinty_edge->slot1 = res->spillslot;
		affinty_edge->slot2 = arg_spill->spillslot;
		ARR_APP1(affinity_edge_t*, env->affinity_edges, affinty_edge);
/* Public entry point: @p node (a reload-like node) needs a frame entity for
 * the value on its memory edge.  Collects the spill (or the whole mem-phi
 * tree) and remembers @p node so an entity can be assigned to it later. */
void be_node_needs_frame_entity(be_fec_env_t *env, ir_node *node,
                                const ir_mode *mode, int align)
	ir_node *spillnode = get_memory_edge(node);

	assert(spillnode != NULL);

	/* walk upwards and collect all phis and spills on this way */
	if (is_Phi(spillnode)) {
		spill = collect_memphi(env, spillnode, mode, align);
	spill = collect_spill(env, spillnode, mode, align);

	/* remember the reload so assign_spillslots can set its entity */
	ARR_APP1(ir_node *, env->reloads, node);
/* Merge spillslot s2 into s1: union in the union-find structure and fold
 * s2's interference bitset into s1's.  Returns the result of uf_union()
 * (used by the caller to detect which node became the representative). */
static int merge_interferences(be_fec_env_t *env, bitset_t** interferences,
                               int* spillslot_unionfind, int s1, int s2)
	/* merge spillslots and interferences */
	res = uf_union(spillslot_unionfind, s1, s2);
	/* we assume that we always merge s2 to s1 so swap s1, s2 if necessary */

	bitset_or(interferences[s1], interferences[s2]);

	/* update other interferences: every slot that interfered with s2
	 * now interferes with s1 as well */
	spillcount = set_count(env->spills);
	for(i = 0; i < spillcount; ++i) {
		bitset_t *intfs = interferences[i];
		if(bitset_is_set(intfs, s2))
			bitset_set(intfs, s1);
/* Liveness/dominance based interference test for two non-Sync values.
 * Two values can only interfere if one dominates the other; then a
 * interferes with b if a is live at b's definition or a has a user in
 * b's block that is strictly dominated by b. */
static int my_values_interfere2(be_irg_t *birg, const ir_node *a,
	be_lv_t *lv = be_get_birg_liveness(birg);

	int a2b = _value_dominates(a, b);
	int b2a = _value_dominates(b, a);

	/* If there is no dominance relation, they do not interfere. */
	if((a2b | b2a) > 0) {
		const ir_edge_t *edge;

		 * Adjust a and b so that a dominates b if
		 * a dominates b or vice versa.
		const ir_node *t = a;

		bb = get_nodes_block(b);

		 * If a is live end in b's block it is
		 * live at b's definition (a dominates b)
		if(be_is_live_end(lv, bb, a))

		 * Look at all usages of a.
		 * If there's one usage of a in the block of b, then
		 * we check, if this use is dominated by b, if that's true
		 * a and b interfere. Note that b must strictly dominate the user,
		 * since if b is the last user in the block, b and a do not
		 * Uses of a not in b's block can be disobeyed, because the
		 * check for a being live at the end of b's block is already
		foreach_out_edge(a, edge) {
			const ir_node *user = get_edge_src_irn(edge);
			const ir_edge_t *edge2;
			/* users that are Syncs are looked through: check the Sync's users */
			foreach_out_edge(user, edge2) {
				const ir_node *user2 = get_edge_src_irn(edge2);
				assert(!is_Sync(user2));
				if(get_nodes_block(user2) == bb && !is_Phi(user2) &&
						_value_strictly_dominates(b, user2))
		if(get_nodes_block(user) == bb && !is_Phi(user) &&
				_value_strictly_dominates(b, user))
 * Same as values_interfere but with special handling for Syncs:
 * a Sync interferes iff one of its operands interferes.
 */
static int my_values_interfere(be_irg_t *birg, ir_node *a, ir_node *b)
	int i, arity = get_irn_arity(a);
	for(i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(a, i);
		/* recurse: an operand of a Sync may itself be a Sync */
		if(my_values_interfere(birg, in, b))
	} else if(is_Sync(b)) {
		int i, arity = get_irn_arity(b);
		for(i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(b, i);
			/* a is not a sync, so no need for my_values_interfere */
			if(my_values_interfere2(birg, a, in))

	/* neither a nor b is a Sync: plain pairwise test */
	return my_values_interfere2(birg, a, b);
 * A greedy coalescing algorithm for spillslots:
 * 1. Sort the list of affinity edges
 * 2. Try to merge slots with affinity edges (most expensive slots first)
 * 3. Try to merge everything else that is possible
 */
static void do_greedy_coalescing(be_fec_env_t *env)
	int affinity_edge_count;
	bitset_t **interferences;
	int* spillslot_unionfind;

	spillcount = set_count(env->spills);

	DBG((dbg, DBG_COALESCING, "Coalescing %d spillslots\n", spillcount));

	/* NOTE(review): stack allocation here grows with the number of spills
	 * (and the bitsets below make it O(spillcount^2) overall) -- for very
	 * large spill counts heap/obstack allocation would be safer. */
	interferences = alloca(spillcount * sizeof(interferences[0]));
	spillslot_unionfind = alloca(spillcount * sizeof(spillslot_unionfind[0]));
	spilllist = alloca(spillcount * sizeof(spilllist[0]));

	uf_init(spillslot_unionfind, 0, spillcount);

	memset(spilllist, 0, spillcount * sizeof(spilllist[0]));

	/* build a dense slot-id -> spill_t index */
	for(spill = set_first(env->spills), i = 0; spill != NULL;
			spill = set_next(env->spills), ++i) {
		assert(spill->spillslot < spillcount);
		spilllist[spill->spillslot] = spill;

	for(i = 0; i < spillcount; ++i) {
		interferences[i] = bitset_alloca(spillcount);

	/* construct interferences */
	for (i = 0; i < spillcount; ++i) {
		ir_node *spill1 = spilllist[i]->spill;

		if (is_NoMem(spill1))

		for(i2 = i+1; i2 < spillcount; ++i2) {
			ir_node *spill2 = spilllist[i2]->spill;

			if (is_NoMem(spill2))

			if (my_values_interfere(env->birg, spill1, spill2)) {
				DBG((dbg, DBG_INTERFERENCES,
				     "Slot %d and %d interfere\n", i, i2));

				/* interference is stored symmetrically */
				bitset_set(interferences[i], i2);
				bitset_set(interferences[i2], i);

	/* sort affinity edges */
	affinity_edge_count = ARR_LEN(env->affinity_edges);
	qsort(env->affinity_edges, affinity_edge_count,
	      sizeof(env->affinity_edges[0]), cmp_affinity);

	/*dump_interference_graph(env, interferences, "before"); */

	/* try to merge affine nodes */
	for(i = 0; i < affinity_edge_count; ++i) {
		const affinity_edge_t *edge = env->affinity_edges[i];
		/* work on the current union-find representatives */
		int s1 = uf_find(spillslot_unionfind, edge->slot1);
		int s2 = uf_find(spillslot_unionfind, edge->slot2);

		/* test if values interfere */
		if (bitset_is_set(interferences[s1], s2)) {
			assert(bitset_is_set(interferences[s2], s1));

		DBG((dbg, DBG_COALESCING,
		     "Merging %d and %d because of affinity edge\n", s1, s2));

		merge_interferences(env, interferences, spillslot_unionfind, s1, s2);

	/* try to merge as much remaining spillslots as possible */
	for(i = 0; i < spillcount; ++i) {
		int s1 = uf_find(spillslot_unionfind, i);

		for(i2 = i+1; i2 < spillcount; ++i2) {
			int s2 = uf_find(spillslot_unionfind, i2);

			/* test if values interfere
			 * we have to test n1-n2 and n2-n1, because only 1 side gets updated
			 * when node merging occurs
			 */
			if(bitset_is_set(interferences[s1], s2)) {
				assert(bitset_is_set(interferences[s2], s1));

			DBG((dbg, DBG_COALESCING,
			     "Merging %d and %d because it is possible\n", s1, s2));

			if(merge_interferences(env, interferences, spillslot_unionfind, s1, s2) != 0) {
				/* we can break the loop here, because s2 is the new supernode
				 * now and we'll test s2 again later anyway */

	/* assign spillslots to spills */
	for(i = 0; i < spillcount; ++i) {
		spill_t *spill = spilllist[i];

		spill->spillslot = uf_find(spillslot_unionfind, i);

	/*dump_interference_graph(env, interferences, "after");*/
/* A coalesced spillslot: accumulated size/alignment and (eventually) the
 * stack frame entity created for it (see create_stack_entity). */
typedef struct _spill_slot_t {
/* One element of a memory permutation: copy from entity 'in' to entity 'out'. */
typedef struct _memperm_entry_t {
	struct _memperm_entry_t *next; /* next entry in the memperm's singly linked list */
/* All memory-permutation entries required in one block (keyed by block, see cmp_memperm). */
typedef struct _memperm_t {
	memperm_entry_t *entries; /* head of the entry list */
495 static int cmp_memperm(const void* d1, const void* d2, size_t size)
497 const memperm_t* e1 = d1;
498 const memperm_t* e2 = d2;
501 return e1->block != e2->block;
/* Get (or lazily create) the memperm_t record for @p block. */
static memperm_t *get_memperm(be_fec_env_t *env, ir_node *block)
	memperm_t entry, *res;

	hash = hash_irn(block);

	res = set_find(env->memperms, &entry, sizeof(entry), hash);
	/* not found: insert a fresh, empty record for this block */
	entry.entrycount = 0;
	entry.entries = NULL;
	res = set_insert(env->memperms, &entry, sizeof(entry), hash);
/* Allocate a frame-type entity for @p slot with the slot's size/alignment. */
static ir_entity* create_stack_entity(be_fec_env_t *env, spill_slot_t *slot)
	ir_graph *irg = be_get_birg_irg(env->birg);
	ir_type *frame = get_irg_frame_type(irg);
	ir_entity *res = frame_alloc_area(frame, slot->size, slot->align, 0);

	/* adjust size of the entity type... */
	ir_type *enttype = get_entity_type(res);
	set_type_size_bytes(enttype, slot->size);
539 * Enlarges a spillslot (if necessary) so that it can carry a value of size
540 * @p othersize and alignment @p otheralign.
542 static void enlarge_spillslot(spill_slot_t *slot, int otheralign, int othersize)
544 if(othersize > slot->size) {
545 slot->size = othersize;
547 if(otheralign > slot->align) {
548 if(otheralign % slot->align != 0)
549 slot->align *= otheralign;
551 slot->align = otheralign;
552 } else if(slot->align % otheralign != 0) {
553 slot->align *= otheralign;
/* Set @p entity as the frame entity of @p node; recurses over the node's
 * operands (handles the case where the "spill" is a combined node whose
 * operands are the real spills -- truncated view, TODO confirm guard). */
static void assign_spill_entity(const arch_env_t *arch_env, ir_node *node,
	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		assign_spill_entity(arch_env, in, entity);

	/* each spill must get its entity exactly once */
	assert(arch_get_frame_entity(arch_env, node) == NULL);
	arch_set_frame_entity(arch_env, node, entity);
 * Create stack entities for the spillslots and assign them to the spill and
 */
static void assign_spillslots(be_fec_env_t *env)
	const arch_env_t *arch_env = env->arch_env;
	spill_slot_t* spillslots;

	spillcount = set_count(env->spills);
	/* NOTE(review): stack allocation sized by spillcount -- see do_greedy_coalescing */
	spillslots = alloca(spillcount * sizeof(spillslots[0]));

	memset(spillslots, 0, spillcount * sizeof(spillslots[0]));

	/* construct spillslots */
	for(spill = set_first(env->spills); spill != NULL;
			spill = set_next(env->spills)) {
		int slotid = spill->spillslot;
		const ir_mode *mode = spill->mode;
		spill_slot_t *slot = & (spillslots[slotid]);
		int size = get_mode_size_bytes(mode);
		int align = spill->alignment;

		/* first value for this slot initializes it; later values may enlarge it */
		if(slot->align == 0 && slot->size == 0) {
		enlarge_spillslot(slot, align, size);

	/* assign entities to spills/mem-phis; emit memperm entries where a
	 * phi argument lives in a different slot than the phi itself */
	for(spill = set_first(env->spills); spill != NULL;
			spill = set_next(env->spills)) {
		ir_node *node = spill->spill;
		int slotid = spill->spillslot;

		slot = &spillslots[slotid];
		if(slot->entity == NULL) {
			create_stack_entity(env, slot);

		ir_node *block = get_nodes_block(node);

		/* should be a PhiM */
		assert(is_Phi(node));

		for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
			ir_node *arg = get_irn_n(node, i);
			ir_node *predblock = get_Block_cfgpred_block(block, i);

			argspill = get_spill(env, arg);
			assert(argspill != NULL);

			argslotid = argspill->spillslot;
			if(slotid != argslotid) {
				/* argument ended up in a different slot: schedule a
				 * memory copy in the predecessor block */
				memperm_entry_t *entry;
				spill_slot_t *argslot = &spillslots[argslotid];
				if(argslot->entity == NULL) {
					create_stack_entity(env, argslot);

				memperm = get_memperm(env, predblock);

				entry = obstack_alloc(&env->obst, sizeof(entry[0]));
				entry->in = argslot->entity;
				entry->out = slot->entity;
				/* prepend to the block's entry list */
				entry->next = memperm->entries;
				memperm->entrycount++;
				memperm->entries = entry;

		assign_spill_entity(arch_env, node, slot->entity);

	/* finally, point every collected reload at its spill's entity */
	for(i = 0; i < ARR_LEN(env->reloads); ++i) {
		ir_node *reload = env->reloads[i];
		ir_node *spillnode = skip_Proj(get_memory_edge(reload));
		spill_t *spill = get_spill(env, spillnode);
		const spill_slot_t *slot = & spillslots[spill->spillslot];

		assert(slot->entity != NULL);

		arch_set_frame_entity(arch_env, reload, slot->entity);
 * Returns the last node in a block which is no control flow changing node
 */
static ir_node *get_end_of_block_insertion_point(ir_node* block)
	ir_node* ins = sched_last(block);
	/* skip the X-Projs of the block's control-flow node */
	while(is_Proj(ins) && get_irn_mode(ins) == mode_X) {
		ins = sched_prev(ins);
	ir_node *prev = sched_prev(ins);
/* Materialize the collected memperm records: build one be_MemPerm node per
 * block, schedule it before the block's control flow, and reroute each
 * affected phi input through a Proj of the MemPerm. */
static void create_memperms(be_fec_env_t *env)
	const arch_env_t *arch_env = env->arch_env;
	ir_graph *irg = be_get_birg_irg(env->birg);

	for(memperm = set_first(env->memperms); memperm != NULL; memperm = set_next(env->memperms)) {
		memperm_entry_t *entry;
		ir_node** nodes = alloca(memperm->entrycount * sizeof(nodes[0]));
		ir_node* mempermnode;

		assert(memperm->entrycount > 0);

		/* gather the data operands for the MemPerm node */
		for(entry = memperm->entries, i = 0; entry != NULL; entry = entry->next, ++i) {
			ir_node* arg = get_irn_n(entry->node, entry->pos);

		mempermnode = be_new_MemPerm(arch_env, irg, memperm->block,
		                             memperm->entrycount, nodes);

		/* insert node into schedule */
		blockend = get_end_of_block_insertion_point(memperm->block);
		sched_add_before(blockend, mempermnode);
		stat_ev_dbl("mem_perm", memperm->entrycount);

		/* NOTE(review): `i` is not re-initialized here although it indexes the
		 * MemPerm in/out entities; it looks like it should restart at 0 --
		 * verify against the full source (lines may be elided in this view). */
		for(entry = memperm->entries; entry != NULL; entry = entry->next, ++i) {
			ir_node* arg = get_irn_n(entry->node, entry->pos);

			be_set_MemPerm_in_entity(mempermnode, i, entry->in);
			be_set_MemPerm_out_entity(mempermnode, i, entry->out);
			set_irg_current_block(irg, memperm->block);
			proj = new_Proj(mempermnode, get_irn_mode(arg), i);
			/* phi now reads the permuted value */
			set_irn_n(entry->node, entry->pos, proj);
/* Count the number of distinct spillslots currently referenced by the spills
 * (slot ids may be shared after coalescing, hence the bitset). */
static int count_spillslots(const be_fec_env_t *env)
	const spill_t *spill;
	int spillcount = set_count(env->spills);
	bitset_t *counted = bitset_alloca(spillcount);

	for(spill = set_first(env->spills); spill != NULL;
			spill = set_next(env->spills)) {
		int spillslot = spill->spillslot;
		if(!bitset_is_set(counted, spillslot)) {
			bitset_set(counted, spillslot);
/* Allocate and initialize a frame-entity coalescer environment for @p birg.
 * Caller releases it with be_free_frame_entity_coalescer(). */
be_fec_env_t *be_new_frame_entity_coalescer(be_irg_t *birg)
	const arch_env_t *arch_env = &birg->main_env->arch_env;
	be_fec_env_t *env = xmalloc(sizeof(env[0]));

	/* interference tests need (checked) liveness information */
	be_liveness_assure_chk(be_assure_liveness(birg));

	obstack_init(&env->obst);
	env->arch_env = arch_env;
	env->spills = new_set(cmp_spill, 10);
	env->reloads = NEW_ARR_F(ir_node*, 0);
	env->affinity_edges = NEW_ARR_F(affinity_edge_t*, 0);
	env->memperms = new_set(cmp_memperm, 10);
/* Release all resources held by @p env (sets, flexible arrays, obstack).
 * NOTE(review): the env struct itself (xmalloc'd in
 * be_new_frame_entity_coalescer) is not freed in the lines visible here --
 * confirm a free(env) follows in the full source. */
void be_free_frame_entity_coalescer(be_fec_env_t *env)
	del_set(env->memperms);
	DEL_ARR_F(env->reloads);
	DEL_ARR_F(env->affinity_edges);
	del_set(env->spills);
	obstack_free(&env->obst, NULL);
/* Driver: optionally coalesce the collected spillslots, then create frame
 * entities, assign them, and materialize the required memperms. */
void be_assign_entities(be_fec_env_t *env)
	stat_ev_dbl("spillslots", set_count(env->spills));

	/* coalescing is switchable via the be_coalesce_spill_slots option */
	if(be_coalesce_spill_slots) {
		do_greedy_coalescing(env);

	stat_ev_dbl("spillslots_after_coalescing", count_spillslots(env));

	assign_spillslots(env);

	create_memperms(env);
 * This walker function searches for reloads and collects all the spills
 * and memphis attached to them.
 */
static void collect_spills_walker(ir_node *node, void *data)
	be_fec_env_t *env = data;
	const arch_env_t *arch_env = env->arch_env;
	const arch_register_class_t *cls;

	/* classify returns classification of the irn the proj is attached to */
	/* only reload nodes are of interest here */
	if (!arch_irn_class_is(arch_env, node, reload))

	mode = get_irn_mode(node);
	cls = arch_get_irn_reg_class(arch_env, node, -1);
	/* alignment requirement comes from the reload's register class */
	align = arch_isa_get_reg_class_alignment(arch_env_get_isa(arch_env), cls);

	be_node_needs_frame_entity(env, node, mode, align);
/* Convenience entry point: create an environment, collect all reloads in the
 * graph, assign coalesced frame entities, and clean up. */
void be_coalesce_spillslots(be_irg_t *birg)
	be_fec_env_t *env = be_new_frame_entity_coalescer(birg);

	/* collect reloads */
	irg_walk_graph(birg->irg, NULL, collect_spills_walker, env);

	be_assign_entities(env);

	be_free_frame_entity_coalescer(env);
/* Module constructor: register the debug module handle. */
void be_init_spillslots(void)
	FIRM_DBG_REGISTER(dbg, "firm.be.spillslots");

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillslots);