2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Spillslot coalescer.
23 * @author Matthias Braun
36 #include "unionfind.h"
42 #include "bespillslots.h"
43 #include "bechordal_t.h"
46 #include "beintlive_t.h"
49 #include "bespillutil.h"
51 #define DBG_COALESCING 1
52 #define DBG_INTERFERENCES 2
54 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
56 typedef struct spill_t {
58 const ir_mode *mode; /**< mode of the spilled value */
59 int alignment; /**< alignment for the spilled value */
63 typedef struct affinity_edge_t {
75 affinity_edge_t **affinity_edges;
77 set_frame_entity_func set_frame_entity;
78 bool at_begin; /**< frame entities should be allocate at
79 the beginning of the stackframe */
82 /** Compare 2 affinity edges (used in quicksort) */
83 static int cmp_affinity(const void *d1, const void *d2)
85 const affinity_edge_t * const *e1 = (const affinity_edge_t**)d1;
86 const affinity_edge_t * const *e2 = (const affinity_edge_t**)d2;
87 double aff1 = (*e1)->affinity;
88 double aff2 = (*e2)->affinity;
90 /* sort in descending order */
93 } else if (aff1 > aff2) {
96 int slot11 = (*e1)->slot1;
97 int slot21 = (*e2)->slot1;
98 if (slot11 < slot21) {
100 } else if (slot11 > slot21) {
103 int slot12 = (*e1)->slot2;
104 int slot22 = (*e2)->slot2;
105 return (slot12<slot22) - (slot12<slot22);
110 static spill_t *get_spill(be_fec_env_t *env, ir_node *node)
112 assert(rbitset_is_set(env->spills_set, get_irn_idx(node)));
113 return (spill_t*)get_irn_link(node);
116 static inline ir_node *get_memory_edge(const ir_node *node)
120 arity = get_irn_arity(node);
121 for (i = arity - 1; i >= 0; --i) {
122 ir_node *arg = get_irn_n(node, i);
123 if (get_irn_mode(arg) == mode_M)
130 static spill_t *collect_spill(be_fec_env_t *env, ir_node *node,
131 const ir_mode *mode, int align)
135 /* already in spill set? */
136 unsigned idx = get_irn_idx(node);
137 if (rbitset_is_set(env->spills_set, idx)) {
138 spill_t *spill = get_spill(env, node);
139 assert(spill->mode == mode);
140 assert(spill->alignment == align);
143 rbitset_set(env->spills_set, idx);
145 spill = OALLOC(&env->obst, spill_t);
146 /* insert into set of spills if not already there */
149 spill->alignment = align;
150 spill->spillslot = (int)ARR_LEN(env->spills);
151 ARR_APP1(spill_t*, env->spills, spill);
152 set_irn_link(node, spill);
153 DB((dbg, DBG_COALESCING, "Slot %d: %+F\n", spill->spillslot, node));
156 int arity = get_irn_arity(node);
158 for (i = 0; i < arity; ++i) {
159 affinity_edge_t *affinty_edge;
160 ir_node *arg = get_irn_n(node, i);
161 spill_t *arg_spill = collect_spill(env, arg, mode, align);
162 ir_node *block = get_nodes_block(arg);
164 /* add an affinity edge */
165 affinty_edge = OALLOC(&env->obst, affinity_edge_t);
166 affinty_edge->affinity = get_block_execfreq(block);
167 affinty_edge->slot1 = spill->spillslot;
168 affinty_edge->slot2 = arg_spill->spillslot;
169 ARR_APP1(affinity_edge_t*, env->affinity_edges, affinty_edge);
176 void be_node_needs_frame_entity(be_fec_env_t *env, ir_node *node,
177 const ir_mode *mode, int align)
179 ir_node *spillnode = get_memory_edge(node);
180 assert(spillnode != NULL);
182 /* walk upwards and collect all phis and spills on this way */
183 collect_spill(env, spillnode, mode, align);
185 ARR_APP1(ir_node *, env->reloads, node);
188 static int merge_interferences(be_fec_env_t *env, bitset_t** interferences,
189 int* spillslot_unionfind, int s1, int s2)
195 /* merge spillslots and interferences */
196 res = uf_union(spillslot_unionfind, s1, s2);
197 /* we assume that we always merge s2 to s1 so swap s1, s2 if necessary */
204 bitset_or(interferences[s1], interferences[s2]);
206 /* update other interferences */
207 spillcount = ARR_LEN(env->spills);
208 for (i = 0; i < spillcount; ++i) {
209 bitset_t *intfs = interferences[i];
210 if (bitset_is_set(intfs, s2))
211 bitset_set(intfs, s1);
217 static bool my_values_interfere2(ir_graph *const irg, ir_node const *a, ir_node const *b)
219 if (value_dominates(b, a)) {
220 /* Adjust a and b so, that a dominates b if
221 * a dominates b or vice versa. */
222 ir_node const *const t = a;
225 } else if (!value_dominates(a, b)) {
226 /* If there is no dominance relation, they do not interfere. */
230 ir_node *const bb = get_nodes_block(b);
232 /* If a is live end in b's block it is
233 * live at b's definition (a dominates b) */
234 be_lv_t *const lv = be_get_irg_liveness(irg);
235 if (be_is_live_end(lv, bb, a))
238 /* Look at all usages of a.
239 * If there's one usage of a in the block of b, then
240 * we check, if this use is dominated by b, if that's true
241 * a and b interfere. Note that b must strictly dominate the user,
242 * since if b is the last user of in the block, b and a do not
244 * Uses of a not in b's block can be disobeyed, because the
245 * check for a being live at the end of b's block is already
247 foreach_out_edge(a, edge) {
248 ir_node const *const user = get_edge_src_irn(edge);
250 foreach_out_edge(user, edge2) {
251 ir_node const *const user2 = get_edge_src_irn(edge2);
252 assert(!is_Sync(user2));
253 if (get_nodes_block(user2) == bb && !is_Phi(user2) &&
254 _value_strictly_dominates_intrablock(b, user2))
258 if (get_nodes_block(user) == bb && !is_Phi(user) &&
259 _value_strictly_dominates_intrablock(b, user))
268 * same as values_interfere but with special handling for Syncs
270 static int my_values_interfere(ir_graph *irg, ir_node *a, ir_node *b)
273 int i, arity = get_irn_arity(a);
274 for (i = 0; i < arity; ++i) {
275 ir_node *in = get_irn_n(a, i);
276 if (my_values_interfere(irg, in, b))
280 } else if (is_Sync(b)) {
281 int i, arity = get_irn_arity(b);
282 for (i = 0; i < arity; ++i) {
283 ir_node *in = get_irn_n(b, i);
284 /* a is not a sync, so no need for my_values_interfere */
285 if (my_values_interfere2(irg, a, in))
291 return my_values_interfere2(irg, a, b);
295 * A greedy coalescing algorithm for spillslots:
296 * 1. Sort the list of affinity edges
297 * 2. Try to merge slots with affinity edges (most expensive slots first)
298 * 3. Try to merge everything else that is possible
300 static void do_greedy_coalescing(be_fec_env_t *env)
302 spill_t **spills = env->spills;
303 size_t spillcount = ARR_LEN(spills);
305 size_t affinity_edge_count;
306 bitset_t **interferences;
307 int* spillslot_unionfind;
315 DB((dbg, DBG_COALESCING, "Coalescing %d spillslots\n", spillcount));
317 interferences = OALLOCN(&data, bitset_t*, spillcount);
318 spillslot_unionfind = OALLOCN(&data, int, spillcount);
320 uf_init(spillslot_unionfind, spillcount);
322 for (i = 0; i < spillcount; ++i) {
323 interferences[i] = bitset_obstack_alloc(&data, spillcount);
326 /* construct interferences */
327 for (i = 0; i < spillcount; ++i) {
329 ir_node *spill1 = spills[i]->spill;
330 if (is_NoMem(spill1))
333 for (i2 = i+1; i2 < spillcount; ++i2) {
334 ir_node *spill2 = spills[i2]->spill;
335 if (is_NoMem(spill2))
338 if (my_values_interfere(env->irg, spill1, spill2)) {
339 DB((dbg, DBG_INTERFERENCES,
340 "Slot %d and %d interfere\n", i, i2));
342 bitset_set(interferences[i], i2);
343 bitset_set(interferences[i2], i);
348 /* sort affinity edges */
349 affinity_edge_count = ARR_LEN(env->affinity_edges);
350 qsort(env->affinity_edges, affinity_edge_count,
351 sizeof(env->affinity_edges[0]), cmp_affinity);
353 /* try to merge affine nodes */
354 for (i = 0; i < affinity_edge_count; ++i) {
355 const affinity_edge_t *edge = env->affinity_edges[i];
356 int s1 = uf_find(spillslot_unionfind, edge->slot1);
357 int s2 = uf_find(spillslot_unionfind, edge->slot2);
359 /* test if values interfere */
360 if (bitset_is_set(interferences[s1], s2)) {
361 assert(bitset_is_set(interferences[s2], s1));
365 DB((dbg, DBG_COALESCING,
366 "Merging %d and %d because of affinity edge\n", s1, s2));
368 merge_interferences(env, interferences, spillslot_unionfind, s1, s2);
371 /* try to merge as much remaining spillslots as possible */
372 for (i = 0; i < spillcount; ++i) {
374 int s1 = uf_find(spillslot_unionfind, i);
378 for (i2 = i+1; i2 < spillcount; ++i2) {
379 int s2 = uf_find(spillslot_unionfind, i2);
383 /* test if values interfere
384 * we have to test n1-n2 and n2-n1, because only 1 side gets updated
385 * when node merging occurs
387 if (bitset_is_set(interferences[s1], s2)) {
388 assert(bitset_is_set(interferences[s2], s1));
392 DB((dbg, DBG_COALESCING,
393 "Merging %d and %d because it is possible\n", s1, s2));
395 if (merge_interferences(env, interferences, spillslot_unionfind, s1, s2) != 0) {
396 /* we can break the loop here, because s2 is the new supernode
397 * now and we'll test s2 again later anyway */
403 /* assign spillslots to spills */
404 for (i = 0; i < spillcount; ++i) {
405 spills[i]->spillslot = uf_find(spillslot_unionfind, i);
408 obstack_free(&data, 0);
411 typedef struct spill_slot_t {
417 typedef struct memperm_entry_t {
422 struct memperm_entry_t *next;
425 typedef struct memperm_t {
428 memperm_entry_t *entries;
431 static int cmp_memperm(const void* d1, const void* d2, size_t size)
433 const memperm_t* e1 = (const memperm_t*)d1;
434 const memperm_t* e2 = (const memperm_t*)d2;
437 return e1->block != e2->block;
440 static memperm_t *get_memperm(be_fec_env_t *env, ir_node *block)
442 memperm_t entry, *res;
446 hash = hash_irn(block);
448 res = set_find(memperm_t, env->memperms, &entry, sizeof(entry), hash);
451 entry.entrycount = 0;
452 entry.entries = NULL;
453 res = set_insert(memperm_t, env->memperms, &entry, sizeof(entry), hash);
459 static ir_entity* create_stack_entity(be_fec_env_t *env, spill_slot_t *slot)
461 ir_graph *irg = env->irg;
462 ir_type *frame = get_irg_frame_type(irg);
463 ir_entity *res = frame_alloc_area(frame, slot->size, slot->align,
471 * Enlarges a spillslot (if necessary) so that it can carry a value of size
472 * @p othersize and alignment @p otheralign.
474 static void enlarge_spillslot(spill_slot_t *slot, int otheralign, int othersize)
476 if (othersize > slot->size) {
477 slot->size = othersize;
479 if (otheralign > slot->align) {
480 if (otheralign % slot->align != 0)
481 slot->align *= otheralign;
483 slot->align = otheralign;
484 } else if (slot->align % otheralign != 0) {
485 slot->align *= otheralign;
489 static void assign_spill_entity(be_fec_env_t *env,
490 ir_node *node, ir_entity *entity)
497 arity = get_irn_arity(node);
498 for (i = 0; i < arity; ++i) {
499 ir_node *in = get_irn_n(node, i);
502 assign_spill_entity(env, in, entity);
507 /* beware: we might have Stores with Memory Proj's, ia32 fisttp for
509 node = skip_Proj(node);
510 assert(arch_get_frame_entity(node) == NULL);
511 env->set_frame_entity(node, entity);
515 * Create stack entities for the spillslots and assign them to the spill and
518 static void assign_spillslots(be_fec_env_t *env)
520 spill_t **spills = env->spills;
521 size_t spillcount = ARR_LEN(spills);
522 spill_slot_t *spillslots = ALLOCANZ(spill_slot_t, spillcount);
525 /* construct spillslots */
526 for (s = 0; s < spillcount; ++s) {
527 const spill_t *spill = spills[s];
528 int slotid = spill->spillslot;
529 const ir_mode *mode = spill->mode;
530 spill_slot_t *slot = & (spillslots[slotid]);
531 int size = get_mode_size_bytes(mode);
532 int align = spill->alignment;
534 if (slot->align == 0 && slot->size == 0) {
538 enlarge_spillslot(slot, align, size);
542 for (s = 0; s < spillcount; ++s) {
543 const spill_t *spill = spills[s];
544 ir_node *node = spill->spill;
545 int slotid = spill->spillslot;
546 spill_slot_t *slot = &spillslots[slotid];
548 if (slot->entity == NULL) {
549 create_stack_entity(env, slot);
553 int arity = get_irn_arity(node);
555 ir_node *block = get_nodes_block(node);
557 /* should be a PhiM */
558 assert(get_irn_mode(node) == mode_M);
560 for (i = 0; i < arity; ++i) {
561 ir_node *arg = get_irn_n(node, i);
562 ir_node *predblock = get_Block_cfgpred_block(block, i);
563 spill_t *argspill = get_spill(env, arg);
564 int argslotid = argspill->spillslot;
566 if (slotid != argslotid) {
568 memperm_entry_t *entry;
569 spill_slot_t *argslot = &spillslots[argslotid];
570 if (argslot->entity == NULL) {
571 create_stack_entity(env, argslot);
574 memperm = get_memperm(env, predblock);
576 entry = OALLOC(&env->obst, memperm_entry_t);
579 entry->in = argslot->entity;
580 entry->out = slot->entity;
581 entry->next = memperm->entries;
582 memperm->entrycount++;
583 memperm->entries = entry;
587 assign_spill_entity(env, node, slot->entity);
591 for (s = 0; s < ARR_LEN(env->reloads); ++s) {
592 ir_node *reload = env->reloads[s];
593 ir_node *spillnode = get_memory_edge(reload);
594 const spill_t *spill = get_spill(env, spillnode);
595 const spill_slot_t *slot = &spillslots[spill->spillslot];
597 assert(slot->entity != NULL);
599 env->set_frame_entity(reload, slot->entity);
603 static void create_memperms(be_fec_env_t *env)
605 foreach_set(env->memperms, memperm_t, memperm) {
606 ir_node **nodes = ALLOCAN(ir_node*, memperm->entrycount);
607 memperm_entry_t *entry;
608 ir_node *mempermnode;
611 assert(memperm->entrycount > 0);
613 for (entry = memperm->entries, i = 0; entry != NULL; entry = entry->next, ++i) {
614 ir_node* arg = get_irn_n(entry->node, entry->pos);
618 mempermnode = be_new_MemPerm(memperm->block, memperm->entrycount,
621 /* insert node into schedule */
622 ir_node *const blockend = be_get_end_of_block_insertion_point(memperm->block);
623 sched_add_before(blockend, mempermnode);
624 stat_ev_dbl("mem_perm", memperm->entrycount);
627 for (entry = memperm->entries; entry != NULL; entry = entry->next, ++i) {
629 ir_node* arg = get_irn_n(entry->node, entry->pos);
631 be_set_MemPerm_in_entity(mempermnode, i, entry->in);
632 be_set_MemPerm_out_entity(mempermnode, i, entry->out);
633 proj = new_r_Proj(mempermnode, get_irn_mode(arg), i);
635 set_irn_n(entry->node, entry->pos, proj);
640 static unsigned count_spillslots(const be_fec_env_t *env)
642 size_t spillcount = ARR_LEN(env->spills);
643 unsigned slotcount = 0;
646 unsigned *const counted = rbitset_alloca(spillcount);
647 for (s = 0; s < spillcount; ++s) {
648 spill_t *spill = env->spills[s];
649 int spillslot = spill->spillslot;
650 if (!rbitset_is_set(counted, spillslot)) {
652 rbitset_set(counted, spillslot);
659 be_fec_env_t *be_new_frame_entity_coalescer(ir_graph *irg)
661 be_fec_env_t *env = XMALLOCZ(be_fec_env_t);
663 be_assure_live_chk(irg);
665 obstack_init(&env->obst);
667 env->spills = NEW_ARR_F(spill_t*, 0);
668 env->spills_set = rbitset_malloc(get_irg_last_idx(irg));
669 env->reloads = NEW_ARR_F(ir_node*, 0);
670 env->affinity_edges = NEW_ARR_F(affinity_edge_t*, 0);
671 env->memperms = new_set(cmp_memperm, 10);
673 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
678 void be_free_frame_entity_coalescer(be_fec_env_t *env)
680 ir_free_resources(env->irg, IR_RESOURCE_IRN_LINK);
682 del_set(env->memperms);
683 DEL_ARR_F(env->reloads);
684 DEL_ARR_F(env->affinity_edges);
685 DEL_ARR_F(env->spills);
686 xfree(env->spills_set);
687 obstack_free(&env->obst, NULL);
692 void be_assign_entities(be_fec_env_t *env,
693 set_frame_entity_func set_frame_entity,
694 bool alloc_entities_at_begin)
696 env->set_frame_entity = set_frame_entity;
697 env->at_begin = alloc_entities_at_begin;
699 if (stat_ev_enabled) {
700 stat_ev_dbl("spillslots", ARR_LEN(env->spills));
703 if (be_coalesce_spill_slots) {
704 do_greedy_coalescing(env);
707 if (stat_ev_enabled) {
708 stat_ev_dbl("spillslots_after_coalescing", count_spillslots(env));
711 assign_spillslots(env);
713 create_memperms(env);
716 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_spillslots)
717 void be_init_spillslots(void)
719 FIRM_DBG_REGISTER(dbg, "firm.be.spillslots");