2 * Author: Daniel Grund, Sebastian Hack
4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
16 #include "iredges_t.h"
23 #include "bechordal_t.h"
25 typedef struct _reloader_t reloader_t;
26 typedef struct _spill_info_t spill_info_t;

/* Per-value spill bookkeeping: one entry per node that must be spilled,
   together with a singly linked list of all the places (reloaders) where
   the value has to be reloaded again. */
33 struct _spill_info_t {
34 ir_node *spilled_node;       /**< The value being spilled. */
35 reloader_t *reloaders;       /**< Head of the list of reload sites for this value. */

/* A spill context identifies a spill by the pair (spilled value, user),
   so that spills for the same value in different contexts can be kept
   apart, while the resulting spill node itself is cached in 'spill'. */
38 typedef struct _spill_ctx_t {
39 ir_node *spilled; /**< The spilled node. */
40 ir_node *user; /**< The node this spill is for. */
41 ir_node *spill; /**< The spill itself. */

/* Fields of the spill environment passed through the whole
   spill/reload insertion phase. */
45 firm_dbg_module_t *dbg;              /**< Debug module used for all DBG output below. */
46 const arch_register_class_t *cls;    /**< Register class whose values are spilled. */
47 const be_chordal_env_t *chordal_env; /**< Back-end chordal environment (irg, arch env, ...). */
50 set *spills; /**< all spill_info_t's, which must be placed */
51 pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
52 decide_irn_t is_mem_phi; /**< callback func to decide if a phi needs special spilling */
53 void *data; /**< data passed to all callbacks */
/* set-compare callback for spill_ctx_t entries: two contexts are equal
   iff both the user and the spilled node are identical. Returns 0 on
   equality, non-zero otherwise (libfirm 'set' convention). */
56 static int cmp_spillctx(const void *a, const void *b, size_t n) {
57 const spill_ctx_t *p = a;
58 const spill_ctx_t *q = b;
59 return !(p->user == q->user && p->spilled == q->spilled);
/* set-compare callback for spill_info_t entries: entries are keyed by the
   spilled node only. Returns 0 on equality, non-zero otherwise. */
62 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
63 const spill_info_t *xx = x;
64 const spill_info_t *yy = y;
65 return ! (xx->spilled_node == yy->spilled_node);
/**
 * Create a new spill environment.
 *
 * @param dbg          Debug module for diagnostic output.
 * @param chordal_env  The chordal back-end environment; its register class
 *                     is taken over into the new environment.
 * @param is_mem_phi   Callback deciding whether a phi gets the special
 *                     memory-phi spilling treatment.
 * @param data         Opaque pointer handed to the callback.
 *
 * NOTE(review): malloc result is used unchecked — OOM aborts elsewhere or
 * is simply not handled here; confirm project policy.
 */
68 spill_env_t *be_new_spill_env(firm_dbg_module_t *dbg,
69 const be_chordal_env_t *chordal_env,
70 decide_irn_t is_mem_phi, void *data) {

72 spill_env_t *env = malloc(sizeof(env[0]));
73 env->spill_ctxs = new_set(cmp_spillctx, 1024);
74 env->spills = new_set(cmp_spillinfo, 1024);
75 env->cls = chordal_env->cls;
77 env->is_mem_phi = is_mem_phi;
79 env->chordal_env = chordal_env;
80 obstack_init(&env->obst);
/**
 * Destroy a spill environment: frees both hash sets and everything
 * allocated on the environment's obstack.
 */
84 void be_delete_spill_env(spill_env_t *senv) {
85 del_set(senv->spill_ctxs);
86 del_set(senv->spills);
87 obstack_free(&senv->obst, NULL);
/**
 * Look up (or create) the spill context for the pair (to_spill, ctx_irn).
 * set_insert returns the existing entry if one with the same key is
 * already present, so this acts as a get-or-create.
 */
91 static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
94 templ.spilled = to_spill;
98 return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
/**
 * Create (or reuse) the spill node for an ordinary (non-memory-phi) value
 * in the given context. The spill is cached in the spill context, so a
 * second call with the same (irn, ctx_irn) pair reuses the same spill.
 */
101 static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
103 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
105 ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
107 const be_main_env_t *env = senv->chordal_env->main_env;
108 ctx->spill = be_spill(env->node_factory, env->arch_env, irn, ctx_irn);
115 * If the first usage of a phi result would be out of memory
116 * there is no sense in allocating a register for it.
117 * Thus we spill it and all its operands to the same spill slot.
118 * Therefore the phi/dataB becomes a phi/Memory
120 static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn) {
121 int i, n = get_irn_arity(phi);
122 ir_node **ins, *bl = get_nodes_block(phi);
123 ir_graph *irg = senv->chordal_env->irg;

127 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));

129 /* search an existing spill for this context */
130 ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);

132 /* if not found spill the phi */
134 /* build a new PhiM with dummy in-array; the real memory
   operands are wired in below, after recursing into the arguments */
135 ins = malloc(n * sizeof(ins[0]));
137 ins[i] = new_r_Unknown(irg, mode_M);
138 ctx->spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);

141 /* re-wire the phiM */
143 ir_node *arg = get_irn_n(phi, i);
/* memory-phi arguments are spilled recursively into the same context;
   everything else gets an ordinary spill */
146 if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg))
147 sub_res = be_spill_phi(senv, arg, ctx_irn);
149 sub_res = be_spill_irn(senv, arg, ctx_irn);
151 set_irn_n(ctx->spill, i, sub_res);
/**
 * Spill a node, dispatching to the special memory-phi spilling when the
 * node is in the mem_phis set, and to ordinary spilling otherwise.
 * The node serves as its own spill context here.
 */
157 static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
159 if (pset_find_ptr(senv->mem_phis, to_spill))
160 res = be_spill_phi(senv, to_spill, to_spill);
162 res = be_spill_irn(senv, to_spill, to_spill);
/**
 * Graph-walker callback: collect all phis of the environment's register
 * class for which the is_mem_phi callback requests special (memory)
 * spilling, into senv->mem_phis.
 */
167 static void phi_walker(ir_node *irn, void *env) {
168 spill_env_t *senv = env;
169 const arch_env_t *arch = senv->chordal_env->main_env->arch_env;

171 if (is_Phi(irn) && arch_irn_has_reg_class(arch, irn, 0, senv->cls)
172 && senv->is_mem_phi(irn, senv->data)) {
173 DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
174 pset_insert_ptr(senv->mem_phis, irn);
/**
 * Main driver: materialize all recorded spills and reloads in the graph.
 *
 * Phases:
 *  1. Collect all memory-phis via phi_walker.
 *  2. Add reloads for values flowing from mem-phis into ordinary phis.
 *  3. For every recorded spill_info, create the spill and one Reload per
 *     reload site, schedule them, and introduce the necessary copies/phis
 *     via be_introduce_copies_ignore.
 *  4. Detach the mem-phis' data operands (replace with Bad) since they
 *     now live in memory.
 *
 * @param reload_set  Out-set collecting all created Reload nodes
 *                    (NOTE(review): assumed non-NULL — confirm with callers).
 */
178 void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
179 ir_graph *irg = senv->chordal_env->irg;

186 /* get all special spilled phis */
187 DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
188 senv->mem_phis = pset_new_ptr_default();
189 irg_walk_graph(senv->chordal_env->irg, phi_walker, NULL, senv);

191 /* Add reloads for mem_phis */
192 /* BETTER: These reloads (1) should only be inserted, if they are really needed */
193 DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
194 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
196 DBG((senv->dbg, LEVEL_1, " Mem-phi %+F\n", irn));
197 foreach_out_edge(irn, e) {
198 ir_node *user = e->src;
/* only non-mem-phi phi users need an explicit reload on the
   corresponding control-flow edge */
199 if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
200 ir_node *use_bl = get_nodes_block(user);
201 DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
202 be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */

207 /* process each spilled node */
208 DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
209 for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
213 ir_mode *mode = get_irn_mode(si->spilled_node);

215 /* go through all reloads for this spill */
216 for(rld = si->reloaders; rld; rld = rld->next) {
217 /* the spill for this reloader (cached, so created only once) */
218 ir_node *spill = be_spill_node(senv, si->spilled_node);

/* a reloader may be a block (reload at block start) or an ordinary node */
221 ir_node *bl = is_Block(rld->reloader) ? rld->reloader : get_nodes_block(rld->reloader);
222 ir_node *reload = new_Reload(senv->chordal_env->main_env->node_factory,
223 senv->cls, irg, bl, mode, spill);

225 DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", reload, si->spilled_node, rld->reloader));
227 pset_insert_ptr(reload_set, reload);

229 /* remember the reload */
230 obstack_ptr_grow(&ob, reload);
231 sched_add_before(rld->reloader, reload);

235 assert(n_reloads > 0);
236 reloads = obstack_finish(&ob);
237 be_introduce_copies_ignore(senv->chordal_env->dom_front, si->spilled_node,
238 n_reloads, reloads, senv->mem_phis);
239 obstack_free(&ob, reloads);

242 obstack_free(&ob, NULL);

/* mem-phis now live in memory: cut their data inputs by replacing
   all operands with Bad */
244 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
246 for(i = 0, n = get_irn_arity(irn); i < n; ++i)
247 set_irn_n(irn, i, new_r_Bad(senv->chordal_env->irg));

251 del_pset(senv->mem_phis);
/**
 * Record that @p to_spill must be reloaded immediately before @p before.
 * Creates the spill_info entry on first use (set_insert acts as
 * get-or-create) and prepends a new reloader to its list.
 */
254 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
255 spill_info_t templ, *res;

258 templ.spilled_node = to_spill;
259 templ.reloaders = NULL;
260 res = set_insert(senv->spills, &templ, sizeof(templ), HASH_PTR(to_spill));

262 rel = obstack_alloc(&senv->obst, sizeof(rel[0]));
263 rel->reloader = before;
264 rel->next = res->reloaders;
265 res->reloaders = rel;
/**
 * Record a reload on the control-flow edge @p pos into block @p bl.
 * With a single predecessor the reload can go at the start of @p bl
 * itself; otherwise it is placed in the predecessor block of that edge
 * (edge splitting is apparently assumed done — NOTE(review): confirm).
 */
268 void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
269 ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
270 be_add_reload(senv, to_spill, insert_bl);
277 SPILL SLOT MANAGEMENT AND OPTS
/* A spill slot: a set of spill nodes sharing the same stack location.
   (Fields besides 'members' — offset/size — referenced below.) */
281 typedef struct _spill_slot_t {

/* Environment for spill-slot computation and coalescing. */
287 typedef struct _ss_env_t {
288 firm_dbg_module_t *dbg;              /**< Debug module "ir.be.spillslots". */
290 be_chordal_env_t *cenv;              /**< Chordal back-end environment. */
291 pmap *slots; /* maps spill_contexts to spill_slots */
/**
 * Graph-walker callback: assign each Spill node to the spill slot of its
 * spill context, creating a new slot for unseen contexts. Asserts that
 * all spills sharing a slot have the same size and do not interfere.
 */
295 static void compute_spill_slots_walker(ir_node *spill, void *env) {
296 ss_env_t *ssenv = env;

/* only Spill nodes are of interest */
301 if (!be_is_Spill(spill))

304 /* check, if this spill is for a context already known */
305 ctx = get_Spill_context(spill);
306 entry = pmap_find(ssenv->slots, ctx);

309 /* this is a new spill context */
310 ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
311 ss->members = pset_new_ptr(8);
/* slot size = size of the mode of the spilled value (operand 0) */
312 ss->size = get_mode_size_bytes(get_irn_mode(get_irn_n(spill, 0)));
313 pmap_insert(ssenv->slots, ctx, ss);

316 /* values with the same spill_ctx must go into the same spill slot */
318 assert(ss->size == (unsigned)get_mode_size_bytes(get_irn_mode(get_irn_n(spill, 0))) && "Different sizes for the same spill slot");
319 for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
320 /* use values_interfere here, because it uses the dominance check,
321 which does work for values in memory */
322 assert(!values_interfere(spill, irn) && "Spills for the same spill slot must not interfere!");

326 pset_insert_ptr(ss->members, spill);
/* qsort comparator for spill-slot pointers: sorts by size in DESCENDING
   order (larger slots first), to reduce fragmentation in coalesce_slots. */
329 static int ss_sorter(const void *v1, const void *v2) {
330 const spill_slot_t *ss1 = v1;
331 const spill_slot_t *ss2 = v2;
332 return ((int) ss2->size) - ((int) ss1->size);
336 /* NOTE/TODO: This function assumes, that all spill slot sizes are a power of 2.
337 Further it assumes, that the alignment is equal to the size and the
338 base address of the spill area is aligned sufficiently for all possible alignments.
/**
 * Assign stack offsets to all spill slots and coalesce non-interfering
 * slots into the same offset.
 *
 * Algorithm: sort slots by decreasing size, then for each slot either
 * find an already-placed slot none of whose members interfere with this
 * slot's members (coalesce into it), or assign a fresh offset.
 */
340 static void coalesce_slots(ss_env_t *ssenv) {
341 int i, o, used_slots;
342 unsigned curr_offset;

346 /* Build an array of all spill slots */
347 int count = pmap_count(ssenv->slots);
348 ass = obstack_alloc(&ssenv->ob, count * sizeof(*ass));

351 pmap_foreach(ssenv->slots, entr)
352 ass[i++] = entr->value;

354 /* Sort the array to minimize fragmentation and cache footprint.
355 Large slots come first */
356 qsort(ass, count, sizeof(ass[0]), ss_sorter);

358 /* For each spill slot:
359 - assign a new offset to this slot
360 - xor find another slot to coalesce with */
363 for (i=0; i<count; ++i) { /* for each spill slot */

367 DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
368 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
369 DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));

372 for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
373 /* check inter-slot-pairs for interference */
/* pset_break is needed before the goto to properly terminate the
   pset iteration of both sets */
375 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
376 for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
377 if(values_interfere(n1, n2)) {
378 pset_break(ass[i]->members);
379 pset_break(ass[o]->members);
380 DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
381 goto interf_detected;

384 /* if we are here, there is no interference between ass[i] and ass[o] */

387 interf_detected: /*nothing*/ ;

390 /* now the members of ass[i] join the members of ass[tgt_slot] */

392 /* do we need a new slot? */
393 if (tgt_slot == -1) {
394 tgt_slot = used_slots;
/* fresh slot: gets the next free offset; sorting guarantees
   decreasing sizes, so alignment follows from the size order */
397 ass[tgt_slot]->offset = curr_offset;
398 curr_offset += ass[i]->size;

/* coalescing into an existing slot: adopt size and reset the member
   set so it can be repopulated below */
402 ass[tgt_slot]->size = ass[i]->size;
403 del_pset(ass[tgt_slot]->members);
404 ass[tgt_slot]->members = pset_new_ptr(8);

408 /* copy the members to the target pset */
409 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members)) {
410 /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
412 pset_insert_ptr(ass[tgt_slot]->members, n1);
/* write the final frame offset into each Spill node */
414 set_Spill_offset(n1, ass[tgt_slot]->offset);
415 DBG((ssenv->dbg, LEVEL_1, " Offset %+F %d\n", n1, ass[tgt_slot]->offset));

419 /* free all used psets, all other stuff is on the ssenv-obstack */
420 for (i=0; i<count; ++i)
421 del_pset(ass[i]->members);
/**
 * Entry point for spill-slot assignment: walk the graph collecting all
 * Spill nodes into slots (compute_spill_slots_walker), then coalesce the
 * slots and assign frame offsets. All temporaries live on a local
 * obstack / pmap and are released before returning.
 */
425 void be_compute_spill_offsets(be_chordal_env_t *cenv) {

428 obstack_init(&ssenv.ob);
430 ssenv.slots = pmap_create();
431 ssenv.dbg = firm_dbg_register("ir.be.spillslots");

433 irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
434 coalesce_slots(&ssenv);

436 pmap_destroy(ssenv.slots);
437 obstack_free(&ssenv.ob, NULL);