/*
 * Author:    Daniel Grund, Sebastian Hack
 * Copyright: (c) Universitaet Karlsruhe
 * Licence:   This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
 */
16 #include "iredges_t.h"
23 #include "bechordal_t.h"
25 typedef struct _reloader_t reloader_t;
26 typedef struct _spill_info_t spill_info_t;
33 struct _spill_info_t {
34 ir_node *spilled_node;
35 reloader_t *reloaders;
38 typedef struct _spill_ctx_t {
39 ir_node *spilled; /**< The spilled node. */
40 ir_node *user; /**< The node this spill is for. */
41 ir_node *spill; /**< The spill itself. */
45 firm_dbg_module_t *dbg;
46 const arch_register_class_t *cls;
47 const be_chordal_env_t *chordal_env;
50 set *spills; /**< all spill_info_t's, which must be placed */
51 pset *mem_phis; /**< set of all special spilled phis. allocated and freed seperately */
52 decide_irn_t is_mem_phi; /**< callback func to decide if a phi needs special spilling */
53 void *data; /**< data passed to all callbacks */
56 static int cmp_spillctx(const void *a, const void *b, size_t n) {
57 const spill_ctx_t *p = a;
58 const spill_ctx_t *q = b;
59 return !(p->user == q->user && p->spilled == q->spilled);
62 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
63 const spill_info_t *xx = x;
64 const spill_info_t *yy = y;
65 return ! (xx->spilled_node == yy->spilled_node);
68 spill_env_t *be_new_spill_env(firm_dbg_module_t *dbg,
69 const be_chordal_env_t *chordal_env,
70 decide_irn_t is_mem_phi, void *data) {
72 spill_env_t *env = malloc(sizeof(env[0]));
73 env->spill_ctxs = new_set(cmp_spillctx, 1024);
74 env->spills = new_set(cmp_spillinfo, 1024);
75 env->cls = chordal_env->cls;
77 env->is_mem_phi = is_mem_phi;
79 env->chordal_env = chordal_env;
80 obstack_init(&env->obst);
84 void be_delete_spill_env(spill_env_t *senv) {
85 del_set(senv->spill_ctxs);
86 del_set(senv->spills);
87 obstack_free(&senv->obst, NULL);
91 static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
94 templ.spilled = to_spill;
98 return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
101 static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
103 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
105 ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
107 const be_main_env_t *env = senv->chordal_env->main_env;
108 ctx->spill = be_spill(env->arch_env, irn, ctx_irn);
115 * If the first usage of a phi result would be out of memory
116 * there is no sense in allocating a register for it.
117 * Thus we spill it and all its operands to the same spill slot.
118 * Therefore the phi/dataB becomes a phi/Memory
120 static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn) {
121 int i, n = get_irn_arity(phi);
122 ir_node **ins, *bl = get_nodes_block(phi);
123 ir_graph *irg = senv->chordal_env->irg;
127 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
129 /* search an existing spill for this context */
130 ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
132 /* if not found spill the phi */
134 /* build a new PhiM with dummy in-array */
135 ins = malloc(n * sizeof(ins[0]));
137 ins[i] = new_r_Unknown(irg, mode_M);
138 ctx->spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
141 /* re-wire the phiM */
143 ir_node *arg = get_irn_n(phi, i);
146 if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg))
147 sub_res = be_spill_phi(senv, arg, ctx_irn);
149 sub_res = be_spill_irn(senv, arg, ctx_irn);
151 set_irn_n(ctx->spill, i, sub_res);
157 static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
159 if (pset_find_ptr(senv->mem_phis, to_spill))
160 res = be_spill_phi(senv, to_spill, to_spill);
162 res = be_spill_irn(senv, to_spill, to_spill);
167 static void phi_walker(ir_node *irn, void *env) {
168 spill_env_t *senv = env;
169 const arch_env_t *arch = senv->chordal_env->main_env->arch_env;
171 if (is_Phi(irn) && arch_irn_has_reg_class(arch, irn, 0, senv->cls)
172 && senv->is_mem_phi(irn, senv->data)) {
173 DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
174 pset_insert_ptr(senv->mem_phis, irn);
178 void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
179 ir_graph *irg = senv->chordal_env->irg;
186 /* get all special spilled phis */
187 DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
188 senv->mem_phis = pset_new_ptr_default();
189 irg_walk_graph(senv->chordal_env->irg, phi_walker, NULL, senv);
191 /* Add reloads for mem_phis */
192 /* BETTER: These reloads (1) should only be inserted, if they are really needed */
193 DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
194 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
196 DBG((senv->dbg, LEVEL_1, " Mem-phi %+F\n", irn));
197 foreach_out_edge(irn, e) {
198 ir_node *user = e->src;
199 if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
200 ir_node *use_bl = get_nodes_block(user);
201 DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
202 be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */
207 /* process each spilled node */
208 DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
209 for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
213 ir_mode *mode = get_irn_mode(si->spilled_node);
215 /* go through all reloads for this spill */
216 for(rld = si->reloaders; rld; rld = rld->next) {
217 /* the spill for this reloader */
218 ir_node *spill = be_spill_node(senv, si->spilled_node);
221 ir_node *bl = is_Block(rld->reloader) ? rld->reloader : get_nodes_block(rld->reloader);
222 ir_node *reload = be_new_Reload(senv->cls, irg, bl, mode, spill);
224 DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", reload, si->spilled_node, rld->reloader));
226 pset_insert_ptr(reload_set, reload);
228 /* remember the reaload */
229 obstack_ptr_grow(&ob, reload);
230 sched_add_before(rld->reloader, reload);
234 assert(n_reloads > 0);
235 reloads = obstack_finish(&ob);
236 be_introduce_copies_ignore(senv->chordal_env->dom_front, si->spilled_node,
237 n_reloads, reloads, senv->mem_phis);
238 obstack_free(&ob, reloads);
241 obstack_free(&ob, NULL);
243 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
245 for(i = 0, n = get_irn_arity(irn); i < n; ++i)
246 set_irn_n(irn, i, new_r_Bad(senv->chordal_env->irg));
250 del_pset(senv->mem_phis);
253 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
254 spill_info_t templ, *res;
257 templ.spilled_node = to_spill;
258 templ.reloaders = NULL;
259 res = set_insert(senv->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
261 rel = obstack_alloc(&senv->obst, sizeof(rel[0]));
262 rel->reloader = before;
263 rel->next = res->reloaders;
264 res->reloaders = rel;
267 void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
268 ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
269 be_add_reload(senv, to_spill, insert_bl);
/****************************************

	SPILL SLOT MANAGEMENT AND OPTS

****************************************/
280 typedef struct _spill_slot_t {
286 typedef struct _ss_env_t {
287 firm_dbg_module_t *dbg;
289 be_chordal_env_t *cenv;
290 pmap *slots; /* maps spill_contexts to spill_slots */
294 static void compute_spill_slots_walker(ir_node *spill, void *env) {
295 ss_env_t *ssenv = env;
300 if (!be_is_Spill(spill))
303 /* check, if this spill is for a context already known */
304 ctx = be_get_Spill_context(spill);
305 entry = pmap_find(ssenv->slots, ctx);
308 /* this is a new spill context */
309 ss = obstack_alloc(&ssenv->ob, sizeof(*ss));
310 ss->members = pset_new_ptr(8);
311 ss->size = get_mode_size_bytes(get_irn_mode(get_irn_n(spill, 0)));
312 pmap_insert(ssenv->slots, ctx, ss);
315 /* values with the same spill_ctx must go into the same spill slot */
317 assert(ss->size == (unsigned)get_mode_size_bytes(get_irn_mode(get_irn_n(spill, 0))) && "Different sizes for the same spill slot");
318 for (irn = pset_first(ss->members); irn; irn = pset_next(ss->members)) {
319 /* use values_interfere here, because it uses the dominance check,
320 which does work for values in memory */
321 assert(!values_interfere(spill, irn) && "Spills for the same spill slot must not interfere!");
325 pset_insert_ptr(ss->members, spill);
328 static int ss_sorter(const void *v1, const void *v2) {
329 const spill_slot_t *ss1 = v1;
330 const spill_slot_t *ss2 = v2;
331 return ((int) ss2->size) - ((int) ss1->size);
335 /* NOTE/TODO: This function assumes, that all spill slot sizes are a power of 2.
336 Further it assumes, that the alignment is equal to the size and the
337 baseaddr of the spill area is aligned sufficiently for all possible aligments.
339 static void coalesce_slots(ss_env_t *ssenv) {
340 int i, o, used_slots;
341 unsigned curr_offset;
345 /* Build an array of all spill slots */
346 int count = pmap_count(ssenv->slots);
347 ass = obstack_alloc(&ssenv->ob, count * sizeof(*ass));
350 pmap_foreach(ssenv->slots, entr)
351 ass[i++] = entr->value;
353 /* Sort the array to minimize fragmentation and cache footprint.
354 Large slots come first */
355 qsort(ass, count, sizeof(ass[0]), ss_sorter);
357 /* For each spill slot:
358 - assign a new offset to this slot
359 - xor find another slot to coalesce with */
362 for (i=0; i<count; ++i) { /* for each spill slot */
366 DBG((ssenv->dbg, LEVEL_1, "Spill slot %d members:\n", i));
367 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
368 DBG((ssenv->dbg, LEVEL_1, " %+F\n", n1));
371 for (o=0; o < used_slots && tgt_slot == -1; ++o) { /* for each offset-assigned spill slot */
372 /* check inter-slot-pairs for interference */
374 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members))
375 for(n2 = pset_first(ass[o]->members); n2; n2 = pset_next(ass[o]->members))
376 if(values_interfere(n1, n2)) {
377 pset_break(ass[i]->members);
378 pset_break(ass[o]->members);
379 DBG((ssenv->dbg, LEVEL_1, " Interf %+F -- %+F\n", n1, n2));
380 goto interf_detected;
383 /* if we are here, there is no interference between ass[i] and ass[o] */
386 interf_detected: /*nothing*/ ;
389 /* now the members of ass[i] join the members of ass[tgt_slot] */
391 /* do we need a new slot? */
392 if (tgt_slot == -1) {
393 tgt_slot = used_slots;
396 ass[tgt_slot]->offset = curr_offset;
397 curr_offset += ass[i]->size;
401 ass[tgt_slot]->size = ass[i]->size;
402 del_pset(ass[tgt_slot]->members);
403 ass[tgt_slot]->members = pset_new_ptr(8);
407 /* copy the members to the target pset */
408 for(n1 = pset_first(ass[i]->members); n1; n1 = pset_next(ass[i]->members)) {
409 /* NOTE: If src and tgt pset are the same, inserting while iterating is not allowed */
411 pset_insert_ptr(ass[tgt_slot]->members, n1);
413 be_set_Spill_offset(n1, ass[tgt_slot]->offset);
414 DBG((ssenv->dbg, LEVEL_1, " Offset %+F %d\n", n1, ass[tgt_slot]->offset));
418 /* free all used psets, all other stuff is on the ssenv-obstack */
419 for (i=0; i<count; ++i)
420 del_pset(ass[i]->members);
424 void be_compute_spill_offsets(be_chordal_env_t *cenv) {
427 obstack_init(&ssenv.ob);
429 ssenv.slots = pmap_create();
430 ssenv.dbg = firm_dbg_register("ir.be.spillslots");
432 irg_walk_graph(cenv->irg, NULL, compute_spill_slots_walker, &ssenv);
433 coalesce_slots(&ssenv);
435 pmap_destroy(ssenv.slots);
436 obstack_free(&ssenv.ob, NULL);