2 * Author: Daniel Grund, Sebastian Hack
4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
11 #include "iredges_t.h"
17 #include "bechordal_t.h"
19 typedef struct _reloader_t reloader_t;
20 typedef struct _spill_info_t spill_info_t;
/* Bookkeeping for one spilled value: the node itself plus the list of
 * reload points recorded for it (see be_add_reload). */
27 struct _spill_info_t {
28 ir_node *spilled_node;
29 reloader_t *reloaders;
/* A (spilled node, user) pair together with the spill node created for it;
 * used to spill a value at most once per context (see be_get_spill_ctx). */
32 typedef struct _spill_ctx_t {
33 ir_node *spilled; /**< The spilled node. */
34 ir_node *user; /**< The node this spill is for. */
35 ir_node *spill; /**< The spill itself. */
39 firm_dbg_module_t *dbg; /**< debug module handle used by the DBG() output below */
40 const arch_register_class_t *cls; /**< register class whose values are spilled/reloaded */
41 const be_chordal_env_t *chordal_env; /**< back-end environment (irg, main_env, dom_front) */
44 set *spills; /**< all spill_info_t's, which must be placed */
45 pset *mem_phis; /**< set of all special spilled phis. allocated and freed separately */
46 decide_irn_t is_mem_phi; /**< callback func to decide if a phi needs special spilling */
47 void *data; /**< data passed to all callbacks */
50 static int cmp_spillctx(const void *a, const void *b, size_t n) {
51 const spill_ctx_t *p = a;
52 const spill_ctx_t *q = b;
53 return !(p->user == q->user && p->spilled == q->spilled);
56 static int cmp_spillinfo(const void *x, const void *y, size_t size) {
57 const spill_info_t *xx = x;
58 const spill_info_t *yy = y;
59 return ! (xx->spilled_node == yy->spilled_node);
62 spill_env_t *be_new_spill_env(firm_dbg_module_t *dbg,
63 const be_chordal_env_t *chordal_env,
64 decide_irn_t is_mem_phi, void *data) {
66 spill_env_t *env = malloc(sizeof(env[0]));
67 env->spill_ctxs = new_set(cmp_spillctx, 1024);
68 env->spills = new_set(cmp_spillinfo, 1024);
69 env->cls = chordal_env->cls;
71 env->is_mem_phi = is_mem_phi;
73 env->chordal_env = chordal_env;
74 obstack_init(&env->obst);
78 void be_delete_spill_env(spill_env_t *senv) {
79 del_set(senv->spill_ctxs);
80 del_set(senv->spills);
81 obstack_free(&senv->obst, NULL);
85 static spill_ctx_t *be_get_spill_ctx(set *sc, ir_node *to_spill, ir_node *ctx_irn) {
88 templ.spilled = to_spill;
92 return set_insert(sc, &templ, sizeof(templ), HASH_COMBINE(HASH_PTR(to_spill), HASH_PTR(ctx_irn)));
95 static ir_node *be_spill_irn(spill_env_t *senv, ir_node *irn, ir_node *ctx_irn) {
97 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", irn, ctx_irn));
99 ctx = be_get_spill_ctx(senv->spill_ctxs, irn, ctx_irn);
101 const be_main_env_t *env = senv->chordal_env->main_env;
102 ctx->spill = be_spill(env->node_factory, env->arch_env, irn);
109 * If the first usage of a phi result would come out of memory,
110 * there is no sense in allocating a register for it.
111 * Thus we spill it and all its operands to the same spill slot.
112 * Therefore the phi/dataB becomes a phi/Memory.
114 static ir_node *be_spill_phi(spill_env_t *senv, ir_node *phi, ir_node *ctx_irn) {
115 int i, n = get_irn_arity(phi);
116 ir_node **ins, *bl = get_nodes_block(phi);
117 ir_graph *irg = senv->chordal_env->irg;
121 DBG((senv->dbg, LEVEL_1, "%+F in ctx %+F\n", phi, ctx_irn));
123 /* search an existing spill for this context */
124 ctx = be_get_spill_ctx(senv->spill_ctxs, phi, ctx_irn);
126 /* if not found spill the phi */
128 /* build a new PhiM with dummy in-array */
129 ins = malloc(n * sizeof(ins[0]));
131 ins[i] = new_r_Unknown(irg, mode_M);
132 ctx->spill = new_r_Phi(senv->chordal_env->irg, bl, n, ins, mode_M);
135 /* re-wire the phiM */
137 ir_node *arg = get_irn_n(phi, i);
140 if(is_Phi(arg) && pset_find_ptr(senv->mem_phis, arg))
141 sub_res = be_spill_phi(senv, arg, ctx_irn);
143 sub_res = be_spill_irn(senv, arg, ctx_irn);
145 set_irn_n(ctx->spill, i, sub_res);
151 static ir_node *be_spill_node(spill_env_t *senv, ir_node *to_spill) {
153 if (pset_find_ptr(senv->mem_phis, to_spill))
154 res = be_spill_phi(senv, to_spill, to_spill);
156 res = be_spill_irn(senv, to_spill, to_spill);
161 static void phi_walker(ir_node *irn, void *env) {
162 spill_env_t *senv = env;
163 const arch_env_t *arch = senv->chordal_env->main_env->arch_env;
165 if (is_Phi(irn) && arch_irn_has_reg_class(arch, irn, 0, senv->cls)
166 && senv->is_mem_phi(irn, senv->data)) {
167 DBG((senv->dbg, LEVEL_1, " %+F\n", irn));
168 pset_insert_ptr(senv->mem_phis, irn);
/*
 * Materialize all recorded spills and reloads in the graph.
 * Phase 1: collect all mem-phis; phase 2: add reloads where a non-mem-phi
 * uses a mem-phi; phase 3: for every recorded spill, create the spill node
 * and one Reload per recorded reload point, schedule it, collect it in
 * reload_set, and rewire users via be_introduce_copies_ignore; phase 4:
 * detach the mem-phis' data operands by setting them to Bad.
 *
 * NOTE(review): this listing has elided lines — the declarations of irn,
 * si, rld, e, the local obstack 'ob' and the n_reloads counter, plus
 * several closing braces, are not visible here; do not treat the visible
 * text as compilable as-is.
 */
172 void be_insert_spills_reloads(spill_env_t *senv, pset *reload_set) {
173 ir_graph *irg = senv->chordal_env->irg;
180 /* get all special spilled phis */
181 DBG((senv->dbg, LEVEL_1, "Mem-phis:\n"));
182 senv->mem_phis = pset_new_ptr_default();
183 irg_walk_graph(senv->chordal_env->irg, phi_walker, NULL, senv);
185 /* Add reloads for mem_phis */
186 /* BETTER: These reloads (1) should only be inserted, if they are really needed */
187 DBG((senv->dbg, LEVEL_1, "Reloads for mem-phis:\n"));
188 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
190 DBG((senv->dbg, LEVEL_1, " Mem-phi %+F\n", irn));
191 foreach_out_edge(irn, e) {
192 ir_node *user = e->src;
/* only non-mem-phi users need an explicit reload on the incoming edge */
193 if (is_Phi(user) && !pset_find_ptr(senv->mem_phis, user)) {
194 ir_node *use_bl = get_nodes_block(user);
195 DBG((senv->dbg, LEVEL_1, " non-mem-phi user %+F\n", user));
196 be_add_reload_on_edge(senv, irn, use_bl, e->pos); /* (1) */
201 /* process each spilled node */
202 DBG((senv->dbg, LEVEL_1, "Insert spills and reloads:\n"));
203 for(si = set_first(senv->spills); si; si = set_next(senv->spills)) {
207 ir_mode *mode = get_irn_mode(si->spilled_node);
209 /* go through all reloads for this spill */
210 for(rld = si->reloaders; rld; rld = rld->next) {
211 /* the spill for this reloader */
212 ir_node *spill = be_spill_node(senv, si->spilled_node);
/* place the reload in the reloader's block (or the block itself if the
 * recorded reload point is a block) */
215 ir_node *bl = is_Block(rld->reloader) ? rld->reloader : get_nodes_block(rld->reloader);
216 ir_node *reload = new_Reload(senv->chordal_env->main_env->node_factory,
217 senv->cls, irg, bl, mode, spill);
219 DBG((senv->dbg, LEVEL_1, " %+F of %+F before %+F\n", reload, si->spilled_node, rld->reloader));
221 pset_insert_ptr(reload_set, reload);
223 /* remember the reload */
224 obstack_ptr_grow(&ob, reload);
225 sched_add_before(rld->reloader, reload);
229 assert(n_reloads > 0);
230 reloads = obstack_finish(&ob);
/* rewire all users of the spilled value to the reloads, ignoring mem-phis */
231 be_introduce_copies_ignore(senv->chordal_env->dom_front, si->spilled_node,
232 n_reloads, reloads, senv->mem_phis);
233 obstack_free(&ob, reloads);
236 obstack_free(&ob, NULL);
/* the mem-phis are PhiM now: cut their data inputs by replacing them with Bad */
238 for(irn = pset_first(senv->mem_phis); irn; irn = pset_next(senv->mem_phis)) {
240 for(i = 0, n = get_irn_arity(irn); i < n; ++i)
241 set_irn_n(irn, i, new_r_Bad(senv->chordal_env->irg));
245 del_pset(senv->mem_phis);
248 void be_add_reload(spill_env_t *senv, ir_node *to_spill, ir_node *before) {
249 spill_info_t templ, *res;
252 // assert(get_irn_opcode(to_spill) != iro_Unknown);
254 templ.spilled_node = to_spill;
255 templ.reloaders = NULL;
256 res = set_insert(senv->spills, &templ, sizeof(templ), HASH_PTR(to_spill));
258 rel = obstack_alloc(&senv->obst, sizeof(rel[0]));
259 rel->reloader = before;
260 rel->next = res->reloaders;
261 res->reloaders = rel;
264 void be_add_reload_on_edge(spill_env_t *senv, ir_node *to_spill, ir_node *bl, int pos) {
265 ir_node *insert_bl = get_irn_arity(bl) == 1 ? sched_first(bl) : get_Block_cfgpred_block(bl, pos);
266 be_add_reload(senv, to_spill, insert_bl);