bugfixes
[libfirm] / ir / be / bespillilp.c
1 /**
2  * @file   bespillilp.c
3  * @date   15.07.2005
4  * @author Sebastian Hack
5  *
6  * ILP based spilling
7  *
8  * Copyright (C) 2005 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #include <math.h>
12
13 #include "hashptr.h"
14 #include "debug.h"
15 #include "obst.h"
16 #include "set.h"
17 #include "list.h"
18 #include "pmap.h"
19
20 #include "irprintf.h"
21 #include "irgwalk.h"
22 #include "irnode_t.h"
23 #include "ircons_t.h"
24
25 #include <lpp/lpp.h>
26 #include <lpp/lpp_net.h>
27 #include <lpp/lpp_cplex.h>
28
29 #include "be_t.h"
30 #include "belive_t.h"
31 #include "besched_t.h"
32 #include "beirgmod.h"
33 #include "bearch.h"
34 #include "benode_t.h"
35 #include "beutil.h"
36 #include "bespillilp.h"
37
/* Big-M coefficient coupling the per-value spill variable to its
 * in-memory use variables in add_store_costs(). */
#define BIGM 1000.0

#define MAX(a,b) ((a) > (b) ? (a) : (b))

#define DBG_LEVEL SET_LEVEL_3

#define DUMP_SOLUTION
#define DUMP_ILP
#undef  DUMP_STATS

/* Solve remotely on the LPP server by default; define SOLVE_LOCAL to
 * run CPLEX in-process instead. */
#undef  SOLVE_LOCAL
#define LPP_SERVER "i44pc52"
#define LPP_SOLVER "cplex"

/* Objective-function cost coefficients. Rematerialization is cheaper
 * than a reload, hence the negative cost relative to COST_LOAD. */
#define COST_LOAD      10
#define COST_STORE     50
#define COST_REMAT     (-9)

/* A live range whose "user" is a block stands for the value being live
 * at the end of that block (see process_block), not for a real use. */
#define is_end_of_block_use(lr) (is_Block((lr)->user))
/**
 * Reloads on edges.
 * Records that a value may have to be reloaded on a control-flow edge
 * (predecessor @p pos into block @p bl) depending on the ILP solution.
 */
typedef struct _edge_reload_t {
	ir_node *irn;                /**< The value to reload on the edge. */
	ir_node *bl;                 /**< The block the edge leads into. */
	int pos;                     /**< Predecessor position of the edge in bl. */
	int in_mem_var;              /**< ILP variable: value is in memory on this edge. */
	struct _edge_reload_t *next; /**< Next element of the edge list. */
} edge_reload_t;

/* NOTE(review): declared but not used anywhere in this file (the
 * DUMP_STATS code referencing statistics is disabled). */
typedef struct _spill_stat_t {
	int n_spills;
	int n_reloads;
	int n_remat;
} spill_stat_t;

/** Environment carried through the whole ILP spilling phase. */
typedef struct _spill_ilp_t {
	const arch_register_class_t *cls;    /**< Register class to spill for. */
	const be_main_session_env_t *session;
	firm_dbg_module_t *dbg;
	lpp_t *lpp;                          /**< The ILP being built/solved. */
	set *irn_use_heads;                  /**< irn -> irn_use_head_t. */
	set *live_ranges;                    /**< (irn, user, pos) -> live_range_t. */
	set *first_uses;                     /**< (bl, irn) -> first_use_t. */
	spill_env_t *senv;                   /**< Spill environment for writeback. */
	edge_reload_t *edges;                /**< List of potential edge reloads. */
	struct obstack *obst;                /**< Obstack for edge_reload_t allocation. */
	int enable_store : 1;                /**< Add store costs to the objective. */
	int enable_remat : 1;                /**< Add rematerialization constraints. */
} spill_ilp_t;
89
typedef struct _live_range_t live_range_t;

/** Head of the list of all live ranges belonging to one value. */
typedef struct _irn_use_head_t {
	struct list_head head;      /**< List of live_range_t of this node. */
	ir_node *irn;               /**< The value itself. */
	int spill_var;              /**< ILP variable: value is spilled at all. */
	int n_uses;                 /**< Number of live ranges in the list. */
	live_range_t *closest_use;  /**< Live range of the phi use (set in process_block). */
} irn_use_head_t;

/** A live range of a value, ending at one particular use. */
struct _live_range_t {
	struct list_head list;      /**< Linkage in the use head's list. */
	irn_use_head_t *use_head;   /**< The use head of the value. */
	ir_node *user;              /**< The using node (or the block for end-of-block uses). */
	ir_node *irn;               /**< The value. */
	int pos;                    /**< Operand position at the user, -1 for block uses. */
	int in_mem_var;             /**< ILP variable: range resides in memory. */
	int is_remat_var;           /**< ILP variable: use is rematerialized (or -1). */
};

/*
 * Associates the first use of a live-in in a block
 * with its live range.
 */
typedef struct _first_use_t {
	ir_node *bl;
	ir_node *irn;       /**< A value live in at bl. */
	live_range_t *lr;   /**< The live range for the first use of irn in bl. */
} first_use_t;
119
120
121
122 static INLINE int has_reg_class(const spill_ilp_t *si, const ir_node *irn)
123 {
124   return arch_irn_has_reg_class(si->session->main_env->arch_env,
125       irn, arch_pos_make_out(0), si->cls);
126 }
127
128 static int cmp_live_range(const void *a, const void *b, size_t n)
129 {
130   const live_range_t *p = a;
131   const live_range_t *q = b;
132
133   return !(p->user == q->user && p->irn == q->irn && p->pos == q->pos);
134 }
135
136 static int cmp_irn_use_head(const void *a, const void *b, size_t n)
137 {
138   const irn_use_head_t *p = a;
139   const irn_use_head_t *q = b;
140
141         return !(p->irn == q->irn);
142 }
143
144 static irn_use_head_t *get_use_head(spill_ilp_t *si, const ir_node *irn)
145 {
146         irn_use_head_t templ;
147         templ.irn = (ir_node *) irn;
148         return set_find(si->irn_use_heads, &templ, sizeof(templ), HASH_PTR(irn));
149 }
150
151 static int cmp_first_use(const void *a, const void *b, size_t n)
152 {
153   const first_use_t *p = a;
154   const first_use_t *q = b;
155
156   return !(p->irn == q->irn && p->bl == q->bl);
157 }
158
159 static void add_first_use(spill_ilp_t *si, ir_node *bl, ir_node *irn, live_range_t *lr)
160 {
161         first_use_t templ;
162         templ.bl    = bl;
163         templ.irn   = irn;
164         templ.lr    = lr;
165
166         set_insert(si->first_uses, &templ, sizeof(templ),
167                         HASH_COMBINE(HASH_PTR(bl), HASH_PTR(irn)));
168 }
169
170 static live_range_t *get_first_use_lr(spill_ilp_t *si, ir_node *bl, ir_node *irn)
171 {
172         first_use_t *res;
173         first_use_t templ;
174         templ.bl    = bl;
175         templ.irn   = irn;
176
177         res = set_find(si->first_uses, &templ, sizeof(templ),
178                         HASH_COMBINE(HASH_PTR(bl), HASH_PTR(irn)));
179
180         return res ? res->lr : NULL;
181 }
182
/**
 * Checks, if a certain node can be recomputed at a certain position.
 * @param si    The spill ILP environment.
 * @param irn   The node to recompute.
 * @param live  The nodes live at the place where @p irn shall be
 *              recomputed.
 * @return      1, if irn can be recomputed, 0 if not.
 */
static INLINE int can_remat(const spill_ilp_t *si, const ir_node *irn, pset *live)
{
	int i, n;
  const arch_env_t *arch_env    = si->session->main_env->arch_env;
	int remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;

	/* Recomputation additionally requires every register-class operand
	 * of the node to be live at the recomputation point. */
	for(i = 0, n = get_irn_arity(irn); i < n && remat; ++i) {
		ir_node *op = get_irn_n(irn, i);
		remat &= !has_reg_class(si, op) || pset_find_ptr(live, op);
	}

	return remat;
}
204
/**
 * Get (or lazily create) the live range of @p irn ending at use
 * (@p user, @p pos), and make sure the value's use head exists and
 * contains the range. Creating a range also creates its binary
 * in-memory ILP variable (cost COST_LOAD for real uses, 0 for
 * end-of-block uses where pos < 0).
 */
static live_range_t *get_live_range(spill_ilp_t *si, ir_node *irn, ir_node *user, int pos)
{
	live_range_t lr, *res;
	irn_use_head_t iuh, *head;
	int is_new;
	unsigned hash = HASH_COMBINE(HASH_PTR(irn), HASH_PTR(user));

	lr.user         = user;
	lr.irn          = irn;
	lr.pos          = pos;
	lr.in_mem_var   = -1;
	lr.is_remat_var = -1;

	/* set_insert returns the already-present element if one compares
	 * equal; a still-unset in_mem_var (-1) therefore means the entry
	 * was freshly inserted. */
	res = set_insert(si->live_ranges, &lr, sizeof(lr), hash);
	is_new = res->in_mem_var == -1;

	if(is_new) {
		char buf[128];
		ir_snprintf(buf, sizeof(buf), "m_%s%N_%N_%d",
					is_Phi(irn) ? "phi_" : "", irn, user, MAX(pos, 0));
		res->in_mem_var = lpp_add_var(si->lpp, buf, lpp_binary, pos >= 0 ? COST_LOAD : 0.0);
	}

	/* Same sentinel trick for the use head: n_uses == -1 marks a
	 * freshly inserted head whose list must still be initialized. */
	memset(&iuh, 0, sizeof(iuh));
	iuh.irn = irn;
	iuh.n_uses = -1;
	head = set_insert(si->irn_use_heads, &iuh, sizeof(iuh), HASH_PTR(irn));
	if(head->n_uses == -1) {
		head->n_uses = 0;
		INIT_LIST_HEAD(&head->head);
	}

	if(is_new) {
		list_add_tail(&res->list, &head->head);
		head->n_uses++;
	}

	res->use_head = head;

	return res;
}
246
247 static void print_live_set(spill_ilp_t *si, pset *s) {
248         ir_node *n;
249         for(n=pset_first(s); n; n=pset_next(s))
250                 DBG((si->dbg, LEVEL_3, "    %+F\n", n));
251 }
252
253 static void process_block(ir_node *bl, void *data)
254 {
255         char buf[128];
256         int i, n, skipped=0;
257         spill_ilp_t *si  = data;
258         int step         = 0;
259         int n_regs       = arch_register_class_n_regs(si->cls);
260         int n_preds      = get_irn_arity(bl);
261         pset *live       = pset_new_ptr_default();
262         irn_live_t *li;
263         ir_node *irn;
264
265         DBG((si->dbg, LEVEL_3, "\n"));
266         DBG((si->dbg, LEVEL_3, "Processing %+F\n", bl));
267
268         /*
269          * Get all live-end values of this block
270          */
271         live_foreach(bl, li) {
272                 if(live_is_end(li) && has_reg_class(si, li->irn)) {
273                         ir_node *irn = (ir_node *) li->irn;
274                         pset_insert_ptr(live, irn);
275
276                         /*The "user" of the live range to the end of a block
277                          * is the block itself. This is quite arbitrary. */
278                         set_irn_link(irn, get_live_range(si, irn, bl, -1));
279                 }
280         }
281         DBG((si->dbg, LEVEL_3, "Live-End:\n"));
282         print_live_set(si, live);
283
284         /*
285          * Walk through the schedule of this block from end to begin.
286          * Phis are handled togther with live ins after this loop.
287          */
288         for(irn = sched_last(bl); !sched_is_begin(irn) && !is_Phi(irn); irn = sched_prev(irn)) {
289                 ir_node *l;
290                 int cst;
291                 int relevant_args, results;
292                 int demand;
293                 int n_cand;
294                 int must_be_in_mem;
295                 pset *cand;
296
297                 /*
298                  * Determine the number of results
299                  */
300                 /* Special handling of Projs */
301                 if(is_Proj(irn)) {
302                         if(has_reg_class(si, irn)) {
303                                 assert(pset_find_ptr(live, irn) && "node must be live");
304                                 pset_remove_ptr(live, irn);
305                                 skipped++;
306                         }
307
308                         DBG((si->dbg, LEVEL_2, "Skipped %+F\n", irn));
309                         continue;
310                 }
311
312                 DBG((si->dbg, LEVEL_1, "Irn %+F\n", irn));
313                 if(skipped > 0) {
314                         /* ModeT node */
315                         assert(get_irn_mode(irn) == mode_T && "node before projs must be tuple");
316                         results = skipped;
317                         skipped = 0;
318                 } else {
319                         /* Normal node */
320                         if(has_reg_class(si, irn)) {
321                                 assert(get_irn_mode(irn) != mode_T && "node must not be a tuple");
322                                 assert(pset_find_ptr(live, irn) && "node must be live");
323                                 pset_remove_ptr(live, irn);
324                                 results = 1;
325                         } else {
326                                 results = 0;
327                         }
328                 }
329
330                 /* cand holds the irns which may be spilled */
331                 cand = pset_new_ptr(8);
332                 for(l=pset_first(live); l; l=pset_next(live))
333                         pset_insert_ptr(cand, l);
334
335                 /*
336                  * Determine number of arguments
337                  */
338                 relevant_args = 0;
339                 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
340                         ir_node *op = get_irn_n(irn, i);
341                         if(has_reg_class(si, op)) {
342                                 DBG((si->dbg, LEVEL_2, "  arg %+F\n", op));
343                                 relevant_args++;
344                                 /* arguments must not be spilled */
345                                 if(pset_find_ptr(cand, op))
346                                         pset_remove_ptr(cand, op);
347                         }
348                 }
349
350                 /*
351                  * Determine, how many values must be in memory.
352                  * We have 'n_regs' registers.
353                  * The instr. needs 'demand'.
354                  * So (R:= n_regs - demand) registers can be used for candidates 'cand'.
355                  * The rest (M:= n_cand - R) must reside in memory.
356                  */
357                 demand = MAX(results, relevant_args);
358                 n_cand = pset_count(cand);
359                 must_be_in_mem = n_cand - (n_regs - demand);
360
361                 DBG((si->dbg, LEVEL_1, "  Demand: %d, Cands: %d, InMem: %d\n", demand, n_cand, must_be_in_mem));
362                 DBG((si->dbg, LEVEL_3, "  Cand-Set:\n"));
363                 print_live_set(si, cand);
364
365                 /*
366                  * Generate the corresponding constraint spilling
367                  * enough candidates at this label.
368                  */
369                 if(must_be_in_mem > 0) {
370                         ir_snprintf(buf, sizeof(buf), "cp_%N_%N_%d", bl, irn, step);
371                         cst = lpp_add_cst(si->lpp, buf, lpp_greater, must_be_in_mem);
372
373                         for(l = pset_first(cand); l; l = pset_next(cand)) {
374                                 live_range_t *lr = get_irn_link(l);
375                                 lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
376                         }
377                 }
378
379                 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
380                         ir_node *op = get_irn_n(irn, i);
381
382                         if(has_reg_class(si, op)) {
383                                 live_range_t *op_lr = get_live_range(si, op, irn, i);
384                                 set_irn_link(op, op_lr);
385
386 //                              /*
387 //                               * The operand is reloaded at its usage, so it must not occur
388 //                               * in the constraint which determines which values live at the
389 //                               * instruction must reside in memory.
390 //                               */
391 //                              if(must_be_in_mem > 0) {
392 //                                      DBG((si->dbg, LEVEL_3, " Resetting %+F to 0:\n", op));
393 //                                      lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 0.0);
394 //                              }
395
396                                 /*
397                                  * Check, if the node is a rematerializable node and
398                                  * if its operands are live here.
399                                  */
400                                 if(si->enable_remat && can_remat(si, op, live)) {
401                                         int cst;
402                                         int j, n;
403                                         int n_operands = 0;
404
405                                         for(j = 0, n = get_irn_arity(op); j < n; ++j)
406                                                 n_operands += has_reg_class(si, get_irn_n(op, j));
407
408                                         /* Make the remat constraint for this operand */
409                                         ir_snprintf(buf, sizeof(buf), "ce1_%N_%N_%d", op, irn, i);
410                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, n_operands);
411
412                                         /* Make the rematerialize variable for the operand */
413                                         ir_snprintf(buf, sizeof(buf), "e_%N_%N_%d", op, irn, i);
414                                         op_lr->is_remat_var = lpp_add_var(si->lpp, buf, lpp_binary, COST_REMAT);
415                                         lpp_set_factor_fast(si->lpp, cst, op_lr->is_remat_var, n_operands);
416
417                                         for(j = 0, n = get_irn_arity(op); j < n; ++j) {
418                                                 ir_node *oop = get_irn_n(op, j);
419                                                 if(has_reg_class(si, oop)) {
420                                                         live_range_t *lr = get_irn_link(oop);
421                                                         lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
422                                                 }
423                                         }
424
425                                         ir_snprintf(buf, sizeof(buf), "ce2_%N_%N_%d", op, irn, i);
426                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
427                                         lpp_set_factor_fast(si->lpp, cst, op_lr->is_remat_var, 1.0);
428                                         lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, -1.0);
429                                 }
430                         }
431                 }
432
433                 /*
434                  * Insert arguments of current instr into the live set
435                  */
436                 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
437                         ir_node *op = get_irn_n(irn, i);
438                         if(has_reg_class(si, op))
439                                 pset_insert_ptr(live, op);
440                 }
441
442                 del_pset(cand);
443                 step++;
444         }
445
446         if(bl == get_irg_start_block(get_irn_irg(bl)))
447                 goto end;
448
449         /*
450          * Here, the live set contains
451          * - phis of the block
452          * - live-in values of the block
453          *
454          * TODO: comment is wrong
455          * If a value is live in, it must be in a register in all predecessor
456          * blocks or in memory at the end of all predecessor blocks. Also, the
457          * closest use in the current block must then be from register or
458          * memory, respectively.
459          */
460         for(irn = pset_first(live); irn; irn = pset_next(live)) {
461                 live_range_t *lr = get_irn_link(irn);
462                 int is_phi = is_Phi(irn) && get_nodes_block(irn) == bl;
463                 int cst;
464
465                 assert(has_reg_class(si, irn));
466                 assert(is_Phi(irn) || is_live_in(bl, irn));
467
468                 /* Deprecated: Can be done with the first uses map */
469                 if(is_phi)
470                         lr->use_head->closest_use = lr;
471
472                 /*
473                  * Remind the liverange of the first use of a live (or phi) in the
474                  * current block.
475                  */
476                 add_first_use(si, bl, irn, lr);
477
478                 for(i = 0; i < n_preds; ++i) {
479                         ir_node *pred_bl     = get_Block_cfgpred_block(bl, i);
480                         ir_node *end_node    = is_phi ? get_irn_n(irn, i) : irn;
481                         live_range_t *op_lr  = get_live_range(si, end_node, pred_bl, -1);
482                         edge_reload_t *edge  = obstack_alloc(si->obst, sizeof(edge[0]));
483
484                         ir_snprintf(buf, sizeof(buf), "edge_%N_%N_%N_%N", bl, pred_bl, end_node, op_lr->irn);
485                         edge->in_mem_var = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD);
486                         edge->bl         = bl;
487                         edge->irn        = end_node;
488                         edge->pos        = i;
489
490                         ir_snprintf(buf, sizeof(buf), "cedge_%N_%N_%N_%N", bl, pred_bl, end_node, op_lr->irn);
491                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
492                         lpp_set_factor_fast(si->lpp, cst, op_lr->in_mem_var, 1.0);
493                         lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, -1.0);
494                         lpp_set_factor_fast(si->lpp, cst, edge->in_mem_var, -1.0);
495                 }
496         }
497
498 end:
499         del_pset(live);
500 }
501
502 /**
503  * Add the costs for a store.
504  *
505  * If one of the uses is from memory, add additional costs for the
506  * spill.
507  *
508  * m_1 + ... + m_n - M * s <= 0
509  *
510  * @param si The ILP spilling environment.
511  */
512 static void add_store_costs(spill_ilp_t *si)
513 {
514         char buf[64];
515         irn_use_head_t *uh;
516         double costs = si->enable_store ? COST_STORE : 0.0;
517
518         for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
519                 int cst;
520                 live_range_t *lr;
521
522                 ir_snprintf(buf, sizeof(buf), "cs_%N", uh->irn);
523                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
524
525                 ir_snprintf(buf, sizeof(buf), "s_%N", uh->irn);
526                 uh->spill_var = lpp_add_var(si->lpp, buf, lpp_binary, costs);
527                 lpp_set_factor_fast(si->lpp, cst, uh->spill_var, -BIGM);
528
529     list_for_each_entry(live_range_t, lr, &uh->head, list)
530                         lpp_set_factor_fast(si->lpp, cst, lr->in_mem_var, 1.0);
531         }
532 }
533
/** Treat an ILP solution value as zero if it lies within a small
 *  tolerance band around 0 (solvers return approximate binaries). */
static INLINE int is_zero(double x)
{
	const double eps = 0.00001;

	return -eps < x && x < eps;
}
538
539 static int is_spilled(const spill_ilp_t *si, const live_range_t *lr)
540 {
541         return !is_zero(lpp_get_var_sol(si->lpp, lr->in_mem_var));
542 }
543
544 static int is_mem_phi(const ir_node *phi, void *data)
545 {
546         spill_ilp_t *si = data;
547         return is_spilled(si, get_use_head(si, phi)->closest_use);
548 }
549
550 static void writeback_results(spill_ilp_t *si)
551 {
552         irn_use_head_t *uh;
553         edge_reload_t *edge;
554
555         /* Look at each node and examine the usages. */
556         for(uh = set_first(si->irn_use_heads); uh; uh = set_next(si->irn_use_heads)) {
557                 live_range_t *lr;
558
559                 /* Go through all live ranges of the node. */
560                 list_for_each_entry(live_range_t, lr, &uh->head, list) {
561                         if(is_spilled(si, lr) && !is_end_of_block_use(lr))
562                                 be_add_reload(si->senv, lr->irn, lr->user);
563                 }
564         }
565
566         for(edge = si->edges; edge; edge = edge->next) {
567                 if(!is_zero(edge->in_mem_var))
568                         be_add_reload_on_edge(si->senv, edge->irn, edge->bl, edge->pos);
569         }
570
571         be_insert_spills_reloads(si->senv, NULL);
572 }
573
574 void be_spill_ilp(const be_main_session_env_t *session_env,
575     const arch_register_class_t *cls)
576 {
577         char problem_name[256];
578         struct obstack obst;
579         spill_ilp_t si;
580
581         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", session_env->irg, cls->name);
582
583         obstack_init(&obst);
584         si.session         = session_env;
585         si.obst            = &obst;
586         si.dbg             = firm_dbg_register("be.ra.spillilp");
587         si.senv            = be_new_spill_env(si.dbg, session_env, cls, is_mem_phi, &si);
588         si.cls             = cls;
589         si.lpp             = new_lpp(problem_name, lpp_minimize);
590         si.irn_use_heads   = new_set(cmp_irn_use_head, 4096);
591         si.live_ranges     = new_set(cmp_live_range, 16384);
592         si.first_uses      = new_set(cmp_first_use, 4096);
593         si.edges           = NULL;
594         si.enable_remat    = 0;
595         si.enable_store    = 0;
596
597         firm_dbg_set_mask(si.dbg, DBG_LEVEL);
598         irg_block_walk_graph(session_env->irg, process_block, NULL, &si);
599         if(si.enable_store)
600                 add_store_costs(&si);
601
602 #ifdef DUMP_ILP
603         {
604                 FILE *f;
605                 char buf[256];
606
607                 ir_snprintf(buf, sizeof(buf), "spill-%s.ilp", problem_name);
608                 if((f = fopen(buf, "wt")) != NULL) {
609                         lpp_dump_plain(si.lpp, f);
610                         fclose(f);
611                 }
612         }
613 #endif
614
615         DBG((si.dbg, LEVEL_1, "%F\n", session_env->irg));
616 #ifdef SOLVE_LOCAL
617         lpp_solve_cplex(si.lpp);
618 #else
619         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
620 #endif
621         assert(lpp_is_sol_valid(si.lpp) && "solution of ILP must be valid");
622
623         DBG((si.dbg, LEVEL_1, "\tnodes: %d, vars: %d, csts: %d\n",
624                                 set_count(si.irn_use_heads), si.lpp->var_next, si.lpp->cst_next));
625         DBG((si.dbg, LEVEL_1, "\titerations: %d, solution time: %g\n",
626                                 si.lpp->iterations, si.lpp->sol_time));
627
628 #ifdef DUMP_SOLUTION
629         {
630                 FILE *f;
631                 char buf[256];
632
633                 ir_snprintf(buf, sizeof(buf), "spill-%s.sol", problem_name);
634                 if((f = fopen(buf, "wt")) != NULL) {
635                         int i;
636                         for(i = 0; i < si.lpp->var_next; ++i) {
637                                 lpp_name_t *name = si.lpp->vars[i];
638                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
639                         }
640                         fclose(f);
641                 }
642         }
643 #endif
644
645         writeback_results(&si);
646
647 #ifdef DUMP_STATS
648         {
649                 FILE *f;
650
651                 ir_snprintf(buf, sizeof(buf), "%s-spill.stat", problem_name);
652                 if((f = fopen(buf, "wt")) != NULL) {
653                         fprintf(f, "%20s: %d\n", "nodes", set_count(si.irn_use_heads));
654                         fprintf(f, "%20s: %d\n", "vars", si.lpp->var_next);
655                         fprintf(f, "%20s: %d\n", "csts", si.lpp->cst_next);
656                         fprintf(f, "%20s: %f\n", "sol time", si.lpp->sol_time);
657                         fprintf(f, "%20s: %d\n", "spills", si->stats.n_spills);
658                         fprintf(f, "%20s: %d\n", "reloads", si->stats.n_reloads);
659                         fprintf(f, "%20s: %d\n", "remats", si->stats.n_remat);
660                         fclose(f);
661                 }
662         }
663 #endif
664
665         del_set(si.irn_use_heads);
666         del_set(si.live_ranges);
667         free_lpp(si.lpp);
668         obstack_free(&obst, NULL);
669 }