get frame from irg instead of from spill (the spill could be a PhiM!)
[libfirm] / ir / be / bespillremat.c
1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35 #include "irvrfy.h"
36
37 #include <lpp/lpp.h>
38 #include <lpp/mps.h>
39 #include <lpp/lpp_net.h>
40 #include <lpp/lpp_cplex.h>
41 //#include <lc_pset.h>
42 #include <libcore/lc_bitset.h>
43
44 #include "be_t.h"
45 #include "belive_t.h"
46 #include "besched_t.h"
47 #include "beirgmod.h"
48 #include "bearch.h"
49 #include "benode_t.h"
50 #include "beutil.h"
51 #include "bespillremat.h"
52 #include "bespill.h"
53 #include "bepressurestat.h"
54
55 #include "bechordal_t.h"
56
57 #ifdef WITH_LIBCORE
58 #include <libcore/lc_opts.h>
59 #include <libcore/lc_opts_enum.h>
60 #endif /* WITH_LIBCORE */
61
62 #define DUMP_PROBLEM       1
63 #define DUMP_MPS           2
64 #define DUMP_SOLUTION      4
65
66 #define KEEPALIVE_REMATS   1
67 #define KEEPALIVE_SPILLS   2
68 #define KEEPALIVE_RELOADS  4
69
70 #define VERIFY_MEMINTERF   1
71 #define VERIFY_DOMINANCE   2
72
73 #define REMATS_NONE        0
74 #define REMATS_BRIGGS      1
75 #define REMATS_NOINVERSE   2
76 #define REMATS_ALL         3
77
78 static int opt_dump_flags   = 0;
79 static int opt_log = 0;
80 static int opt_keep_alive   = 0;
81 static int opt_goodwin = 1;
82 static int opt_memcopies = 1;
83 static int opt_memoperands = 1;
84 static int opt_verify = VERIFY_MEMINTERF;
85 static int opt_remats = REMATS_ALL;
86 static int opt_repair_schedule = 0;
87 static int opt_no_enlarge_liveness = 0;
88 static int opt_remat_while_live = 1;
89 static int opt_timeout = 300;
90 static double opt_cost_reload = 8.0;
91 static double opt_cost_memoperand =  7.0;
92 static double opt_cost_spill =  50.0;
93 static double opt_cost_remat =  1.0;
94
95
96 #ifdef WITH_LIBCORE
97 static const lc_opt_enum_mask_items_t dump_items[] = {
98         { "problem",  DUMP_PROBLEM  },
99         { "mps",      DUMP_MPS      },
100         { "solution", DUMP_SOLUTION },
101         { NULL,       0 }
102 };
103
104 static lc_opt_enum_mask_var_t dump_var = {
105         &opt_dump_flags, dump_items
106 };
107
108 static const lc_opt_enum_mask_items_t keepalive_items[] = {
109         { "remats",  KEEPALIVE_REMATS  },
110         { "spills",  KEEPALIVE_SPILLS  },
111         { "reloads", KEEPALIVE_RELOADS },
112         { NULL,      0 }
113 };
114
115 static lc_opt_enum_mask_var_t keep_alive_var = {
116         &opt_keep_alive, keepalive_items
117 };
118
119 static const lc_opt_enum_mask_items_t remats_items[] = {
120         { "none",      REMATS_NONE      },
121         { "briggs",    REMATS_BRIGGS    },
122         { "noinverse", REMATS_NOINVERSE },
123         { "all",       REMATS_ALL       },
124         { NULL,        0 }
125 };
126
127 static lc_opt_enum_mask_var_t remats_var = {
128         &opt_remats, remats_items
129 };
130
131 static const lc_opt_table_entry_t options[] = {
132         LC_OPT_ENT_ENUM_MASK("keepalive", "keep alive remats, spills or reloads",                   &keep_alive_var),
133
134         LC_OPT_ENT_BOOL     ("goodwin",  "activate goodwin reduction",                              &opt_goodwin),
135         LC_OPT_ENT_BOOL     ("memcopies",  "activate memcopy handling",                             &opt_memcopies),
136         LC_OPT_ENT_BOOL     ("memoperands",  "activate memoperands",                                &opt_memoperands),
137         LC_OPT_ENT_ENUM_INT ("remats",  "type of remats to insert (none, briggs, noinverse or all)",&remats_var),
138         LC_OPT_ENT_BOOL     ("repair_schedule",  "repair the schedule by rematting once used nodes",&opt_repair_schedule),
139         LC_OPT_ENT_BOOL     ("no_enlarge_liveness", "do not enlarge liveness of operands of remats",&opt_no_enlarge_liveness),
140         LC_OPT_ENT_BOOL     ("remat_while_live",  "remat only values that can be used by real ops", &opt_remat_while_live),
141
142         LC_OPT_ENT_ENUM_MASK("dump", "dump problem, mps or solution",                               &dump_var),
143         LC_OPT_ENT_BOOL     ("log",  "activate the lpp log",                                        &opt_log),
144         LC_OPT_ENT_INT      ("timeout",  "ILP solver timeout",                                      &opt_timeout),
145
146         LC_OPT_ENT_DBL      ("cost_reload",  "cost of a reload",                                    &opt_cost_reload),
147         LC_OPT_ENT_DBL      ("cost_memoperand",  "cost of a memory operand",                        &opt_cost_memoperand),
148         LC_OPT_ENT_DBL      ("cost_spill",  "cost of a spill instruction",                          &opt_cost_spill),
149         LC_OPT_ENT_DBL      ("cost_remat",  "cost of a rematerialization",                          &opt_cost_remat),
150         { NULL }
151 };
152
153 void be_spill_remat_register_options(lc_opt_entry_t *grp)
154 {
155         lc_opt_entry_t *my_grp = lc_opt_get_grp(grp, "remat");
156         lc_opt_add_table(my_grp, options);
157 }
158 #endif
159
160
161 //#define EXECFREQ_LOOPDEPH   /* compute execution frequency from loop depth only */
162 //#define SCHEDULE_PHIM   /* insert phim nodes into schedule */
163
164 #define  SOLVE
165 //#define  SOLVE_LOCAL
166 #define LPP_SERVER "i44pc52"
167 #define LPP_SOLVER "cplex"
168
169
170 #define MAX_PATHS      INT_MAX
171 #define ILP_UNDEF               -1
172
173 typedef struct _spill_ilp_t {
174         const arch_register_class_t  *cls;
175         int                           n_regs;
176         const be_chordal_env_t       *chordal_env;
177         be_lv_t                      *lv;
178         lpp_t                        *lpp;
179         struct obstack               *obst;
180         set                          *remat_info;
181         pset                         *all_possible_remats;
182         pset                         *inverse_ops;
183         ir_node                      *keep;
184         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
185         pset                         *spills;
186         set                          *interferences;
187         ir_node                      *m_unknown;
188         set                          *memoperands;
189         DEBUG_ONLY(firm_dbg_module_t * dbg);
190 } spill_ilp_t;
191
192 typedef int ilp_var_t;
193 typedef int ilp_cst_t;
194
195 typedef struct _spill_bb_t {
196         set      *ilp;
197         set      *reloads;
198 } spill_bb_t;
199
200 typedef struct _remat_t {
201         const ir_node        *op;      /**< for copy_irn */
202         const ir_node        *value;   /**< the value which is being recomputed by this remat */
203         const ir_node        *proj;    /**< not NULL if the above op produces a tuple */
204         int                   cost;    /**< cost of this remat */
205         int                   inverse; /**< nonzero if this is an inverse remat */
206 } remat_t;
207
208 /**
209  * Data attached to each IR node. For remats this contains the ilp_var of
210  * the remat; for normal ops it contains the ilp_vars for reloading each
211  * operand.
212  */
213 typedef struct _op_t {
214         int             is_remat;
215         union {
216                 struct {
217                         ilp_var_t       ilp;
218                         const remat_t  *remat; /**< the remat this op belongs to */
219                         int             pre; /**< 1, if this is a pressure-increasing remat */
220                 } remat;
221                 struct {
222                         ilp_var_t       ilp;
223                         ir_node        *op; /**< the operation this live range belongs to */
224                         union {
225                                 ilp_var_t      *reloads;
226                                 ilp_var_t      *copies;
227                         } args;
228                 } live_range;
229         } attr;
230 } op_t;
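/*
 * The op_t is stored in the node's link field.  A typical access pattern,
 * as used e.g. by next_post_remat()/next_pre_remat() below, is:
 *
 *   op_t *op = get_irn_link(irn);
 *   if(op->is_remat && !op->attr.remat.pre) { ... handle a post remat ... }
 */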
231
232 typedef struct _defs_t {
233         const ir_node   *value;
234         ir_node         *spills;  /**< points to the first spill for this value (linked by link field) */
235         ir_node         *remats;  /**< points to the first definition for this value (linked by link field) */
236 } defs_t;
237
238 typedef struct _remat_info_t {
239         const ir_node       *irn; /**< the irn to which these remats belong */
240         pset                *remats; /**< possible remats for this value */
241         pset                *remats_by_operand; /**< remats with this value as operand */
242 } remat_info_t;
243
244 typedef struct _keyval_t {
245         const void          *key;
246         const void          *val;
247 } keyval_t;
248
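/**
 * ILP variables describing one value in one block (intended meaning, as
 * suggested by the variable names used in luke_endwalker() below):
 * reg_in/reg_out  - value is held in a register at block entry/exit,
 * mem_in/mem_out  - value is available in memory at block entry/exit,
 * spill           - a spill of the value is placed inside this block.
 */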
249 typedef struct _spill_t {
250         ir_node            *irn;
251         ilp_var_t           reg_in;
252         ilp_var_t           mem_in;
253         ilp_var_t           reg_out;
254         ilp_var_t           mem_out;
255         ilp_var_t           spill;
256 } spill_t;
257
258 typedef struct _memoperand_t {
259         ir_node             *irn; /**< the irn */
260         unsigned int         pos; /**< the position of the argument */
261         ilp_var_t            ilp; /**< the ilp var for this memory operand */
262 } memoperand_t;
263
264 static INLINE int
265 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
266 {
267         return chordal_has_class(si->chordal_env, irn);
268 }
269
270 #if 0
271 static int
272 cmp_remat(const void *a, const void *b)
273 {
274         const keyval_t *p = a;
275         const keyval_t *q = b;
276         const remat_t  *r = p->val;
277         const remat_t  *s = q->val;
278
279         assert(r && s);
280
281         return !(r == s || r->op == s->op);
282 }
283 #endif
284 static int
285 cmp_remat(const void *a, const void *b)
286 {
287         const remat_t  *r = a;
288         const remat_t  *s = b;
289
290         return !(r == s || r->op == s->op);
291 }
292
293 static int
294 cmp_spill(const void *a, const void *b, size_t size)
295 {
296         const spill_t *p = a;
297         const spill_t *q = b;
298
299 //      return !(p->irn == q->irn && p->bb == q->bb);
300         return !(p->irn == q->irn);
301 }
302
303 static int
304 cmp_memoperands(const void *a, const void *b, size_t size)
305 {
306         const memoperand_t *p = a;
307         const memoperand_t *q = b;
308
309         return !(p->irn == q->irn && p->pos == q->pos);
310 }
311
312 static keyval_t *
313 set_find_keyval(set * set, const void * key)
314 {
315         keyval_t     query;
316
317         query.key = key;
318         return set_find(set, &query, sizeof(query), HASH_PTR(key));
319 }
320
321 static keyval_t *
322 set_insert_keyval(set * set, void * key, void * val)
323 {
324         keyval_t     query;
325
326         query.key = key;
327         query.val = val;
328         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
329 }
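/*
 * keyval sets map IR nodes to ILP variables, e.g. (see luke_endwalker()
 * below):
 *
 *   set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
 *
 * and are queried again via set_find_keyval(spill_bb->reloads, irn).
 */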
330
331 static defs_t *
332 set_find_def(set * set, const ir_node * value)
333 {
334         defs_t     query;
335
336         query.value = value;
337         return set_find(set, &query, sizeof(query), HASH_PTR(value));
338 }
339
340 static defs_t *
341 set_insert_def(set * set, const ir_node * value)
342 {
343         defs_t     query;
344
345         query.value = value;
346         query.spills = NULL;
347         query.remats = NULL;
348         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
349 }
350
351 static memoperand_t *
352 set_insert_memoperand(set * set, ir_node * irn, unsigned int pos, ilp_var_t ilp)
353 {
354         memoperand_t     query;
355
356         query.irn = irn;
357         query.pos = pos;
358         query.ilp = ilp;
359         return set_insert(set, &query, sizeof(query), HASH_PTR(irn)+pos);
360 }
361
362 static memoperand_t *
363 set_find_memoperand(set * set, const ir_node * irn, unsigned int pos)
364 {
365         memoperand_t     query;
366
367         query.irn = (ir_node*)irn;
368         query.pos = pos;
369         return set_find(set, &query, sizeof(query), HASH_PTR(irn)+pos);
370 }
371
372
373 static spill_t *
374 set_find_spill(set * set, const ir_node * value)
375 {
376         spill_t     query;
377
378         query.irn = (ir_node*)value;
379         return set_find(set, &query, sizeof(query), HASH_PTR(value));
380 }
381
382 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
383 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
384 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
385 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
386 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
387
388 static int
389 cmp_remat_info(const void *a, const void *b, size_t size)
390 {
391         const remat_info_t *p = a;
392         const remat_info_t *q = b;
393
394         return !(p->irn == q->irn);
395 }
396
397 static int
398 cmp_defs(const void *a, const void *b, size_t size)
399 {
400         const defs_t *p = a;
401         const defs_t *q = b;
402
403         return !(p->value == q->value);
404 }
405
406 static int
407 cmp_keyval(const void *a, const void *b, size_t size)
408 {
409         const keyval_t *p = a;
410         const keyval_t *q = b;
411
412         return !(p->key == q->key);
413 }
414
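/**
 * Returns the execution frequency of the block of @p irn, plus a small FUDGE
 * term so that even never executed code keeps a nonzero weight in the ILP
 * objective.  With EXECFREQ_LOOPDEPH the frequency is estimated as
 * 10^loop_depth instead of using the execfreq analysis.
 */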
415 static double
416 execution_frequency(const spill_ilp_t *si, const ir_node * irn)
417 {
418 #define FUDGE 0.001
419 #ifndef EXECFREQ_LOOPDEPH
420         return get_block_execfreq(si->chordal_env->exec_freq, get_block(irn)) + FUDGE;
421 #else
422         if(is_Block(irn))
423                 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
424         else
425                 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
426 #endif
427 }
428
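/**
 * Returns the cost of @p irn: the configured spill/reload costs for Spill and
 * Reload nodes, the backend's estimated cost for everything else.
 */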
429 static double
430 get_cost(const spill_ilp_t * si, const ir_node * irn)
431 {
432         if(be_is_Spill(irn)) {
433                 return opt_cost_spill;
434         } else if(be_is_Reload(irn)){
435                 return opt_cost_reload;
436         } else {
437                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
438         }
439 }
440
441 /**
442  * Checks whether the node and its operands have suitable reg classes
443  */
444 static INLINE int
445 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
446 {
447         int               n;
448         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
449         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
450
451 #if 0
452         if(!remat)
453                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
454 #endif
455
456         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
457                 ir_node        *op = get_irn_n(irn, n);
458                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
459
460 //              if(!remat)
461 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
462         }
463
464         return remat;
465 }
466
467 /**
468  * Try to create a remat from @p op with destination value @p dest_value
469  */
470 static INLINE remat_t *
471 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
472 {
473         remat_t  *remat = NULL;
474
475 //      if(!mode_is_datab(get_irn_mode(dest_value)))
476 //              return NULL;
477
478         if(dest_value == op) {
479                 const ir_node *proj = NULL;
480
481                 if(is_Proj(dest_value)) {
482                         op = get_Proj_pred(op);
483                         proj = dest_value;
484                 }
485
486                 if(!is_rematerializable(si, op))
487                         return NULL;
488
489                 remat = obstack_alloc(si->obst, sizeof(*remat));
490                 remat->op = op;
491                 remat->cost = get_cost(si, op);
492                 remat->value = dest_value;
493                 remat->proj = proj;
494                 remat->inverse = 0;
495         } else {
496                 arch_inverse_t     inverse;
497                 int                n;
498
499                 /* get the index of the operand we want to retrieve by the inverse op */
500                 for (n = get_irn_arity(op)-1; n>=0; --n) {
501                         ir_node        *arg = get_irn_n(op, n);
502
503                         if(arg == dest_value) break;
504                 }
505                 if(n<0) return NULL;
506
507                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
508
509                 /* else ask the backend to give an inverse op */
510                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
511                         int   i;
512
513                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
514
515                         assert(inverse.n > 0 && "inverse op should have at least one node");
516
517                         for(i=inverse.n-1; i>=0; --i) {
518                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
519                         }
520
521                         if(inverse.n <= 2) {
522                                 remat = obstack_alloc(si->obst, sizeof(*remat));
523                                 remat->op = inverse.nodes[0];
524                                 remat->cost = inverse.costs;
525                                 remat->value = dest_value;
526                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
527                                 remat->inverse = 1;
528
529                                 assert(remat->proj == NULL || is_Proj(remat->proj));
530                         } else {
531                                 assert(0 && "I can not handle remats with more than 2 nodes");
532                         }
533                 }
534         }
535
536         if(remat) {
537                 if(remat->proj) {
538                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
539                 } else {
540                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
541                 }
542         }
543         return remat;
544 }
545
546
547 static INLINE void
548 add_remat(const spill_ilp_t * si, const remat_t * remat)
549 {
550         remat_info_t    *remat_info,
551                      query;
552         int              n;
553
554         assert(remat->op);
555         assert(remat->value);
556
557         query.irn = remat->value;
558         query.remats = NULL;
559         query.remats_by_operand = NULL;
560         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
561
562         if(remat_info->remats == NULL) {
563                 remat_info->remats = new_pset(cmp_remat, 4096);
564         }
565         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
566
567         /* insert the remat into the remats_by_operand set of each argument of the remat op */
568         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
569                 ir_node        *arg = get_irn_n(remat->op, n);
570
571                 query.irn = arg;
572                 query.remats = NULL;
573                 query.remats_by_operand = NULL;
574                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
575
576                 if(remat_info->remats_by_operand == NULL) {
577                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
578                 }
579                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
580         }
581 }
582
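/**
 * Counts the users of @p irn which are not part of an inverse remat.
 */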
583 static int
584 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
585 {
586         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
587         int                i = 0;
588
589         while(edge) {
590                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
591                         ++i;
592                 }
593                 edge = get_irn_out_edge_next(irn, edge);
594         }
595
596         return i;
597 }
598
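/**
 * Counts the arguments of @p irn (or of its Proj predecessor) which are in
 * the register class under consideration.
 */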
599 static int
600 get_irn_n_nonignore_args(const spill_ilp_t * si, const ir_node * irn)
601 {
602         int n;
603         int ret = 0;
604
605         if(is_Proj(irn))
606                 irn = get_Proj_pred(irn);
607
608         for(n=get_irn_arity(irn)-1; n>=0; --n) {
609                 const ir_node  *arg = get_irn_n(irn, n);
610
611                 if(has_reg_class(si, arg)) ++ret;
612         }
613
614         return ret;
615 }
616
617 static INLINE void
618 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
619 {
620         int      n;
621         remat_t *remat;
622
623         if( has_reg_class(si, op)
624         && (opt_repair_schedule || get_irn_n_nonremat_edges(si, op) > 1)
625         && (opt_remats !=  REMATS_BRIGGS || get_irn_n_nonignore_args(si, op) == 0)
626         ) {
627                 remat = get_remat_from_op(si, op, op);
628                 if(remat) {
629                         add_remat(si, remat);
630                 }
631         }
632
633         if(opt_remats == REMATS_ALL) {
634                 /* additionally try to get an inverse remat for each argument,
635                    i.e. call get_remat_from_op(si, arg, op) for every arg of op */
636                 for (n = get_irn_arity(op)-1; n>=0; --n) {
637                         ir_node        *arg = get_irn_n(op, n);
638
639                         if(has_reg_class(si, arg)) {
640                                 /* try to get an inverse remat */
641                                 remat = get_remat_from_op(si, arg, op);
642                                 if(remat) {
643                                         add_remat(si, remat);
644                                 }
645                         }
646                 }
647         }
648 }
649
650 static INLINE int
651 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
652 {
653         ir_node *block;
654         ir_node *def_block = get_nodes_block(val);
655         int      ret;
656
657         if(val == pos)
658                 return 0;
659
660         /* if pos is at end of a basic block */
661         if(is_Block(pos)) {
662                 ret = (pos == def_block || block_dominates(def_block, pos));
663 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
664                 return ret;
665         }
666
667         /* else if this is a normal operation */
668         block = get_nodes_block(pos);
669         if(block == def_block) {
670                 if(!sched_is_scheduled(val)) return 1;
671
672                 ret = sched_comes_after(val, pos);
673 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
674                 return ret;
675         }
676
677         ret = block_dominates(def_block, block);
678 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
679         return ret;
680 }
681
682 static INLINE ir_node *
683 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
684 {
685         return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
686 }
687
688 /**
689  * Returns first non-Phi node of block @p bb
690  */
691 static INLINE ir_node *
692 sched_block_first_nonphi(const ir_node * bb)
693 {
694         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
695 }
696
697 static int
698 sched_skip_proj_predicator(const ir_node * irn, void * data)
699 {
700         return (is_Proj(irn));
701 }
702
703 static INLINE ir_node *
704 sched_next_nonproj(const ir_node * irn, int forward)
705 {
706         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
707 }
708
709 /**
710  * Returns next operation node (non-Proj) after @p irn
711  * or the basic block of this node
712  */
713 static INLINE ir_node *
714 sched_next_op(const ir_node * irn)
715 {
716         ir_node *next = sched_next(irn);
717
718         if(is_Block(next))
719                 return next;
720
721         return sched_next_nonproj(next, 1);
722 }
723
724 /**
725  * Returns previous operation node (non-Proj) before @p irn
726  * or the basic block of this node
727  */
728 static INLINE ir_node *
729 sched_prev_op(const ir_node * irn)
730 {
731         ir_node *prev = sched_prev(irn);
732
733         if(is_Block(prev))
734                 return prev;
735
736         return sched_next_nonproj(prev, 0);
737 }
738
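/**
 * Schedules @p irn right after @p insert (and its Projs); if @p insert is a
 * Block, @p irn is placed at the beginning of the block after its Phis.
 */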
739 static void
740 sched_put_after(ir_node * insert, ir_node * irn)
741 {
742         if(is_Block(insert)) {
743                 insert = sched_block_first_nonphi(insert);
744         } else {
745                 insert = sched_next_op(insert);
746         }
747         sched_add_before(insert, irn);
748 }
749
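/**
 * Schedules @p irn right before @p insert; if @p insert is a Block, @p irn is
 * placed at the end of the block before its control flow nodes.
 */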
750 static void
751 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
752 {
753         if(is_Block(insert)) {
754                 insert = sched_block_last_noncf(si, insert);
755         } else {
756                 insert = sched_next_nonproj(insert, 0);
757                 insert = sched_prev(insert);
758         }
759         sched_add_after(insert, irn);
760 }
761
762 /**
763  * Tells you whether a @p remat can be placed before the irn @p pos
764  */
765 static INLINE int
766 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
767 {
768         const ir_node   *op = remat->op;
769         const ir_node   *prev;
770         int        n,
771                            res = 1;
772
773         if(is_Block(pos)) {
774                 prev = sched_block_last_noncf(si, pos);
775                 prev = sched_next_nonproj(prev, 0);
776         } else {
777                 prev = sched_prev_op(pos);
778         }
779         /* do not remat if the rematted value is defined immediately before this op */
780         if(prev == remat->op) {
781                 return 0;
782         }
783
784 #if 0
785         /* this should be just fine, the following OP will be using this value, right? */
786
787         /* only remat AFTER the real definition of a value (?) */
788         if(!value_is_defined_before(si, pos, remat->value)) {
789 //              ir_fprintf(stderr, "error(not defined)");
790                 return 0;
791         }
792 #endif
793
794         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
795                 const ir_node   *arg = get_irn_n(op, n);
796
797                 if(opt_no_enlarge_liveness) {
798                         if(has_reg_class(si, arg) && live) {
799                                 res &= pset_find_ptr((pset*)live, arg)?1:0;
800                         } else {
801                                 res &= value_is_defined_before(si, pos, arg);
802                         }
803                 } else {
804                         res &= value_is_defined_before(si, pos, arg);
805                 }
806         }
807
808         return res;
809 }
810
811 /**
812  * Tells you whether a @p remat can be placed after the irn @p pos
813  */
814 static INLINE int
815 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
816 {
817         if(is_Block(pos)) {
818                 pos = sched_block_first_nonphi(pos);
819         } else {
820                 pos = sched_next_op(pos);
821         }
822
823         /* only remat AFTER the real definition of a value (?) */
824         if(!value_is_defined_before(si, pos, remat->value)) {
825                 return 0;
826         }
827
828         return can_remat_before(si, remat, pos, live);
829 }
830
831 /**
832  * Collect potentially rematerializable OPs
833  */
834 static void
835 walker_remat_collector(ir_node * irn, void * data)
836 {
837         spill_ilp_t    *si = data;
838
839         if(!is_Block(irn) && !is_Phi(irn)) {
840                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
841                 get_remats_from_op(si, irn);
842         }
843 }
844
845 /**
846  * Inserts a copy of @p irn before @p pos
847  */
848 static ir_node *
849 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
850 {
851         ir_node     *bb;
852         ir_node     *copy;
853
854         bb = is_Block(pos)?pos:get_nodes_block(pos);
855         copy = exact_copy(irn);
856
857         _set_phi_class(copy, NULL);
858         set_nodes_block(copy, bb);
859         sched_put_before(si, pos, copy);
860
861         return copy;
862 }
863
864 /**
865  * Inserts a copy of @p irn after @p pos
866  */
867 static ir_node *
868 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
869 {
870         ir_node     *bb;
871         ir_node     *copy;
872
873         bb = is_Block(pos)?pos:get_nodes_block(pos);
874         copy = exact_copy(irn);
875
876         _set_phi_class(copy, NULL);
877         set_nodes_block(copy, bb);
878         sched_put_after(pos, copy);
879
880         return copy;
881 }
882
883 static ir_node *
884 insert_remat_after(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
885 {
886         char     buf[256];
887
888         if(can_remat_after(si, remat, pos, live)) {
889                 ir_node         *copy,
890                                                 *proj_copy;
891                 op_t            *op;
892
893                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat2 %+F\n", remat->op));
894
895                 copy = insert_copy_after(si, remat->op, pos);
896
897                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
898                 op = obstack_alloc(si->obst, sizeof(*op));
899                 op->is_remat = 1;
900                 op->attr.remat.remat = remat;
901                 op->attr.remat.pre = 0;
902                 op->attr.remat.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos), 0.0);
903
904                 set_irn_link(copy, op);
905                 pset_insert_ptr(si->all_possible_remats, copy);
906                 if(remat->proj) {
907                         proj_copy = insert_copy_after(si, remat->proj, copy);
908                         set_irn_n(proj_copy, 0, copy);
909                         set_irn_link(proj_copy, op);
910                         pset_insert_ptr(si->all_possible_remats, proj_copy);
911                 } else {
912                         proj_copy = NULL;
913                 }
914
915                 return copy;
916         }
917
918         return NULL;
919 }
920
921 static ir_node *
922 insert_remat_before(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
923 {
924         char     buf[256];
925
926         if(can_remat_before(si, remat, pos, live)) {
927                 ir_node         *copy,
928                                                 *proj_copy;
929                 op_t            *op;
930
931                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
932
933                 copy = insert_copy_before(si, remat->op, pos);
934
935                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
936                 op = obstack_alloc(si->obst, sizeof(*op));
937                 op->is_remat = 1;
938                 op->attr.remat.remat = remat;
939                 op->attr.remat.pre = 1;
940                 op->attr.remat.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos), 0.0);
941
942                 set_irn_link(copy, op);
943                 pset_insert_ptr(si->all_possible_remats, copy);
944                 if(remat->proj) {
945                         proj_copy = insert_copy_after(si, remat->proj, copy);
946                         set_irn_n(proj_copy, 0, copy);
947                         set_irn_link(proj_copy, op);
948                         pset_insert_ptr(si->all_possible_remats, proj_copy);
949                 } else {
950                         proj_copy = NULL;
951                 }
952
953                 return copy;
954         }
955
956         return NULL;
957 }
958
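/**
 * Returns the number of control flow successors of @p block, capped at 2.
 */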
959 static int
960 get_block_n_succs(const ir_node *block) {
961         const ir_edge_t *edge;
962
963         assert(edges_activated(current_ir_graph));
964
965         edge = get_block_succ_first(block);
966         if (! edge)
967                 return 0;
968
969         edge = get_block_succ_next(block, edge);
970         return edge ? 2 : 1;
971 }
972
973 static int
974 is_start_block(const ir_node * bb)
975 {
976         return get_irg_start_block(get_irn_irg(bb)) == bb;
977 }
978
979 static int
980 is_before_frame(const ir_node * bb, const ir_node * irn)
981 {
982         const ir_node  *frame  = get_irg_frame(get_irn_irg(bb));
983
984         if(is_start_block(bb) && sched_get_time_step(frame) >= sched_get_time_step(irn))
985                 return 1;
986         else
987                 return 0;
988 }
989
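/**
 * With the Goodwin reduction, remats and reloads at the end of a block are
 * only considered if the block has exactly one control flow successor;
 * without it every block end is a candidate.
 */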
990 static int
991 is_merge_edge(const ir_node * bb)
992 {
993         if(is_start_block(bb))
994                 return 0;
995
996         if(opt_goodwin)
997                 return get_block_n_succs(bb) == 1;
998         else
999                 return 1;
1000 }
1001
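/**
 * With the Goodwin reduction, remats at the beginning of a block are only
 * considered if the block has exactly one control flow predecessor; without
 * it every block start is a candidate.
 */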
1002 static int
1003 is_diverge_edge(const ir_node * bb)
1004 {
1005         if(is_start_block(bb))
1006                 return 0;
1007
1008         if(opt_goodwin)
1009                 return get_Block_n_cfgpreds(bb) == 1;
1010         else
1011                 return 1;
1012 }
1013
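/**
 * Inserts a Copy into the register class under consideration for every Phi
 * argument which is not in this class and lets the Phi use the copy instead.
 */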
1014 static void
1015 walker_regclass_copy_insertor(ir_node * irn, void * data)
1016 {
1017         spill_ilp_t    *si = data;
1018
1019         if(is_Phi(irn) && has_reg_class(si, irn)) {
1020                 int n;
1021
1022                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
1023                         ir_node  *phi_arg = get_irn_n(irn, n);
1024                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
1025
1026                         if(!has_reg_class(si, phi_arg)) {
1027                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
1028                                 ir_node   *pos = sched_block_last_noncf(si, bb);
1029                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
1030
1031                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
1032                                 sched_add_after(pos, copy);
1033                                 set_irn_n(irn, n, copy);
1034
1035                                 op->is_remat = 0;
1036                                 op->attr.live_range.args.reloads = NULL;
1037                                 op->attr.live_range.ilp = ILP_UNDEF;
1038                                 set_irn_link(copy, op);
1039                         }
1040                 }
1041         }
1042 }
1043
1044
1045 /**
1046  * Insert (so far unused) remats into the irg to
1047  * recompute the potential liveness of all values
1048  */
1049 static void
1050 walker_remat_insertor(ir_node * bb, void * data)
1051 {
1052         spill_ilp_t    *si = data;
1053         spill_bb_t     *spill_bb;
1054         ir_node        *irn;
1055         int             n, i;
1056         pset           *live = pset_new_ptr_default();
1057
1058         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
1059
1060         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1061                 ir_node        *value = be_lv_get_irn(si->lv, bb, i);
1062
1063                 /* collect values live at the end of the block */
1064                 if (has_reg_class(si, value)) {
1065                         pset_insert_ptr(live, value);
1066                 }
1067         }
1068
1069         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
1070         set_irn_link(bb, spill_bb);
1071
1072         irn = sched_last(bb);
1073         while(!sched_is_end(irn)) {
1074                 ir_node   *next;
1075                 op_t      *op;
1076                 pset      *args;
1077                 ir_node   *arg;
1078                 pset      *remat_args;
1079
1080                 next = sched_prev(irn);
1081
1082                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
1083
1084                 if(is_Phi(irn) || is_Proj(irn)) {
1085                         op_t      *op;
1086
1087                         if(has_reg_class(si, irn)) {
1088                                 pset_remove_ptr(live, irn);
1089                         }
1090
1091                         op = obstack_alloc(si->obst, sizeof(*op));
1092                         op->is_remat = 0;
1093                         op->attr.live_range.args.reloads = NULL;
1094                         op->attr.live_range.ilp = ILP_UNDEF;
1095                         set_irn_link(irn, op);
1096
1097                         irn = next;
1098                         continue;
1099                 }
1100
1101                 op = obstack_alloc(si->obst, sizeof(*op));
1102                 op->is_remat = 0;
1103                 op->attr.live_range.ilp = ILP_UNDEF;
1104                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
1105                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
1106                 set_irn_link(irn, op);
1107
1108                 args = pset_new_ptr_default();
1109
1110                 /* collect arguments of op */
1111                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
1112                         ir_node        *arg = get_irn_n(irn, n);
1113
1114                         pset_insert_ptr(args, arg);
1115                 }
1116
1117                 /* set args of op already live in epilog */
1118                 pset_foreach(args, arg) {
1119                         if(has_reg_class(si, arg)) {
1120                                 pset_insert_ptr(live, arg);
1121                         }
1122                 }
1123                 /* delete defined value from live set */
1124                 if(has_reg_class(si, irn)) {
1125                         pset_remove_ptr(live, irn);
1126                 }
1127
1128
1129                 remat_args = pset_new_ptr_default();
1130
1131                 /* insert all possible remats before irn */
1132                 pset_foreach(args, arg) {
1133                         remat_info_t   *remat_info,
1134                                                     query;
1135                         remat_t        *remat;
1136
1137                         /* continue if the operand has the wrong reg class
1138                          */
1139                         if(!has_reg_class(si, arg))
1140                                 continue;
1141
1142                         query.irn = arg;
1143                         query.remats = NULL;
1144                         query.remats_by_operand = NULL;
1145                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1146
1147                         if(!remat_info) {
1148                                 continue;
1149                         }
1150
1151                         if(remat_info->remats) {
1152                                 pset_foreach(remat_info->remats, remat) {
1153                                         ir_node  *remat_irn = NULL;
1154
1155                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
1156                                         if(opt_remat_while_live) {
1157                                                 if(pset_find_ptr(live, remat->value)) {
1158                                                         remat_irn = insert_remat_before(si, remat, irn, live);
1159                                                 }
1160                                         } else {
1161                                                 remat_irn = insert_remat_before(si, remat, irn, live);
1162                                         }
1163                                         if(remat_irn) {
1164                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1165                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1166
1167                                                         if(!has_reg_class(si, remat_arg)) continue;
1168
1169                                                         pset_insert_ptr(remat_args, remat_arg);
1170                                                 }
1171                                         }
1172                                 }
1173                         }
1174                 }
1175
1176                 /* now we add remat args to op's args because they could also die at this op */
1177                 pset_foreach(args,arg) {
1178                         if(pset_find_ptr(remat_args, arg)) {
1179                                 pset_remove_ptr(remat_args, arg);
1180                         }
1181                 }
1182                 pset_foreach(remat_args,arg) {
1183                         pset_insert_ptr(args, arg);
1184                 }
1185
1186                 /* insert all possible remats after irn */
1187                 pset_foreach(args, arg) {
1188                         remat_info_t   *remat_info,
1189                                                     query;
1190                         remat_t        *remat;
1191
1192                         /* continue if the operand has the wrong reg class */
1193                         if(!has_reg_class(si, arg))
1194                                 continue;
1195
1196                         query.irn = arg;
1197                         query.remats = NULL;
1198                         query.remats_by_operand = NULL;
1199                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1200
1201                         if(!remat_info) {
1202                                 continue;
1203                         }
1204
1205                         /* do not place post remats after jumps */
1206                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
1207
1208                         if(remat_info->remats_by_operand) {
1209                                 pset_foreach(remat_info->remats_by_operand, remat) {
1210                                         /* do not insert remats producing the same value as one of the operands */
1211                                         if(!pset_find_ptr(args, remat->value)) {
1212                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1213                                                 if(opt_remat_while_live) {
1214                                                         if(pset_find_ptr(live, remat->value)) {
1215                                                                 insert_remat_after(si, remat, irn, live);
1216                                                         }
1217                                                 } else {
1218                                                         insert_remat_after(si, remat, irn, live);
1219                                                 }
1220                                         }
1221                                 }
1222                         }
1223                 }
1224
1225                 del_pset(remat_args);
1226                 del_pset(args);
1227                 irn = next;
1228         }
1229
1230         /* add remats at end if successor has multiple predecessors */
1231         if(is_merge_edge(bb)) {
1232                 pset     *live_out = pset_new_ptr_default();
1233                 ir_node  *value;
1234
1235                 be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1236                         value = be_lv_get_irn(si->lv, bb, i);
1237
1238                         if (has_reg_class(si, value)) {
1239                                 pset_insert_ptr(live_out, value);
1240                         }
1241                 }
1242
1243                 /* add remats at end of block */
1244                 pset_foreach(live_out, value) {
1245                         remat_info_t   *remat_info,
1246                                                    query;
1247                         remat_t        *remat;
1248
1249                         query.irn = value;
1250                         query.remats = NULL;
1251                         query.remats_by_operand = NULL;
1252                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1253
1254                         if(remat_info && remat_info->remats) {
1255                                 pset_foreach(remat_info->remats, remat) {
1256                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1257
1258                                         insert_remat_before(si, remat, bb, live_out);
1259                                 }
1260                         }
1261                 }
1262                 del_pset(live_out);
1263         }
1264
1265         if(is_diverge_edge(bb)) {
1266                 pset     *live_in = pset_new_ptr_default();
1267                 ir_node  *value;
1268
1269                 be_lv_foreach(si->lv, bb, be_lv_state_in, i) {
1270                         value = be_lv_get_irn(si->lv, bb, i);
1271
1272                         if(has_reg_class(si, value)) {
1273                                 pset_insert_ptr(live_in, value);
1274                         }
1275                 }
1276                 sched_foreach(bb, value) {
1277                         if(!is_Phi(value)) break;
1278
1279                         if(has_reg_class(si, value)) {
1280                                 pset_insert_ptr(live_in, value);
1281                         }
1282                 }
1283
1284                 /* add remat2s at beginning of block */
1285                 pset_foreach(live_in, value) {
1286                         remat_info_t   *remat_info,
1287                                                    query;
1288                         remat_t        *remat;
1289
1290                         query.irn = value;
1291                         query.remats = NULL;
1292                         query.remats_by_operand = NULL;
1293                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1294
1295                         if(remat_info && remat_info->remats_by_operand) {
1296                                 pset_foreach(remat_info->remats_by_operand, remat) {
1297                                         DBG((si->dbg, LEVEL_4, "\t  considering remat2 %+F at beginning of block %+F\n", remat->op, bb));
1298
1299                                         /* put the remat here if all its args are available */
1300                                         insert_remat_after(si, remat, bb, live_in);
1301
1302                                 }
1303                         }
1304                 }
1305                 del_pset(live_in);
1306         }
1307 }
1308
1309 /**
1310  * Preparation of blocks' ends for Luke Blockwalker(tm)(R): creates the end-of-block ILP variables (reg_out, mem_out, spill, reload) and constraints
1311  */
1312 static void
1313 luke_endwalker(ir_node * bb, void * data)
1314 {
1315         spill_ilp_t    *si = (spill_ilp_t*)data;
1316         pset           *live;
1317         pset           *use_end;
1318         char            buf[256];
1319         ilp_cst_t       cst;
1320         ir_node        *irn;
1321         spill_bb_t     *spill_bb = get_irn_link(bb);
1322         int             i;
1323
1324
1325         live = pset_new_ptr_default();
1326         use_end = pset_new_ptr_default();
1327
1328         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1329                 irn = be_lv_get_irn(si->lv, bb, i);
1330                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1331                         op_t      *op;
1332
1333                         pset_insert_ptr(live, irn);
1334                         op = get_irn_link(irn);
1335                         assert(!op->is_remat);
1336                 }
1337         }
1338
1339         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1340         /* their reg_out must always be set */
1341         sched_foreach_reverse(bb, irn) {
1342                 int   n;
1343
1344                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1345
1346                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1347                         ir_node        *irn_arg = get_irn_n(irn, n);
1348
1349                         if(has_reg_class(si, irn_arg)) {
1350                                 pset_insert_ptr(use_end, irn_arg);
1351                         }
1352                 }
1353         }
1354
1355         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1356         //cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
1357         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1358
1359         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1360
1361         /* if this is a merge edge we can reload at the end of this block */
1362         if(is_merge_edge(bb)) {
1363                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1364         } else if(pset_count(use_end)){
1365                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1366         } else {
1367                 spill_bb->reloads = NULL;
1368         }
1369
1370         pset_foreach(live,irn) {
1371                 spill_t     query,
1372                                         *spill;
1373                 double      spill_cost;
1374                 int         default_spilled;
1375
1376
1377                 /* handle values used by control flow nodes later separately */
1378                 if(pset_find_ptr(use_end, irn)) continue;
1379
1380                 query.irn = irn;
1381                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1382
1383                 spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1384
1385                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1386                 spill->reg_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1387                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1388
1389                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1390                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1391
1392                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1393                 /* by default spill value right after definition */
1394                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1395                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1396
1397                 if(is_merge_edge(bb)) {
1398                         ilp_var_t   reload;
1399                         ilp_cst_t   rel_cst;
1400
1401                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1402                         reload = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), 1.0);
1403                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1404
1405                         /* reload <= mem_out */
1406                         rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1407                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1408                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1409                 }
1410
1411                 spill->reg_in = ILP_UNDEF;
1412                 spill->mem_in = ILP_UNDEF;
1413         }
1414
1415         pset_foreach(use_end,irn) {
1416                 spill_t     query,
1417                                         *spill;
1418                 double      spill_cost;
1419                 ilp_cst_t   end_use_req,
1420                                         rel_cst;
1421                 ilp_var_t   reload;
1422                 int         default_spilled;
1423
1424                 query.irn = irn;
1425                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1426
1427                 spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1428
1429                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1430                 spill->reg_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1431                 /* if irn is used at the end of the block, then it is live anyway */
1432                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1433
1434                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1435                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1436
1437                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1438                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1439                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1440
1441                 /* reload for use by control flow op */
1442                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1443                 reload = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), 1.0);
1444                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1445
1446                 /* reload <= mem_out */
1447                 rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1448                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1449                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1450
1451                 spill->reg_in = ILP_UNDEF;
1452                 spill->mem_in = ILP_UNDEF;
1453
1454                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1455                 end_use_req = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1);
1456                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1457         }
1458
1459         del_pset(live);
1460         del_pset(use_end);
1461 }
1462
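/**
 * Returns the op scheduled directly after @p irn (or the first non-Phi of the
 * block if @p irn is a Block) iff it is a post remat, NULL otherwise.
 */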
1463 static ir_node *
1464 next_post_remat(const ir_node * irn)
1465 {
1466         op_t      *op;
1467     ir_node   *next;
1468
1469         if(is_Block(irn)) {
1470                 next = sched_block_first_nonphi(irn);
1471         } else {
1472                 next = sched_next_op(irn);
1473         }
1474
1475         if(sched_is_end(next))
1476                 return NULL;
1477
1478         op = get_irn_link(next);
1479         if(op->is_remat && !op->attr.remat.pre) {
1480                 return next;
1481         }
1482
1483         return NULL;
1484 }
1485
1486
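/**
 * Returns the op scheduled directly before @p irn (or the last op before the
 * control flow nodes if @p irn is a Block) iff it is a pre remat, NULL
 * otherwise.
 */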
1487 static ir_node *
1488 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1489 {
1490         op_t      *op;
1491         ir_node   *ret;
1492
1493         if(is_Block(irn)) {
1494                 ret = sched_block_last_noncf(si, irn);
1495                 ret = sched_next(ret);
1496                 ret = sched_prev_op(ret);
1497         } else {
1498                 ret = sched_prev_op(irn);
1499         }
1500
1501         if(sched_is_end(ret) || is_Phi(ret))
1502                 return NULL;
1503
1504         op = (op_t*)get_irn_link(ret);
1505         if(op->is_remat && op->attr.remat.pre) {
1506                 return ret;
1507         }
1508
1509         return NULL;
1510 }
1511
1512 /**
1513  * Find a remat of value @p value in the epilog of @p pos
1514  */
1515 static ir_node *
1516 find_post_remat(const ir_node * value, const ir_node * pos)
1517 {
1518         while((pos = next_post_remat(pos)) != NULL) {
1519                 op_t   *op;
1520
1521                 op = get_irn_link(pos);
1522                 assert(op->is_remat && !op->attr.remat.pre);
1523
1524                 if(op->attr.remat.remat->value == value)
1525                         return (ir_node*)pos;
1526
1527 #if 0
1528         const ir_edge_t *edge;
1529                 foreach_out_edge(pos, edge) {
1530                         ir_node   *proj = get_edge_src_irn(edge);
1531                         assert(is_Proj(proj));
1532                 }
1533 #endif
1534
1535         }
1536
1537         return NULL;
1538 }
1539
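     /**
      * Get the spill info of @p irn in block @p bb; if it does not exist yet,
      * create it together with its mem_out and spill ILP variables
      */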
1540 static spill_t *
1541 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1542 {
1543         spill_bb_t  *spill_bb = get_irn_link(bb);
1544         spill_t     *spill,
1545                                  query;
1546         char         buf[256];
1547         int          default_spilled;
1548
1549         query.irn = irn;
1550         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1551         if(!spill) {
1552                 double   spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1553
1554                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1555
1556                 spill->reg_out = ILP_UNDEF;
1557                 spill->reg_in  = ILP_UNDEF;
1558                 spill->mem_in  = ILP_UNDEF;
1559
1560                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1561                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1562
1563                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1564                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1565                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1566         }
1567
1568         return spill;
1569 }
1570
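     /**
      * Collect into @p live all values of the current register class which are
      * live at the end of @p bb, including the arguments of its control flow ops
      */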
1571 static void
1572 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1573 {
1574         ir_node        *irn;
1575         int i;
1576
1577         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1578                 irn = be_lv_get_irn(si->lv, bb, i);
1579
1580                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1581                         pset_insert_ptr(live, irn);
1582                 }
1583         }
1584
1585         irn = sched_last(bb);
1586
1587         /* all values eaten by control flow operations are also live until the end of the block */
1588         sched_foreach_reverse(bb, irn) {
1589                 int  i;
1590
1591                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1592
1593                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1594                         ir_node *arg = get_irn_n(irn,i);
1595
1596                         if(has_reg_class(si, arg)) {
1597                                 pset_insert_ptr(live, arg);
1598                         }
1599                 }
1600         }
1601 }
1602
1603 /**
1604  *  Inserts ILP-constraints and variables for memory copying before the given position
1605  */
1606 static void
1607 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1608 {
1609         const ir_node    *succ;
1610         const ir_edge_t  *edge;
1611         spill_bb_t       *spill_bb = get_irn_link(block);
1612         ir_node          *phi;
1613         int               pos;
1614         ilp_cst_t         cst;
1615         ilp_var_t         copyreg;
1616         char              buf[256];
1617         ir_node          *tmp;
1618
1619
1620         assert(edges_activated(current_ir_graph));
1621
1622         edge = get_block_succ_first(block);
1623         if(!edge) return;
1624
1625         succ = edge->src;
1626         pos = edge->pos;
1627
1628         edge = get_block_succ_next(block, edge);
1629         /* the next block can only contain Phis if this is a merge edge */
1630         if(edge) return;
1631
1632         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1633         copyreg = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1634
1635         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1636         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
1637
1638         pset_foreach(live, tmp) {
1639                 spill_t  *spill;
1640 #if 0
1641                 op_t  *op = get_irn_link(irn);
1642                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1643 #endif
1644                 spill = set_find_spill(spill_bb->ilp, tmp);
1645                 assert(spill);
1646
1647                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1648         }
1649         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1650
1651         sched_foreach(succ, phi) {
1652                 const ir_node  *to_copy;
1653                 op_t           *to_copy_op;
1654                 spill_t        *to_copy_spill;
1655                 op_t           *phi_op = get_irn_link(phi);
1656                 ilp_var_t       reload = ILP_UNDEF;
1657
1658
1659                 if(!is_Phi(phi)) break;
1660                 if(!has_reg_class(si, phi)) continue;
1661
1662                 to_copy = get_irn_n(phi, pos);
1663
1664                 to_copy_op = get_irn_link(to_copy);
1665
1666                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1667                 assert(to_copy_spill);
1668
1669                 if(spill_bb->reloads) {
1670                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1671
1672                         if(keyval) {
1673                                 reload = PTR_TO_INT(keyval->val);
1674                         }
1675                 }
1676
1677                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N_%N", block, phi, to_copy);
1678                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1679
1680                 /* copy - reg_out - reload - remat - live_range <= 0 */
1681                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1682                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1683                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1684                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1685                 foreach_pre_remat(si, block, tmp) {
1686                         op_t     *remat_op = get_irn_link(tmp);
1687                         if(remat_op->attr.remat.remat->value == to_copy) {
1688                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1689                         }
1690                 }
1691
1692                 ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N_%N", block, phi, to_copy);
1693                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1694
1695                 /* copy - reg_out - copyreg <= 0 */
1696                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1697                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1698                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1699         }
1700 }
1701
1702
1703 /**
1704  * Walk all irg blocks and emit this ILP
1705  */
1706 static void
1707 luke_blockwalker(ir_node * bb, void * data)
1708 {
1709         spill_ilp_t    *si = (spill_ilp_t*)data;
1710         ir_node        *irn;
1711         pset           *live;
1712         char            buf[256];
1713         ilp_cst_t       cst;
1714         spill_bb_t     *spill_bb = get_irn_link(bb);
1715         ir_node        *tmp;
1716         spill_t        *spill;
1717         pset           *defs = pset_new_ptr_default();
1718         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
1719
1720
1721         live = pset_new_ptr_default();
1722
1723         /****************************************
1724          *      B A S I C  B L O C K  E N D
1725          ***************************************/
1726
1727
1728         /* init live values at end of block */
1729         get_live_end(si, bb, live);
1730
1731         pset_foreach(live, irn) {
1732                 op_t           *op;
1733                 ilp_var_t       reload = ILP_UNDEF;
1734
1735                 spill = set_find_spill(spill_bb->ilp, irn);
1736                 assert(spill);
1737
1738                 if(spill_bb->reloads) {
1739                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1740
1741                         if(keyval) {
1742                                 reload = PTR_TO_INT(keyval->val);
1743                         }
1744                 }
1745
1746                 op = get_irn_link(irn);
1747                 assert(!op->is_remat);
1748
1749                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1750                 op->attr.live_range.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1751                 op->attr.live_range.op = bb;
1752
1753                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1754                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1755
1756                 /* reg_out - reload - remat - live_range <= 0 */
1757                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1758                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1759                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1760                 foreach_pre_remat(si, bb, tmp) {
1761                         op_t     *remat_op = get_irn_link(tmp);
1762                         if(remat_op->attr.remat.remat->value == irn) {
1763                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1764                         }
1765                 }
1766                 /* maybe we should also assure that reg_out >= live_range etc. */
1767         }
1768
1769         if(opt_memcopies)
1770                 insert_mem_copy_position(si, live, bb);
1771
1772         /*
1773          * start new live ranges for values used by remats at end of block
1774          * and assure the remat args are available
1775          */
1776         foreach_pre_remat(si, bb, tmp) {
1777                 op_t     *remat_op = get_irn_link(tmp);
1778                 int       n;
1779
1780                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1781                         ir_node        *remat_arg = get_irn_n(tmp, n);
1782                         op_t           *arg_op = get_irn_link(remat_arg);
1783                         ilp_var_t       prev_lr;
1784
1785                         if(!has_reg_class(si, remat_arg)) continue;
1786
1787                         /* if value is becoming live through use by remat */
1788                         if(!pset_find_ptr(live, remat_arg)) {
1789                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1790                                 prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1791
1792                                 arg_op->attr.live_range.ilp = prev_lr;
1793                                 arg_op->attr.live_range.op = bb;
1794
1795                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1796
1797                                 pset_insert_ptr(live, remat_arg);
1798                                 add_to_spill_bb(si, bb, remat_arg);
1799                         }
1800
1801                         /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1802                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1803                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1804
1805                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1806                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1807
1808                         /* use reload placed for this argument */
1809                         if(spill_bb->reloads) {
1810                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1811
1812                                 if(keyval) {
1813                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1814
1815                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1816                                 }
1817                         }
1818                 }
1819         }
1820         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1821
1822
1823
1824
1825         /**************************************
1826          *    B A S I C  B L O C K  B O D Y
1827          **************************************/
1828
1829         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1830                 op_t       *op;
1831                 op_t       *tmp_op;
1832                 int         n,
1833                                         u = 0,
1834                                         d = 0;
1835                 ilp_cst_t       check_pre,
1836                                         check_post;
1837                 set        *args;
1838                 pset       *used;
1839                 pset       *remat_defs;
1840                 keyval_t   *keyval;
1841                 ilp_cst_t   one_memoperand;
1842
1843                 /* iterate only until first phi */
1844                 if(is_Phi(irn))
1845                         break;
1846
1847                 op = get_irn_link(irn);
1848                 /* skip remats */
1849                 if(op->is_remat) continue;
1850                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1851
1852                 /* collect defined values */
1853                 if(has_reg_class(si, irn)) {
1854                         pset_insert_ptr(defs, irn);
1855                 }
1856
1857                 /* skip projs */
1858                 if(is_Proj(irn)) continue;
1859
1860                 /*
1861                  * init set of irn's arguments
1862                  * and all possibly used values around this op
1863                  * and values defined by post remats
1864                  */
1865                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1866                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1867                 remat_defs = pset_new_ptr(pset_count(live));
1868
1869                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1870                         ir_node        *irn_arg = get_irn_n(irn, n);
1871                         if(has_reg_class(si, irn_arg)) {
1872                                 set_insert_keyval(args, irn_arg, INT_TO_PTR(n));
1873                                 pset_insert_ptr(used, irn_arg);
1874                         }
1875                 }
1876                 foreach_post_remat(irn, tmp) {
1877                         op_t    *remat_op = get_irn_link(tmp);
1878
1879                         pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1880
1881                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1882                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1883                                 if(has_reg_class(si, remat_arg)) {
1884                                         pset_insert_ptr(used, remat_arg);
1885                                 }
1886                         }
1887                 }
1888                 foreach_pre_remat(si, irn, tmp) {
1889                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1890                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1891                                 if(has_reg_class(si, remat_arg)) {
1892                                         pset_insert_ptr(used, remat_arg);
1893                                 }
1894                         }
1895                 }
1896
1897                 /**********************************
1898                  *   I N  E P I L O G  O F  irn
1899                  **********************************/
1900
1901                 /* ensure each dying value is used by only one post remat */
1902                 pset_foreach(used, tmp) {
1903                         ir_node     *value = tmp;
1904                         op_t        *value_op = get_irn_link(value);
1905                         ir_node     *remat;
1906                         int          n_remats = 0;
1907
1908                         cst = ILP_UNDEF;
1909                         foreach_post_remat(irn, remat) {
1910                                 op_t  *remat_op = get_irn_link(remat);
1911
1912                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1913                                         ir_node   *remat_arg = get_irn_n(remat, n);
1914
1915                                         /* if value is used by this remat add it to constraint */
1916                                         if(remat_arg == value) {
1917                                                 if(n_remats == 0) {
1918                                                         /* sum remat2s <= 1 + n_remats*live_range */
1919                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1920                                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
1921                                                 }
1922
1923                                                 n_remats++;
1924                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1925                                                 break;
1926                                         }
1927                                 }
1928                         }
1929
1930                         if(pset_find_ptr(live, value) && cst != ILP_UNDEF) {
1931                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1932                         }
1933                 }
1934
1935                 /* ensure at least one value dies at post remat */
1936                 foreach_post_remat(irn, tmp) {
1937                         op_t     *remat_op = get_irn_link(tmp);
1938                         pset     *remat_args = pset_new_ptr(get_irn_arity(tmp));
1939                         ir_node  *remat_arg;
1940
1941                         for(n=get_irn_arity(tmp)-1; n>=0; --n) {
1942                                 remat_arg = get_irn_n(tmp, n);
1943
1944                                 if(has_reg_class(si, remat_arg)) {
1945
1946                                         /* does arg always die at this op? */
1947                                         if(!pset_find_ptr(live, remat_arg))
1948                                                 goto skip_one_must_die;
1949
1950                                         pset_insert_ptr(remat_args, remat_arg);
1951                                 }
1952                         }
1953
1954                         /* remat + \sum live_range(remat_arg) <= |args| */
1955                         ir_snprintf(buf, sizeof(buf), "one_must_die_%+F", tmp);
1956                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
1957                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1958
1959                         pset_foreach(remat_args, remat_arg) {
1960                                 op_t  *arg_op = get_irn_link(remat_arg);
1961
1962                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1963                         }
1964
1965 skip_one_must_die:
1966                         del_pset(remat_args);
1967                 }
1968
1969                 /* new live ranges for values from L\U defined by post remats */
1970                 pset_foreach(live, tmp) {
1971                         ir_node     *value = tmp;
1972                         op_t        *value_op = get_irn_link(value);
1973
1974                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
1975                                 ilp_var_t    prev_lr = ILP_UNDEF;
1976                                 ir_node     *remat;
1977
1978                                 if(pset_find_ptr(remat_defs, value)) {
1979
1980                                         /* next_live_range <= prev_live_range + sum remat2s */
1981                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1982                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1983
1984                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1985                                         prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1986
1987                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1988                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1989
1990                                         foreach_post_remat(irn, remat) {
1991                                                 op_t        *remat_op = get_irn_link(remat);
1992
1993                                                 /* if value is being rematerialized by this remat */
1994                                                 if(value == remat_op->attr.remat.remat->value) {
1995                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1996                                                 }
1997                                         }
1998
1999                                         value_op->attr.live_range.ilp = prev_lr;
2000                                         value_op->attr.live_range.op = irn;
2001                                 }
2002                         }
2003                 }
2004
2005                 /* requirements for post remats and start live ranges from L\U' for values dying here */
2006                 foreach_post_remat(irn, tmp) {
2007                         op_t        *remat_op = get_irn_link(tmp);
2008                         int          n;
2009
2010                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2011                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2012                                 op_t           *arg_op = get_irn_link(remat_arg);
2013
2014                                 if(!has_reg_class(si, remat_arg)) continue;
2015
2016                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
2017                                 if(!pset_find_ptr(used, remat_arg)) {
2018                                         /* remat <= live_range(remat_arg) */
2019                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
2020                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2021
2022                                         /* if value is becoming live through use by remat2 */
2023                                         if(!pset_find_ptr(live, remat_arg)) {
2024                                                 ilp_var_t     lr;
2025
2026                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
2027                                                 lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2028
2029                                                 arg_op->attr.live_range.ilp = lr;
2030                                                 arg_op->attr.live_range.op = irn;
2031
2032                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
2033
2034                                                 pset_insert_ptr(live, remat_arg);
2035                                                 add_to_spill_bb(si, bb, remat_arg);
2036                                         }
2037
2038                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2039                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2040                                 }
2041                         }
2042                 }
2043
2044                 d = pset_count(defs);
2045                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
2046
2047                 /* count how many regs irn needs for arguments */
2048                 u = set_count(args);
2049
2050
2051                 /* check the register pressure in the epilog */
2052                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
2053                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
2054                 check_post = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - d);
2055
2056                 /* add L\U' to check_post */
2057                 pset_foreach(live, tmp) {
2058                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
2059                                 /* if a live value is not used by irn */
2060                                 tmp_op = get_irn_link(tmp);
2061                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
2062                         }
2063                 }
2064
2065                 /***********************************************************
2066                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
2067                  **********************************************************/
2068
2069
2070                 pset_foreach(used, tmp) {
2071                         ilp_var_t       prev_lr;
2072                         ilp_var_t       post_use;
2073                         int             p = 0;
2074                         spill_t        *spill;
2075                         ir_node        *arg = tmp;
2076                         op_t           *arg_op = get_irn_link(arg);
2077                         ir_node        *remat;
2078
2079                         spill = add_to_spill_bb(si, bb, arg);
2080
2081                         /* new live range for each used value */
2082                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
2083                         prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, is_before_frame(bb, irn)?1.0:0.0);
2084
2085                         /* the epilog stuff - including post_use, check_post, check_post_remat */
2086                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
2087                         post_use = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2088
2089                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
2090
2091                         /* arg is live throughout epilog if the next live_range is in a register */
2092                         if(pset_find_ptr(live, arg)) {
2093                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
2094
2095                                 /* post_use >= next_lr; similar constraints for post remats using arg follow below */
2096                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
2097                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2098                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
2099                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2100
2101                         }
2102
2103                         /* if value is not an arg of op and not possibly defined by post remat
2104                          * then it may only die and not become live
2105                          */
2106                         if(!set_find_keyval(args, arg)) {
2107                                 /* post_use <= prev_lr */
2108                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
2109                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2110                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
2111                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2112
2113                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
2114                                         /* next_lr <= prev_lr */
2115                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
2116                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2117                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2118                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2119                                 }
2120                         }
2121
2122
2123                         /* forall post remat which use arg add a similar cst */
2124                         foreach_post_remat(irn, remat) {
2125                                 int      n;
2126
2127                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
2128                                         ir_node    *remat_arg = get_irn_n(remat, n);
2129                                         op_t       *remat_op = get_irn_link(remat);
2130
2131                                         if(remat_arg == arg) {
2132                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
2133
2134                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
2135                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2136                                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
2137                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2138                                         }
2139                                 }
2140                         }
2141
2142                         if(opt_memoperands) {
2143                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2144                                         if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
2145                                                 ilp_var_t       memoperand;
2146
2147                                                 ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
2148                                                 memoperand = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_memoperand*execution_frequency(si, bb), 0.0);
2149                                                 set_insert_memoperand(si->memoperands, irn, n, memoperand);
2150
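                                                     /* a memory operand excludes a surviving register value: memoperand + post_use <= 1 */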
2151                                                 ir_snprintf(buf, sizeof(buf), "nolivepost_%N_%d", irn, n);
2152                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2153
2154                                                 lpp_set_factor_fast(si->lpp, cst, memoperand, 1.0);
2155                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
2156 //                                              if(arg_op->attr.live_range.ilp != ILP_UNDEF)
2157 //                                                      lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2158                                         }
2159                                 }
2160                         }
2161
2162                         /* new live range begins for each used value */
2163                         arg_op->attr.live_range.ilp = prev_lr;
2164                         arg_op->attr.live_range.op = irn;
2165
2166                         /*if(!pset_find_ptr(live, arg)) {
2167                                 pset_insert_ptr(live, arg);
2168                                 add_to_spill_bb(si, bb, arg);
2169                         }*/
2170                         pset_insert_ptr(live, arg);
2171
2172                 }
2173
2174                 /* just to be sure */
2175                 check_post = ILP_UNDEF;
2176
2177
2178
2179
2180                 /******************
2181                  *   P R O L O G
2182                  ******************/
2183
2184                 /* check the register pressure in the prolog */
2185                 /* sum_{L\U} lr <= k - |U| */
2186                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
2187                 check_pre = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - u);
2188
2189                 /* for the prolog remove defined values from the live set */
2190                 pset_foreach(defs, tmp) {
2191                         pset_remove_ptr(live, tmp);
2192                 }
2193
2194                 if(opt_memoperands) {
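                             /* at most one memory operand per op: sum of all memoperand vars <= 1 */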
2195                         ir_snprintf(buf, sizeof(buf), "one_memoperand_%N", irn);
2196                         one_memoperand = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2197                 }
2198
2199                 /***********************************************************
2200                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
2201                  **********************************************************/
2202
2203
2204                 set_foreach(args, keyval) {
2205                         spill_t          *spill;
2206                         const ir_node    *arg = keyval->key;
2207                         int               i = PTR_TO_INT(keyval->val);
2208                         op_t             *arg_op = get_irn_link(arg);
2209                         ilp_cst_t         requirements;
2210                         int               n_memoperands;
2211
2212                         spill = set_find_spill(spill_bb->ilp, arg);
2213                         assert(spill);
2214
2215                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
2216                         op->attr.live_range.args.reloads[i] = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), is_before_frame(bb, irn)?0.0:1.0);
2217
2218                         /* reload <= mem_out */
2219                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
2220                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2221                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
2222                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2223
2224                         /* requirement: arg must be in register for use */
2225                         /* reload + remat + live_range == 1 */
2226                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
2227                         requirements = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1.0);
2228
2229                         lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
2230                         lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
2231                         foreach_pre_remat(si, irn, tmp) {
2232                                 op_t     *remat_op = get_irn_link(tmp);
2233                                 if(remat_op->attr.remat.remat->value == arg) {
2234                                         lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
2235                                 }
2236                         }
2237
2238                         if(opt_memoperands) {
2239                                 n_memoperands = 0;
2240                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2241                                         if(get_irn_n(irn, n) == arg) {
2242                                                 n_memoperands++;
2243                                         }
2244                                 }
2245                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2246                                         if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
2247                                                 memoperand_t  *memoperand;
2248                                                 memoperand = set_find_memoperand(si->memoperands, irn, n);
2249
2250                                                 /* memoperand <= mem_out */
2251                                                 ir_snprintf(buf, sizeof(buf), "req_memoperand_%N_%d", irn, n);
2252                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2253                                                 lpp_set_factor_fast(si->lpp, cst, memoperand->ilp, 1.0);
2254                                                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2255
2256                                                 /* the memoperand is only sufficient if it is used once by the op */
2257                                                 if(n_memoperands == 1)
2258                                                         lpp_set_factor_fast(si->lpp, requirements, memoperand->ilp, 1.0);
2259
2260                                                 lpp_set_factor_fast(si->lpp, one_memoperand, memoperand->ilp, 1.0);
2261
2262                                                 /* we have one more free register if we use a memory operand */
2263                                                 lpp_set_factor_fast(si->lpp, check_pre, memoperand->ilp, -1.0);
2264                                         }
2265                                 }
2266                         }
2267                 }
2268
2269                 /* iterate over L\U */
2270                 pset_foreach(live, tmp) {
2271                         if(!set_find_keyval(args, tmp)) {
2272                                 /* if a live value is not used by irn */
2273                                 tmp_op = get_irn_link(tmp);
2274                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
2275                         }
2276                 }
2277
2278
2279                 /* requirements for remats */
2280                 /* start new live ranges for values used by remats */
2281                 foreach_pre_remat(si, irn, tmp) {
2282                         op_t        *remat_op = get_irn_link(tmp);
2283                         int          n;
2284
2285                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2286                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2287                                 op_t           *arg_op = get_irn_link(remat_arg);
2288
2289                                 if(!has_reg_class(si, remat_arg)) continue;
2290
2291                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
2292                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2293                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2294
2295                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2296                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2297
2298                                 /* if the remat arg is also used by the current op then we can use the reload placed for this argument */
2299                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2300                                         int    index = PTR_TO_INT(keyval->val);
2301
2302                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2303                                 }
2304                         }
2305                 }
2306
2307
2308
2309
2310                 /*************************
2311                  *  D O N E  W I T H  O P
2312                  *************************/
2313
2314                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2315
2316                 pset_foreach(live, tmp) {
2317                         assert(has_reg_class(si, tmp));
2318                 }
2319
2320                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2321                         ir_node        *arg = get_irn_n(irn, n);
2322
2323                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2324                 }
2325
2326                 del_pset(remat_defs);
2327                 del_pset(used);
2328                 del_set(args);
2329                 del_pset(defs);
2330                 defs = pset_new_ptr_default();
2331         }
2332
2333
2334
2335         /***************************************
2336          *   B E G I N N I N G  O F  B L O C K
2337          ***************************************/
2338
2339
2340         /* we are now at the beginning of the basic block, there are only \Phis in front of us */
2341         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2342
2343         pset_foreach(live, irn) {
2344                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2345         }
2346
2347         /* construct mem_outs for all values */
2348
2349         set_foreach(spill_bb->ilp, spill) {
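                     /* mem_out <= spill [ + mem_in, added below if the value is live at the block start ] */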
2350                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2351                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2352
2353                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2354                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2355
2356                 if(pset_find_ptr(live, spill->irn)) {
2357                         int default_spilled;
2358                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2359
2360                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2361                         default_spilled = be_is_live_in(si->lv, bb, spill->irn) || is_Phi(spill->irn);
2362                         spill->mem_in   = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, default_spilled);
2363                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2364
2365                         if(opt_memcopies && is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2366                                 int   n;
2367                                 op_t *op = get_irn_link(spill->irn);
2368
2369                                 /* do we have to copy a phi argument? */
2370                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2371                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2372
2373                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2374                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2375                                         double          freq=0.0;
2376                                         int             m;
2377                                         ilp_var_t       var;
2378
2379
2380                                         /* argument already done? */
2381                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2382
2383                                         /* get sum of execution frequencies of blocks with the same phi argument */
2384                                         for(m=n; m>=0; --m) {
2385                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2386
2387                                                 if(arg==arg2) {
2388                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
2389                                                 }
2390                                         }
2391
2392                                         /* copies are not for free */
2393                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2394                                         var = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_spill * freq, 1.0);
2395
2396                                         for(m=n; m>=0; --m) {
2397                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2398
2399                                                 if(arg==arg2) {
2400                                                         op->attr.live_range.args.copies[m] = var;
2401                                                 }
2402                                         }
2403
2404 #if 0
2405                                         /* copy <= mem_in */
2406                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2407                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2408                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2409                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2410 #endif
2411                                 }
2412                         }
2413                 }
2414         }
2415
2416         foreach_post_remat(bb, tmp) {
2417                 int         n;
2418                 pset       *remat_args = pset_new_ptr(get_irn_arity(tmp));
2419                 op_t       *remat_op = get_irn_link(tmp);
2420                 ir_node    *remat_arg;
2421
2422                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2423                         remat_arg = get_irn_n(tmp, n);
2424                         if(has_reg_class(si, remat_arg)) {
2425                                 pset_insert_ptr(remat_args, remat_arg);
2426                         }
2427                 }
2428                 assert(pset_count(remat_args) > 0 && "post remats should have at least one arg");
2429
2430                 /* remat + \sum live_range(remat_arg) <= |args| */
2431                 ir_snprintf(buf, sizeof(buf), "one_must_die_%N", tmp);
2432                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
2433                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2434
2435                 pset_foreach(remat_args, remat_arg) {
2436                         /* if value is becoming live through use by remat2 */
2437                         if(!pset_find_ptr(live, remat_arg)) {
2438                                 op_t       *remat_arg_op = get_irn_link(remat_arg);
2439                                 ilp_cst_t   nomem;
2440
2441                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 at bb start %+F\n", remat_arg, tmp));
2442
2443                                 pset_insert_ptr(live, remat_arg);
2444                                 spill = add_to_spill_bb(si, bb, remat_arg);
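                                     /* the value has no live range inside this block yet (checked via ILP_UNDEF below) */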
2445                                 remat_arg_op->attr.live_range.ilp = ILP_UNDEF;
2446
2447                                 /* we need reg_in and mem_in for this value; they will be referenced later */
2448                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", remat_arg, bb);
2449                                 spill->reg_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2450                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", remat_arg, bb);
2451                                 spill->mem_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
2452
2453
2454                                 /* optimization: force spill and reload of this value to 0, since we do not want to insert reloads for remats */
2455                                 ir_snprintf(buf, sizeof(buf), "nomem_%N_%N", remat_arg, bb);
2456                                 nomem = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0.0);
2457
2458                                 lpp_set_factor_fast(si->lpp, nomem, spill->spill, 1.0);
2459                                 if(spill_bb->reloads) {
2460                                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
2461
2462                                         if(keyval) {
2463                                                 ilp_var_t reload = PTR_TO_INT(keyval->val);
2464                                                 lpp_set_factor_fast(si->lpp, nomem, reload, 1.0);
2465                                         }
2466                                 }
2467                         } else {
2468                                 op_t       *remat_arg_op = get_irn_link(remat_arg);
2469                                 lpp_set_factor_fast(si->lpp, cst, remat_arg_op->attr.live_range.ilp, 1.0);
2470                         }
2471                 }
2472                 del_pset(remat_args);
2473         }
2474
2475         /* L\U is empty at bb start */
2476         /* arg is live throughout epilog if it is reg_in into this block */
2477
2478         /* check the register pressure at the beginning of the block
2479          * including remats
2480          */
2481         /* reg_in corresponds to post_use */
2482
2483         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2484         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
2485
2486         pset_foreach(live, irn) {
2487                 ilp_cst_t  nospill;
2488
2489                 spill = set_find_spill(spill_bb->ilp, irn);
2490                 assert(spill);
2491
2492                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2493                 spill->reg_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2494
2495                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2496
2497                 /* spill + mem_in <= 1 */
2498                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2499                 nospill = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
2500
2501                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2502                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2503
2504         } /* post_remats are NOT included in register pressure check because
2505            they do not increase register pressure */
2506
2507         /* mem_in/reg_in for live_in values, especially phis and their arguments */
2508         pset_foreach(live, irn) {
2509                 int          p = 0,
2510                                          n;
2511
2512                 spill = set_find_spill(spill_bb->ilp, irn);
2513                 assert(spill && spill->irn == irn);
2514
2515                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2516                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2517                                 ilp_cst_t       mem_in,
2518                                                                 reg_in;
2519                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2520                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2521                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2522                                 spill_t        *spill_p;
2523                                 op_t           *op = get_irn_link(irn);
2524
2525                                 /* although the Phi is in the right register class, one or more
2526                                  * of its arguments can be in a different one or may have to be
2527                                  * ignored
2528                                  */
2529                                 if(has_reg_class(si, phi_arg)) {
2530                                         /* mem_in <= mem_out_arg + copy */
2531                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2532                                         mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2533
2534                                         /* reg_in <= reg_out_arg */
2535                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2536                                         reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2537
2538                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2539                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2540
2541                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2542                                         assert(spill_p);
2543
2544                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2545                                         if(opt_memcopies)
2546                                                 lpp_set_factor_fast(si->lpp, mem_in, op->attr.live_range.args.copies[n], -1.0);
2547
2548                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2549                                 }
2550                         }
2551                 } else {
2552                         /* else assure the value arrives on all paths in the same resource */
2553
2554                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2555                                 ilp_cst_t       mem_in,
2556                                                                 reg_in;
2557                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2558                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2559                                 spill_t        *spill_p;
2560
2561                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2562                                 mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2563                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2564                                 reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2565
2566                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2567                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2568
2569                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2570                                 assert(spill_p);
2571
2572                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2573                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2574                         }
2575                 }
2576         }
2577
2578         foreach_post_remat(bb, tmp) {
2579                 int         n;
2580
2581                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2582                         ir_node    *remat_arg = get_irn_n(tmp, n);
2583                         op_t       *remat_op = get_irn_link(tmp);
2584
2585                         if(!has_reg_class(si, remat_arg)) continue;
2586
2587                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2588                         assert(spill);
2589
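                             /* a post remat at the block start requires its argument in a register on entry: remat <= reg_in(arg) */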
2590                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2591                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2592                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2593                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2594                 }
2595         }
2596
2597         pset_foreach(live, irn) {
2598                 const op_t      *op = get_irn_link(irn);
2599                 const ir_node   *remat;
2600                 int              n_remats = 0;
2601
2602                 cst = ILP_UNDEF;
2603
2604                 foreach_post_remat(bb, remat) {
2605                         int   n;
2606
2607                         for (n=get_irn_arity(remat)-1; n>=0; --n) {
2608                                 const ir_node  *arg = get_irn_n(remat, n);
2609
2610                                 if(arg == irn) {
2611                                         const op_t   *remat_op = get_irn_link(remat);
2612
2613                                         if(cst == ILP_UNDEF) {
2614                                                 /* sum remat2s <= 1 + n_remats*live_range */
2615                                                 ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", irn, bb);
2616                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2617                                         }
2618                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2619                                         ++n_remats;
2620                                         break;
2621                                 }
2622                         }
2623                 }
2624                 if(cst != ILP_UNDEF && op->attr.live_range.ilp != ILP_UNDEF) {
2625                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -n_remats);
2626                 }
2627         }
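        /*
         * The dying_lr constraint above reads
         *     sum of remat2s using irn  -  n_remats * live_range(irn)  <=  1
         * i.e. if the live range of irn is not active here, at most one
         * remat2 may consume it; once the live range is active the bound
         * becomes slack and all remat2s may use it.
         */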
2628
2629         /* first live ranges from reg_ins */
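        /* i.e. the live range of irn may only start out active in this block
         * if irn either enters the block in a register or is recreated by one
         * of the post-remats placed at the block start:
         *     lr(irn, bb) - reg_in(irn, bb) - sum of remat2s producing irn <= 0
         */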
2630         pset_foreach(live, irn) {
2631                 op_t      *op = get_irn_link(irn);
2632
2633                 if(op->attr.live_range.ilp != ILP_UNDEF) {
2634
2635                         spill = set_find_spill(spill_bb->ilp, irn);
2636                         assert(spill && spill->irn == irn);
2637
2638                         ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2639                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2640                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2641                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2642
2643                         foreach_post_remat(bb, tmp) {
2644                                 op_t     *remat_op = get_irn_link(tmp);
2645
2646                                 if(remat_op->attr.remat.remat->value == irn) {
2647                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2648                                 }
2649                         }
2650                 }
2651         }
2652
2653         /* walk forward now and compute constraints for placing spills */
2654         /* this must only be done for values that are not defined in this block */
2655         /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
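        /* The req_spill constraint built below states that such a spill may
         * only be placed if the value is actually available in a register at
         * this point:
         *     spill(irn, bb) <= reg_in(irn, bb) (on diverge edges)
         *                       + sum of remats recreating irn before its first real use
         */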
2656         pset_foreach(live, irn) {
2657                 /*
2658                  * if the value is defined in this block we can always place the spill directly after the def
2659                  *    -> no constraint necessary
2660                  */
2661                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) continue;
2662
2663
2664                 spill = set_find_spill(spill_bb->ilp, irn);
2665                 assert(spill);
2666
2667                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2668                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2669
2670                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2671                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2672
2673                 if(!is_Phi(irn)) {
2674                         sched_foreach_op(bb, tmp) {
2675                                 op_t   *op = get_irn_link(tmp);
2676
2677                                 if(is_Phi(tmp)) continue;
2678                                 assert(!is_Proj(tmp));
2679
2680                                 if(op->is_remat) {
2681                                         const ir_node   *value = op->attr.remat.remat->value;
2682
2683                                         if(value == irn) {
2684                                                 /* only collect remats up to the first real use of a value */
2685                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2686                                         }
2687                                 } else {
2688                                         int   n;
2689
2690                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2691                                                 ir_node    *arg = get_irn_n(tmp, n);
2692
2693                                                 if(arg == irn) {
2694                                                         /* if a value is used stop collecting remats */
2695                                                         goto next_live;
2696                                                 }
2697                                         }
2698                                 }
2699                         }
2700                 }
2701 next_live: ;
2702         }
2703
2704         del_pset(live);
2705 }
2706
2707 typedef struct _irnlist_t {
2708         struct list_head   list;
2709         ir_node           *irn;
2710 } irnlist_t;
2711
2712 typedef struct _interference_t {
2713         struct list_head    blocklist;
2714         ir_node            *a;
2715         ir_node            *b;
2716 } interference_t;
2717
2718 static int
2719 cmp_interference(const void *a, const void *b, size_t size)
2720 {
2721         const interference_t *p = a;
2722         const interference_t *q = b;
2723
2724         return !(p->a == q->a && p->b == q->b);
2725 }
2726
2727 static interference_t *
2728 set_find_interference(set * set, ir_node * a, ir_node * b)
2729 {
2730         interference_t     query;
2731
2732         query.a = (a>b)?a:b;
2733         query.b = (a>b)?b:a;
2734
2735         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2736 }
2737
2738 static interference_t *
2739 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2740 {
2741         interference_t     query,
2742                                           *result;
2743         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2744
2745         list->irn = bb;
2746
2747         result = set_find_interference(set, a, b);
2748         if(result) {
2749
2750                 list_add(&list->list, &result->blocklist);
2751                 return result;
2752         }
2753
2754         query.a = (a>b)?a:b;
2755         query.b = (a>b)?b:a;
2756
2757         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2758
2759         INIT_LIST_HEAD(&result->blocklist);
2760         list_add(&list->list, &result->blocklist);
2761
2762         return result;
2763 }
2764
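/**
 * Check whether the values a and b interfere within block bb: if neither
 * value is defined in bb, both are live-in and interfere; otherwise, after
 * normalizing so that a dominates b, they interfere iff a is still live at
 * the end of bb or a has a user in bb (other than b, Phis and inverse
 * operations) that is dominated by b, i.e. a is still needed after the
 * definition of b.
 */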
2765 static int
2766 values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_node * a, const ir_node * b)
2767 {
2768         const ir_edge_t *edge;
2769
2770         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2771                 /* both values are live in, so they interfere */
2772                 return 1;
2773         }
2774
2775         /* ensure a dominates b */
2776         if(value_dominates(b,a)) {
2777                 const ir_node * t;
2778                 t = b;
2779                 b = a;
2780                 a = t;
2781         }
2782         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2783
2784
2785         /* the following check is adapted from bera.c */
2786         if(be_is_live_end(si->lv, bb, a))
2787                 return 1;
2788
2789         foreach_out_edge(a, edge) {
2790                 const ir_node *user = edge->src;
2791                 if(get_nodes_block(user) == bb
2792                                 && !is_Phi(user)
2793                                 && b != user
2794                                 && !pset_find_ptr(si->inverse_ops, user)
2795                                 && value_dominates(b, user))
2796                         return 1;
2797         }
2798
2799         return 0;
2800 }
2801
2802 /**
2803  * Walk all irg blocks and collect interfering values inside of phi classes
2804  */
2805 static void
2806 luke_interferencewalker(ir_node * bb, void * data)
2807 {
2808         spill_ilp_t    *si = (spill_ilp_t*)data;
2809         int             l1, l2;
2810
2811         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
2812                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
2813                 op_t           *a_op = get_irn_link(a);
2814
2815
2816                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2817                 if (has_reg_class(si, a) && get_phi_class(a)) {
2818                         if(a_op->is_remat || pset_find_ptr(si->inverse_ops, a))
2819                                 continue;
2820
2821                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
2822                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
2823                                 op_t           *b_op = get_irn_link(b);
2824
2825
2826                                 /* a and b are only interesting if they are in the same phi class */
2827                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2828                                         if(b_op->is_remat || pset_find_ptr(si->inverse_ops, b))
2829                                                 continue;
2830
2831                                         if(values_interfere_in_block(si, bb, a, b)) {
2832                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2833                                                 set_insert_interference(si, si->interferences, a, b, bb);
2834                                         }
2835                                 }
2836                         }
2837                 }
2838         }
2839 }
2840
2841 static unsigned int copy_path_id = 0;
2842
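/**
 * Emit one constraint per path of Phi copies connecting two interfering
 * values:
 *     any_interfere - sum of copies along the path <= 0
 * i.e. whenever the two values interfere in memory somewhere, at least one
 * memory copy on this path has to be taken.
 */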
2843 static void
2844 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2845 {
2846         ilp_cst_t  cst;
2847         ilp_var_t  copy;
2848         char       buf[256];
2849         void      *ptr;
2850
2851         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2852         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
2853
2854         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2855
2856         pset_foreach(copies, ptr) {
2857                 copy = PTR_TO_INT(ptr);
2858                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2859         }
2860 }
2861
2862 /**
2863  * @param copies   contains a path of copies which leads us to irn
2864  * @param visited  contains a set of nodes already visited on this path
2865  */
2866 static int
2867 find_copy_path(spill_ilp_t * si, const ir_node * irn, const ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2868 {
2869         const ir_edge_t *edge;
2870         op_t            *op = get_irn_link(irn);
2871     pset            *visited_users = pset_new_ptr_default();
2872         int              paths = 0;
2873
2874         if(op->is_remat) return 0;
2875
2876         pset_insert_ptr(visited, irn);
2877
2878         if(is_Phi(irn)) {
2879                 int    n;
2880         pset  *visited_operands = pset_new_ptr(get_irn_arity(irn));
2881
2882                 /* visit all operands */
2883                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2884                         ir_node  *arg = get_irn_n(irn, n);
2885                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2886
2887                         if(!has_reg_class(si, arg)) continue;
2888             if(pset_find_ptr(visited_operands, arg)) continue;
2889             pset_insert_ptr(visited_operands, arg);
2890
2891                         if(arg == target) {
2892                                 if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2893                                         del_pset(visited_operands);
2894                                         del_pset(visited_users);
2895                                         pset_remove_ptr(visited, irn);
2896                                         return paths;
2897                                 }
2898                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2899                                 write_copy_path_cst(si, copies, any_interfere);
2900                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2901                         } else if(!pset_find_ptr(visited, arg)) {
2902                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2903                                 paths += find_copy_path(si, arg, target, any_interfere, copies, visited);
2904                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2905
2906                 if(paths > MAX_PATHS) {
2907                     if(pset_count(copies) == 0) {
2908                         ilp_cst_t  cst;
2909                         char       buf[256];
2910
2911                         ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
2912                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
2913                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2914                         lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
2915                         DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
2916
2917                         paths = 0;
2918                     } else {
2919                         del_pset(visited_operands);
2920                         del_pset(visited_users);
2921                         pset_remove_ptr(visited, irn);
2922                         return paths;
2923                     }
2924                 } else if(pset_count(copies) == 0) {
2925                                         paths = 0;
2926                                 }
2927                         }
2928                 }
2929
2930         del_pset(visited_operands);
2931         }
2932
2933         /* visit all uses which are phis */
2934         foreach_out_edge(irn, edge) {
2935                 ir_node  *user = edge->src;
2936                 int       pos  = edge->pos;
2937                 op_t     *op = get_irn_link(user);
2938                 ilp_var_t copy;
2939
2940                 if(!is_Phi(user)) continue;
2941                 if(!has_reg_class(si, user)) continue;
2942         if(pset_find_ptr(visited_users, user)) continue;
2943         pset_insert_ptr(visited_users, user);
2944
2945                 copy = op->attr.live_range.args.copies[pos];
2946
2947                 if(user == target) {
2948                         if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2949                                 del_pset(visited_users);
2950                                 pset_remove_ptr(visited, irn);
2951                                 return paths;
2952                         }
2953                         pset_insert(copies, INT_TO_PTR(copy), copy);
2954                         write_copy_path_cst(si, copies, any_interfere);
2955                         pset_remove(copies, INT_TO_PTR(copy), copy);
2956                 } else if(!pset_find_ptr(visited, user)) {
2957                         pset_insert(copies, INT_TO_PTR(copy), copy);
2958                         paths += find_copy_path(si, user, target, any_interfere, copies, visited);
2959                         pset_remove(copies, INT_TO_PTR(copy), copy);
2960
2961             if(paths > MAX_PATHS) {
2962                 if(pset_count(copies) == 0) {
2963                     ilp_cst_t  cst;
2964                     char       buf[256];
2965
2966                     ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
2967                     cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
2968                     lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2969                     lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
2970                     DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
2971
2972                     paths = 0;
2973                 } else {
2974                     del_pset(visited_users);
2975                     pset_remove_ptr(visited, irn);
2976                     return paths;
2977                 }
2978             } else if(pset_count(copies) == 0) {
2979                                 paths = 0;
2980                         }
2981                 }
2982         }
2983
2984     del_pset(visited_users);
2985         pset_remove_ptr(visited, irn);
2986         return paths;
2987 }
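/*
 * Note on the MAX_PATHS cutoff in find_copy_path(): once more than MAX_PATHS
 * paths have been emitted and the current path prefix is empty, the search
 * stops enumerating individual paths and instead pins the copy directly to
 * the interference variable via the always_copy equality
 *     copy - any_interfere = 0
 * which conservatively forces that copy whenever the two values interfere.
 */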
2988
2989 static void
2990 gen_copy_constraints(spill_ilp_t * si, const ir_node * a, const ir_node * b, ilp_var_t any_interfere)
2991 {
2992         pset * copies = pset_new_ptr_default();
2993         pset * visited = pset_new_ptr_default();
2994
2995         find_copy_path(si, a, b, any_interfere, copies, visited);
2996
2997         del_pset(visited);
2998         del_pset(copies);
2999 }
3000
3001
3002 static void
3003 memcopyhandler(spill_ilp_t * si)
3004 {
3005         interference_t   *interference;
3006         char              buf[256];
3007         /* test memory values for interference */
3008
3009         /* analyze phi classes */
3010         phi_class_compute(si->chordal_env->irg);
3011
3012         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
3013         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
3014
3015         /* now let's emit the ILP inequalities for these interferences */
3016         set_foreach(si->interferences, interference) {
3017                 irnlist_t      *irnlist;
3018                 ilp_var_t       interfere,
3019                                                 any_interfere;
3020                 ilp_cst_t       any_interfere_cst,
3021                                                 cst;
3022                 const ir_node  *a  = interference->a;
3023                 const ir_node  *b  = interference->b;
3024
3025                 /* any_interf <= \sum interf */
3026                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
3027                 any_interfere_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3028                 any_interfere = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
3029
3030                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
3031
3032                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
3033                         const ir_node  *bb = irnlist->irn;
3034                         spill_bb_t     *spill_bb = get_irn_link(bb);
3035                         spill_t        *spilla,
3036                                                    *spillb;
3037                         char           buf[256];
3038
3039                         spilla = set_find_spill(spill_bb->ilp, a);
3040                         assert(spilla);
3041
3042                         spillb = set_find_spill(spill_bb->ilp, b);
3043                         assert(spillb);
3044
3045                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
3046                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
3047                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
3048                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
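                        /* e.g. with mem_in_a = 1 and spill_b = 1, constraint 1 gives
                         * 2 - interfere <= 1 and forces interfere = 1; with
                         * mem_in_b = spill_b = 0, constraint 3 gives interfere <= 0 */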
3049                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
3050                         interfere = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
3051
3052                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
3053                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
3054
3055                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
3056                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
3057                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
3058                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
3059                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
3060
3061                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
3062                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3063
3064                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3065                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
3066                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
3067
3068                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
3069                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3070
3071                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3072                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
3073                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
3074
3075
3076                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
3077
3078                         /* any_interfere >= interf */
3079                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
3080                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3081
3082                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3083                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
3084                 }
3085
3086                 /* now that we know whether the two values interfere in memory we can emit the constraints that enforce copies */
3087                 gen_copy_constraints(si,a,b,any_interfere);
3088         }
3089 }
3090
3091
3092 static INLINE int
3093 is_zero(double x)
3094 {
3095         return fabs(x) < 0.00001;
3096 }
3097
3098 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
3099 {
3100         spill_ilp_t *si = get_irg_link(current_ir_graph);
3101
3102         if(pset_find_ptr(si->all_possible_remats, n)) {
3103                 op_t   *op = (op_t*)get_irn_link(n);
3104                 assert(op && op->is_remat);
3105
3106                 if(!op->attr.remat.remat->inverse) {
3107                         if(op->attr.remat.pre) {
3108                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
3109                         } else {
3110                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
3111                         }
3112
3113                         return 1;
3114                 } else {
3115                         op_t   *op = (op_t*)get_irn_link(n);
3116                         assert(op && op->is_remat);
3117
3118                         if(op->attr.remat.pre) {
3119                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
3120                         } else {
3121                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
3122                         }
3123
3124                         return 1;
3125                 }
3126         }
3127
3128         return 0;
3129 }
3130
3131 static void
3132 dump_graph_with_remats(ir_graph * irg, const char * suffix)
3133 {
3134         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
3135         be_dump(irg, suffix, dump_ir_block_graph_sched);
3136         set_dump_node_vcgattr_hook(NULL);
3137 }
3138
3139 /**
3140  * Edge hook to dump the schedule edges with annotated register pressure.
3141  */
3142 static int
3143 sched_pressure_edge_hook(FILE *F, ir_node *irn)
3144 {
3145         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
3146                 ir_node *prev = sched_prev(irn);
3147                 fprintf(F, "edge:{sourcename:\"");
3148                 PRINT_NODEID(irn);
3149                 fprintf(F, "\" targetname:\"");
3150                 PRINT_NODEID(prev);
3151                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
3152                 fprintf(F, "\" color:magenta}\n");
3153         }
3154         return 1;
3155 }
3156
3157 static void
3158 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
3159 {
3160         DUMP_NODE_EDGE_FUNC old_edge_hook = get_dump_node_edge_hook();
3161
3162         dump_consts_local(0);
3163         set_dump_node_edge_hook(sched_pressure_edge_hook);
3164         dump_ir_block_graph(irg, suffix);
3165         set_dump_node_edge_hook(old_edge_hook);
3166 }
3167
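/**
 * Annotate every node (via its link field) with the register pressure at its
 * point in the schedule: each block is walked backwards starting from its
 * live-end set, removing defined values and adding used ones (with some extra
 * bookkeeping for Projs of the same tuple). sched_pressure_edge_hook() above
 * prints these numbers along the schedule edges.
 */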
3168 static void
3169 walker_pressure_annotator(ir_node * bb, void * data)
3170 {
3171         spill_ilp_t  *si = data;
3172         ir_node      *irn;
3173         int           n, i;
3174         pset         *live = pset_new_ptr_default();
3175         int           projs = 0;
3176
3177         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
3178                 irn = be_lv_get_irn(si->lv, bb, i);
3179
3180                 if (has_reg_class(si, irn)) {
3181                         pset_insert_ptr(live, irn);
3182                 }
3183         }
3184
3185         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
3186
3187         sched_foreach_reverse(bb, irn) {
3188                 if(is_Phi(irn)) {
3189                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
3190                         continue;
3191                 }
3192
3193                 if(has_reg_class(si, irn)) {
3194                         pset_remove_ptr(live, irn);
3195                         if(is_Proj(irn)) ++projs;
3196                 }
3197
3198                 if(!is_Proj(irn)) projs = 0;
3199
3200                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
3201                         ir_node    *arg = get_irn_n(irn, n);
3202
3203                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
3204                 }
3205                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
3206         }
3207
3208         del_pset(live);
3209 }
3210
3211 static void
3212 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
3213 {
3214         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
3215 }
3216
3217 static void
3218 connect_all_remats_with_keep(spill_ilp_t * si)
3219 {
3220         ir_node   *irn;
3221         ir_node  **ins,
3222                          **pos;
3223         int        n_remats;
3224
3225
3226         n_remats = pset_count(si->all_possible_remats);
3227         if(n_remats) {
3228                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
3229
3230                 pos = ins;
3231                 pset_foreach(si->all_possible_remats, irn) {
3232                         *pos = irn;
3233                         ++pos;
3234                 }
3235
3236                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
3237
3238                 obstack_free(si->obst, ins);
3239         }
3240 }
3241
3242 static void
3243 connect_all_spills_with_keep(spill_ilp_t * si)
3244 {
3245         ir_node   *irn;
3246         ir_node  **ins,
3247                          **pos;
3248         int        n_spills;
3249         ir_node   *keep;
3250
3251
3252         n_spills = pset_count(si->spills);
3253         if(n_spills) {
3254                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
3255
3256                 pos = ins;
3257                 pset_foreach(si->spills, irn) {
3258                         *pos = irn;
3259                         ++pos;
3260                 }
3261
3262                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
3263
3264                 obstack_free(si->obst, ins);
3265         }
3266 }
3267
3268 /** insert a spill at an arbitrary position */
3269 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
3270 {
3271         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
3272         ir_graph *irg   = get_irn_irg(bl);
3273         ir_node *frame  = get_irg_frame(irg);
3274         ir_node *spill;
3275         ir_node *next;
3276
3277         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
3278         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
3279
3280         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
3281
3282         /*
3283          * Search for the right insertion point: a spill of a Phi cannot be put
3284          * directly after the Phi if other Phis follow the one being spilled.
3285          * Likewise, a spill of a Proj must come after all Projs of the same
3286          * tuple node.
3287          *
3288          * One special case:
3289          * If the spill is placed in the start block, it must come after the
3290          * frame pointer has been set up; in that case the insertion point is
3291          * moved to the frame node below.
3292          */
3293
3294         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
3295                 insert = frame;
3296
3297         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
3298                 insert = next;
3299
3300         sched_add_after(insert, spill);
3301         return spill;
3302 }
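/*
 * For example, insert_spill() below simply calls
 *     spill = be_spill2(arch_env, irn, before);
 * and records the returned, already scheduled Spill node in the defs list of
 * the spilled value.
 */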
3303
3304 static void
3305 delete_remat(spill_ilp_t * si, ir_node * remat) {
3306         int       n;
3307         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3308
3309         sched_remove(remat);
3310
3311         /* kill links to operands */
3312         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
3313                 set_irn_n(remat, n, bad);
3314         }
3315 }
3316
3317 static void
3318 clean_remat_info(spill_ilp_t * si)
3319 {
3320         int            n;
3321         remat_t       *remat;
3322         remat_info_t  *remat_info;
3323         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
3324
3325         set_foreach(si->remat_info, remat_info) {
3326                 if(!remat_info->remats) continue;
3327
3328                 pset_foreach(remat_info->remats, remat)
3329                 {
3330                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
3331                                 set_irn_n((ir_node*)remat->proj, -1, bad);
3332                                 set_irn_n((ir_node*)remat->proj, 0, bad);
3333                         }
3334
3335                         if(get_irn_n_edges(remat->op) == 0) {
3336                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
3337                                         set_irn_n((ir_node*)remat->op, n, bad);
3338                                 }
3339                         }
3340                 }
3341
3342                 if(remat_info->remats) del_pset(remat_info->remats);
3343                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
3344         }
3345 }
3346
3347 static void
3348 delete_unnecessary_remats(spill_ilp_t * si)
3349 {
3350         if(opt_keep_alive & KEEPALIVE_REMATS) {
3351                 int       n;
3352                 ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3353
3354                 if(si->keep) {
3355 //                      ir_node   *end = get_irg_end(si->chordal_env->irg);
3356 //                      ir_node  **keeps;
3357
3358                         for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
3359                                 ir_node        *keep_arg = get_irn_n(si->keep, n);
3360                                 op_t           *arg_op = get_irn_link(keep_arg);
3361                                 lpp_name_t     *name;
3362
3363                                 assert(arg_op->is_remat);
3364
3365                                 name = si->lpp->vars[arg_op->attr.remat.ilp];
3366
3367                                 if(is_zero(name->value)) {
3368                                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
3369                                         /* TODO check whether reload is preferred over remat (could be bug) */
3370                                         delete_remat(si, keep_arg);
3371                                 } else {
3372                                         if(!arg_op->attr.remat.remat->inverse) {
3373                                                 if(arg_op->attr.remat.pre) {
3374                                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
3375                                                 } else {
3376                                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
3377                                                 }
3378                                         } else {
3379                                                 if(arg_op->attr.remat.pre) {
3380                                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
3381                                                 } else {
3382                                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
3383                                                 }
3384                                         }
3385                                 }
3386
3387                                 set_irn_n(si->keep, n, bad);
3388                         }
3389 #if 0
3390                         for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
3391                                 ir_node        *end_arg = get_End_keepalive(end, i);
3392
3393                                 if(end_arg != si->keep) {
3394                                         obstack_grow(si->obst, &end_arg, sizeof(end_arg));
3395                                 }
3396                         }
3397                         keeps = obstack_finish(si->obst);
3398                         set_End_keepalives(end, n-1, keeps);
3399                         obstack_free(si->obst, keeps);
3400 #endif
3401                 } else {
3402                         DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
3403                 }
3404         } else {
3405                 ir_node  *remat;
3406
3407                 pset_foreach(si->all_possible_remats, remat) {
3408                         op_t           *remat_op = get_irn_link(remat);
3409                         lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
3410
3411                         if(is_zero(name->value)) {
3412                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
3413                                 /* TODO check whether reload is preferred over remat (could be bug) */
3414                                 delete_remat(si, remat);
3415                         } else {
3416                                 if(!remat_op->attr.remat.remat->inverse) {
3417                                         if(remat_op->attr.remat.pre) {
3418                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3419                                         } else {
3420                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3421                                         }
3422                                 } else {
3423                                         if(remat_op->attr.remat.pre) {
3424                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3425                                         } else {
3426                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3427                                         }
3428                                 }
3429                         }
3430                 }
3431         }
3432 }
3433
3434 static pset *
3435 get_spills_for_value(spill_ilp_t * si, const ir_node * value)
3436 {
3437         pset     *spills = pset_new_ptr_default();
3438
3439         const ir_node  *next;
3440         defs_t         *defs;
3441
3442         defs = set_find_def(si->values, value);
3443
3444         if(defs && defs->spills) {
3445                 for(next = defs->spills; next; next = get_irn_link(next)) {
3446                         pset_insert_ptr(spills, next);
3447                 }
3448         }
3449
3450         return spills;
3451 }
3452
3453 /**
3454  * @param before   The node after which the spill will be placed in the schedule
3455  */
3456 static ir_node *
3457 insert_spill(spill_ilp_t * si, ir_node * irn, const ir_node * value, ir_node * before)
3458 {
3459         defs_t   *defs;
3460         ir_node  *spill;
3461         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3462
3463         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3464
3465         spill = be_spill2(arch_env, irn, before);
3466
3467         defs = set_insert_def(si->values, value);
3468         assert(defs);
3469
3470         /* enter into the linked list */
3471         set_irn_link(spill, defs->spills);
3472         defs->spills = spill;
3473
3474         if(opt_keep_alive & KEEPALIVE_SPILLS)
3475                 pset_insert_ptr(si->spills, spill);
3476
3477         return spill;
3478 }
3479
3480 /**
3481  * @param phi      The Phi node which has to be spilled
3482  */
3483 static ir_node *
3484 insert_mem_phi(spill_ilp_t * si, ir_node * phi)
3485 {
3486         ir_node   *mem_phi;
3487         ir_node  **ins;
3488         defs_t    *defs;
3489         int        n;
3490
3491         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3492
3493         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3494                 ins[n] = si->m_unknown;
3495         }
3496
3497         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3498
3499         defs = set_insert_def(si->values, phi);
3500         assert(defs);
3501
3502         /* enter into the linked list */
3503         set_irn_link(mem_phi, defs->spills);
3504         defs->spills = mem_phi;
3505
3506 #ifdef SCHEDULE_PHIM
3507         sched_add_after(phi, mem_phi);
3508 #endif
3509
3510         if(opt_keep_alive & KEEPALIVE_SPILLS)
3511                 pset_insert_ptr(si->spills, mem_phi);
3512
3513
3514         return mem_phi;
3515 }
3516
3517 /**
3518  * Add remat to list of defs, destroys link field!
3519  */
3520 static void
3521 insert_remat(spill_ilp_t * si, ir_node * remat)
3522 {
3523         defs_t   *defs;
3524         op_t     *remat_op = get_irn_link(remat);
3525
3526         assert(remat_op->is_remat);
3527
3528         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3529         assert(defs);
3530
3531         /* enter into the linked list */
3532         set_irn_link(remat, defs->remats);
3533         defs->remats = remat;
3534 }
3535
3536
3537 /**
3538  * Add reload before operation and add to list of defs
3539  */
3540 static ir_node *
3541 insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
3542 {
3543         defs_t   *defs;
3544         ir_node  *reload,
3545                          *spill;
3546         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3547
3548         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3549
3550         defs = set_find_def(si->values, value);
3551
3552         spill = defs->spills;
3553         assert(spill && "no spill placed before reload");
3554
3555         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3556
3557         /* enter into the linked list */
3558         set_irn_link(reload, defs->remats);
3559         defs->remats = reload;
3560
3561         return reload;
3562 }
3563
3564 void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
3565 {
3566         defs_t           *defs;
3567         ir_node          *value = get_irn_n(memoperand->irn, memoperand->pos);
3568         ir_node          *spill;
3569         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3570
3571         DBG((si->dbg, LEVEL_2, "\t  inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
3572
3573         defs = set_find_def(si->values, value);
3574
3575         spill = defs->spills;
3576         assert(spill && "no spill placed before reload");
3577
3578         arch_perform_memory_operand(arch_env, memoperand->irn, spill, memoperand->pos);
3579 }
3580
3581 void insert_memoperands(spill_ilp_t * si)
3582 {
3583         memoperand_t   *memoperand;
3584         lpp_name_t     *name;
3585
3586         set_foreach(si->memoperands, memoperand) {
3587                 name = si->lpp->vars[memoperand->ilp];
3588                 if(!is_zero(name->value)) {
3589                         perform_memory_operand(si, memoperand);
3590                 }
3591         }
3592 }
3593
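/**
 * Materialize the spill decisions of the ILP solution for one block: Phis
 * defined here whose mem_in variable is set get a memory Phi, values defined
 * in this block are spilled right after their definition, values entering the
 * block in a register are spilled at the block start, and everything else is
 * spilled right after the operation (usually a remat) that makes it available
 * again, collected via spills_to_do below.
 */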
3594 static void
3595 walker_spill_placer(ir_node * bb, void * data) {
3596         spill_ilp_t   *si = (spill_ilp_t*)data;
3597         ir_node       *irn;
3598         spill_bb_t    *spill_bb = get_irn_link(bb);
3599         pset          *spills_to_do = pset_new_ptr_default();
3600         spill_t       *spill;
3601
3602         set_foreach(spill_bb->ilp, spill) {
3603                 lpp_name_t    *name;
3604
3605                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3606                         name = si->lpp->vars[spill->mem_in];
3607                         if(!is_zero(name->value)) {
3608                                 ir_node   *mem_phi;
3609
3610                                 mem_phi = insert_mem_phi(si, spill->irn);
3611
3612                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3613                         }
3614                 }
3615
3616                 name = si->lpp->vars[spill->spill];
3617                 if(!is_zero(name->value)) {
3618                         /* place spill directly after definition */
3619                         if(get_nodes_block(spill->irn) == bb) {
3620                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3621                                 continue;
3622                         }
3623
3624                         /* place spill at bb start */
3625                         if(spill->reg_in > 0) {
3626                                 name = si->lpp->vars[spill->reg_in];
3627                                 if(!is_zero(name->value)) {
3628                                         insert_spill(si, spill->irn, spill->irn, bb);
3629                                         continue;
3630                                 }
3631                         }
3632                         /* place spill after a remat */
3633                         pset_insert_ptr(spills_to_do, spill->irn);
3634                 }
3635         }
3636         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3637
3638
3639         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3640                 op_t     *op = get_irn_link(irn);
3641
3642                 if(be_is_Spill(irn)) continue;
3643
3644                 if(op->is_remat) {
3645                         /* TODO fix this if we want to support remats with more than two nodes */
3646                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3647                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3648
3649                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3650                         }
3651                 } else {
3652                         if(pset_find_ptr(spills_to_do, irn)) {
3653                                 pset_remove_ptr(spills_to_do, irn);
3654
3655                                 insert_spill(si, irn, irn, irn);
3656                         }
3657                 }
3658
3659         }
3660
3661         assert(pset_count(spills_to_do) == 0);
3662
3663         /* afterwards free the set of pending spills */
3664         del_pset(spills_to_do);
3665 }
3666
3667 static ir_node *
3668 insert_mem_copy(spill_ilp_t * si, ir_node * bb, ir_node * value)
3669 {
3670         ir_node          *insert_pos = bb;
3671         ir_node          *spill;
3672         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3673
3674         /* find last definition of arg value in block */
3675         ir_node  *next;
3676         defs_t   *defs;
3677         int       last = 0;
3678
3679         defs = set_find_def(si->values, value);
3680
3681         if(defs && defs->remats) {
3682                 for(next = defs->remats; next; next = get_irn_link(next)) {
3683                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3684                                 last = sched_get_time_step(next);
3685                                 insert_pos = next;
3686                         }
3687                 }
3688         }
3689
3690         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3691                 last = sched_get_time_step(value);
3692                 insert_pos = value;
3693         }
3694
3695         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3696
3697         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
3698
3699         return spill;
3700 }
3701
3702 static void
3703 phim_fixer(spill_ilp_t *si) {
3704         defs_t  *defs;
3705
3706         set_foreach(si->values, defs) {
3707                 const ir_node  *phi = defs->value;
3708                 op_t           *op = get_irn_link(phi);
3709                 ir_node        *phi_m = NULL;
3710                 ir_node        *next = defs->spills;
3711                 int             n;
3712
3713                 if(!is_Phi(phi)) continue;
3714
3715                 while(next) {
3716                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3717                                 phi_m = next;
3718                                 break;
3719                         } else {
3720                                 next = get_irn_link(next);
3721                         }
3722                 }
3723                 if(!phi_m) continue;
3724
3725                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3726                         ir_node        *value = get_irn_n(phi, n);
3727                         defs_t         *val_defs = set_find_def(si->values, value);
3728
3729                         /* a spill of this value */
3730                         ir_node      *spill;
3731
3732
3733                         if(opt_memcopies) {
3734                                 ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3735                                 lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3736
3737                                 if(!is_zero(name->value)) {
3738                                         spill = insert_mem_copy(si, pred, value);
3739                                 } else {
3740                                         spill = val_defs->spills;
3741                                 }
3742                         } else {
3743                                 spill = val_defs->spills;
3744                         }
3745
3746                         assert(spill && "no spill placed before PhiM");
3747                         set_irn_n(phi_m, n, spill);
3748                 }
3749         }
3750 }
3751
3752 static void
3753 walker_reload_placer(ir_node * bb, void * data) {
3754         spill_ilp_t   *si = (spill_ilp_t*)data;
3755         ir_node       *irn;
3756         spill_bb_t    *spill_bb = get_irn_link(bb);
3757
3758         /* reloads at end of block */
3759         if(spill_bb->reloads) {
3760                 keyval_t    *keyval;
3761
3762                 set_foreach(spill_bb->reloads, keyval) {
3763                         ir_node        *irn = (ir_node*)keyval->key;
3764                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3765                         lpp_name_t     *name;
3766
3767                         name = si->lpp->vars[reload];
3768                         if(!is_zero(name->value)) {
3769                                 ir_node    *reload;
3770                                 ir_node    *insert_pos = bb;
3771                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3772                                 op_t       *prev_op = get_irn_link(prev);
3773
3774                                 while(be_is_Spill(prev)) {
3775                                         prev = sched_prev(prev);
3776                                 }
3777
3778                                 prev_op = get_irn_link(prev);
3779
3780                                 /* insert reload before pre-remats */
3781                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3782                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3783                                         insert_pos = prev;
3784
3785                                         do {
3786                                                 prev = sched_prev(prev);
3787                                         } while(be_is_Spill(prev));
3788
3789                                         prev_op = get_irn_link(prev);
3790
3791                                 }
3792
3793                                 reload = insert_reload(si, irn, insert_pos);
3794
3795                                 if(opt_keep_alive & KEEPALIVE_RELOADS)
3796                                         pset_insert_ptr(si->spills, reload);
3797                         }
3798                 }
3799         }
3800
3801         /* walk and insert more reloads and collect remats */
3802         sched_foreach_reverse(bb, irn) {
3803                 op_t     *op = get_irn_link(irn);
3804
3805                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3806                 if(is_Phi(irn)) break;
3807
3808                 if(op->is_remat) {
3809                         if(get_irn_mode(irn) != mode_T) {
3810                                 insert_remat(si, irn);
3811                         }
3812                 } else {
3813                         int    n;
3814
3815                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3816                                 ir_node    *arg = get_irn_n(irn, n);
3817
3818                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3819                                         lpp_name_t    *name;
3820
3821                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3822                                         if(!is_zero(name->value)) {
3823                                                 ir_node    *reload;
3824                                                 ir_node    *insert_pos = irn;
3825                                                 ir_node    *prev = sched_prev(insert_pos);
3826                                                 op_t       *prev_op;
3827
3828                                                 while(be_is_Spill(prev)) {
3829                                                         prev = sched_prev(prev);
3830                                                 }
3831
3832                                                 prev_op = get_irn_link(prev);
3833
3834                                                 /* insert reload before pre-remats */
3835                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3836                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3837                                                         insert_pos = prev;
3838
3839                                                         do {
3840                                                                 prev = sched_prev(prev);
3841                                                         } while(be_is_Spill(prev));
3842
3843                                                         prev_op = get_irn_link(prev);
3844
3845                                                 }
3846
3847                                                 reload = insert_reload(si, arg, insert_pos);
3848
3849                                                 set_irn_n(irn, n, reload);
3850
3851                                                 if(opt_keep_alive & KEEPALIVE_RELOADS)
3852                                                         pset_insert_ptr(si->spills, reload);
3853                                         }
3854                                 }
3855                         }
3856                 }
3857         }
3858
3859         del_set(spill_bb->ilp);
3860         if(spill_bb->reloads) del_set(spill_bb->reloads);
3861 }
3862
3863 static void
3864 walker_collect_used(ir_node * irn, void * data)
3865 {
3866         lc_bitset_t   *used = data;
3867
3868         lc_bitset_set(used, get_irn_idx(irn));
3869 }
3870
3871 struct kill_helper {
3872         lc_bitset_t  *used;
3873         spill_ilp_t  *si;
3874 };
3875
3876 static void
3877 walker_kill_unused(ir_node * bb, void * data)
3878 {
3879         struct kill_helper *kh = data;
3880         ir_node            *bad = get_irg_bad(get_irn_irg(bb));
3881         ir_node            *irn;
3882
3883
3884         for(irn=sched_first(bb); !sched_is_end(irn);) {
3885                 ir_node     *next = sched_next(irn);
3886                 int          n;
3887
3888                 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3889                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3890                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3891 #if 0
3892                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3893 #endif
3894                         }
3895
3896                         sched_remove(irn);
3897
3898                         set_nodes_block(irn, bad);
3899                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3900                                 set_irn_n(irn, n, bad);
3901                         }
3902                 }
3903                 irn = next;
3904         }
3905 }
3906
3907 static void
3908 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3909 {
3910         struct kill_helper kh;
3911
3912         kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3913         kh.si = si;
3914
3915         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3916         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3917
3918         lc_bitset_free(kh.used);
3919 }
3920
3921 void
3922 print_irn_pset(pset * p)
3923 {
3924         ir_node   *irn;
3925
3926         pset_foreach(p, irn) {
3927                 ir_printf("%+F\n", irn);
3928         }
3929 }
3930
3931 void
3932 dump_phi_class(spill_ilp_t * si, pset * phiclass, const char * file)
3933 {
3934     FILE           *f = fopen(file, "w");
3935     ir_node        *irn;
3936     interference_t *interference;
3937
        if(f == NULL) return;

3938     pset_break(phiclass);
3939     set_break(si->interferences);
3940
3941     ir_fprintf(f, "digraph phiclass {\n");
3942
3943     pset_foreach(phiclass, irn) {
3944         if(is_Phi(irn))
3945             ir_fprintf(f, "  %F%N [shape=box]\n",irn,irn);
3946     }
3947
3948     pset_foreach(phiclass, irn) {
3949         int n;
3950
3951         if(!is_Phi(irn)) continue;
3952
3953         for(n=get_irn_arity(irn)-1; n>=0; --n) {
3954             ir_node  *arg = get_irn_n(irn, n);
3955
3956             ir_fprintf(f, "  %F%N -> %F%N\n",irn,irn,arg,arg);
3957         }
3958     }
3959
3960     set_foreach(si->interferences, interference) {
3961         const ir_node  *a  = interference->a;
3962         const ir_node  *b  = interference->b;
3963         if(get_phi_class(a) == phiclass) {
3964             ir_fprintf(f, "  %F%N -> %F%N [color=red,dir=none,style=bold]\n",a,a,b,b);
3965         }
3966     }
3967
3968     ir_fprintf(f, "}\n");
3969     fclose(f);
3970 }
3971
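/**
 * Re-establish SSA form after the ILP results have been applied: multiple
 * spills of a value are merged by SSA construction (ignoring uses kept
 * alive by the End node), and remats and reloads are wired into the uses
 * of the original value as additional definitions.
 */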
3972 static void
3973 rewire_uses(spill_ilp_t * si)
3974 {
3975         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3976         defs_t               *defs;
3977         pset                 *ignore = pset_new_ptr(1);
3978
3979         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3980
3981         /* first fix uses of spills */
3982         set_foreach(si->values, defs) {
3983                 pset           *reloads;
3984                 pset           *spills;
3985                 const ir_node  *next = defs->remats;
3986                 int remats = 0;
3987
3988                 reloads = pset_new_ptr_default();
3989
3990                 while(next) {
3991                         if(be_is_Reload(next)) {
3992                                 pset_insert_ptr(reloads, next);
3993                         } else {
3994                                 ++remats;
3995                         }
3996                         next = get_irn_link(next);
3997                 }
3998
3999                 spills = get_spills_for_value(si, defs->value);
4000                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
4001                 if(pset_count(spills) > 1) {
4002                         //assert(pset_count(reloads) > 0);
4003                         //print_irn_pset(spills);
4004                         //print_irn_pset(reloads);
4005
4006                         be_ssa_constr_set_ignore(dfi, si->lv, spills, ignore);
4007                 }
4008
4009                 del_pset(reloads);
4010                 del_pset(spills);
4011         }
4012
4013         /* then fix uses of remats and reloads */
4014         set_foreach(si->values, defs) {
4015                 pset           *nodes;
4016                 const ir_node  *next = defs->remats;
4017
4018                 if(next) {
4019                         nodes = pset_new_ptr_default();
4020                         pset_insert_ptr(nodes, defs->value);
4021
4022                         while(next) {
4023                                 pset_insert_ptr(nodes, next);
4024                                 next = get_irn_link(next);
4025                         }
4026
4027                         if(pset_count(nodes) > 1) {
4028                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
4029                                 be_ssa_constr_set(dfi, si->lv, nodes);
4030                         }
4031
4032                         del_pset(nodes);
4033                 }
4034         }
4035
4036 //      remove_unused_defs(si);
4037
4038         be_free_dominance_frontiers(dfi);
4039 }
4040
4041
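/**
 * Apply the ILP solution to the irg: remove unnecessary remats, place
 * spills, reloads and memory operands, fix memory Phis, clean up the
 * remat info and rewire all uses.
 */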
4042 static void
4043 writeback_results(spill_ilp_t * si)
4044 {
4045         /* walk through the graph and collect all spills, reloads and remats for a value */
4046
4047         si->values = new_set(cmp_defs, 4096);
4048
4049         DBG((si->dbg, LEVEL_1, "Applying results\n"));
4050         delete_unnecessary_remats(si);
4051         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
4052         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
4053         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
4054         if(opt_memoperands)
4055                 insert_memoperands(si);
4056         phim_fixer(si);
4057
4058         /* clean the remat info! there are still back-edges leading there! */
4059         clean_remat_info(si);
4060
4061         rewire_uses(si);
4062
4063         connect_all_spills_with_keep(si);
4064
4065         del_set(si->values);
4066 }
4067
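/**
 * Return the number of usable (non-ignore) registers in the current
 * register class.
 */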
4068 static int
4069 get_n_regs(spill_ilp_t * si)
4070 {
4071         int     arch_n_regs = arch_register_class_n_regs(si->cls);
4072         int     free = 0;
4073         int     i;
4074
4075         for(i=0; i<arch_n_regs; i++) {
4076                 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
4077                         free++;
4078                 }
4079         }
4080
4081         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
4082         return free;
4083 }
4084
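/**
 * Block walker: move every reload of the current register class upwards
 * in the schedule as long as the register pressure (annotated into the
 * node links by walker_pressure_annotator) stays below the number of
 * available registers. Reloads never move across Phis, other reloads of
 * the class or their own spill.
 */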
4085 static void
4086 walker_reload_mover(ir_node * bb, void * data)
4087 {
4088         spill_ilp_t   *si = data;
4089         ir_node           *tmp;
4090
4091         sched_foreach(bb, tmp) {
4092                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
4093                         ir_node       *reload = tmp;
4094                         ir_node       *irn = tmp;
4095
4096                         /* move reload upwards */
4097
4098                         int pressure = (int)get_irn_link(reload);
4099                         if(pressure < si->n_regs) {
4100                                 irn = sched_prev(reload);
4101                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
4102                                 sched_remove(reload);
4103                                 pressure = (int)get_irn_link(irn);
4104
4105                                 while(pressure < si->n_regs) {
4106                                         if( sched_is_end(irn) ||
4107                                            (be_is_Reload(irn) && has_reg_class(si, irn)) ||
4108                                            /* do not move reload before its spill */
4109                                            (irn == be_get_Reload_mem(reload)) ||
4110                                            /* do not move before phi */
4111                                            is_Phi(irn)) break;
4112
4113                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
4114                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
4115                                         irn = sched_prev(irn);
4116
4117                                         pressure = (int)get_irn_link(irn);
4118                                 }
4119
4120                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
4121                                 sched_put_after(irn, reload);
4122                         }
4123                 }
4124         }
4125 }
4126
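/**
 * Move reloads upwards in all blocks (see walker_reload_mover).
 */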
4127 static void
4128 move_reloads_upward(spill_ilp_t * si)
4129 {
4130         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
4131 }
4132
4133
4134 /**
4135  * Walk all irg blocks and check for interfering spills inside of phi classes
4136  */
4137 static void
4138 luke_meminterferencechecker(ir_node * bb, void * data)
4139 {
4140         spill_ilp_t    *si = (spill_ilp_t*)data;
4141         int             l1, l2;
4142
4143         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
4144                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
4145
4146                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_T)) continue;
4147
4148                 /* a is only interesting if it is in my register class and if it is inside a phi class */
4149                 if (has_reg_class(si, a) && get_phi_class(a)) {
4150                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
4151                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
4152
4153                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_T)) continue;
4154
4155                                 /* a and b are only interesting if they are in the same phi class */
4156                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
4157                                         if(values_interfere_in_block(si, bb, a, b)) {
4158                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
4159                                         }
4160                                 }
4161                         }
4162                 }
4163         }
4164 }
4165
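/**
 * Recompute the phi classes and check that no two spilled values of the
 * same phi class interfere in memory.
 */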
4166 static void
4167 verify_phiclasses(spill_ilp_t * si)
4168 {
4169         /* analyze phi classes */
4170         phi_class_compute(si->chordal_env->irg);
4171
4172         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
4173         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
4174 }
4175
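/**
 * Main entry point of the ILP based spiller: collect and insert possible
 * remats, build the spill/remat ILP for the given register class, solve
 * it (if SOLVE is defined), write the solution back into the graph and
 * finally clean up unused nodes and move reloads upwards.
 */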
4176 void
4177 be_spill_remat(const be_chordal_env_t * chordal_env)
4178 {
4179         char            buf[256];
4180         char            problem_name[256];
4181         char            dump_suffix[256];
4182         char            dump_suffix2[256];
4183         struct obstack  obst;
4184         spill_ilp_t     si;
4185
4186         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
4187         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
4188         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
4189
4190         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
4191         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
4192
4193         if(opt_verify & VERIFY_DOMINANCE)
4194                 be_check_dominance(chordal_env->irg);
4195
4196         obstack_init(&obst);
4197         si.chordal_env = chordal_env;
4198         si.obst = &obst;
4199         si.cls = chordal_env->cls;
4200         si.lpp = new_lpp(problem_name, lpp_minimize);
4201         si.remat_info = new_set(cmp_remat_info, 4096);
4202         si.interferences = new_set(cmp_interference, 32);
4203         si.memoperands = new_set(cmp_memoperands, 128);
4204         si.all_possible_remats = pset_new_ptr_default();
4205         si.spills = pset_new_ptr_default();
4206         si.inverse_ops = pset_new_ptr_default();
4207         si.lv = chordal_env->lv;
4208         si.keep = NULL;
4209         si.n_regs = get_n_regs(&si);
4210
4211         set_irg_link(chordal_env->irg, &si);
4212         compute_doms(chordal_env->irg);
4213
4214         /* compute phi classes */
4215 //      phi_class_compute(chordal_env->irg);
4216
4217         be_analyze_regpressure(chordal_env, "-pre");
4218
4219         if(opt_remats) {
4220                 /* collect remats */
4221                 DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
4222                 irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
4223         }
4224
4225         /* insert possible remats */
4226         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
4227         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
4228         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
4229
4230         if(opt_keep_alive & KEEPALIVE_REMATS) {
4231                 DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
4232                 connect_all_remats_with_keep(&si);
4233                 /* dump graph with inserted remats */
4234                 dump_graph_with_remats(chordal_env->irg, dump_suffix);
4235         }
4236
4237         /* insert copies for phi arguments not in my regclass */
4238         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
4239
4240         /* recompute liveness */
4241         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
4242         be_liveness_recompute(si.lv);
4243
4244         /* build the ILP */
4245
4246         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
4247         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
4248         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
4249
4250         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
4251         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
4252
4253         if(opt_memcopies) {
4254                 DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
4255                 memcopyhandler(&si);
4256         }
4257
4258         if(opt_dump_flags & DUMP_PROBLEM) {
4259                 FILE           *f;
4260                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
4261                 if ((f = fopen(buf, "wt")) != NULL) {
4262                         lpp_dump_plain(si.lpp, f);
4263                         fclose(f);
4264                 }
4265         }
4266
4267         if(opt_dump_flags & DUMP_MPS) {
4268                 FILE *f;
4269
4270                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.mps", problem_name);
4271                 if((f = fopen(buf, "wt")) != NULL) {
4272                         mps_write_mps(si.lpp, s_mps_fixed, f);
4273                         fclose(f);
4274                 }
4275
4276                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.mst", problem_name);
4277                 if((f = fopen(buf, "wt")) != NULL) {
4278                         mps_write_mst(si.lpp, s_mps_fixed, f);
4279                         fclose(f);
4280                 }
4281         }
4282
4283         lpp_check_startvals(si.lpp);
4284
4285 #ifdef SOLVE
4286         DBG((si.dbg, LEVEL_1, "\tSolving %s (%d variables, %d constraints)\n", problem_name, si.lpp->var_next, si.lpp->cst_next));
4287         lpp_set_time_limit(si.lpp, opt_timeout);
4288
4289         if(opt_log)
4290                 lpp_set_log(si.lpp, stdout);
4291
4292 #ifdef SOLVE_LOCAL
4293         lpp_solve_cplex(si.lpp);
4294 #else
4295         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
4296 #endif
4297         assert(lpp_is_sol_valid(si.lpp)
4298                && "solution of ILP must be valid");
4299
4300         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
4301
4302         if(opt_dump_flags & DUMP_SOLUTION) {
4303                 FILE           *f;
4304                 char            buf[256];
4305
4306                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
4307                 if ((f = fopen(buf, "wt")) != NULL) {
4308                         int             i;
4309                         for (i = 0; i < si.lpp->var_next; ++i) {
4310                                 lpp_name_t     *name = si.lpp->vars[i];
4311                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
4312                         }
4313                         fclose(f);
4314                 }
4315         }
4316
4317         writeback_results(&si);
4318
4319 #endif                          /* SOLVE */
4320
4321         kill_all_unused_values_in_schedule(&si);
4322
4323         if(opt_keep_alive & (KEEPALIVE_SPILLS | KEEPALIVE_RELOADS))
4324                 be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
4325
4326         // move reloads upwards
4327         be_liveness_recompute(si.lv);
4328         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4329         move_reloads_upward(&si);
4330
4331         if(opt_memcopies) {
4332                 verify_phiclasses(&si);
4333         }
4334
4335         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4336
4337         dump_pressure_graph(&si, dump_suffix2);
4338
4339         be_analyze_regpressure(chordal_env, "-post");
4340
4341         if(opt_verify & VERIFY_DOMINANCE)
4342                 be_check_dominance(chordal_env->irg);
4343
4344         free_dom(chordal_env->irg);
4345         del_set(si.interferences);
4346         del_pset(si.inverse_ops);
4347         del_pset(si.all_possible_remats);
4348         del_set(si.memoperands);
4349         del_pset(si.spills);
4350         free_lpp(si.lpp);
4351         obstack_free(&obst, NULL);
4352         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
4353 }
4354
4355 #else                           /* WITH_ILP */
4356
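/** Dummy so that this file is not empty if WITH_ILP is not defined. */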
4357 static void
4358 only_that_you_can_compile_without_WITH_ILP_defined(void)
4359 {
4360 }
4361
4362 #endif                          /* WITH_ILP */