1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35 #include "irvrfy.h"
36
37 #include <lpp/lpp.h>
38 #include <lpp/mps.h>
39 #include <lpp/lpp_net.h>
40 #include <lpp/lpp_cplex.h>
41 //#include <lc_pset.h>
42 //#include <libcore/lc_bitset.h>
43
44 #include "be_t.h"
45 #include "belive_t.h"
46 #include "besched_t.h"
47 #include "beirgmod.h"
48 #include "bearch.h"
49 #include "beabi.h"
50 #include "benode_t.h"
51 #include "beutil.h"
52 #include "bespillremat.h"
53 #include "bespill.h"
54 #include "bepressurestat.h"
55
56 #include "bechordal_t.h"
57
58 #ifdef WITH_LIBCORE
59 #include <libcore/lc_opts.h>
60 #include <libcore/lc_opts_enum.h>
61 #endif /* WITH_LIBCORE */
62
63 #define DUMP_PROBLEM       1
64 #define DUMP_MPS           2
65 #define DUMP_SOLUTION      4
66
67 #define KEEPALIVE_REMATS   1
68 #define KEEPALIVE_SPILLS   2
69 #define KEEPALIVE_RELOADS  4
70
71 #define VERIFY_MEMINTERF   1
72 #define VERIFY_DOMINANCE   2
73
74 #define REMATS_NONE        0
75 #define REMATS_BRIGGS      1
76 #define REMATS_NOINVERSE   2
77 #define REMATS_ALL         3
78
79 static int opt_dump_flags   = 0;
80 static int opt_log = 0;
81 static int opt_keep_alive   = 0;
82 static int opt_goodwin = 1;
83 static int opt_memcopies = 1;
84 static int opt_memoperands = 1;
85 static int opt_verify = VERIFY_MEMINTERF;
86 static int opt_remats = REMATS_ALL;
87 static int opt_repair_schedule = 0;
88 static int opt_no_enlarge_liveness = 0;
89 static int opt_timeout = 300;
90 static double opt_cost_reload = 8.0;
91 static double opt_cost_memoperand =  7.0;
92 static double opt_cost_spill =  50.0;
93 static double opt_cost_remat =  1.0;
94
95
96 #ifdef WITH_LIBCORE
97 static const lc_opt_enum_mask_items_t dump_items[] = {
98         { "problem",  DUMP_PROBLEM  },
99         { "mps",      DUMP_MPS      },
100         { "solution", DUMP_SOLUTION },
101         { NULL,       0 }
102 };
103
104 static lc_opt_enum_mask_var_t dump_var = {
105         &opt_dump_flags, dump_items
106 };
107
108 static const lc_opt_enum_mask_items_t keepalive_items[] = {
109         { "remats",  KEEPALIVE_REMATS  },
110         { "spills",  KEEPALIVE_SPILLS  },
111         { "reloads", KEEPALIVE_RELOADS },
112         { NULL,      0 }
113 };
114
115 static lc_opt_enum_mask_var_t keep_alive_var = {
116         &opt_keep_alive, keepalive_items
117 };
118
119 static const lc_opt_enum_mask_items_t remats_items[] = {
120         { "none",      REMATS_NONE      },
121         { "briggs",    REMATS_BRIGGS    },
122         { "noinverse", REMATS_NOINVERSE },
123         { "all",       REMATS_ALL       },
124         { NULL,        0 }
125 };
126
127 static lc_opt_enum_mask_var_t remats_var = {
128         &opt_remats, remats_items
129 };
130
131 static const lc_opt_table_entry_t options[] = {
132         LC_OPT_ENT_ENUM_MASK("keepalive", "keep alive remats, spills or reloads",                   &keep_alive_var),
133
134         LC_OPT_ENT_BOOL     ("goodwin",  "activate goodwin reduction",                              &opt_goodwin),
135         LC_OPT_ENT_BOOL     ("memcopies",  "activate memcopy handling",                             &opt_memcopies),
136         LC_OPT_ENT_BOOL     ("memoperands",  "activate memoperands",                                &opt_memoperands),
137         LC_OPT_ENT_ENUM_INT ("remats",  "type of remats to insert (none, briggs, noinverse or all)",&remats_var),
138         LC_OPT_ENT_BOOL     ("repair_schedule",  "repair the schedule by rematting once used nodes",&opt_repair_schedule),
139         LC_OPT_ENT_BOOL     ("no_enlarge_liveness", "do not enlarge liveness of operands of remats",&opt_no_enlarge_liveness),
140
141         LC_OPT_ENT_ENUM_MASK("dump", "dump problem, mps or solution",                               &dump_var),
142         LC_OPT_ENT_BOOL     ("log",  "activate the lpp log",                                        &opt_log),
143         LC_OPT_ENT_INT      ("timeout",  "ILP solver timeout",                                      &opt_timeout),
144
145         LC_OPT_ENT_DBL      ("cost_reload",  "cost of a reload",                                    &opt_cost_reload),
146         LC_OPT_ENT_DBL      ("cost_memoperand",  "cost of a memory operand",                        &opt_cost_memoperand),
147         LC_OPT_ENT_DBL      ("cost_spill",  "cost of a spill instruction",                          &opt_cost_spill),
148         LC_OPT_ENT_DBL      ("cost_remat",  "cost of a rematerialization",                          &opt_cost_remat),
149         { NULL }
150 };
151
152 void be_spill_remat_register_options(lc_opt_entry_t *grp)
153 {
154         lc_opt_entry_t *my_grp = lc_opt_get_grp(grp, "remat");
155         lc_opt_add_table(my_grp, options);
156 }
157 #endif
158
159
160 //#define EXECFREQ_LOOPDEPH   /* compute execution frequency from loop depth only */
161 //#define SCHEDULE_PHIM   /* insert phim nodes into schedule */
162
163 #define  SOLVE
164 //#define  SOLVE_LOCAL
165 #define LPP_SERVER "i44pc52"
166 #define LPP_SOLVER "cplex"
167
168
169 #define MAX_PATHS      INT_MAX
170 #define ILP_UNDEF               -1
171
172 typedef struct _spill_ilp_t {
173         const arch_register_class_t  *cls;
174         int                           n_regs;
175         const be_chordal_env_t       *chordal_env;
176         be_lv_t                      *lv;
177         lpp_t                        *lpp;
178         struct obstack               *obst;
179         set                          *remat_info;
180         pset                         *all_possible_remats;
181         pset                         *inverse_ops;
182         ir_node                      *keep;
183         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
184         pset                         *spills;
185         set                          *interferences;
186         ir_node                      *m_unknown;
187         set                          *memoperands;
188         DEBUG_ONLY(firm_dbg_module_t * dbg);
189 } spill_ilp_t;
190
191 typedef int ilp_var_t;
192 typedef int ilp_cst_t;
193
194 typedef struct _spill_bb_t {
195         set      *ilp;
196         set      *reloads;
197 } spill_bb_t;
198
199 typedef struct _remat_t {
200         const ir_node        *op;      /**< for copy_irn */
201         const ir_node        *value;   /**< the value which is being recomputed by this remat */
202         const ir_node        *proj;    /**< not NULL if the above op produces a tuple */
203         int                   cost;    /**< cost of this remat */
204         int                   inverse; /**< nonzero if this is an inverse remat */
205 } remat_t;
206
207 /**
208  * Data to be attached to each IR node. For remats this contains the ilp_var
209  * for this remat and for normal ops this contains the ilp_vars for
210  * reloading each operand
211  */
212 typedef struct _op_t {
213         int             is_remat;
214         union {
215                 struct {
216                         ilp_var_t       ilp;
217                         const remat_t  *remat; /**< the remat this op belongs to */
218                         int             pre; /**< 1, if this is a pressure-increasing remat */
219                 } remat;
220                 struct {
221                         ilp_var_t       ilp;
222                         ir_node        *op; /**< the operation this live range belongs to */
223                         union {
224                                 ilp_var_t      *reloads;
225                                 ilp_var_t      *copies;
226                         } args;
227                 } live_range;
228         } attr;
229 } op_t;
230
231 typedef struct _defs_t {
232         const ir_node   *value;
233         ir_node         *spills;  /**< points to the first spill for this value (linked by link field) */
234         ir_node         *remats;  /**< points to the first definition for this value (linked by link field) */
235 } defs_t;
236
237 typedef struct _remat_info_t {
238         const ir_node       *irn; /**< the irn to which these remats belong */
239         pset                *remats; /**< possible remats for this value */
240         pset                *remats_by_operand; /**< remats with this value as operand */
241 } remat_info_t;
242
243 typedef struct _keyval_t {
244         const void          *key;
245         const void          *val;
246 } keyval_t;
247
248 typedef struct _spill_t {
249         ir_node            *irn;
250         ilp_var_t           reg_in;
251         ilp_var_t           mem_in;
252         ilp_var_t           reg_out;
253         ilp_var_t           mem_out;
254         ilp_var_t           spill;
255 } spill_t;
256
257 typedef struct _memoperand_t {
258         ir_node             *irn; /**< the irn */
259         unsigned int         pos; /**< the position of the argument */
260         ilp_var_t            ilp; /**< the ilp var for this memory operand */
261 } memoperand_t;
262
263 static INLINE int
264 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
265 {
266         return chordal_has_class(si->chordal_env, irn);
267 }
268
269 #if 0
270 static int
271 cmp_remat(const void *a, const void *b)
272 {
273         const keyval_t *p = a;
274         const keyval_t *q = b;
275         const remat_t  *r = p->val;
276         const remat_t  *s = q->val;
277
278         assert(r && s);
279
280         return !(r == s || r->op == s->op);
281 }
282 #endif
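/**
 * Set comparator for remat_t entries: reports equality (returns 0) iff both
 * entries are the same remat or recompute their value via the same op.
 */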
283 static int
284 cmp_remat(const void *a, const void *b)
285 {
286         const remat_t  *r = a;
287         const remat_t  *s = b;
288
289         return !(r == s || r->op == s->op);
290 }
291
292 static int
293 cmp_spill(const void *a, const void *b, size_t size)
294 {
295         const spill_t *p = a;
296         const spill_t *q = b;
297
298 //      return !(p->irn == q->irn && p->bb == q->bb);
299         return !(p->irn == q->irn);
300 }
301
302 static int
303 cmp_memoperands(const void *a, const void *b, size_t size)
304 {
305         const memoperand_t *p = a;
306         const memoperand_t *q = b;
307
308         return !(p->irn == q->irn && p->pos == q->pos);
309 }
310
311 static keyval_t *
312 set_find_keyval(set * set, const void * key)
313 {
314         keyval_t     query;
315
316         query.key = key;
317         return set_find(set, &query, sizeof(query), HASH_PTR(key));
318 }
319
320 static keyval_t *
321 set_insert_keyval(set * set, void * key, void * val)
322 {
323         keyval_t     query;
324
325         query.key = key;
326         query.val = val;
327         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
328 }
329
330 static defs_t *
331 set_find_def(set * set, const ir_node * value)
332 {
333         defs_t     query;
334
335         query.value = value;
336         return set_find(set, &query, sizeof(query), HASH_PTR(value));
337 }
338
339 static defs_t *
340 set_insert_def(set * set, const ir_node * value)
341 {
342         defs_t     query;
343
344         query.value = value;
345         query.spills = NULL;
346         query.remats = NULL;
347         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
348 }
349
350 static memoperand_t *
351 set_insert_memoperand(set * set, ir_node * irn, unsigned int pos, ilp_var_t ilp)
352 {
353         memoperand_t     query;
354
355         query.irn = irn;
356         query.pos = pos;
357         query.ilp = ilp;
358         return set_insert(set, &query, sizeof(query), HASH_PTR(irn)+pos);
359 }
360
361 static memoperand_t *
362 set_find_memoperand(set * set, const ir_node * irn, unsigned int pos)
363 {
364         memoperand_t     query;
365
366         query.irn = (ir_node*)irn;
367         query.pos = pos;
368         return set_find(set, &query, sizeof(query), HASH_PTR(irn)+pos);
369 }
370
371
372 static spill_t *
373 set_find_spill(set * set, const ir_node * value)
374 {
375         spill_t     query;
376
377         query.irn = (ir_node*)value;
378         return set_find(set, &query, sizeof(query), HASH_PTR(value));
379 }
380
381 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
382 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
383 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
384 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
385 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
386
387 static int
388 cmp_remat_info(const void *a, const void *b, size_t size)
389 {
390         const remat_info_t *p = a;
391         const remat_info_t *q = b;
392
393         return !(p->irn == q->irn);
394 }
395
396 static int
397 cmp_defs(const void *a, const void *b, size_t size)
398 {
399         const defs_t *p = a;
400         const defs_t *q = b;
401
402         return !(p->value == q->value);
403 }
404
405 static int
406 cmp_keyval(const void *a, const void *b, size_t size)
407 {
408         const keyval_t *p = a;
409         const keyval_t *q = b;
410
411         return !(p->key == q->key);
412 }
413
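/**
 * Execution frequency of the block containing @p irn, used to weight the ILP
 * cost terms.  A small FUDGE summand keeps the coefficient strictly positive;
 * with EXECFREQ_LOOPDEPH defined the frequency is estimated as 10^loop_depth.
 */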
414 static double
415 execution_frequency(const spill_ilp_t *si, const ir_node * irn)
416 {
417 #define FUDGE 0.001
418 #ifndef EXECFREQ_LOOPDEPH
419         return get_block_execfreq(si->chordal_env->exec_freq, get_block(irn)) + FUDGE;
420 #else
421         if(is_Block(irn))
422                 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
423         else
424                 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
425 #endif
426 }
427
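/**
 * Cost used for @p irn in the ILP objective: the configured spill/reload cost
 * for Spill and Reload nodes, otherwise the backend's estimated cost of the op.
 */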
428 static double
429 get_cost(const spill_ilp_t * si, const ir_node * irn)
430 {
431         if(be_is_Spill(irn)) {
432                 return opt_cost_spill;
433         } else if(be_is_Reload(irn)){
434                 return opt_cost_reload;
435         } else {
436                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
437         }
438 }
439
440 /**
441  * Checks whether the node is rematerializable and its operands have suitable reg classes
442  */
443 static INLINE int
444 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
445 {
446         int               n;
447         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
448         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
449
450 #if 0
451         if(!remat)
452                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
453 #endif
454
455         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
456                 ir_node        *op = get_irn_n(irn, n);
457                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
458
459 //              if(!remat)
460 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
461         }
462
463         return remat;
464 }
465
466 /**
467  * Try to create a remat from @p op with destination value @p dest_value
468  */
469 static INLINE remat_t *
470 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
471 {
472         remat_t  *remat = NULL;
473
474 //      if(!mode_is_datab(get_irn_mode(dest_value)))
475 //              return NULL;
476
477         if(dest_value == op) {
478                 const ir_node *proj = NULL;
479
480                 if(is_Proj(dest_value)) {
481                         op = get_Proj_pred(op);
482                         proj = dest_value;
483                 }
484
485                 if(!is_rematerializable(si, op))
486                         return NULL;
487
488                 remat          = obstack_alloc(si->obst, sizeof(*remat));
489                 remat->op      = op;
490                 remat->cost    = get_cost(si, op);
491                 remat->value   = dest_value;
492                 remat->proj    = proj;
493                 remat->inverse = 0;
494         } else {
495                 arch_inverse_t     inverse;
496                 int                n;
497
498                 /* get the index of the operand we want to retrieve by the inverse op */
499                 for (n = get_irn_arity(op)-1; n>=0; --n) {
500                         ir_node        *arg = get_irn_n(op, n);
501
502                         if(arg == dest_value) break;
503                 }
504                 if(n<0) return NULL;
505
506                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
507
508                 /* else ask the backend to give an inverse op */
509                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
510                         int   i;
511
512                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
513
514                         assert(inverse.n > 0 && "inverse op should have at least one node");
515
516                         for(i=inverse.n-1; i>=0; --i) {
517                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
518                         }
519
520                         if(inverse.n <= 2) {
521                                 remat = obstack_alloc(si->obst, sizeof(*remat));
522                                 remat->op = inverse.nodes[0];
523                                 remat->cost = inverse.costs;
524                                 remat->value = dest_value;
525                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
526                                 remat->inverse = 1;
527
528                                 assert(!remat->proj || is_Proj(remat->proj));
529                         } else {
530                                 assert(0 && "I cannot handle remats with more than 2 nodes");
531                         }
532                 }
533         }
534
535         if(remat) {
536                 if(remat->proj) {
537                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
538                 } else {
539                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
540                 }
541         }
542         return remat;
543 }
544
545
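/**
 * Registers @p remat in si->remat_info twice: under the value it recomputes
 * (remats) and under every operand of the remat op (remats_by_operand).
 */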
546 static INLINE void
547 add_remat(const spill_ilp_t * si, const remat_t * remat)
548 {
549         remat_info_t    *remat_info,
550                      query;
551         int              n;
552
553         assert(remat->op);
554         assert(remat->value);
555
556         query.irn = remat->value;
557         query.remats = NULL;
558         query.remats_by_operand = NULL;
559         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
560
561         if(remat_info->remats == NULL) {
562                 remat_info->remats = new_pset(cmp_remat, 4096);
563         }
564         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
565
566         /* insert the remat into the remats_by_operand set of each argument of the remat op */
567         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
568                 ir_node        *arg = get_irn_n(remat->op, n);
569
570                 query.irn = arg;
571                 query.remats = NULL;
572                 query.remats_by_operand = NULL;
573                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
574
575                 if(remat_info->remats_by_operand == NULL) {
576                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
577                 }
578                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
579         }
580 }
581
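/**
 * Counts the users of @p irn, skipping users that were themselves created
 * as inverse remat operations.
 */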
582 static int
583 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
584 {
585         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
586         int                i = 0;
587
588         while(edge) {
589                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
590                         ++i;
591                 }
592                 edge = get_irn_out_edge_next(irn, edge);
593         }
594
595         return i;
596 }
597
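/**
 * Counts the operands of @p irn (or of its Proj predecessor) that belong to
 * the register class under consideration.
 */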
598 static int
599 get_irn_n_nonignore_args(const spill_ilp_t * si, const ir_node * irn)
600 {
601         int n;
602         int ret = 0;
603
604         if(is_Proj(irn))
605                 irn = get_Proj_pred(irn);
606
607         for(n=get_irn_arity(irn)-1; n>=0; --n) {
608                 const ir_node  *arg = get_irn_n(irn, n);
609
610                 if(has_reg_class(si, arg)) ++ret;
611         }
612
613         return ret;
614 }
615
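/**
 * Collects candidate remats for @p op: a remat recomputing op itself (subject
 * to the remat options) and, with REMATS_ALL, inverse remats recomputing one
 * of its arguments.
 */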
616 static INLINE void
617 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
618 {
619         int      n;
620         remat_t *remat;
621
622         if( has_reg_class(si, op)
623         && (opt_repair_schedule || get_irn_n_nonremat_edges(si, op) > 1)
624         && (opt_remats !=  REMATS_BRIGGS || get_irn_n_nonignore_args(si, op) == 0)
625         ) {
626                 remat = get_remat_from_op(si, op, op);
627                 if(remat) {
628                         add_remat(si, remat);
629                 }
630         }
631
632         if(opt_remats == REMATS_ALL) {
633                 /* additionally, try to derive an inverse remat from op
634                    for each of its arguments */
635                 for (n = get_irn_arity(op)-1; n>=0; --n) {
636                         ir_node        *arg = get_irn_n(op, n);
637
638                         if(has_reg_class(si, arg)) {
639                                 /* try to get an inverse remat */
640                                 remat = get_remat_from_op(si, arg, op);
641                                 if(remat) {
642                                         add_remat(si, remat);
643                                 }
644                         }
645                 }
646         }
647 }
648
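/**
 * Returns nonzero if the definition of @p val is available at @p pos, i.e. it
 * is scheduled before @p pos in the same block or lies in a dominating block.
 */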
649 static INLINE int
650 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
651 {
652         ir_node *block;
653         ir_node *def_block = get_nodes_block(val);
654         int      ret;
655
656         if(val == pos)
657                 return 0;
658
659         /* if pos is at end of a basic block */
660         if(is_Block(pos)) {
661                 ret = (pos == def_block || block_dominates(def_block, pos));
662 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
663                 return ret;
664         }
665
666         /* else if this is a normal operation */
667         block = get_nodes_block(pos);
668         if(block == def_block) {
669                 if(!sched_is_scheduled(val)) return 1;
670
671                 ret = sched_comes_after(val, pos);
672 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
673                 return ret;
674         }
675
676         ret = block_dominates(def_block, block);
677 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
678         return ret;
679 }
680
681 static INLINE ir_node *
682 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
683 {
684         return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
685 }
686
687 /**
688  * Returns first non-Phi node of block @p bb
689  */
690 static INLINE ir_node *
691 sched_block_first_nonphi(const ir_node * bb)
692 {
693         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
694 }
695
696 static int
697 sched_skip_proj_predicator(const ir_node * irn, void * data)
698 {
699         return (is_Proj(irn));
700 }
701
702 static INLINE ir_node *
703 sched_next_nonproj(const ir_node * irn, int forward)
704 {
705         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
706 }
707
708 /**
709  * Returns next operation node (non-Proj) after @p irn
710  * or the basic block of this node
711  */
712 static INLINE ir_node *
713 sched_next_op(const ir_node * irn)
714 {
715         ir_node *next = sched_next(irn);
716
717         if(is_Block(next))
718                 return next;
719
720         return sched_next_nonproj(next, 1);
721 }
722
723 /**
724  * Returns previous operation node (non-Proj) before @p irn
725  * or the basic block of this node
726  */
727 static INLINE ir_node *
728 sched_prev_op(const ir_node * irn)
729 {
730         ir_node *prev = sched_prev(irn);
731
732         if(is_Block(prev))
733                 return prev;
734
735         return sched_next_nonproj(prev, 0);
736 }
737
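/**
 * Schedules @p irn right after @p insert and its Projs
 * (after the block's Phis if @p insert is a block).
 */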
738 static void
739 sched_put_after(ir_node * insert, ir_node * irn)
740 {
741         if(is_Block(insert)) {
742                 insert = sched_block_first_nonphi(insert);
743         } else {
744                 insert = sched_next_op(insert);
745         }
746         sched_add_before(insert, irn);
747 }
748
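/**
 * Schedules @p irn right before @p insert, skipping back over Projs
 * (or before the trailing control flow nodes if @p insert is a block).
 */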
749 static void
750 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
751 {
752         if(is_Block(insert)) {
753                 insert = sched_block_last_noncf(si, insert);
754         } else {
755                 insert = sched_next_nonproj(insert, 0);
756                 insert = sched_prev(insert);
757         }
758         sched_add_after(insert, irn);
759 }
760
761 /**
762  * Tells you whether a @p remat can be placed before the irn @p pos
763  */
764 static INLINE int
765 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
766 {
767         const ir_node   *op = remat->op;
768         const ir_node   *prev;
769         int        n,
770                            res = 1;
771
772         if(is_Block(pos)) {
773                 prev = sched_block_last_noncf(si, pos);
774                 prev = sched_next_nonproj(prev, 0);
775         } else {
776                 prev = sched_prev_op(pos);
777         }
778         /* do not remat if the rematted value is defined immediately before this op */
779         if(prev == remat->op) {
780                 return 0;
781         }
782
783 #if 0
784         /* this should be just fine, the following OP will be using this value, right? */
785
786         /* only remat AFTER the real definition of a value (?) */
787         if(!value_is_defined_before(si, pos, remat->value)) {
788 //              ir_fprintf(stderr, "error(not defined)");
789                 return 0;
790         }
791 #endif
792
793         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
794                 const ir_node   *arg = get_irn_n(op, n);
795
796                 if(opt_no_enlarge_liveness) {
797                         if(has_reg_class(si, arg) && live) {
798                                 res &= pset_find_ptr((pset*)live, arg)?1:0;
799                         } else {
800                                 res &= value_is_defined_before(si, pos, arg);
801                         }
802                 } else {
803                         res &= value_is_defined_before(si, pos, arg);
804                 }
805         }
806
807         return res;
808 }
809
810 /**
811  * Tells you whether a @p remat can be placed after the irn @p pos
812  */
813 static INLINE int
814 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
815 {
816         if(is_Block(pos)) {
817                 pos = sched_block_first_nonphi(pos);
818         } else {
819                 pos = sched_next_op(pos);
820         }
821
822         /* only remat AFTER the real definition of a value (?) */
823         if(!value_is_defined_before(si, pos, remat->value)) {
824                 return 0;
825         }
826
827         return can_remat_before(si, remat, pos, live);
828 }
829
830 /**
831  * Collect potentially rematerializable OPs
832  */
833 static void
834 walker_remat_collector(ir_node * irn, void * data)
835 {
836         spill_ilp_t    *si = data;
837
838         if(!is_Block(irn) && !is_Phi(irn)) {
839                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
840                 get_remats_from_op(si, irn);
841         }
842 }
843
844 /**
845  * Inserts a copy of @p irn before @p pos
846  */
847 static ir_node *
848 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
849 {
850         ir_node     *bb;
851         ir_node     *copy;
852
853         bb = is_Block(pos)?pos:get_nodes_block(pos);
854         copy = exact_copy(irn);
855
856         _set_phi_class(copy, NULL);
857         set_nodes_block(copy, bb);
858         sched_put_before(si, pos, copy);
859
860         return copy;
861 }
862
863 /**
864  * Inserts a copy of @p irn after @p pos
865  */
866 static ir_node *
867 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
868 {
869         ir_node     *bb;
870         ir_node     *copy;
871
872         bb = is_Block(pos)?pos:get_nodes_block(pos);
873         copy = exact_copy(irn);
874
875         _set_phi_class(copy, NULL);
876         set_nodes_block(copy, bb);
877         sched_put_after(pos, copy);
878
879         return copy;
880 }
881
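/**
 * If the remat is legal after @p pos, inserts a copy of the remat op (and of
 * its Proj, if any) there, attaches an op_t with a fresh binary ILP variable
 * weighted by cost * execution frequency, and records it in all_possible_remats.
 */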
882 static ir_node *
883 insert_remat_after(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
884 {
885         char     buf[256];
886
887         if(can_remat_after(si, remat, pos, live)) {
888                 ir_node         *copy,
889                                                 *proj_copy;
890                 op_t            *op;
891
892                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat2 %+F\n", remat->op));
893
894                 copy = insert_copy_after(si, remat->op, pos);
895
896                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
897                 op = obstack_alloc(si->obst, sizeof(*op));
898                 op->is_remat = 1;
899                 op->attr.remat.remat = remat;
900                 op->attr.remat.pre = 0;
901                 op->attr.remat.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos), 0.0);
902
903                 set_irn_link(copy, op);
904                 pset_insert_ptr(si->all_possible_remats, copy);
905                 if(remat->proj) {
906                         proj_copy = insert_copy_after(si, remat->proj, copy);
907                         set_irn_n(proj_copy, 0, copy);
908                         set_irn_link(proj_copy, op);
909                         pset_insert_ptr(si->all_possible_remats, proj_copy);
910                 } else {
911                         proj_copy = NULL;
912                 }
913
914                 return copy;
915         }
916
917         return NULL;
918 }
919
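/**
 * Like insert_remat_after(), but places the remat immediately before @p pos
 * and marks it as a pressure-increasing (pre) remat.
 */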
920 static ir_node *
921 insert_remat_before(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
922 {
923         char     buf[256];
924
925         if(can_remat_before(si, remat, pos, live)) {
926                 ir_node         *copy,
927                                                 *proj_copy;
928                 op_t            *op;
929
930                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
931
932                 copy = insert_copy_before(si, remat->op, pos);
933
934                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
935                 op = obstack_alloc(si->obst, sizeof(*op));
936                 op->is_remat = 1;
937                 op->attr.remat.remat = remat;
938                 op->attr.remat.pre = 1;
939                 op->attr.remat.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos), 0.0);
940
941                 set_irn_link(copy, op);
942                 pset_insert_ptr(si->all_possible_remats, copy);
943                 if(remat->proj) {
944                         proj_copy = insert_copy_after(si, remat->proj, copy);
945                         set_irn_n(proj_copy, 0, copy);
946                         set_irn_link(proj_copy, op);
947                         pset_insert_ptr(si->all_possible_remats, proj_copy);
948                 } else {
949                         proj_copy = NULL;
950                 }
951
952                 return copy;
953         }
954
955         return NULL;
956 }
957
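/**
 * Number of control flow successors of @p block, saturated at 2 (callers only
 * need to distinguish between one and more than one successor).
 */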
958 static int
959 get_block_n_succs(const ir_node *block) {
960         const ir_edge_t *edge;
961
962         assert(edges_activated(current_ir_graph));
963
964         edge = get_block_succ_first(block);
965         if (! edge)
966                 return 0;
967
968         edge = get_block_succ_next(block, edge);
969         return edge ? 2 : 1;
970 }
971
972 static int
973 is_start_block(const ir_node * bb)
974 {
975         return get_irg_start_block(get_irn_irg(bb)) == bb;
976 }
977
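/**
 * With the Goodwin reduction enabled, a block is treated as lying on a merge
 * edge iff it has exactly one control flow successor; with the reduction
 * disabled every block except the start block qualifies.
 */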
978 static int
979 is_merge_edge(const ir_node * bb)
980 {
981         if(is_start_block(bb))
982                 return 0;
983
984         if(opt_goodwin)
985                 return get_block_n_succs(bb) == 1;
986         else
987                 return 1;
988 }
989
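/**
 * Analogously, a block lies on a diverge edge iff it has exactly one control
 * flow predecessor (or always, except for the start block, without Goodwin).
 */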
990 static int
991 is_diverge_edge(const ir_node * bb)
992 {
993         if(is_start_block(bb))
994                 return 0;
995
996         if(opt_goodwin)
997                 return get_Block_n_cfgpreds(bb) == 1;
998         else
999                 return 1;
1000 }
1001
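/**
 * Collects into @p live all values of the current register class that are live
 * at the end of @p bb (excluding inserted remat candidates), plus the operands
 * of the block's trailing control flow nodes.
 */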
1002 static void
1003 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1004 {
1005         ir_node        *irn;
1006         int i;
1007
1008         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1009                 irn = be_lv_get_irn(si->lv, bb, i);
1010
1011                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1012                         pset_insert_ptr(live, irn);
1013                 }
1014         }
1015
1016         irn = sched_last(bb);
1017
1018         /* all values eaten by control flow operations are also live until the end of the block */
1019         sched_foreach_reverse(bb, irn) {
1020                 int  i;
1021
1022                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1023
1024                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1025                         ir_node *arg = get_irn_n(irn,i);
1026
1027                         if(has_reg_class(si, arg)) {
1028                                 pset_insert_ptr(live, arg);
1029                         }
1030                 }
1031         }
1032 }
1033
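/**
 * Walker: for every Phi of the current register class whose argument is not in
 * that class, inserts a be_Copy of the argument at the end of the corresponding
 * predecessor block and rewires the Phi to use the copy.
 */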
1034 static void
1035 walker_regclass_copy_insertor(ir_node * irn, void * data)
1036 {
1037         spill_ilp_t    *si = data;
1038
1039         if(is_Phi(irn) && has_reg_class(si, irn)) {
1040                 int n;
1041
1042                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
1043                         ir_node  *phi_arg = get_irn_n(irn, n);
1044                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
1045
1046                         if(!has_reg_class(si, phi_arg)) {
1047                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
1048                                 ir_node   *pos = sched_block_last_noncf(si, bb);
1049                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
1050
1051                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
1052                                 sched_add_after(pos, copy);
1053                                 set_irn_n(irn, n, copy);
1054
1055                                 op->is_remat = 0;
1056                                 op->attr.live_range.args.reloads = NULL;
1057                                 op->attr.live_range.ilp = ILP_UNDEF;
1058                                 set_irn_link(copy, op);
1059                         }
1060                 }
1061         }
1062 }
1063
1064 /**
1065  * Insert (so far unused) remats into the irg to
1066  * recompute the potential liveness of all values
1067  */
1068 static void
1069 walker_remat_insertor(ir_node * bb, void * data)
1070 {
1071         spill_ilp_t    *si = data;
1072         ir_node        *irn;
1073         int             n, i;
1074         pset           *live;
1075         pset           *post_remats;
1076         remat_t        *remat;
1077
1078         /* skip start block, no remats to do there */
1079         if(is_start_block(bb)) return;
1080
1081         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
1082
1083         live = pset_new_ptr_default();
1084         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1085                 ir_node        *value = be_lv_get_irn(si->lv, bb, i);
1086
1087                 /* add remats at end of block */
1088                 if (has_reg_class(si, value)) {
1089                         pset_insert_ptr(live, value);
1090                 }
1091         }
1092
1093         irn = sched_last(bb);
1094         while(!sched_is_end(irn)) {
1095                 ir_node   *next;
1096                 pset      *args;
1097                 ir_node   *arg;
1098                 pset      *used;
1099
1100                 next = sched_prev(irn);
1101
1102                 /* delete defined value from live set */
1103                 if(has_reg_class(si, irn)) {
1104                         pset_remove_ptr(live, irn);
1105                 }
1106
1107                 if(is_Phi(irn) || is_Proj(irn)) {
1108                         irn = next;
1109                         continue;
1110                 }
1111
1112                 args = pset_new_ptr_default();
1113                 used = pset_new_ptr_default();
1114
1115                 /* collect arguments of op and set args of op already live in epilog */
1116                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
1117                         ir_node        *arg = get_irn_n(irn, n);
1118
1119                         pset_insert_ptr(args, arg);
1120                         if(has_reg_class(si, arg)) {
1121                                 pset_insert_ptr(live, arg);
1122                                 pset_insert_ptr(used, arg);
1123                         }
1124                 }
1125
1126                 /* insert all possible remats before irn */
1127                 pset_foreach(args, arg) {
1128                         remat_info_t   *remat_info,
1129                                                     query;
1130
1131                         /* continue if the operand has the wrong reg class */
1132                         if(!has_reg_class(si, arg))
1133                                 continue;
1134
1135                         query.irn = arg;
1136                         query.remats = NULL;
1137                         query.remats_by_operand = NULL;
1138                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1139
1140                         if(!remat_info) {
1141                                 continue;
1142                         }
1143
1144                         if(remat_info->remats) {
1145                                 pset_foreach(remat_info->remats, remat) {
1146                                         ir_node  *remat_irn = NULL;
1147
1148                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
1149                                         remat_irn = insert_remat_before(si, remat, irn, live);
1150
1151                                         if(remat_irn) {
1152                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1153                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1154
1155                                                         /* collect args of remats which are not args of op */
1156                                                         if(has_reg_class(si, remat_arg) && !pset_find_ptr(args, remat_arg)) {
1157                                                                 pset_insert_ptr(used, remat_arg);
1158                                                         }
1159                                                 }
1160                                         }
1161                                 }
1162                         }
1163                 }
1164
1165                 /* do not place post remats after jumps */
1166                 if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) {
1167                         del_pset(used);
1168                         del_pset(args);
1169                         break;
1170                 }
1171
1172                 /* insert all possible remats after irn */
1173                 post_remats = pset_new_ptr_default();
1174                 pset_foreach(used, arg) {
1175                         remat_info_t   *remat_info,
1176                                                     query;
1177
1178                         /* continue if the operand has the wrong reg class */
1179                         if(!has_reg_class(si, arg))
1180                                 continue;
1181
1182                         query.irn = arg;
1183                         query.remats = NULL;
1184                         query.remats_by_operand = NULL;
1185                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1186
1187                         if(!remat_info) {
1188                                 continue;
1189                         }
1190
1191                         if(remat_info->remats_by_operand) {
1192                                 pset_foreach(remat_info->remats_by_operand, remat) {
1193                                         /* do not insert remats producing the same value as one of the operands */
1194                                         if(!pset_find_ptr(args, remat->value)) {
1195                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1196
1197                                                 /* only remat values that can be used by real ops */
1198                                                 if(pset_find_ptr(live, remat->value)) {
1199                                                         pset_insert_ptr(post_remats, remat);
1200                                                 }
1201                                         }
1202                                 }
1203                         }
1204                 }
1205                 pset_foreach(post_remats, remat) {
1206                         insert_remat_after(si, remat, irn, live);
1207                 }
1208                 del_pset(post_remats);
1209
1210                 del_pset(used);
1211                 del_pset(args);
1212                 irn = next;
1213         }
1214
1215         /* add remats at end if successor has multiple predecessors */
1216         if(is_merge_edge(bb)) {
1217                 pset     *live_out = pset_new_ptr_default();
1218                 ir_node  *value;
1219
1220                 get_live_end(si, bb, live_out);
1221
1222                 /* add remats at end of block */
1223                 pset_foreach(live_out, value) {
1224                         remat_info_t   *remat_info,
1225                                                    query;
1226
1227                         query.irn = value;
1228                         query.remats = NULL;
1229                         query.remats_by_operand = NULL;
1230                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1231
1232                         if(remat_info && remat_info->remats) {
1233                                 pset_foreach(remat_info->remats, remat) {
1234                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1235
1236                                         insert_remat_before(si, remat, bb, live_out);
1237                                 }
1238                         }
1239                 }
1240                 del_pset(live_out);
1241         }
1242
1243         if(is_diverge_edge(bb)) {
1244                 pset     *live_in = pset_new_ptr_default();
1245                 ir_node  *value;
1246
1247                 be_lv_foreach(si->lv, bb, be_lv_state_in, i) {
1248                         value = be_lv_get_irn(si->lv, bb, i);
1249
1250                         if(has_reg_class(si, value)) {
1251                                 pset_insert_ptr(live_in, value);
1252                         }
1253                 }
1254                 /* add phis to live_in */
1255                 sched_foreach(bb, value) {
1256                         if(!is_Phi(value)) break;
1257
1258                         if(has_reg_class(si, value)) {
1259                                 pset_insert_ptr(live_in, value);
1260                         }
1261                 }
1262
1263                 /* add remat2s at beginning of block */
1264                 post_remats = pset_new_ptr_default();
1265                 pset_foreach(live_in, value) {
1266                         remat_info_t   *remat_info,
1267                                                    query;
1268
1269                         query.irn = value;
1270                         query.remats = NULL;
1271                         query.remats_by_operand = NULL;
1272                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1273
1274                         if(remat_info && remat_info->remats_by_operand) {
1275                                 pset_foreach(remat_info->remats_by_operand, remat) {
1276                                         DBG((si->dbg, LEVEL_4, "\t  considering remat2 %+F at beginning of block %+F\n", remat->op, bb));
1277
1278                                         /* put the remat here if all its args are available and result is still live */
1279                                         if(pset_find_ptr(live_in, remat->value)) {
1280                                                 pset_insert_ptr(post_remats, remat);
1281                                         }
1282                                 }
1283                         }
1284                 }
1285                 pset_foreach(post_remats, remat) {
1286                         insert_remat_after(si, remat, bb, live_in);
1287                 }
1288                 del_pset(post_remats);
1289                 del_pset(live_in);
1290         }
1291 }
1292
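/**
 * Returns nonzero if @p irn is used as a Phi argument along the single
 * successor edge of @p bb (used as the default value of the reload variable).
 */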
1293 static int
1294 can_be_copied(const ir_node * bb, const ir_node * irn)
1295 {
1296         const ir_edge_t *edge    = get_block_succ_first(bb);
1297         const ir_node   *next_bb = edge->src;
1298         int             pos      = edge->pos;
1299         const ir_node   *phi;
1300
1301         assert(is_merge_edge(bb));
1302
1303         sched_foreach(next_bb, phi) {
1304                 const ir_node  *phi_arg;
1305
1306                 if(!is_Phi(phi)) break;
1307
1308                 phi_arg = get_irn_n(phi, pos);
1309
1310                 if(phi_arg == irn) {
1311                         return 1;
1312                 }
1313         }
1314         return 0;
1315 }
1316
1317 /**
1318  * Initialize additional node info
1319  */
1320 static void
1321 luke_initializer(ir_node * bb, void * data)
1322 {
1323         spill_ilp_t    *si = (spill_ilp_t*)data;
1324         spill_bb_t     *spill_bb;
1325         ir_node        *irn;
1326
1327         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
1328         set_irn_link(bb, spill_bb);
1329
1330         sched_foreach(bb, irn) {
1331                 op_t      *op;
1332
1333                 op = obstack_alloc(si->obst, sizeof(*op));
1334                 op->is_remat = 0;
1335                 op->attr.live_range.ilp = ILP_UNDEF;
1336                 if(is_Phi(irn)) {
1337                         if(opt_memcopies) {
1338                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(irn));
1339                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(irn));
1340                         }
1341                 } else if(!is_Proj(irn)) {
1342                         op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
1343                         memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
1344                 } else {
1345                         op->attr.live_range.args.reloads = NULL;
1346                 }
1347                 set_irn_link(irn, op);
1348         }
1349 }
1350
1351
1352 /**
1353  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1354  */
1355 static void
1356 luke_endwalker(ir_node * bb, void * data)
1357 {
1358         spill_ilp_t    *si = (spill_ilp_t*)data;
1359         pset           *live;
1360         pset           *use_end;
1361         char            buf[256];
1362         ilp_cst_t       cst;
1363         ir_node        *irn;
1364         spill_bb_t     *spill_bb = get_irn_link(bb);
1365         int             i;
1366
1367         live = pset_new_ptr_default();
1368         use_end = pset_new_ptr_default();
1369
1370         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1371                 irn = be_lv_get_irn(si->lv, bb, i);
1372                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1373                         pset_insert_ptr(live, irn);
1374                 }
1375         }
1376
1377         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1378         /* their reg_out must always be set */
1379         sched_foreach_reverse(bb, irn) {
1380                 int   n;
1381
1382                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1383
1384                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1385                         ir_node        *irn_arg = get_irn_n(irn, n);
1386
1387                         if(has_reg_class(si, irn_arg)) {
1388                                 pset_insert_ptr(use_end, irn_arg);
1389                         }
1390                 }
1391         }
1392
1393         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1394         //cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
1395         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1396
1397         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1398
1399         /* if this is a merge edge we can reload at the end of this block */
1400         if(is_merge_edge(bb)) {
1401                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1402         } else if(pset_count(use_end)){
1403                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1404         } else {
1405                 spill_bb->reloads = NULL;
1406         }
1407
1408         pset_foreach(live,irn) {
1409                 spill_t     query,
1410                                         *spill;
1411                 double      spill_cost;
1412                 int         default_spilled;
1413
1414
1415                 /* handle values used by control flow nodes later separately */
1416                 if(pset_find_ptr(use_end, irn)) continue;
1417
1418                 query.irn = irn;
1419                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1420
1421                 spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1422
1423                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1424                 spill->reg_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1425                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1426
1427                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1428                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1429
1430                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1431                 /* by default spill value right after definition */
1432                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1433                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1434
1435                 if(is_merge_edge(bb)) {
1436                         ilp_var_t   reload;
1437                         ilp_cst_t   rel_cst;
1438
1439                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1440                         reload = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), can_be_copied(bb, irn));
1441                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1442
1443                         /* reload <= mem_out */
1444                         rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1445                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1446                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1447                 }
1448
1449                 spill->reg_in = ILP_UNDEF;
1450                 spill->mem_in = ILP_UNDEF;
1451         }
1452
1453         pset_foreach(use_end,irn) {
1454                 spill_t     query,
1455                                         *spill;
1456                 double      spill_cost;
1457                 ilp_cst_t   end_use_req,
1458                                         rel_cst;
1459                 ilp_var_t   reload;
1460                 int         default_spilled;
1461
1462                 query.irn = irn;
1463                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1464
1465                 spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1466
1467                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1468                 spill->reg_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1469                 /* if irn is used at the end of the block, then it is live anyway */
1470                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1471
1472                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1473                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1474
1475                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1476                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1477                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1478
1479                 /* reload for use by a control flow op */
1480                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1481                 reload = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), 1.0);
1482                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1483
1484                 /* reload <= mem_out */
1485                 rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1486                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1487                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1488
1489                 spill->reg_in = ILP_UNDEF;
1490                 spill->mem_in = ILP_UNDEF;
1491
1492                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1493                 end_use_req = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1);
1494                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1495         }
1496
1497         del_pset(live);
1498         del_pset(use_end);
1499 }
1500
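/**
 * Returns the next op scheduled after @p irn (or the first non-Phi of the
 * block) if it is a post remat ("remat2"), NULL otherwise.
 */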
1501 static ir_node *
1502 next_post_remat(const ir_node * irn)
1503 {
1504         op_t      *op;
1505         ir_node   *next;
1506
1507         if(is_Block(irn)) {
1508                 next = sched_block_first_nonphi(irn);
1509         } else {
1510                 next = sched_next_op(irn);
1511         }
1512
1513         if(sched_is_end(next))
1514                 return NULL;
1515
1516         op = get_irn_link(next);
1517         if(op->is_remat && !op->attr.remat.pre) {
1518                 return next;
1519         }
1520
1521         return NULL;
1522 }
1523
1524
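     /**
      * Return the pre remat scheduled directly before @p irn (or before the
      * control flow ops at the end of @p irn if it is a block), or NULL if
      * there is none
      */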
1525 static ir_node *
1526 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1527 {
1528         op_t      *op;
1529         ir_node   *ret;
1530
1531         if(is_Block(irn)) {
1532                 ret = sched_block_last_noncf(si, irn);
1533                 ret = sched_next(ret);
1534                 ret = sched_prev_op(ret);
1535         } else {
1536                 ret = sched_prev_op(irn);
1537         }
1538
1539         if(sched_is_end(ret) || is_Phi(ret))
1540                 return NULL;
1541
1542         op = (op_t*)get_irn_link(ret);
1543         if(op->is_remat && op->attr.remat.pre) {
1544                 return ret;
1545         }
1546
1547         return NULL;
1548 }
1549
1550 /**
1551  * Find a remat of value @p value in the epilog of @p pos
1552  */
1553 static ir_node *
1554 find_post_remat(const ir_node * value, const ir_node * pos)
1555 {
1556         while((pos = next_post_remat(pos)) != NULL) {
1557                 op_t   *op;
1558
1559                 op = get_irn_link(pos);
1560                 assert(op->is_remat && !op->attr.remat.pre);
1561
1562                 if(op->attr.remat.remat->value == value)
1563                         return (ir_node*)pos;
1564
1565 #if 0
1566         const ir_edge_t *edge;
1567                 foreach_out_edge(pos, edge) {
1568                         ir_node   *proj = get_edge_src_irn(edge);
1569                         assert(is_Proj(proj));
1570                 }
1571 #endif
1572
1573         }
1574
1575         return NULL;
1576 }
1577
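     /**
      * Look up the spill_t entry of @p irn in the spill_bb set of @p bb and
      * create it (together with its mem_out and spill ILP variables) if it
      * does not exist yet
      */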
1578 static spill_t *
1579 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1580 {
1581         spill_bb_t  *spill_bb = get_irn_link(bb);
1582         spill_t     *spill,
1583                                  query;
1584         char         buf[256];
1585         int          default_spilled;
1586
1587         query.irn = irn;
1588         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1589         if(!spill) {
1590                 double   spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1591
1592                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1593
1594                 spill->reg_out = ILP_UNDEF;
1595                 spill->reg_in  = ILP_UNDEF;
1596                 spill->mem_in  = ILP_UNDEF;
1597
1598                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1599                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1600
1601                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1602                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1603                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1604         }
1605
1606         return spill;
1607 }
1608
1609 /**
1610  *  Inserts ILP constraints and variables for memory copying before the given position
1611  */
1612 static void
1613 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1614 {
1615         const ir_node    *succ;
1616         const ir_edge_t  *edge;
1617         spill_bb_t       *spill_bb = get_irn_link(block);
1618         ir_node          *phi;
1619         int               pos;
1620         ilp_cst_t         cst;
1621         ilp_var_t         copyreg;
1622         char              buf[256];
1623         ir_node          *tmp;
1624
1625
1626         assert(edges_activated(current_ir_graph));
1627
1628         edge = get_block_succ_first(block);
1629         if(!edge) return;
1630
1631         succ = edge->src;
1632         pos = edge->pos;
1633
1634         edge = get_block_succ_next(block, edge);
1635         /* the next block can only contain phis if this is a merge edge */
1636         if(edge) return;
1637
1638         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1639         copyreg = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1640
1641         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1642         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
1643
1644         pset_foreach(live, tmp) {
1645                 spill_t  *spill;
1646 #if 0
1647                 op_t  *op = get_irn_link(irn);
1648                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1649 #endif
1650                 spill = set_find_spill(spill_bb->ilp, tmp);
1651                 assert(spill);
1652
1653                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1654         }
1655         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1656
1657         sched_foreach(succ, phi) {
1658                 const ir_node  *to_copy;
1659                 op_t           *to_copy_op;
1660                 spill_t        *to_copy_spill;
1661                 op_t           *phi_op = get_irn_link(phi);
1662                 ilp_var_t       reload = ILP_UNDEF;
1663
1664
1665                 if(!is_Phi(phi)) break;
1666                 if(!has_reg_class(si, phi)) continue;
1667
1668                 to_copy = get_irn_n(phi, pos);
1669                 to_copy_op = get_irn_link(to_copy);
1670
1671                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1672                 assert(to_copy_spill);
1673
1674                 if(spill_bb->reloads) {
1675                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1676
1677                         if(keyval) {
1678                                 reload = PTR_TO_INT(keyval->val);
1679                         }
1680                 }
1681
1682                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N_%N", block, phi, to_copy);
1683                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1684
1685                 /* copy - reg_out - reload - remat - live_range <= 0 */
1686                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1687                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1688                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1689                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1690                 foreach_pre_remat(si, block, tmp) {
1691                         op_t     *remat_op = get_irn_link(tmp);
1692                         if(remat_op->attr.remat.remat->value == to_copy) {
1693                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1694                         }
1695                 }
1696
1697                 ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N_%N", block, phi, to_copy);
1698                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1699
1700                 /* copy - reg_out - copyreg <= 0 */
1701                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1702                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1703                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1704         }
1705 }
1706
1707
1708 /**
1709  * Block walker: emit the ILP variables and constraints for one block
1710  */
1711 static void
1712 luke_blockwalker(ir_node * bb, void * data)
1713 {
1714         spill_ilp_t    *si = (spill_ilp_t*)data;
1715         ir_node        *irn;
1716         pset           *live;
1717         char            buf[256];
1718         ilp_cst_t       cst;
1719         spill_bb_t     *spill_bb = get_irn_link(bb);
1720         ir_node        *tmp;
1721         spill_t        *spill;
1722         pset           *defs = pset_new_ptr_default();
1723         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
1724
1725         live = pset_new_ptr_default();
1726
1727         /****************************************
1728          *      B A S I C  B L O C K  E N D
1729          ***************************************/
1730
1731
1732         /* init live values at end of block */
1733         get_live_end(si, bb, live);
1734
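             /* for every value live at the end of bb start a live range
              * variable lr and couple it with reg_out: the value can only be
              * in a register at the block end if it is reloaded here,
              * rematerialized or its live range reaches the end (see the two
              * constraints below)
              */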
1735         pset_foreach(live, irn) {
1736                 op_t           *op;
1737                 ilp_var_t       reload = ILP_UNDEF;
1738
1739                 spill = set_find_spill(spill_bb->ilp, irn);
1740                 assert(spill);
1741
1742                 if(spill_bb->reloads) {
1743                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1744
1745                         if(keyval) {
1746                                 reload = PTR_TO_INT(keyval->val);
1747                         }
1748                 }
1749
1750                 op = get_irn_link(irn);
1751                 assert(!op->is_remat);
1752
1753                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1754                 op->attr.live_range.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1755                 op->attr.live_range.op = bb;
1756
1757                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1758                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1759
1760                 /* reg_out - reload - remat - live_range <= 0 */
1761                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1762                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1763                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1764                 foreach_pre_remat(si, bb, tmp) {
1765                         op_t     *remat_op = get_irn_link(tmp);
1766                         if(remat_op->attr.remat.remat->value == irn) {
1767                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1768                         }
1769                 }
1770                 ir_snprintf(buf, sizeof(buf), "reg_out2_%N_%N", bb, irn);
1771                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_greater, 0.0);
1772
1773                 /* value may only die at bb end if it is used for a mem copy */
1774                 /* reg_out + \sum copy - reload - remat - live_range >= 0 */
1775                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1776                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1777                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1778                 foreach_pre_remat(si, bb, tmp) {
1779                         op_t     *remat_op = get_irn_link(tmp);
1780                         if(remat_op->attr.remat.remat->value == irn) {
1781                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1782                         }
1783                 }
1784                 if(is_merge_edge(bb)) {
1785                         const ir_edge_t *edge = get_block_succ_first(bb);
1786                         const ir_node   *next_bb = edge->src;
1787                         int              pos = edge->pos;
1788                         const ir_node   *phi;
1789
1790                         sched_foreach(next_bb, phi) {
1791                                 const ir_node  *phi_arg;
1792
1793                                 if(!is_Phi(phi)) break;
1794
1795                                 phi_arg = get_irn_n(phi, pos);
1796
1797                                 if(phi_arg == irn) {
1798                                         op_t      *phi_op = get_irn_link(phi);
1799                                         ilp_var_t  copy = phi_op->attr.live_range.args.copies[pos];
1800
1801                                         lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
1802                                 }
1803                         }
1804                 }
1805         }
1806
1807         if(opt_memcopies)
1808                 insert_mem_copy_position(si, live, bb);
1809
1810         /*
1811          * start new live ranges for values used by remats at end of block
1812          * and ensure the remat args are available
1813          */
1814         foreach_pre_remat(si, bb, tmp) {
1815                 op_t     *remat_op = get_irn_link(tmp);
1816                 int       n;
1817
1818                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1819                         ir_node        *remat_arg = get_irn_n(tmp, n);
1820                         op_t           *arg_op = get_irn_link(remat_arg);
1821                         ilp_var_t       prev_lr;
1822
1823                         if(!has_reg_class(si, remat_arg)) continue;
1824
1825                         /* if value is becoming live through use by remat */
1826                         if(!pset_find_ptr(live, remat_arg)) {
1827                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1828                                 prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1829
1830                                 arg_op->attr.live_range.ilp = prev_lr;
1831                                 arg_op->attr.live_range.op = bb;
1832
1833                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1834
1835                                 pset_insert_ptr(live, remat_arg);
1836                                 add_to_spill_bb(si, bb, remat_arg);
1837                         }
1838
1839                         /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1840                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1841                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1842
1843                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1844                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1845
1846                         /* use reload placed for this argument */
1847                         if(spill_bb->reloads) {
1848                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1849
1850                                 if(keyval) {
1851                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1852
1853                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1854                                 }
1855                         }
1856                 }
1857         }
1858         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1859
1860
1861
1862
1863         /**************************************
1864          *    B A S I C  B L O C K  B O D Y
1865          **************************************/
1866
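             /* walk the ops of the block bottom-up; for each op the epilog
              * (the program point directly after it) is modelled first, then
              * the prolog (the point directly before it), with a register
              * pressure check at both points
              */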
1867         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1868                 op_t       *op;
1869                 op_t       *tmp_op;
1870                 int         n,
1871                                         u = 0,
1872                                         d = 0;
1873                 ilp_cst_t       check_pre,
1874                                         check_post;
1875                 set        *args;
1876                 pset       *used;
1877                 pset       *remat_defs;
1878                 keyval_t   *keyval;
1879                 ilp_cst_t   one_memoperand;
1880
1881                 /* iterate only until first phi */
1882                 if(is_Phi(irn))
1883                         break;
1884
1885                 op = get_irn_link(irn);
1886                 /* skip remats */
1887                 if(op->is_remat) continue;
1888
1889                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1890
1891                 /* collect defined values */
1892                 if(has_reg_class(si, irn)) {
1893                         pset_insert_ptr(defs, irn);
1894                 }
1895
1896                 /* skip projs */
1897                 if(is_Proj(irn)) continue;
1898
1899                 /*
1900                  * init set of irn's arguments
1901                  * and all possibly used values around this op
1902                  * and values defined by post remats
1903                  */
1904                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1905                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1906                 remat_defs = pset_new_ptr(pset_count(live));
1907
1908                 if(!is_start_block(bb) || !be_is_Barrier(irn)) {
1909                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
1910                                 ir_node        *irn_arg = get_irn_n(irn, n);
1911                                 if(has_reg_class(si, irn_arg)) {
1912                                         set_insert_keyval(args, irn_arg, (void*)n);
1913                                         pset_insert_ptr(used, irn_arg);
1914                                 }
1915                         }
1916                         foreach_post_remat(irn, tmp) {
1917                                 op_t    *remat_op = get_irn_link(tmp);
1918
1919                                 pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1920
1921                                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1922                                         ir_node        *remat_arg = get_irn_n(tmp, n);
1923                                         if(has_reg_class(si, remat_arg)) {
1924                                                 pset_insert_ptr(used, remat_arg);
1925                                         }
1926                                 }
1927                         }
1928                         foreach_pre_remat(si, irn, tmp) {
1929                                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1930                                         ir_node        *remat_arg = get_irn_n(tmp, n);
1931                                         if(has_reg_class(si, remat_arg)) {
1932                                                 pset_insert_ptr(used, remat_arg);
1933                                         }
1934                                 }
1935                         }
1936                 }
1937
1938                 /**********************************
1939                  *   I N  E P I L O G  O F  irn
1940                  **********************************/
1941
1942                 /* ensure each dying value is used by only one post remat */
1943                 pset_foreach(used, tmp) {
1944                         ir_node     *value = tmp;
1945                         op_t        *value_op = get_irn_link(value);
1946                         ir_node     *remat;
1947                         int          n_remats = 0;
1948
1949                         cst = ILP_UNDEF;
1950                         foreach_post_remat(irn, remat) {
1951                                 op_t  *remat_op = get_irn_link(remat);
1952
1953                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1954                                         ir_node   *remat_arg = get_irn_n(remat, n);
1955
1956                                         /* if value is used by this remat add it to constraint */
1957                                         if(remat_arg == value) {
1958                                                 if(n_remats == 0) {
1959                                                         /* sum remat2s <= 1 + n_remats*live_range */
1960                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1961                                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
1962                                                 }
1963
1964                                                 n_remats++;
1965                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1966                                                 break;
1967                                         }
1968                                 }
1969                         }
1970
1971                         if(pset_find_ptr(live, value) && cst != ILP_UNDEF) {
1972                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1973                         }
1974                 }
1975
1976                 /* ensure at least one value dies at post remat */
1977                 foreach_post_remat(irn, tmp) {
1978                         op_t     *remat_op = get_irn_link(tmp);
1979                         pset     *remat_args = pset_new_ptr(get_irn_arity(tmp));
1980                         ir_node  *remat_arg;
1981
1982                         for(n=get_irn_arity(tmp)-1; n>=0; --n) {
1983                                 remat_arg = get_irn_n(tmp, n);
1984
1985                                 if(has_reg_class(si, remat_arg)) {
1986
1987                                         /* does arg always die at this op? */
1988                                         if(!pset_find_ptr(live, remat_arg))
1989                                                 goto skip_one_must_die;
1990
1991                                         pset_insert_ptr(remat_args, remat_arg);
1992                                 }
1993                         }
1994
1995                         /* remat + \sum live_range(remat_arg) <= |args| */
1996                         ir_snprintf(buf, sizeof(buf), "one_must_die_%+F", tmp);
1997                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
1998                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1999
2000                         pset_foreach(remat_args, remat_arg) {
2001                                 op_t  *arg_op = get_irn_link(remat_arg);
2002
2003                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2004                         }
2005
2006 skip_one_must_die:
2007                         del_pset(remat_args);
2008                 }
2009
2010                 /* new live ranges for values from L\U defined by post remats */
2011                 pset_foreach(live, tmp) {
2012                         ir_node     *value = tmp;
2013                         op_t        *value_op = get_irn_link(value);
2014
2015                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
2016                                 ilp_var_t    prev_lr = ILP_UNDEF;
2017                                 ir_node     *remat;
2018
2019                                 if(pset_find_ptr(remat_defs, value)) {
2020
2021                                         /* next_live_range <= prev_live_range + sum remat2s */
2022                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
2023                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2024
2025                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
2026                                         prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2027
2028                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
2029                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2030
2031                                         foreach_post_remat(irn, remat) {
2032                                                 op_t        *remat_op = get_irn_link(remat);
2033
2034                                                 /* if value is being rematerialized by this remat */
2035                                                 if(value == remat_op->attr.remat.remat->value) {
2036                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2037                                                 }
2038                                         }
2039
2040                                         value_op->attr.live_range.ilp = prev_lr;
2041                                         value_op->attr.live_range.op = irn;
2042                                 }
2043                         }
2044                 }
2045
2046                 /* requirements for post remats and start live ranges from L\U' for values dying here */
2047                 foreach_post_remat(irn, tmp) {
2048                         op_t        *remat_op = get_irn_link(tmp);
2049                         int          n;
2050
2051                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2052                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2053                                 op_t           *arg_op = get_irn_link(remat_arg);
2054
2055                                 if(!has_reg_class(si, remat_arg)) continue;
2056
2057                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
2058                                 if(!pset_find_ptr(used, remat_arg)) {
2059                                         /* remat <= live_range(remat_arg) */
2060                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
2061                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2062
2063                                         /* if value is becoming live through use by remat2 */
2064                                         if(!pset_find_ptr(live, remat_arg)) {
2065                                                 ilp_var_t     lr;
2066
2067                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
2068                                                 lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2069
2070                                                 arg_op->attr.live_range.ilp = lr;
2071                                                 arg_op->attr.live_range.op = irn;
2072
2073                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
2074
2075                                                 pset_insert_ptr(live, remat_arg);
2076                                                 add_to_spill_bb(si, bb, remat_arg);
2077                                         }
2078
2079                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2080                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2081                                 }
2082                         }
2083                 }
2084
2085                 d = pset_count(defs);
2086                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
2087
2088                 /* count how many regs irn needs for arguments */
2089                 u = set_count(args);
2090
2091
2092                 /* check the register pressure in the epilog */
2093                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
2094                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
2095                 check_post = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - d);
2096
2097                 /* add L\U' to check_post */
2098                 pset_foreach(live, tmp) {
2099                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
2100                                 /* if a live value is not used by irn */
2101                                 tmp_op = get_irn_link(tmp);
2102                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
2103                         }
2104                 }
2105
2106                 /***********************************************************
2107                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
2108                  **********************************************************/
2109
2110
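                     /* for every value used around irn a post_use variable
                      * models whether the value still occupies a register
                      * directly after irn (because its live range continues or
                      * a post remat uses it); post_use counts towards
                      * check_post
                      */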
2111                 pset_foreach(used, tmp) {
2112                         ilp_var_t       prev_lr;
2113                         ilp_var_t       post_use;
2114                         int             p = 0;
2115                         spill_t        *spill;
2116                         ir_node        *arg = tmp;
2117                         op_t           *arg_op = get_irn_link(arg);
2118                         ir_node        *remat;
2119
2120                         spill = add_to_spill_bb(si, bb, arg);
2121
2122                         /* new live range for each used value */
2123                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
2124                         prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2125
2126                         /* the epilog stuff - including post_use, check_post, check_post_remat */
2127                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
2128                         post_use = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2129
2130                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
2131
2132                         /* arg is live throughout epilog if the next live_range is in a register */
2133                         if(pset_find_ptr(live, arg)) {
2134                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
2135
2136                                 /* post_use >= next_lr */
2137                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
2138                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2139                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
2140                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2141                         }
2142
2143                         /* for all post remats which use arg, add a similar constraint */
2144                         foreach_post_remat(irn, remat) {
2145                                 int      n;
2146
2147                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
2148                                         ir_node    *remat_arg = get_irn_n(remat, n);
2149                                         op_t       *remat_op = get_irn_link(remat);
2150
2151                                         if(remat_arg == arg) {
2152                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
2153
2154                                                 /* post_use >= remat */
2155                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
2156                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2157                                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
2158                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2159                                         }
2160                                 }
2161                         }
2162
2163                         /* if value is not an arg of op and not possibly defined by post remat
2164                          * then it may only die and not become live
2165                          */
2166                         if(!set_find_keyval(args, arg)) {
2167                                 /* post_use <= prev_lr */
2168                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
2169                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2170                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
2171                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2172
2173                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
2174                                         /* next_lr <= prev_lr */
2175                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
2176                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2177                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2178                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2179                                 }
2180                         }
2181
2182                         if(opt_memoperands && (!is_start_block(bb) || be_is_Barrier(irn))) {
2183                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2184                                         if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
2185                                                 ilp_var_t       memoperand;
2186
2187                                                 ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
2188                                                 memoperand = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_memoperand*execution_frequency(si, bb), 0.0);
2189                                                 set_insert_memoperand(si->memoperands, irn, n, memoperand);
2190
2191                                                 ir_snprintf(buf, sizeof(buf), "nolivepost_%N_%d", irn, n);
2192                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2193
2194                                                 lpp_set_factor_fast(si->lpp, cst, memoperand, 1.0);
2195                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
2196                                         }
2197                                 }
2198                         }
2199
2200                         /* new live range begins for each used value */
2201                         arg_op->attr.live_range.ilp = prev_lr;
2202                         arg_op->attr.live_range.op = irn;
2203
2204                         pset_insert_ptr(live, arg);
2205                 }
2206
2207                 /* just to be sure: check_post must not be used below this point */
2208                 check_post = ILP_UNDEF;
2209
2210
2211                 /******************
2212                  *   P R O L O G
2213                  ******************/
2214
2215                 /* check the register pressure in the prolog */
2216                 /* sum_{L\U} lr <= k - |U| */
2217                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
2218                 check_pre = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - u);
2219
2220                 /* for the prolog remove defined values from the live set */
2221                 pset_foreach(defs, tmp) {
2222                         pset_remove_ptr(live, tmp);
2223                 }
2224
2225                 if(opt_memoperands && (!is_start_block(bb) || be_is_Barrier(irn))) {
2226                         ir_snprintf(buf, sizeof(buf), "one_memoperand_%N", irn);
2227                         one_memoperand = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2228                 }
2229
2230                 /***********************************************************
2231                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
2232                  **********************************************************/
2233
2234
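                     /* every argument must be available in a register directly
                      * before irn: its live range reaches irn, a reload is
                      * placed, a pre remat recomputes it or (where possible) a
                      * memory operand is used; roughly
                      *
                      *   live_range + reload + \sum remat [ + memoperand ] == 1
                      */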
2235                 set_foreach(args, keyval) {
2236                         spill_t          *spill;
2237                         const ir_node    *arg = keyval->key;
2238                         int               i = PTR_TO_INT(keyval->val);
2239                         op_t             *arg_op = get_irn_link(arg);
2240                         ilp_cst_t         requirements;
2241                         int               n_memoperands;
2242
2243                         spill = set_find_spill(spill_bb->ilp, arg);
2244                         assert(spill);
2245
2246                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
2247                         op->attr.live_range.args.reloads[i] = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), 1.0);
2248
2249                         /* reload <= mem_out */
2250                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
2251                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2252                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
2253                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2254
2255                         /* requirement: arg must be in register for use */
2256                         /* reload + remat + live_range == 1 */
2257                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
2258                         requirements = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1.0);
2259
2260                         lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
2261                         lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
2262                         foreach_pre_remat(si, irn, tmp) {
2263                                 op_t     *remat_op = get_irn_link(tmp);
2264                                 if(remat_op->attr.remat.remat->value == arg) {
2265                                         lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
2266                                 }
2267                         }
2268
2269                         if(opt_memoperands && (!is_start_block(bb) || be_is_Barrier(irn))) {
2270                                 n_memoperands = 0;
2271                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2272                                         if(get_irn_n(irn, n) == arg) {
2273                                                 n_memoperands++;
2274                                         }
2275                                 }
2276                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2277                                         if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
2278                                                 memoperand_t  *memoperand;
2279                                                 memoperand = set_find_memoperand(si->memoperands, irn, n);
2280
2281                                                 /* memoperand <= mem_out */
2282                                                 ir_snprintf(buf, sizeof(buf), "req_memoperand_%N_%d", irn, n);
2283                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2284                                                 lpp_set_factor_fast(si->lpp, cst, memoperand->ilp, 1.0);
2285                                                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2286
2287                                                 /* the memoperand is only sufficient if it is used once by the op */
2288                                                 if(n_memoperands == 1)
2289                                                         lpp_set_factor_fast(si->lpp, requirements, memoperand->ilp, 1.0);
2290
2291                                                 lpp_set_factor_fast(si->lpp, one_memoperand, memoperand->ilp, 1.0);
2292
2293                                                 /* we have one more free register if we use a memory operand */
2294                                                 lpp_set_factor_fast(si->lpp, check_pre, memoperand->ilp, -1.0);
2295                                         }
2296                                 }
2297                         }
2298                 }
2299
2300                 /* iterate over L\U */
2301                 pset_foreach(live, tmp) {
2302                         if(!set_find_keyval(args, tmp)) {
2303                                 /* if a live value is not used by irn */
2304                                 tmp_op = get_irn_link(tmp);
2305                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
2306                         }
2307                 }
2308
2309                 /* requirements for remats */
2310                 foreach_pre_remat(si, irn, tmp) {
2311                         op_t        *remat_op = get_irn_link(tmp);
2312                         int          n;
2313
2314                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2315                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2316                                 op_t           *arg_op = get_irn_link(remat_arg);
2317
2318                                 if(!has_reg_class(si, remat_arg)) continue;
2319
2320                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
2321                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2322                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2323
2324                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2325                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2326
2327                                 /* if remat arg is also used by current op then we can use reload placed for this argument */
2328                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2329                                         int    index = PTR_TO_INT(keyval->val);
2330
2331                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2332                                 }
2333                         }
2334                 }
2335
2336
2337
2338
2339                 /*************************
2340                  *  D O N E  W I T H  O P
2341                  *************************/
2342
2343                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2344
2345                 pset_foreach(live, tmp) {
2346                         assert(has_reg_class(si, tmp));
2347                 }
2348
2349                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2350                         ir_node        *arg = get_irn_n(irn, n);
2351
2352                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2353                 }
2354
2355                 del_pset(remat_defs);
2356                 del_pset(used);
2357                 del_set(args);
2358                 del_pset(defs);
2359                 defs = pset_new_ptr_default();
2360
2361                 /* skip everything above the barrier in the start block */
2362                 if(is_start_block(bb) && be_is_Barrier(irn)) {
2363                         assert(pset_count(live) == 0);
2364                         break;
2365                 }
2366
2367         }
2368         del_pset(defs);
2369
2370
2371
2372         /***************************************
2373          *   B E G I N N I N G  O F  B L O C K
2374          ***************************************/
2375
2376
2377         /* we are now at the beginning of the basic block, there are only \Phis in front of us */
2378         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2379
2380         pset_foreach(live, irn) {
2381                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2382         }
2383
2384         /* construct mem_outs for all values */
2385         set_foreach(spill_bb->ilp, spill) {
2386                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2387                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2388
2389                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2390                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2391
2392                 if(pset_find_ptr(live, spill->irn)) {
2393                         int default_spilled;
2394                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2395
2396                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2397                         default_spilled = be_is_live_in(si->lv, bb, spill->irn) || is_Phi(spill->irn);
2398                         spill->mem_in   = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, default_spilled);
2399                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2400
2401                         if(opt_memcopies && is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2402                                 int   n;
2403                                 op_t *op = get_irn_link(spill->irn);
2404
2405                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2406                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2407                                         double          freq=0.0;
2408                                         int             m;
2409                                         ilp_var_t       var;
2410
2411
2412                                         /* argument already done? */
2413                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2414
2415                                         /* get sum of execution frequencies of blocks with the same phi argument */
2416                                         for(m=n; m>=0; --m) {
2417                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2418
2419                                                 if(arg==arg2) {
2420                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
2421                                                 }
2422                                         }
2423
2424                                         /* copies are not free */
2425                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2426                                         var = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_spill * freq, 1.0);
2427
2428                                         for(m=n; m>=0; --m) {
2429                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2430
2431                                                 if(arg==arg2) {
2432                                                         op->attr.live_range.args.copies[m] = var;
2433                                                 }
2434                                         }
2435
2436 #if 0
2437                                         /* copy <= mem_in */
2438                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2439                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2440                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2441                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2442 #endif
2443                                 }
2444                         }
2445                 }
2446         }
2447
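             /* if a post remat at the block start is used, at least one of its
              * arguments must die here (one_must_die constraint below)
              */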
2448         foreach_post_remat(bb, tmp) {
2449                 int         n;
2450                 op_t       *remat_op = get_irn_link(tmp);
2451                 pset       *remat_args = pset_new_ptr(get_irn_arity(tmp));
2452                 ir_node    *remat_arg;
2453
2454                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2455                         remat_arg = get_irn_n(tmp, n);
2456
2457                         if(has_reg_class(si, remat_arg)) {
2458                                 pset_insert_ptr(remat_args, remat_arg);
2459                         }
2460                 }
2461
2462                 /* remat + \sum live_range(remat_arg) <= |args| */
2463                 ir_snprintf(buf, sizeof(buf), "one_must_die_%N", tmp);
2464                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
2465                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2466
2467                 pset_foreach(remat_args, remat_arg) {
2468                         if(pset_find_ptr(live, remat_arg)) {
2469                                 op_t       *remat_arg_op = get_irn_link(remat_arg);
2470                                 lpp_set_factor_fast(si->lpp, cst, remat_arg_op->attr.live_range.ilp, 1.0);
2471                         }
2472                 }
2473                 del_pset(remat_args);
2474         }
2475
2476         foreach_post_remat(bb, tmp) {
2477                 int  n;
2478
2479                 for(n=get_irn_arity(tmp)-1; n>=0; --n) {
2480                         ir_node  *remat_arg = get_irn_n(tmp, n);
2481
2482                         /* if value is becoming live through use by remat2 */
2483                         if(has_reg_class(si, remat_arg) && !pset_find_ptr(live, remat_arg)) {
2484                                 op_t       *remat_arg_op = get_irn_link(remat_arg);
2485                                 ilp_cst_t   nomem;
2486
2487                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 at bb start %+F\n", remat_arg, tmp));
2488
2489                                 pset_insert_ptr(live, remat_arg);
2490                                 spill = add_to_spill_bb(si, bb, remat_arg);
2491                                 remat_arg_op->attr.live_range.ilp = ILP_UNDEF;
2492
2493                                 /* we need reg_in and mem_in for this value; they will be referenced later */
2494                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", remat_arg, bb);
2495                                 spill->reg_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2496                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", remat_arg, bb);
2497                                 spill->mem_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
2498
2499
2500                                 /* optimization: all memory variables should be 0, because we do not want to insert reloads for remats */
2501                                 ir_snprintf(buf, sizeof(buf), "nomem_%N_%N", remat_arg, bb);
2502                                 nomem = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0.0);
2503                                 lpp_set_factor_fast(si->lpp, nomem, spill->spill, 1.0);
2504                         }
2505                 }
2506         }
2507
2508         /* L\U is empty at bb start */
2509         /* arg is live throughout epilog if it is reg_in into this block */
2510
2511         /* check the register pressure at the beginning of the block
2512          * including remats
2513          */
2514         /* reg_in corresponds to post_use */
2515
2516         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2517         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
2518
2519         pset_foreach(live, irn) {
2520                 ilp_cst_t  nospill;
2521
2522                 spill = set_find_spill(spill_bb->ilp, irn);
2523                 assert(spill);
2524
2525                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2526                 spill->reg_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2527
2528                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2529
2530                 /* spill + mem_in <= 1 */
2531                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2532                 nospill = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
2533
2534                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2535                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2536
2537         } /* post_remats are NOT included in the register pressure check because
2538            they do not increase register pressure */
2539
2540         /* mem_in/reg_in for live_in values, especially phis and their arguments */
2541         pset_foreach(live, irn) {
2542                 int          p = 0,
2543                                          n;
2544
2545                 spill = set_find_spill(spill_bb->ilp, irn);
2546                 assert(spill && spill->irn == irn);
2547
2548                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2549                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2550                                 ilp_cst_t       mem_in,
2551                                                                 reg_in;
2552                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2553                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2554                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2555                                 spill_t        *spill_p;
2556                                 op_t           *op = get_irn_link(irn);
2557
2558                                 /* although the phi is in the right register class, one or
2559                                  * more of its arguments can be in a different one or can
2560                                  * be an ignore value
2561                                  */
2562                                 if(has_reg_class(si, phi_arg)) {
2563                                         /* mem_in <= mem_out_arg + copy */
2564                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2565                                         mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2566
2567                                         /* reg_in <= reg_out_arg */
2568                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2569                                         reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2570
2571                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2572                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2573
2574                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2575                                         assert(spill_p);
2576
2577                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2578                                         if(opt_memcopies)
2579                                                 lpp_set_factor_fast(si->lpp, mem_in, op->attr.live_range.args.copies[n], -1.0);
2580
2581                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2582                                 }
2583                         }
2584                 } else {
2585                         /* else ensure the value arrives on all paths in the same resource */
2586
2587                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2588                                 ilp_cst_t       mem_in,
2589                                                                 reg_in;
2590                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2591                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2592                                 spill_t        *spill_p;
2593
2594                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2595                                 mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2596                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2597                                 reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2598
2599                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2600                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2601
2602                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2603                                 assert(spill_p);
2604
2605                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2606                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2607                         }
2608                 }
2609         }
2610
2611         foreach_post_remat(bb, tmp) {
2612                 int         n;
2613
2614                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2615                         ir_node    *remat_arg = get_irn_n(tmp, n);
2616                         op_t       *remat_op = get_irn_link(tmp);
2617
2618                         if(!has_reg_class(si, remat_arg)) continue;
2619
2620                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2621                         assert(spill);
2622
2623                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2624                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2625                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2626                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2627                 }
2628         }
2629
2630         pset_foreach(live, irn) {
2631                 const op_t      *op = get_irn_link(irn);
2632                 const ir_node   *remat;
2633                 int              n_remats = 0;
2634
2635                 cst = ILP_UNDEF;
2636
2637                 foreach_post_remat(bb, remat) {
2638                         int   n;
2639
2640                         for (n=get_irn_arity(remat)-1; n>=0; --n) {
2641                                 const ir_node  *arg = get_irn_n(remat, n);
2642
2643                                 if(arg == irn) {
2644                                         const op_t   *remat_op = get_irn_link(remat);
2645
2646                                         if(cst == ILP_UNDEF) {
2647                                                 /* sum remat2s <= 1 + n_remats*live_range */
2648                                                 ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", irn, bb);
2649                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2650                                         }
2651                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2652                                         ++n_remats;
2653                                         break;
2654                                 }
2655                         }
2656                 }
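                /* the -n_remats factor merely relaxes the bound above:
                 *   sum remat2s <= 1 + n_remats * live_range
                 * i.e. with live_range = 0 at most one of the collected remat2
                 * operators may be active, with live_range = 1 all of them may.
                 */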
2657                 if(cst != ILP_UNDEF && op->attr.live_range.ilp != ILP_UNDEF) {
2658                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -n_remats);
2659                 }
2660         }
2661
2662         /* first live ranges from reg_ins */
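        /* first_lr:  live_range <= reg_in + sum of post remats recomputing the value,
         * i.e. a live range at the block entry requires the value to be in a
         * register there or to be recreated by a post remat.
         */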
2663         pset_foreach(live, irn) {
2664                 op_t      *op = get_irn_link(irn);
2665
2666                 if(op->attr.live_range.ilp != ILP_UNDEF) {
2667
2668                         spill = set_find_spill(spill_bb->ilp, irn);
2669                         assert(spill && spill->irn == irn);
2670
2671                         ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2672                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2673                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2674                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2675
2676                         foreach_post_remat(bb, tmp) {
2677                                 op_t     *remat_op = get_irn_link(tmp);
2678
2679                                 if(remat_op->attr.remat.remat->value == irn) {
2680                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2681                                 }
2682                         }
2683                 }
2684         }
2685
2686         /* walk forward now and compute constraints for placing spills */
2687         /* this must only be done for values that are not defined in this block */
2688         pset_foreach(live, irn) {
2689                 /*
2690                  * if the value is defined in this block we can always place the spill directly after the def
2691                  *    -> no constraint necessary (enforced by the assert below)
2692                  */
2693                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) {
2694                         assert(0);
2695                 }
2696
2697
2698                 spill = set_find_spill(spill_bb->ilp, irn);
2699                 assert(spill);
2700
2701                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2702                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2703
2704                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2705                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2706
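                /* req_spill so far reads  spill <= reg_in  (reg_in only on diverge
                 * edges); the walk below additionally allows a spill after any
                 * remat of the value that occurs before its first real use.
                 */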
2707                 if(!is_Phi(irn)) {
2708                         sched_foreach_op(bb, tmp) {
2709                                 op_t   *op = get_irn_link(tmp);
2710
2711                                 if(is_Phi(tmp)) continue;
2712                                 assert(!is_Proj(tmp));
2713
2714                                 if(op->is_remat) {
2715                                         const ir_node   *value = op->attr.remat.remat->value;
2716
2717                                         if(value == irn) {
2718                                                 /* only collect remats up to the first real use of a value */
2719                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2720                                         }
2721                                 } else {
2722                                         int   n;
2723
2724                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2725                                                 ir_node    *arg = get_irn_n(tmp, n);
2726
2727                                                 if(arg == irn) {
2728                                                         /* if the value is used, stop collecting remats */
2729                                                         goto next_live;
2730                                                 }
2731                                         }
2732                                 }
2733                         }
2734                 }
2735 next_live: ;
2736         }
2737
2738         del_pset(live);
2739 }
2740
2741 typedef struct _irnlist_t {
2742         struct list_head   list;
2743         ir_node           *irn;
2744 } irnlist_t;
2745
2746 typedef struct _interference_t {
2747         struct list_head    blocklist;
2748         ir_node            *a;
2749         ir_node            *b;
2750 } interference_t;
2751
2752 static int
2753 cmp_interference(const void *a, const void *b, size_t size)
2754 {
2755         const interference_t *p = a;
2756         const interference_t *q = b;
2757
2758         return !(p->a == q->a && p->b == q->b);
2759 }
2760
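/* interference pairs are stored with (a, b) brought into a fixed pointer order
 * and hashed symmetrically, so a pair is found independently of the order in
 * which the two values are passed in */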
2761 static interference_t *
2762 set_find_interference(set * set, ir_node * a, ir_node * b)
2763 {
2764         interference_t     query;
2765
2766         query.a = (a>b)?a:b;
2767         query.b = (a>b)?b:a;
2768
2769         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2770 }
2771
2772 static interference_t *
2773 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2774 {
2775         interference_t     query,
2776                                           *result;
2777         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2778
2779         list->irn = bb;
2780
2781         result = set_find_interference(set, a, b);
2782         if(result) {
2783
2784                 list_add(&list->list, &result->blocklist);
2785                 return result;
2786         }
2787
2788         query.a = (a>b)?a:b;
2789         query.b = (a>b)?b:a;
2790
2791         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2792
2793         INIT_LIST_HEAD(&result->blocklist);
2794         list_add(&list->list, &result->blocklist);
2795
2796         return result;
2797 }
2798
2799 static int
2800 values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_node * a, const ir_node * b)
2801 {
2802         const ir_edge_t *edge;
2803
2804         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2805                 /* both values are live in, so they interfere */
2806                 return 1;
2807         }
2808
2809         /* ensure a dominates b */
2810         if(value_dominates(b,a)) {
2811                 const ir_node * t;
2812                 t = b;
2813                 b = a;
2814                 a = t;
2815         }
2816         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2817
2818
2819         /* the following code is stolen from bera.c */
2820         if(be_is_live_end(si->lv, bb, a))
2821                 return 1;
2822
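        /* a interferes with b inside bb iff a is live at the end of bb (checked
         * above) or a has a user in bb that is dominated by b; Phi users, b
         * itself and operations in si->inverse_ops are not counted as users.
         */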
2823         foreach_out_edge(a, edge) {
2824                 const ir_node *user = edge->src;
2825                 if(get_nodes_block(user) == bb
2826                                 && !is_Phi(user)
2827                                 && b != user
2828                                 && !pset_find_ptr(si->inverse_ops, user)
2829                                 && value_dominates(b, user))
2830                         return 1;
2831         }
2832
2833         return 0;
2834 }
2835
2836 /**
2837  * Walk all irg blocks and collect interfering values inside phi classes
2838  */
2839 static void
2840 luke_interferencewalker(ir_node * bb, void * data)
2841 {
2842         spill_ilp_t    *si = (spill_ilp_t*)data;
2843         int             l1, l2;
2844
2845         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
2846                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
2847                 op_t           *a_op = get_irn_link(a);
2848
2849
2850                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2851                 if (has_reg_class(si, a) && get_phi_class(a)) {
2852                         if(a_op->is_remat || pset_find_ptr(si->inverse_ops, a))
2853                                 continue;
2854
2855                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
2856                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
2857                                 op_t           *b_op = get_irn_link(b);
2858
2859
2860                                 /* a and b are only interesting if they are in the same phi class */
2861                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2862                                         if(b_op->is_remat || pset_find_ptr(si->inverse_ops, b))
2863                                                 continue;
2864
2865                                         if(values_interfere_in_block(si, bb, a, b)) {
2866                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2867                                                 set_insert_interference(si, si->interferences, a, b, bb);
2868                                         }
2869                                 }
2870                         }
2871                 }
2872         }
2873 }
2874
2875 static unsigned int copy_path_id = 0;
2876
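/**
 * Emit one ILP constraint for a complete copy path:
 *    any_interfere <= sum of the copies on the path
 * i.e. whenever the two values interfere somewhere, at least one copy along
 * every connecting path through the phi class has to be taken.
 */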
2877 static void
2878 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2879 {
2880         ilp_cst_t  cst;
2881         ilp_var_t  copy;
2882         char       buf[256];
2883         void      *ptr;
2884
2885         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2886         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
2887
2888         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2889
2890         pset_foreach(copies, ptr) {
2891                 copy = PTR_TO_INT(ptr);
2892                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2893         }
2894 }
2895
2896 /**
2897  * @param copies   contains the path of copies which leads us to irn
2898  * @param visited  contains the set of nodes already visited on this path
2899  */
2900 static int
2901 find_copy_path(spill_ilp_t * si, const ir_node * irn, const ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2902 {
2903         const ir_edge_t *edge;
2904         op_t            *op = get_irn_link(irn);
2905     pset            *visited_users = pset_new_ptr_default();
2906         int              paths = 0;
2907
2908         if(op->is_remat) return 0;
2909
2910         pset_insert_ptr(visited, irn);
2911
2912         if(is_Phi(irn)) {
2913                 int    n;
2914         pset  *visited_operands = pset_new_ptr(get_irn_arity(irn));
2915
2916                 /* visit all operands */
2917                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2918                         ir_node  *arg = get_irn_n(irn, n);
2919                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2920
2921                         if(!has_reg_class(si, arg)) continue;
2922             if(pset_find_ptr(visited_operands, arg)) continue;
2923             pset_insert_ptr(visited_operands, arg);
2924
2925                         if(arg == target) {
2926                                 if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2927                                         del_pset(visited_operands);
2928                                         del_pset(visited_users);
2929                                         pset_remove_ptr(visited, irn);
2930                                         return paths;
2931                                 }
2932                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2933                                 write_copy_path_cst(si, copies, any_interfere);
2934                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2935                         } else if(!pset_find_ptr(visited, arg)) {
2936                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2937                                 paths += find_copy_path(si, arg, target, any_interfere, copies, visited);
2938                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2939
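                /* path explosion fallback: if MAX_PATHS is exceeded and no copy
                 * prefix has been accumulated, give up enumerating and force
                 * copy == any_interfere instead (see always_copy below); otherwise
                 * propagate the abort to the caller.
                 */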
2940                 if(paths > MAX_PATHS) {
2941                     if(pset_count(copies) == 0) {
2942                         ilp_cst_t  cst;
2943                         char       buf[256];
2944
2945                         ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
2946                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
2947                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2948                         lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
2949                         DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
2950
2951                         paths = 0;
2952                     } else {
2953                         del_pset(visited_operands);
2954                         del_pset(visited_users);
2955                         pset_remove_ptr(visited, irn);
2956                         return paths;
2957                     }
2958                 } else if(pset_count(copies) == 0) {
2959                                         paths = 0;
2960                                 }
2961                         }
2962                 }
2963
2964         del_pset(visited_operands);
2965         }
2966
2967         /* visit all uses which are phis */
2968         foreach_out_edge(irn, edge) {
2969                 ir_node  *user = edge->src;
2970                 int       pos  = edge->pos;
2971                 op_t     *op = get_irn_link(user);
2972                 ilp_var_t copy;
2973
2974                 if(!is_Phi(user)) continue;
2975                 if(!has_reg_class(si, user)) continue;
2976         if(pset_find_ptr(visited_users, user)) continue;
2977         pset_insert_ptr(visited_users, user);
2978
2979                 copy = op->attr.live_range.args.copies[pos];
2980
2981                 if(user == target) {
2982                         if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2983                                 del_pset(visited_users);
2984                                 pset_remove_ptr(visited, irn);
2985                                 return paths;
2986                         }
2987                         pset_insert(copies, INT_TO_PTR(copy), copy);
2988                         write_copy_path_cst(si, copies, any_interfere);
2989                         pset_remove(copies, INT_TO_PTR(copy), copy);
2990                 } else if(!pset_find_ptr(visited, user)) {
2991                         pset_insert(copies, INT_TO_PTR(copy), copy);
2992                         paths += find_copy_path(si, user, target, any_interfere, copies, visited);
2993                         pset_remove(copies, INT_TO_PTR(copy), copy);
2994
2995             if(paths > MAX_PATHS) {
2996                 if(pset_count(copies) == 0) {
2997                     ilp_cst_t  cst;
2998                     char       buf[256];
2999
3000                     ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
3001                     cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
3002                     lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
3003                     lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
3004                     DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
3005
3006                     paths = 0;
3007                 } else {
3008                     del_pset(visited_users);
3009                     pset_remove_ptr(visited, irn);
3010                     return paths;
3011                 }
3012             } else if(pset_count(copies) == 0) {
3013                                 paths = 0;
3014                         }
3015                 }
3016         }
3017
3018     del_pset(visited_users);
3019         pset_remove_ptr(visited, irn);
3020         return paths;
3021 }
3022
3023 static void
3024 gen_copy_constraints(spill_ilp_t * si, const ir_node * a, const ir_node * b, ilp_var_t any_interfere)
3025 {
3026         pset * copies = pset_new_ptr_default();
3027         pset * visited = pset_new_ptr_default();
3028
3029         find_copy_path(si, a, b, any_interfere, copies, visited);
3030
3031         del_pset(visited);
3032         del_pset(copies);
3033 }
3034
3035
3036 static void
3037 memcopyhandler(spill_ilp_t * si)
3038 {
3039         interference_t   *interference;
3040         char              buf[256];
3041         /* test memory values for interference */
3042
3043         /* analyze phi classes */
3044         phi_class_compute(si->chordal_env->irg);
3045
3046         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
3047         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
3048
3049         /* now emit the ILP inequalities for these interferences */
3050         set_foreach(si->interferences, interference) {
3051                 irnlist_t      *irnlist;
3052                 ilp_var_t       interfere,
3053                                                 any_interfere;
3054                 ilp_cst_t       any_interfere_cst,
3055                                                 cst;
3056                 const ir_node  *a  = interference->a;
3057                 const ir_node  *b  = interference->b;
3058
3059                 /* any_interf <= \sum interf */
3060                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
3061                 any_interfere_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3062                 any_interfere = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
3063
3064                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
3065
3066                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
3067                         const ir_node  *bb = irnlist->irn;
3068                         spill_bb_t     *spill_bb = get_irn_link(bb);
3069                         spill_t        *spilla,
3070                                                    *spillb;
3071                         char           buf[256];
3072
3073                         spilla = set_find_spill(spill_bb->ilp, a);
3074                         assert(spilla);
3075
3076                         spillb = set_find_spill(spill_bb->ilp, b);
3077                         assert(spillb);
3078
3079                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
3080                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
3081                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
3082                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
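                        /* (1) forces interfere to 1 as soon as both values are held in
                         * memory in this block; (2) and (3) allow interfere to be 1 only
                         * if the respective value is in memory at all.
                         */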
3083                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
3084                         interfere = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
3085
3086                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
3087                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
3088
3089                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
3090                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
3091                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
3092                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
3093                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
3094
3095                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
3096                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3097
3098                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3099                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
3100                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
3101
3102                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
3103                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3104
3105                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3106                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
3107                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
3108
3109
3110                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
3111
3112                         /* any_interfere >= interf */
3113                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
3114                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3115
3116                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3117                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
3118                 }
3119
3120                 /* now that we know whether the two values interfere in memory we can emit the constraints that enforce copies */
3121                 gen_copy_constraints(si,a,b,any_interfere);
3122         }
3123 }
3124
3125
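/**
 * The ILP solver returns floating point values even for binary variables;
 * treat everything within 1e-5 of zero as "not selected".
 */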
3126 static INLINE int
3127 is_zero(double x)
3128 {
3129         return fabs(x) < 0.00001;
3130 }
3131
3132 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
3133 {
3134         spill_ilp_t *si = get_irg_link(current_ir_graph);
3135
3136         if(pset_find_ptr(si->all_possible_remats, n)) {
3137                 op_t   *op = (op_t*)get_irn_link(n);
3138                 assert(op && op->is_remat);
3139
3140                 if(!op->attr.remat.remat->inverse) {
3141                         if(op->attr.remat.pre) {
3142                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
3143                         } else {
3144                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
3145                         }
3146
3147                         return 1;
3148                 } else {
3149                         op_t   *op = (op_t*)get_irn_link(n);
3150                         assert(op && op->is_remat);
3151
3152                         if(op->attr.remat.pre) {
3153                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
3154                         } else {
3155                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
3156                         }
3157
3158                         return 1;
3159                 }
3160         }
3161
3162         return 0;
3163 }
3164
3165 static void
3166 dump_graph_with_remats(ir_graph * irg, const char * suffix)
3167 {
3168         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
3169         be_dump(irg, suffix, dump_ir_block_graph_sched);
3170         set_dump_node_vcgattr_hook(NULL);
3171 }
3172
3173 /**
3174  * Edge hook to dump the schedule edges with annotated register pressure.
3175  */
3176 static int
3177 sched_pressure_edge_hook(FILE *F, ir_node *irn)
3178 {
3179         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
3180                 ir_node *prev = sched_prev(irn);
3181                 fprintf(F, "edge:{sourcename:\"");
3182                 PRINT_NODEID(irn);
3183                 fprintf(F, "\" targetname:\"");
3184                 PRINT_NODEID(prev);
3185                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
3186                 fprintf(F, "\" color:magenta}\n");
3187         }
3188         return 1;
3189 }
3190
3191 static void
3192 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
3193 {
3194         DUMP_NODE_EDGE_FUNC old_edge_hook = get_dump_node_edge_hook();
3195
3196         dump_consts_local(0);
3197         set_dump_node_edge_hook(sched_pressure_edge_hook);
3198         dump_ir_block_graph(irg, suffix);
3199         set_dump_node_edge_hook(old_edge_hook);
3200 }
3201
3202 static void
3203 walker_pressure_annotator(ir_node * bb, void * data)
3204 {
3205         spill_ilp_t  *si = data;
3206         ir_node      *irn;
3207         int           n, i;
3208         pset         *live = pset_new_ptr_default();
3209         int           projs = 0;
3210
3211         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
3212                 irn = be_lv_get_irn(si->lv, bb, i);
3213
3214                 if (has_reg_class(si, irn)) {
3215                         pset_insert_ptr(live, irn);
3216                 }
3217         }
3218
3219         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
3220
3221         sched_foreach_reverse(bb, irn) {
3222                 if(is_Phi(irn)) {
3223                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
3224                         continue;
3225                 }
3226
3227                 if(has_reg_class(si, irn)) {
3228                         pset_remove_ptr(live, irn);
3229                         if(is_Proj(irn)) ++projs;
3230                 }
3231
3232                 if(!is_Proj(irn)) projs = 0;
3233
3234                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
3235                         ir_node    *arg = get_irn_n(irn, n);
3236
3237                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
3238                 }
3239                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
3240         }
3241
3242         del_pset(live);
3243 }
3244
3245 static void
3246 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
3247 {
3248         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
3249 }
3250
3251 static void
3252 connect_all_remats_with_keep(spill_ilp_t * si)
3253 {
3254         ir_node   *irn;
3255         ir_node  **ins,
3256                          **pos;
3257         int        n_remats;
3258
3259
3260         n_remats = pset_count(si->all_possible_remats);
3261         if(n_remats) {
3262                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
3263
3264                 pos = ins;
3265                 pset_foreach(si->all_possible_remats, irn) {
3266                         *pos = irn;
3267                         ++pos;
3268                 }
3269
3270                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
3271
3272                 obstack_free(si->obst, ins);
3273         }
3274 }
3275
3276 static void
3277 connect_all_spills_with_keep(spill_ilp_t * si)
3278 {
3279         ir_node   *irn;
3280         ir_node  **ins,
3281                          **pos;
3282         int        n_spills;
3283         ir_node   *keep;
3284
3285
3286         n_spills = pset_count(si->spills);
3287         if(n_spills) {
3288                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
3289
3290                 pos = ins;
3291                 pset_foreach(si->spills, irn) {
3292                         *pos = irn;
3293                         ++pos;
3294                 }
3295
3296                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
3297
3298                 obstack_free(si->obst, ins);
3299         }
3300 }
3301
3302 /** insert a spill at an arbitrary position */
3303 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
3304 {
3305         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
3306         ir_graph *irg   = get_irn_irg(bl);
3307         ir_node *frame  = get_irg_frame(irg);
3308         ir_node *spill;
3309         ir_node *next;
3310
3311         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
3312         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
3313
3314         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
3315
3316         /*
3317          * search for the right insertion point: a spill of a Phi cannot be
3318          * placed directly after the Phi if further Phis follow the one being
3319          * spilled. Likewise, a spill of a Proj must come after all Projs of
3320          * the same tuple node.
3321          *
3322          * One special case:
3323          * If the spill is in the start block, it must come after the frame
3324          * pointer has been set up; the check below moves the insertion point
3325          * behind the frame node in that case.
3326          */
3327
3328         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
3329                 insert = frame;
3330
3331         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
3332                 insert = next;
3333
3334         sched_add_after(insert, spill);
3335         return spill;
3336 }
3337
3338 static void
3339 delete_remat(spill_ilp_t * si, ir_node * remat) {
3340         int       n;
3341         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3342
3343         sched_remove(remat);
3344
3345         /* kill links to operands */
3346         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
3347                 set_irn_n(remat, n, bad);
3348         }
3349 }
3350
3351 static void
3352 clean_remat_info(spill_ilp_t * si)
3353 {
3354         int            n;
3355         remat_t       *remat;
3356         remat_info_t  *remat_info;
3357         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
3358
3359         set_foreach(si->remat_info, remat_info) {
3360                 if(!remat_info->remats) continue;
3361
3362                 pset_foreach(remat_info->remats, remat)
3363                 {
3364                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
3365                                 if(sched_is_scheduled(remat->proj)) {
3366                                         sched_remove((ir_node*)remat->proj);
3367                                 }
3368                                 set_irn_n((ir_node*)remat->proj, -1, bad);
3369                                 set_irn_n((ir_node*)remat->proj, 0, bad);
3370                         }
3371
3372                         if(get_irn_n_edges(remat->op) == 0) {
3373                                 if(sched_is_scheduled(remat->op)) {
3374                                         sched_remove((ir_node*)remat->op);
3375                                 }
3376                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
3377                                         set_irn_n((ir_node*)remat->op, n, bad);
3378                                 }
3379                         }
3380                 }
3381
3382                 if(remat_info->remats) del_pset(remat_info->remats);
3383                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
3384         }
3385 }
3386
3387 static void
3388 delete_unnecessary_remats(spill_ilp_t * si)
3389 {
3390         if(opt_keep_alive & KEEPALIVE_REMATS) {
3391                 int       n;
3392                 ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3393
3394                 if(si->keep) {
3395                         for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
3396                                 ir_node        *keep_arg = get_irn_n(si->keep, n);
3397                                 op_t           *arg_op = get_irn_link(keep_arg);
3398                                 lpp_name_t     *name;
3399
3400                                 assert(arg_op->is_remat);
3401
3402                                 name = si->lpp->vars[arg_op->attr.remat.ilp];
3403
3404                                 if(is_zero(name->value)) {
3405                                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
3406                                         /* TODO check whether reload is preferred over remat (could be bug) */
3407                                         delete_remat(si, keep_arg);
3408                                 } else {
3409                                         if(!arg_op->attr.remat.remat->inverse) {
3410                                                 if(arg_op->attr.remat.pre) {
3411                                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
3412                                                 } else {
3413                                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
3414                                                 }
3415                                         } else {
3416                                                 if(arg_op->attr.remat.pre) {
3417                                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
3418                                                 } else {
3419                                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
3420                                                 }
3421                                         }
3422                                 }
3423
3424                                 set_irn_n(si->keep, n, bad);
3425                         }
3426                 } else {
3427                         DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
3428                 }
3429         } else {
3430                 ir_node  *remat;
3431
3432                 pset_foreach(si->all_possible_remats, remat) {
3433                         op_t           *remat_op = get_irn_link(remat);
3434                         lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
3435
3436                         if(is_zero(name->value)) {
3437                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
3438                                 /* TODO check whether reload is preferred over remat (could be bug) */
3439                                 delete_remat(si, remat);
3440                         } else {
3441                                 if(!remat_op->attr.remat.remat->inverse) {
3442                                         if(remat_op->attr.remat.pre) {
3443                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3444                                         } else {
3445                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3446                                         }
3447                                 } else {
3448                                         if(remat_op->attr.remat.pre) {
3449                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3450                                         } else {
3451                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3452                                         }
3453                                 }
3454                         }
3455                 }
3456         }
3457 }
3458
3459 static pset *
3460 get_spills_for_value(spill_ilp_t * si, const ir_node * value)
3461 {
3462         pset     *spills = pset_new_ptr_default();
3463
3464         const ir_node  *next;
3465         defs_t         *defs;
3466
3467         defs = set_find_def(si->values, value);
3468
3469         if(defs && defs->spills) {
3470                 for(next = defs->spills; next; next = get_irn_link(next)) {
3471                         pset_insert_ptr(spills, next);
3472                 }
3473         }
3474
3475         return spills;
3476 }
3477
3478 /**
3479  * @param before   The node after which the spill will be placed in the schedule
3480  */
3481 static ir_node *
3482 insert_spill(spill_ilp_t * si, ir_node * irn, const ir_node * value, ir_node * before)
3483 {
3484         defs_t   *defs;
3485         ir_node  *spill;
3486         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3487
3488         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3489
3490         spill = be_spill2(arch_env, irn, before);
3491
3492         defs = set_insert_def(si->values, value);
3493         assert(defs);
3494
3495         /* enter into the linked list */
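        /* the list is threaded through the link fields: each spill points to the
         * next spill of the same value, defs->spills is the head */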
3496         set_irn_link(spill, defs->spills);
3497         defs->spills = spill;
3498
3499         if(opt_keep_alive & KEEPALIVE_SPILLS)
3500                 pset_insert_ptr(si->spills, spill);
3501
3502         return spill;
3503 }
3504
3505 /**
3506  * @param phi   The Phi node which has to be spilled
3507  */
3508 static ir_node *
3509 insert_mem_phi(spill_ilp_t * si, ir_node * phi)
3510 {
3511         ir_node   *mem_phi;
3512         ir_node  **ins;
3513         defs_t    *defs;
3514         int        n;
3515
3516         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3517
3518         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3519                 ins[n] = si->m_unknown;
3520         }
3521
3522         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3523
3524         defs = set_insert_def(si->values, phi);
3525         assert(defs);
3526
3527         /* enter into the linked list */
3528         set_irn_link(mem_phi, defs->spills);
3529         defs->spills = mem_phi;
3530
3531 #ifdef SCHEDULE_PHIM
3532         sched_add_after(phi, mem_phi);
3533 #endif
3534
3535         if(opt_keep_alive & KEEPALIVE_SPILLS)
3536                 pset_insert_ptr(si->spills, mem_phi);
3537
3538
3539         return mem_phi;
3540 }
3541
3542 /**
3543  * Add remat to list of defs, destroys link field!
3544  */
3545 static void
3546 insert_remat(spill_ilp_t * si, ir_node * remat)
3547 {
3548         defs_t   *defs;
3549         op_t     *remat_op = get_irn_link(remat);
3550
3551         assert(remat_op->is_remat);
3552
3553         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3554         assert(defs);
3555
3556         /* enter into the linked list */
3557         set_irn_link(remat, defs->remats);
3558         defs->remats = remat;
3559 }
3560
3561
3562 /**
3563  * Add reload before operation and add to list of defs
3564  */
3565 static ir_node *
3566 insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
3567 {
3568         defs_t   *defs;
3569         ir_node  *reload,
3570                          *spill;
3571         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3572
3573         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3574
3575         defs = set_find_def(si->values, value);
3576
3577         spill = defs->spills;
3578         assert(spill && "no spill placed before reload");
3579
3580         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3581
3582         /* enter into the linked list */
3583         set_irn_link(reload, defs->remats);
3584         defs->remats = reload;
3585
3586         return reload;
3587 }
3588
3589 void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
3590 {
3591         defs_t           *defs;
3592         ir_node          *value = get_irn_n(memoperand->irn, memoperand->pos);
3593         ir_node          *spill;
3594         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3595
3596         DBG((si->dbg, LEVEL_2, "\t  inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
3597
3598         defs = set_find_def(si->values, value);
3599
3600         spill = defs->spills;
3601         assert(spill && "no spill placed before reload");
3602
3603         arch_perform_memory_operand(arch_env, memoperand->irn, spill, memoperand->pos);
3604 }
3605
3606 void insert_memoperands(spill_ilp_t * si)
3607 {
3608         memoperand_t   *memoperand;
3609         lpp_name_t     *name;
3610
3611         set_foreach(si->memoperands, memoperand) {
3612                 name = si->lpp->vars[memoperand->ilp];
3613                 if(!is_zero(name->value)) {
3614                         perform_memory_operand(si, memoperand);
3615                 }
3616         }
3617 }
3618
3619 static void
3620 walker_spill_placer(ir_node * bb, void * data) {
3621         spill_ilp_t   *si = (spill_ilp_t*)data;
3622         ir_node       *irn;
3623         spill_bb_t    *spill_bb = get_irn_link(bb);
3624         pset          *spills_to_do = pset_new_ptr_default();
3625         spill_t       *spill;
3626
3627         set_foreach(spill_bb->ilp, spill) {
3628                 lpp_name_t    *name;
3629
3630                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3631                         name = si->lpp->vars[spill->mem_in];
3632                         if(!is_zero(name->value)) {
3633                                 ir_node   *mem_phi;
3634
3635                                 mem_phi = insert_mem_phi(si, spill->irn);
3636
3637                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3638                         }
3639                 }
3640
3641                 name = si->lpp->vars[spill->spill];
3642                 if(!is_zero(name->value)) {
3643                         /* place spill directly after definition */
3644                         if(get_nodes_block(spill->irn) == bb) {
3645                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3646                                 continue;
3647                         }
3648
3649                         /* place spill at bb start */
3650                         if(spill->reg_in > 0) {
3651                                 name = si->lpp->vars[spill->reg_in];
3652                                 if(!is_zero(name->value)) {
3653                                         insert_spill(si, spill->irn, spill->irn, bb);
3654                                         continue;
3655                                 }
3656                         }
3657                         /* place spill after a remat */
3658                         pset_insert_ptr(spills_to_do, spill->irn);
3659                 }
3660         }
3661         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3662
3663
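        /* the remaining spills are placed while walking the schedule: as soon as
         * an operation recreating one of the pending values (typically a remat)
         * is reached, the spill is inserted right after it */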
3664         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3665                 op_t     *op = get_irn_link(irn);
3666
3667                 if(be_is_Spill(irn)) continue;
3668
3669                 if(op->is_remat) {
3670                         /* TODO fix this if we want to support remats with more than two nodes */
3671                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3672                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3673
3674                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3675                         }
3676                 } else {
3677                         if(pset_find_ptr(spills_to_do, irn)) {
3678                                 pset_remove_ptr(spills_to_do, irn);
3679
3680                                 insert_spill(si, irn, irn, irn);
3681                         }
3682                 }
3683
3684         }
3685
3686         assert(pset_count(spills_to_do) == 0);
3687
3688         /* afterwards free data in block */
3689         del_pset(spills_to_do);
3690 }
3691
3692 static ir_node *
3693 insert_mem_copy(spill_ilp_t * si, ir_node * bb, ir_node * value)
3694 {
3695         ir_node          *insert_pos = bb;
3696         ir_node          *spill;
3697         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3698
3699         /* find last definition of arg value in block */
3700         ir_node  *next;
3701         defs_t   *defs;
3702         int       last = 0;
3703
3704         defs = set_find_def(si->values, value);
3705
3706         if(defs && defs->remats) {
3707                 for(next = defs->remats; next; next = get_irn_link(next)) {
3708                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3709                                 last = sched_get_time_step(next);
3710                                 insert_pos = next;
3711                         }
3712                 }
3713         }
3714
3715         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3716                 last = sched_get_time_step(value);
3717                 insert_pos = value;
3718         }
3719
3720         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3721
3722         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
3723
3724         return spill;
3725 }
3726
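/**
 * Wire up the arguments of every memory Phi (PhiM): each operand is set to a
 * spill of the corresponding Phi argument, or to a freshly inserted memory
 * copy in the predecessor block if the ILP selected the copy variable.
 */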
3727 static void
3728 phim_fixer(spill_ilp_t *si) {
3729         defs_t  *defs;
3730
3731         set_foreach(si->values, defs) {
3732                 const ir_node  *phi = defs->value;
3733                 op_t           *op = get_irn_link(phi);
3734                 ir_node        *phi_m = NULL;
3735                 ir_node        *next = defs->spills;
3736                 int             n;
3737
3738                 if(!is_Phi(phi)) continue;
3739
3740                 while(next) {
3741                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3742                                 phi_m = next;
3743                                 break;
3744                         } else {
3745                                 next = get_irn_link(next);
3746                         }
3747                 }
3748                 if(!phi_m) continue;
3749
3750                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3751                         ir_node        *value = get_irn_n(phi, n);
3752                         defs_t         *val_defs = set_find_def(si->values, value);
3753
3754                         /* a spill of this value */
3755                         ir_node      *spill;
3756
3757
3758                         if(opt_memcopies) {
3759                                 ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3760                                 lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3761
3762                                 if(!is_zero(name->value)) {
3763                                         spill = insert_mem_copy(si, pred, value);
3764                                 } else {
3765                                         spill = val_defs->spills;
3766                                 }
3767                         } else {
3768                                 spill = val_defs->spills;
3769                         }
3770
3771                         assert(spill && "no spill placed before PhiM");
3772                         set_irn_n(phi_m, n, spill);
3773                 }
3774         }
3775 }
3776
3777 static void
3778 walker_reload_placer(ir_node * bb, void * data) {
3779         spill_ilp_t   *si = (spill_ilp_t*)data;
3780         ir_node       *irn;
3781         spill_bb_t    *spill_bb = get_irn_link(bb);
3782
3783         /* reloads at end of block */
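        /* spill_bb->reloads maps a live value (key) to the ILP variable of its
         * end-of-block reload (val); only reloads selected by the solver are
         * materialised */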
3784         if(spill_bb->reloads) {
3785                 keyval_t    *keyval;
3786
3787                 set_foreach(spill_bb->reloads, keyval) {
3788                         ir_node        *irn = (ir_node*)keyval->key;
3789                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3790                         lpp_name_t     *name;
3791
3792                         name = si->lpp->vars[reload];
3793                         if(!is_zero(name->value)) {
3794                                 ir_node    *reload;
3795                                 ir_node    *insert_pos = bb;
3796                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3797                                 op_t       *prev_op = get_irn_link(prev);
3798
3799                                 while(be_is_Spill(prev)) {
3800                                         prev = sched_prev(prev);
3801                                 }
3802
3803                                 prev_op = get_irn_link(prev);
3804
3805                                 /* insert reload before pre-remats */
3806                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3807                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3808                                         insert_pos = prev;
3809
3810                                         do {
3811                                                 prev = sched_prev(prev);
3812                                         } while(be_is_Spill(prev));
3813
3814                                         prev_op = get_irn_link(prev);
3815
3816                                 }
3817
3818                                 reload = insert_reload(si, irn, insert_pos);
3819
3820                                 if(opt_keep_alive & KEEPALIVE_RELOADS)
3821                                         pset_insert_ptr(si->spills, reload);
3822                         }
3823                 }
3824         }
3825
3826         /* walk and insert more reloads and collect remats */
3827         sched_foreach_reverse(bb, irn) {
3828                 op_t     *op = get_irn_link(irn);
3829
3830                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3831                 if(is_Phi(irn)) break;
3832
3833                 if(op->is_remat) {
3834                         if(get_irn_mode(irn) != mode_T) {
3835                                 insert_remat(si, irn);
3836                         }
3837                 } else {
3838                         int    n;
3839
3840                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3841                                 ir_node    *arg = get_irn_n(irn, n);
3842
3843                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3844                                         lpp_name_t    *name;
3845
3846                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3847                                         if(!is_zero(name->value)) {
3848                                                 ir_node    *reload;
3849                                                 ir_node    *insert_pos = irn;
3850                                                 ir_node    *prev = sched_prev(insert_pos);
3851                                                 op_t       *prev_op;
3852
3853                                                 while(be_is_Spill(prev)) {
3854                                                         prev = sched_prev(prev);
3855                                                 }
3856
3857                                                 prev_op = get_irn_link(prev);
3858
3859                                                 /* insert reload before pre-remats */
3860                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3861                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3862                                                         insert_pos = prev;
3863
3864                                                         do {
3865                                                                 prev = sched_prev(prev);
3866                                                         } while(be_is_Spill(prev));
3867
3868                                                         prev_op = get_irn_link(prev);
3869
3870                                                 }
3871
3872                                                 reload = insert_reload(si, arg, insert_pos);
3873
3874                                                 set_irn_n(irn, n, reload);
3875
3876                                                 if(opt_keep_alive & KEEPALIVE_RELOADS)
3877                                                         pset_insert_ptr(si->spills, reload);
3878                                         }
3879                                 }
3880                         }
3881                 }
3882         }
3883
3884         del_set(spill_bb->ilp);
3885         if(spill_bb->reloads) del_set(spill_bb->reloads);
3886 }
3887
3888 static void
3889 walker_collect_used(ir_node * irn, void * data)
3890 {
3891         bitset_t   *used = data;
3892
3893         bitset_set(used, get_irn_idx(irn));
3894 }
3895
3896 struct kill_helper {
3897         bitset_t  *used;
3898         spill_ilp_t  *si;
3899 };
3900
3901 static void
3902 walker_kill_unused(ir_node * bb, void * data)
3903 {
3904         struct kill_helper *kh = data;
3905         ir_node            *bad = get_irg_bad(get_irn_irg(bb));
3906         ir_node            *irn;
3907
3908
3909         for(irn=sched_first(bb); !sched_is_end(irn);) {
3910                 ir_node     *next = sched_next(irn);
3911                 int          n;
3912
3913                 if(!bitset_is_set(kh->used, get_irn_idx(irn))) {
3914                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3915                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3916 #if 0
3917                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3918 #endif
3919                         }
3920
3921                         sched_remove(irn);
3922
3923                         set_nodes_block(irn, bad);
3924                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3925                                 set_irn_n(irn, n, bad);
3926                         }
3927                 }
3928                 irn = next;
3929         }
3930 }
3931
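/**
 * Mark all nodes reachable by a full graph walk and remove everything else
 * from the block schedules
 */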
3932 static void
3933 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3934 {
3935         struct kill_helper kh;
3936
3937         kh.used = bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3938         kh.si = si;
3939
3940         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3941         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3942
3943         bitset_free(kh.used);
3944 }
3945
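/**
 * Debugging aid: print all nodes contained in the given pset
 */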
3946 void
3947 print_irn_pset(pset * p)
3948 {
3949         ir_node   *irn;
3950
3951         pset_foreach(p, irn) {
3952                 ir_printf("%+F\n", irn);
3953         }
3954 }
3955
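/**
 * Dump a phi class as a dot graph: phis are drawn as boxes, edges point to
 * their arguments, and recorded memory interferences of this class are drawn
 * as red undirected edges
 */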
3956 void
3957 dump_phi_class(spill_ilp_t * si, pset * phiclass, const char * file)
3958 {
3959     FILE           *f = fopen(file, "w");
3960     ir_node        *irn;
3961     interference_t *interference;
3962
         /* the dump is best effort: bail out if the file could not be opened */
         if(f == NULL) return;

3963     pset_break(phiclass);
3964     set_break(si->interferences);
3965
3966     ir_fprintf(f, "digraph phiclass {\n");
3967
3968     pset_foreach(phiclass, irn) {
3969         if(is_Phi(irn))
3970             ir_fprintf(f, "  %F%N [shape=box]\n",irn,irn);
3971     }
3972
3973     pset_foreach(phiclass, irn) {
3974         int n;
3975
3976         if(!is_Phi(irn)) continue;
3977
3978         for(n=get_irn_arity(irn)-1; n>=0; --n) {
3979             ir_node  *arg = get_irn_n(irn, n);
3980
3981             ir_fprintf(f, "  %F%N -> %F%N\n",irn,irn,arg,arg);
3982         }
3983     }
3984
3985     set_foreach(si->interferences, interference) {
3986         const ir_node  *a  = interference->a;
3987         const ir_node  *b  = interference->b;
3988         if(get_phi_class(a) == phiclass) {
3989             ir_fprintf(f, "  %F%N -> %F%N [color=red,dir=none,style=bold]\n",a,a,b,b);
3990         }
3991     }
3992
3993     ir_fprintf(f, "}\n");
3994     fclose(f);
3995 }
3996
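/**
 * Re-establish SSA form after spilling: for every value, its spills are
 * constrained into one set (ignoring the end node) and afterwards the uses of
 * the original value, its remats and its reloads are rewired
 */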
3997 static void
3998 rewire_uses(spill_ilp_t * si)
3999 {
4000         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
4001         defs_t               *defs;
4002         pset                 *ignore = pset_new_ptr(1);
4003
4004         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
4005
4006         /* first fix the uses of spills */
4007         set_foreach(si->values, defs) {
4008                 pset           *reloads;
4009                 pset           *spills;
4010                 const ir_node  *next = defs->remats;
4011                 int remats = 0;
4012
4013                 reloads = pset_new_ptr_default();
4014
4015                 while(next) {
4016                         if(be_is_Reload(next)) {
4017                                 pset_insert_ptr(reloads, next);
4018                         } else {
4019                                 ++remats;
4020                         }
4021                         next = get_irn_link(next);
4022                 }
4023
4024                 spills = get_spills_for_value(si, defs->value);
4025                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
4026                 if(pset_count(spills) > 1) {
4027                         //assert(pset_count(reloads) > 0);
4028                         //print_irn_pset(spills);
4029                         //print_irn_pset(reloads);
4030
4031                         be_ssa_constr_set_ignore(dfi, si->lv, spills, ignore);
4032                 }
4033
4034                 del_pset(reloads);
4035                 del_pset(spills);
4036         }
4037
4038         /* then fix the uses of remats and reloads */
4039         set_foreach(si->values, defs) {
4040                 pset           *nodes;
4041                 const ir_node  *next = defs->remats;
4042                 int             orig_kept = 0;
4043
4044                 if(next) {
4045                         nodes = pset_new_ptr_default();
4046                         if(sched_is_scheduled(defs->value)) {
4047                                 pset_insert_ptr(nodes, defs->value);
4048                                 orig_kept = 1;
4049                         }
4050
4051                         while(next) {
4052                                 pset_insert_ptr(nodes, next);
4053                                 next = get_irn_link(next);
4054                         }
4055
4056                         DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-orig_kept, defs->value));
4057                         be_ssa_constr_set(dfi, si->lv, nodes);
4058
4059                         del_pset(nodes);
4060                 }
4061         }
4062
4063 //      remove_unused_defs(si);
4064
4065         be_free_dominance_frontiers(dfi);
4066 }
4067
4068
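/**
 * Apply the ILP solution to the graph: delete unnecessary remats, place
 * spills, reloads and memory operands, fix the memory inputs of PhiMs and
 * finally rewire all uses
 */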
4069 static void
4070 writeback_results(spill_ilp_t * si)
4071 {
4072         /* walk through the graph and collect all spills, reloads and remats for a value */
4073
4074         si->values = new_set(cmp_defs, 4096);
4075
4076         DBG((si->dbg, LEVEL_1, "Applying results\n"));
4077         delete_unnecessary_remats(si);
4078         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
4079         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
4080         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
4081         if(opt_memoperands)
4082                 insert_memoperands(si);
4083         phim_fixer(si);
4084
4085         /* clean the remat info! there are still back-edges leading there! */
4086         clean_remat_info(si);
4087
4088         rewire_uses(si);
4089
4090         connect_all_spills_with_keep(si);
4091
4092         del_set(si->values);
4093 }
4094
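/**
 * Return the number of registers of the current class that are actually
 * available for allocation (all non-ignore registers minus the ones reserved
 * by the ABI)
 */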
4095 static int
4096 get_n_regs(spill_ilp_t * si)
4097 {
4098         int       arch_n_regs = arch_register_class_n_regs(si->cls);
4099
4100         bitset_t *arch_regs = bitset_malloc(arch_n_regs);
4101         bitset_t *abi_regs = bitset_malloc(arch_n_regs);
4102
4103         arch_put_non_ignore_regs(si->chordal_env->birg->main_env->arch_env, si->cls, arch_regs);
4104         be_abi_put_ignore_regs(si->chordal_env->birg->abi, si->cls, abi_regs);
4105
4106         bitset_andnot(arch_regs, abi_regs);
4107         arch_n_regs = bitset_popcnt(arch_regs);
4108
4109         bitset_free(arch_regs);
4110         bitset_free(abi_regs);
4111
4112         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", arch_n_regs, si->cls->name));
4113         return arch_n_regs;
4114 }
4115
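/**
 * Block walker: move each Reload of the current register class upwards in the
 * schedule as long as the annotated register pressure stays below the number
 * of available registers and neither a Phi, another Reload nor the Reload's
 * own Spill is crossed
 */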
4116 static void
4117 walker_reload_mover(ir_node * bb, void * data)
4118 {
4119         spill_ilp_t   *si = data;
4120         ir_node           *tmp;
4121
4122         sched_foreach(bb, tmp) {
4123                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
4124                         ir_node       *reload = tmp;
4125                         ir_node       *irn = tmp;
4126
4127                         /* move reload upwards */
4128
4129                         int pressure = PTR_TO_INT(get_irn_link(reload));
4130                         if(pressure < si->n_regs) {
4131                                 irn = sched_prev(reload);
4132                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
4133                                 sched_remove(reload);
4134                                 pressure = PTR_TO_INT(get_irn_link(irn));
4135
4136                                 while(pressure < si->n_regs) {
4137                                         if( sched_is_end(irn) ||
4138                                            (be_is_Reload(irn) && has_reg_class(si, irn)) ||
4139                                            /* do not move reload before its spill */
4140                                            (irn == be_get_Reload_mem(reload)) ||
4141                                            /* do not move before phi */
4142                                            is_Phi(irn)) break;
4143
4144                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
4145                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
4146                                         irn = sched_prev(irn);
4147
4148                                         pressure = PTR_TO_INT(get_irn_link(irn));
4149                                 }
4150
4151                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
4152                                 sched_put_after(irn, reload);
4153                         }
4154                 }
4155         }
4156 }
4157
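/**
 * Move all reloads upwards in the schedules (requires up-to-date register
 * pressure annotations on the node links)
 */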
4158 static void
4159 move_reloads_upward(spill_ilp_t * si)
4160 {
4161         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
4162 }
4163
4164
4165 /**
4166  * Walk all irg blocks and check for interfering spills inside of phi classes
4167  */
4168 static void
4169 luke_meminterferencechecker(ir_node * bb, void * data)
4170 {
4171         spill_ilp_t    *si = (spill_ilp_t*)data;
4172         int             l1, l2;
4173
4174         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
4175                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
4176
4177                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_T)) continue;
4178
4179                 /* a is only interesting if it is in my register class and if it is inside a phi class */
4180                 if (has_reg_class(si, a) && get_phi_class(a)) {
4181                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
4182                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
4183
4184                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_T)) continue;
4185
4186                                 /* a and b are only interesting if they are in the same phi class */
4187                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
4188                                         if(values_interfere_in_block(si, bb, a, b)) {
4189                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
4190                                         }
4191                                 }
4192                         }
4193                 }
4194         }
4195 }
4196
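/**
 * Recompute the phi classes and verify that no two spills of the same phi
 * class interfere in memory
 */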
4197 static void
4198 verify_phiclasses(spill_ilp_t * si)
4199 {
4200         /* analyze phi classes */
4201         phi_class_compute(si->chordal_env->irg);
4202
4203         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
4204         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
4205 }
4206
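/**
 * Main entry point: build the spill/rematerialization ILP for one register
 * class, solve it (when SOLVE is defined) and write the results back into
 * the graph
 */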
4207 void
4208 be_spill_remat(const be_chordal_env_t * chordal_env)
4209 {
4210         char            buf[256];
4211         char            problem_name[256];
4212         char            dump_suffix[256];
4213         char            dump_suffix2[256];
4214         struct obstack  obst;
4215         spill_ilp_t     si;
4216
4217         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
4218         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
4219         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
4220
4221         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
4222         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
4223
4224         if(opt_verify & VERIFY_DOMINANCE)
4225                 be_check_dominance(chordal_env->irg);
4226
4227         obstack_init(&obst);
4228         si.chordal_env = chordal_env;
4229         si.obst = &obst;
4230         si.cls = chordal_env->cls;
4231         si.lpp = new_lpp(problem_name, lpp_minimize);
4232         si.remat_info = new_set(cmp_remat_info, 4096);
4233         si.interferences = new_set(cmp_interference, 32);
4234         si.memoperands = new_set(cmp_memoperands, 128);
4235         si.all_possible_remats = pset_new_ptr_default();
4236         si.spills = pset_new_ptr_default();
4237         si.inverse_ops = pset_new_ptr_default();
4238         si.lv = chordal_env->lv;
4239         si.keep = NULL;
4240         si.n_regs = get_n_regs(&si);
4241
4242         set_irg_link(chordal_env->irg, &si);
4243         compute_doms(chordal_env->irg);
4244
4245         /* compute phi classes */
4246 //      phi_class_compute(chordal_env->irg);
4247
4248         be_analyze_regpressure(chordal_env, "-pre");
4249
4250         DBG((si.dbg, LEVEL_2, "\t initializing\n"));
4251         irg_block_walk_graph(chordal_env->irg, luke_initializer, NULL, &si);
4252
4253         if(opt_remats) {
4254                 /* collect remats */
4255                 DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
4256                 irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
4257         }
4258
4259         /* insert possible remats */
4260         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
4261         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
4262         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
4263
4264         if(opt_keep_alive & KEEPALIVE_REMATS) {
4265                 DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
4266                 connect_all_remats_with_keep(&si);
4267                 /* dump graph with inserted remats */
4268                 dump_graph_with_remats(chordal_env->irg, dump_suffix);
4269         }
4270
4271         /* insert copies for phi arguments not in my regclass */
4272         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
4273
4274         /* recompute liveness */
4275         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
4276         be_liveness_recompute(si.lv);
4277
4278         /* build the ILP */
4279         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
4280         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
4281         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
4282
4283         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
4284         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
4285
4286         if(opt_memcopies) {
4287                 DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
4288                 memcopyhandler(&si);
4289         }
4290
4291         if(opt_dump_flags & DUMP_PROBLEM) {
4292                 FILE           *f;
4293                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
4294                 if ((f = fopen(buf, "wt")) != NULL) {
4295                         lpp_dump_plain(si.lpp, f);
4296                         fclose(f);
4297                 }
4298         }
4299
4300         if(opt_dump_flags & DUMP_MPS) {
4301                 FILE *f;
4302
4303                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.mps", problem_name);
4304                 if((f = fopen(buf, "wt")) != NULL) {
4305                         mps_write_mps(si.lpp, s_mps_fixed, f);
4306                         fclose(f);
4307                 }
4308
4309                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.mst", problem_name);
4310                 if((f = fopen(buf, "wt")) != NULL) {
4311                         mps_write_mst(si.lpp, s_mps_fixed, f);
4312                         fclose(f);
4313                 }
4314         }
4315
4316         lpp_check_startvals(si.lpp);
4317
4318 #ifdef SOLVE
4319         DBG((si.dbg, LEVEL_1, "\tSolving %s (%d variables, %d constraints)\n", problem_name, si.lpp->var_next, si.lpp->cst_next));
4320         lpp_set_time_limit(si.lpp, opt_timeout);
4321
4322         if(opt_log)
4323                 lpp_set_log(si.lpp, stdout);
4324
4325 #ifdef SOLVE_LOCAL
4326         lpp_solve_cplex(si.lpp);
4327 #else
4328         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
4329 #endif
4330         assert(lpp_is_sol_valid(si.lpp)
4331                && "solution of ILP must be valid");
4332
4333         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
4334
4335         if(opt_dump_flags & DUMP_SOLUTION) {
4336                 FILE           *f;
4337                 char            buf[256];
4338
4339                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
4340                 if ((f = fopen(buf, "wt")) != NULL) {
4341                         int             i;
4342                         for (i = 0; i < si.lpp->var_next; ++i) {
4343                                 lpp_name_t     *name = si.lpp->vars[i];
4344                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
4345                         }
4346                         fclose(f);
4347                 }
4348         }
4349
4350         writeback_results(&si);
4351
4352 #endif                          /* SOLVE */
4353
4354         kill_all_unused_values_in_schedule(&si);
4355
4356         if(opt_keep_alive & (KEEPALIVE_SPILLS | KEEPALIVE_RELOADS))
4357                 be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
4358
4359         // move reloads upwards
4360         be_liveness_recompute(si.lv);
4361         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4362         move_reloads_upward(&si);
4363
4364         if(opt_memcopies) {
4365                 verify_phiclasses(&si);
4366         }
4367
4368         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4369
4370         dump_pressure_graph(&si, dump_suffix2);
4371
4372         be_analyze_regpressure(chordal_env, "-post");
4373
4374         if(opt_verify & VERIFY_DOMINANCE)
4375                 be_check_dominance(chordal_env->irg);
4376
4377         free_dom(chordal_env->irg);
4378         del_set(si.interferences);
4379         del_pset(si.inverse_ops);
4380         del_pset(si.all_possible_remats);
4381         del_set(si.memoperands);
4382         del_pset(si.spills);
4383         free_lpp(si.lpp);
4384         obstack_free(&obst, NULL);
4385         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
4386 }
4387
4388 #else                           /* WITH_ILP */
4389
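/** Dummy function so that this translation unit is not empty when WITH_ILP is not defined */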
4390 static void
4391 only_that_you_can_compile_without_WITH_ILP_defined(void)
4392 {
4393 }
4394
4395 #endif                          /* WITH_ILP */