1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35 #include "irvrfy.h"
36
37 #include <lpp/lpp.h>
38 #include <lpp/mps.h>
39 #include <lpp/lpp_net.h>
40 #include <lpp/lpp_cplex.h>
41 //#include <lc_pset.h>
42 //#include <libcore/lc_bitset.h>
43
44 #include "be_t.h"
45 #include "belive_t.h"
46 #include "besched_t.h"
47 #include "beirgmod.h"
48 #include "bearch.h"
49 #include "beabi.h"
50 #include "benode_t.h"
51 #include "beutil.h"
52 #include "bespillremat.h"
53 #include "bespill.h"
54 #include "bepressurestat.h"
55
56 #include "bechordal_t.h"
57
58 #ifdef WITH_LIBCORE
59 #include <libcore/lc_opts.h>
60 #include <libcore/lc_opts_enum.h>
61 #endif /* WITH_LIBCORE */
62
63 #define DUMP_PROBLEM       1
64 #define DUMP_MPS           2
65 #define DUMP_SOLUTION      4
66
67 #define KEEPALIVE_REMATS   1
68 #define KEEPALIVE_SPILLS   2
69 #define KEEPALIVE_RELOADS  4
70
71 #define VERIFY_MEMINTERF   1
72 #define VERIFY_DOMINANCE   2
73
74 #define REMATS_NONE        0
75 #define REMATS_BRIGGS      1
76 #define REMATS_NOINVERSE   2
77 #define REMATS_ALL         3
78
79 static int opt_dump_flags   = 0;
80 static int opt_log = 0;
81 static int opt_keep_alive   = 0;
82 static int opt_goodwin = 1;
83 static int opt_memcopies = 1;
84 static int opt_memoperands = 1;
85 static int opt_verify = VERIFY_MEMINTERF;
86 static int opt_remats = REMATS_ALL;
87 static int opt_repair_schedule = 0;
88 static int opt_no_enlarge_liveness = 0;
89 static int opt_remat_while_live = 1;
90 static int opt_timeout = 300;
91 static double opt_cost_reload = 8.0;
92 static double opt_cost_memoperand =  7.0;
93 static double opt_cost_spill =  50.0;
94 static double opt_cost_remat =  1.0;
95
96
97 #ifdef WITH_LIBCORE
98 static const lc_opt_enum_mask_items_t dump_items[] = {
99         { "problem",  DUMP_PROBLEM  },
100         { "mps",      DUMP_MPS      },
101         { "solution", DUMP_SOLUTION },
102         { NULL,       0 }
103 };
104
105 static lc_opt_enum_mask_var_t dump_var = {
106         &opt_dump_flags, dump_items
107 };
108
109 static const lc_opt_enum_mask_items_t keepalive_items[] = {
110         { "remats",  KEEPALIVE_REMATS  },
111         { "spills",  KEEPALIVE_SPILLS  },
112         { "reloads", KEEPALIVE_RELOADS },
113         { NULL,      0 }
114 };
115
116 static lc_opt_enum_mask_var_t keep_alive_var = {
117         &opt_keep_alive, keepalive_items
118 };
119
120 static const lc_opt_enum_mask_items_t remats_items[] = {
121         { "none",      REMATS_NONE      },
122         { "briggs",    REMATS_BRIGGS    },
123         { "noinverse", REMATS_NOINVERSE },
124         { "all",       REMATS_ALL       },
125         { NULL,        0 }
126 };
127
128 static lc_opt_enum_mask_var_t remats_var = {
129         &opt_remats, remats_items
130 };
131
132 static const lc_opt_table_entry_t options[] = {
133         LC_OPT_ENT_ENUM_MASK("keepalive", "keep alive remats, spills or reloads",                   &keep_alive_var),
134
135         LC_OPT_ENT_BOOL     ("goodwin",  "activate goodwin reduction",                              &opt_goodwin),
136         LC_OPT_ENT_BOOL     ("memcopies",  "activate memcopy handling",                             &opt_memcopies),
137         LC_OPT_ENT_BOOL     ("memoperands",  "activate memoperands",                                &opt_memoperands),
138         LC_OPT_ENT_ENUM_INT ("remats",  "type of remats to insert (none, briggs, noinverse or all)",&remats_var),
139         LC_OPT_ENT_BOOL     ("repair_schedule",  "repair the schedule by rematting once used nodes",&opt_repair_schedule),
140         LC_OPT_ENT_BOOL     ("no_enlarge_liveness",  "do not enlarge liveness of operands of remats",&opt_no_enlarge_liveness),
141         LC_OPT_ENT_BOOL     ("remat_while_live",  "remat only values that can be used by real ops", &opt_remat_while_live),
142
143         LC_OPT_ENT_ENUM_MASK("dump", "dump problem, mps or solution",                               &dump_var),
144         LC_OPT_ENT_BOOL     ("log",  "activate the lpp log",                                        &opt_log),
145         LC_OPT_ENT_INT      ("timeout",  "ILP solver timeout",                                      &opt_timeout),
146
147         LC_OPT_ENT_DBL      ("cost_reload",  "cost of a reload",                                    &opt_cost_reload),
148         LC_OPT_ENT_DBL      ("cost_memoperand",  "cost of a memory operand",                        &opt_cost_memoperand),
149         LC_OPT_ENT_DBL      ("cost_spill",  "cost of a spill instruction",                          &opt_cost_spill),
150         LC_OPT_ENT_DBL      ("cost_remat",  "cost of a rematerialization",                          &opt_cost_remat),
151         { NULL }
152 };
153
154 void be_spill_remat_register_options(lc_opt_entry_t *grp)
155 {
156         lc_opt_entry_t *my_grp = lc_opt_get_grp(grp, "remat");
157         lc_opt_add_table(my_grp, options);
158 }
159 #endif
160
161
162 //#define EXECFREQ_LOOPDEPH   /* compute execution frequency from loop depth only */
163 //#define SCHEDULE_PHIM   /* insert phim nodes into schedule */
164
165 #define  SOLVE
166 //#define  SOLVE_LOCAL
167 #define LPP_SERVER "i44pc23"
168 #define LPP_SOLVER "cplex"
169
170
171 #define MAX_PATHS      INT_MAX
172 #define ILP_UNDEF               -1
173
174 typedef struct _spill_ilp_t {
175         const arch_register_class_t  *cls;
176         int                           n_regs;
177         const be_chordal_env_t       *chordal_env;
178         be_lv_t                      *lv;
179         lpp_t                        *lpp;
180         struct obstack               *obst;
181         set                          *remat_info;
182         pset                         *all_possible_remats;
183         pset                         *inverse_ops;
184         ir_node                      *keep;
185         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
186         pset                         *spills;
187         set                          *interferences;
188         ir_node                      *m_unknown;
189         set                          *memoperands;
190         DEBUG_ONLY(firm_dbg_module_t * dbg);
191 } spill_ilp_t;
192
193 typedef int ilp_var_t;
194 typedef int ilp_cst_t;
195
196 typedef struct _spill_bb_t {
197         set      *ilp;
198         set      *reloads;
199 } spill_bb_t;
200
201 typedef struct _remat_t {
202         const ir_node        *op;      /**< for copy_irn */
203         const ir_node        *value;   /**< the value which is being recomputed by this remat */
204         const ir_node        *proj;    /**< not NULL if the above op produces a tuple */
205         int                   cost;    /**< cost of this remat */
206         int                   inverse; /**< nonzero if this is an inverse remat */
207 } remat_t;
208
209 /**
210  * Data to be attached to each IR node. For remats this contains the ilp_var
211  * for this remat and for normal ops this contains the ilp_vars for
212  * reloading each operand
213  */
214 typedef struct _op_t {
215         int             is_remat;
216         union {
217                 struct {
218                         ilp_var_t       ilp;
219                         const remat_t  *remat; /**< the remat this op belongs to */
220                         int             pre; /**< 1, if this is a pressure-increasing remat */
221                 } remat;
222                 struct {
223                         ilp_var_t       ilp;
224                         ir_node        *op; /**< the operation this live range belongs to */
225                         union {
226                                 ilp_var_t      *reloads;
227                                 ilp_var_t      *copies;
228                         } args;
229                 } live_range;
230         } attr;
231 } op_t;
232
233 typedef struct _defs_t {
234         const ir_node   *value;
235         ir_node         *spills;  /**< points to the first spill for this value (linked by link field) */
236         ir_node         *remats;  /**< points to the first definition for this value (linked by link field) */
237 } defs_t;
238
239 typedef struct _remat_info_t {
240         const ir_node       *irn; /**< the irn to which these remats belong */
241         pset                *remats; /**< possible remats for this value */
242         pset                *remats_by_operand; /**< remats with this value as operand */
243 } remat_info_t;
244
245 typedef struct _keyval_t {
246         const void          *key;
247         const void          *val;
248 } keyval_t;
249
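/**
 * ILP variables describing one value within one basic block.  Judging from
 * their use below: reg_in/reg_out model whether the value occupies a register
 * at block entry/exit, mem_in/mem_out whether its memory copy is valid there,
 * and spill whether a spill instruction is placed inside the block.  Unused
 * variables are left at ILP_UNDEF.
 */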
250 typedef struct _spill_t {
251         ir_node            *irn;
252         ilp_var_t           reg_in;
253         ilp_var_t           mem_in;
254         ilp_var_t           reg_out;
255         ilp_var_t           mem_out;
256         ilp_var_t           spill;
257 } spill_t;
258
259 typedef struct _memoperand_t {
260         ir_node             *irn; /**< the irn */
261         unsigned int         pos; /**< the position of the argument */
262         ilp_var_t            ilp; /**< the ilp var for this memory operand */
263 } memoperand_t;
264
265 static INLINE int
266 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
267 {
268         return chordal_has_class(si->chordal_env, irn);
269 }
270
271 #if 0
272 static int
273 cmp_remat(const void *a, const void *b)
274 {
275         const keyval_t *p = a;
276         const keyval_t *q = b;
277         const remat_t  *r = p->val;
278         const remat_t  *s = q->val;
279
280         assert(r && s);
281
282         return !(r == s || r->op == s->op);
283 }
284 #endif
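/* Note: as usual for libfirm set/pset compare callbacks, the cmp_* functions
   below return 0 for equal elements and nonzero otherwise. */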
285 static int
286 cmp_remat(const void *a, const void *b)
287 {
288         const remat_t  *r = a;
289         const remat_t  *s = b;
290
291         return !(r == s || r->op == s->op);
292 }
293
294 static int
295 cmp_spill(const void *a, const void *b, size_t size)
296 {
297         const spill_t *p = a;
298         const spill_t *q = b;
299
300 //      return !(p->irn == q->irn && p->bb == q->bb);
301         return !(p->irn == q->irn);
302 }
303
304 static int
305 cmp_memoperands(const void *a, const void *b, size_t size)
306 {
307         const memoperand_t *p = a;
308         const memoperand_t *q = b;
309
310         return !(p->irn == q->irn && p->pos == q->pos);
311 }
312
313 static keyval_t *
314 set_find_keyval(set * set, const void * key)
315 {
316         keyval_t     query;
317
318         query.key = key;
319         return set_find(set, &query, sizeof(query), HASH_PTR(key));
320 }
321
322 static keyval_t *
323 set_insert_keyval(set * set, void * key, void * val)
324 {
325         keyval_t     query;
326
327         query.key = key;
328         query.val = val;
329         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
330 }
331
332 static defs_t *
333 set_find_def(set * set, const ir_node * value)
334 {
335         defs_t     query;
336
337         query.value = value;
338         return set_find(set, &query, sizeof(query), HASH_PTR(value));
339 }
340
341 static defs_t *
342 set_insert_def(set * set, const ir_node * value)
343 {
344         defs_t     query;
345
346         query.value = value;
347         query.spills = NULL;
348         query.remats = NULL;
349         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
350 }
351
352 static memoperand_t *
353 set_insert_memoperand(set * set, ir_node * irn, unsigned int pos, ilp_var_t ilp)
354 {
355         memoperand_t     query;
356
357         query.irn = irn;
358         query.pos = pos;
359         query.ilp = ilp;
360         return set_insert(set, &query, sizeof(query), HASH_PTR(irn)+pos);
361 }
362
363 static memoperand_t *
364 set_find_memoperand(set * set, const ir_node * irn, unsigned int pos)
365 {
366         memoperand_t     query;
367
368         query.irn = (ir_node*)irn;
369         query.pos = pos;
370         return set_find(set, &query, sizeof(query), HASH_PTR(irn)+pos);
371 }
372
373
374 static spill_t *
375 set_find_spill(set * set, const ir_node * value)
376 {
377         spill_t     query;
378
379         query.irn = (ir_node*)value;
380         return set_find(set, &query, sizeof(query), HASH_PTR(value));
381 }
382
383 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
384 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
385 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
386 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
387 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
388
389 static int
390 cmp_remat_info(const void *a, const void *b, size_t size)
391 {
392         const remat_info_t *p = a;
393         const remat_info_t *q = b;
394
395         return !(p->irn == q->irn);
396 }
397
398 static int
399 cmp_defs(const void *a, const void *b, size_t size)
400 {
401         const defs_t *p = a;
402         const defs_t *q = b;
403
404         return !(p->value == q->value);
405 }
406
407 static int
408 cmp_keyval(const void *a, const void *b, size_t size)
409 {
410         const keyval_t *p = a;
411         const keyval_t *q = b;
412
413         return !(p->key == q->key);
414 }
415
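/**
 * Estimated execution frequency of @p irn's block (either from the execfreq
 * analysis or, with EXECFREQ_LOOPDEPH, derived from the loop depth).  The
 * FUDGE summand apparently keeps all ILP cost coefficients strictly positive.
 */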
416 static double
417 execution_frequency(const spill_ilp_t *si, const ir_node * irn)
418 {
419 #define FUDGE 0.001
420 #ifndef EXECFREQ_LOOPDEPH
421         return get_block_execfreq(si->chordal_env->exec_freq, get_block(irn)) + FUDGE;
422 #else
423         if(is_Block(irn))
424                 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
425         else
426                 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
427 #endif
428 }
429
430 static double
431 get_cost(const spill_ilp_t * si, const ir_node * irn)
432 {
433         if(be_is_Spill(irn)) {
434                 return opt_cost_spill;
435         } else if(be_is_Reload(irn)){
436                 return opt_cost_reload;
437         } else {
438                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
439         }
440 }
441
442 /**
443  * Checks whether the node and its operands have suitable register classes
444  */
445 static INLINE int
446 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
447 {
448         int               n;
449         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
450         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
451
452 #if 0
453         if(!remat)
454                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
455 #endif
456
457         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
458                 ir_node        *op = get_irn_n(irn, n);
459                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
460
461 //              if(!remat)
462 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
463         }
464
465         return remat;
466 }
467
468 /**
469  * Try to create a remat from @p op with destination value @p dest_value
470  */
471 static INLINE remat_t *
472 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
473 {
474         remat_t  *remat = NULL;
475
476 //      if(!mode_is_datab(get_irn_mode(dest_value)))
477 //              return NULL;
478
479         if(dest_value == op) {
480                 const ir_node *proj = NULL;
481
482                 if(is_Proj(dest_value)) {
483                         op = get_Proj_pred(op);
484                         proj = dest_value;
485                 }
486
487                 if(!is_rematerializable(si, op))
488                         return NULL;
489
490                 remat = obstack_alloc(si->obst, sizeof(*remat));
491                 remat->op = op;
492                 remat->cost = get_cost(si, op);
493                 remat->value = dest_value;
494                 remat->proj = proj;
495                 remat->inverse = 0;
496         } else {
497                 arch_inverse_t     inverse;
498                 int                n;
499
500                 /* get the index of the operand we want to retrieve by the inverse op */
501                 for (n = get_irn_arity(op)-1; n>=0; --n) {
502                         ir_node        *arg = get_irn_n(op, n);
503
504                         if(arg == dest_value) break;
505                 }
506                 if(n<0) return NULL;
507
508                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
509
510                 /* else ask the backend to give an inverse op */
511                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
512                         int   i;
513
514                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
515
516                         assert(inverse.n > 0 && "inverse op should have at least one node");
517
518                         for(i=inverse.n-1; i>=0; --i) {
519                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
520                         }
521
522                         if(inverse.n <= 2) {
523                                 remat = obstack_alloc(si->obst, sizeof(*remat));
524                                 remat->op = inverse.nodes[0];
525                                 remat->cost = inverse.costs;
526                                 remat->value = dest_value;
527                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
528                                 remat->inverse = 1;
529
530                                 assert(remat->proj == NULL || is_Proj(remat->proj));
531                         } else {
532                                 assert(0 && "I can not handle remats with more than 2 nodes");
533                         }
534                 }
535         }
536
537         if(remat) {
538                 if(remat->proj) {
539                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
540                 } else {
541                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
542                 }
543         }
544         return remat;
545 }
546
547
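/**
 * Registers @p remat in si->remat_info: once under the recomputed value
 * (remats) and once under each argument of the remat op (remats_by_operand).
 */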
548 static INLINE void
549 add_remat(const spill_ilp_t * si, const remat_t * remat)
550 {
551         remat_info_t    *remat_info,
552                      query;
553         int              n;
554
555         assert(remat->op);
556         assert(remat->value);
557
558         query.irn = remat->value;
559         query.remats = NULL;
560         query.remats_by_operand = NULL;
561         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
562
563         if(remat_info->remats == NULL) {
564                 remat_info->remats = new_pset(cmp_remat, 4096);
565         }
566         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
567
568         /* insert the remat into the remats_by_operand set of each argument of the remat op */
569         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
570                 ir_node        *arg = get_irn_n(remat->op, n);
571
572                 query.irn = arg;
573                 query.remats = NULL;
574                 query.remats_by_operand = NULL;
575                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
576
577                 if(remat_info->remats_by_operand == NULL) {
578                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
579                 }
580                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
581         }
582 }
583
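/**
 * Counts the users of @p irn, ignoring users that were created as part of an
 * inverse remat.
 */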
584 static int
585 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
586 {
587         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
588         int                i = 0;
589
590         while(edge) {
591                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
592                         ++i;
593                 }
594                 edge = get_irn_out_edge_next(irn, edge);
595         }
596
597         return i;
598 }
599
600 static int
601 get_irn_n_nonignore_args(const spill_ilp_t * si, const ir_node * irn)
602 {
603         int n;
604         int ret = 0;
605
606         if(is_Proj(irn))
607                 irn = get_Proj_pred(irn);
608
609         for(n=get_irn_arity(irn)-1; n>=0; --n) {
610                 const ir_node  *arg = get_irn_n(irn, n);
611
612                 if(has_reg_class(si, arg)) ++ret;
613         }
614
615         return ret;
616 }
617
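/**
 * Collects all remats obtainable from @p op: a direct remat of the value
 * itself and, with REMATS_ALL, an inverse remat for every suitable argument.
 */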
618 static INLINE void
619 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
620 {
621         int      n;
622         remat_t *remat;
623
624         if( has_reg_class(si, op)
625         && (opt_repair_schedule || get_irn_n_nonremat_edges(si, op) > 1)
626         && (opt_remats !=  REMATS_BRIGGS || get_irn_n_nonignore_args(si, op) == 0)
627         ) {
628                 remat = get_remat_from_op(si, op, op);
629                 if(remat) {
630                         add_remat(si, remat);
631                 }
632         }
633
634         if(opt_remats == REMATS_ALL) {
635                 /* additionally try to build an inverse remat, i.e. call
636                    get_remat_from_op(arg, op) for each suitable argument of op */
637                 for (n = get_irn_arity(op)-1; n>=0; --n) {
638                         ir_node        *arg = get_irn_n(op, n);
639
640                         if(has_reg_class(si, arg)) {
641                                 /* try to get an inverse remat */
642                                 remat = get_remat_from_op(si, arg, op);
643                                 if(remat) {
644                                         add_remat(si, remat);
645                                 }
646                         }
647                 }
648         }
649 }
650
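/**
 * Returns nonzero iff the definition of @p val is scheduled before position
 * @p pos (which may be an operation or a block, meaning the block's end).
 */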
651 static INLINE int
652 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
653 {
654         ir_node *block;
655         ir_node *def_block = get_nodes_block(val);
656         int      ret;
657
658         if(val == pos)
659                 return 0;
660
661         /* if pos is at end of a basic block */
662         if(is_Block(pos)) {
663                 ret = (pos == def_block || block_dominates(def_block, pos));
664 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
665                 return ret;
666         }
667
668         /* else if this is a normal operation */
669         block = get_nodes_block(pos);
670         if(block == def_block) {
671                 if(!sched_is_scheduled(val)) return 1;
672
673                 ret = sched_comes_after(val, pos);
674 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
675                 return ret;
676         }
677
678         ret = block_dominates(def_block, block);
679 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
680         return ret;
681 }
682
683 static INLINE ir_node *
684 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
685 {
686     return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
687 }
688
689 /**
690  * Returns first non-Phi node of block @p bb
691  */
692 static INLINE ir_node *
693 sched_block_first_nonphi(const ir_node * bb)
694 {
695         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
696 }
697
698 static int
699 sched_skip_proj_predicator(const ir_node * irn, void * data)
700 {
701         return (is_Proj(irn));
702 }
703
704 static INLINE ir_node *
705 sched_next_nonproj(const ir_node * irn, int forward)
706 {
707         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
708 }
709
710 /**
711  * Returns next operation node (non-Proj) after @p irn
712  * or the basic block of this node
713  */
714 static INLINE ir_node *
715 sched_next_op(const ir_node * irn)
716 {
717         ir_node *next = sched_next(irn);
718
719         if(is_Block(next))
720                 return next;
721
722         return sched_next_nonproj(next, 1);
723 }
724
725 /**
726  * Returns previous operation node (non-Proj) before @p irn
727  * or the basic block of this node
728  */
729 static INLINE ir_node *
730 sched_prev_op(const ir_node * irn)
731 {
732         ir_node *prev = sched_prev(irn);
733
734         if(is_Block(prev))
735                 return prev;
736
737         return sched_next_nonproj(prev, 0);
738 }
739
740 static void
741 sched_put_after(ir_node * insert, ir_node * irn)
742 {
743         if(is_Block(insert)) {
744                 insert = sched_block_first_nonphi(insert);
745         } else {
746                 insert = sched_next_op(insert);
747         }
748         sched_add_before(insert, irn);
749 }
750
751 static void
752 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
753 {
754   if(is_Block(insert)) {
755           insert = sched_block_last_noncf(si, insert);
756   } else {
757           insert = sched_next_nonproj(insert, 0);
758           insert = sched_prev(insert);
759   }
760   sched_add_after(insert, irn);
761 }
762
763 /**
764  * Tells you whether a @p remat can be placed before the irn @p pos
765  */
766 static INLINE int
767 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
768 {
769         const ir_node   *op = remat->op;
770         const ir_node   *prev;
771         int        n,
772                            res = 1;
773
774         if(is_Block(pos)) {
775                 prev = sched_block_last_noncf(si, pos);
776                 prev = sched_next_nonproj(prev, 0);
777         } else {
778                 prev = sched_prev_op(pos);
779         }
780         /* do not remat if the rematted value is defined immediately before this op */
781         if(prev == remat->op) {
782                 return 0;
783         }
784
785 #if 0
786         /* this should be just fine, the following OP will be using this value, right? */
787
788         /* only remat AFTER the real definition of a value (?) */
789         if(!value_is_defined_before(si, pos, remat->value)) {
790 //              ir_fprintf(stderr, "error(not defined)");
791                 return 0;
792         }
793 #endif
794
795         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
796                 const ir_node   *arg = get_irn_n(op, n);
797
798                 if(opt_no_enlarge_liveness) {
799                         if(has_reg_class(si, arg) && live) {
800                                 res &= pset_find_ptr((pset*)live, arg)?1:0;
801                         } else {
802                                 res &= value_is_defined_before(si, pos, arg);
803                         }
804                 } else {
805                         res &= value_is_defined_before(si, pos, arg);
806                 }
807         }
808
809         return res;
810 }
811
812 /**
813  * Tells you whether a @p remat can be placed after the irn @p pos
814  */
815 static INLINE int
816 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
817 {
818         if(is_Block(pos)) {
819                 pos = sched_block_first_nonphi(pos);
820         } else {
821                 pos = sched_next_op(pos);
822         }
823
824         /* only remat AFTER the real definition of a value (?) */
825         if(!value_is_defined_before(si, pos, remat->value)) {
826                 return 0;
827         }
828
829         return can_remat_before(si, remat, pos, live);
830 }
831
832 /**
833  * Collect potentially rematerializable OPs
834  */
835 static void
836 walker_remat_collector(ir_node * irn, void * data)
837 {
838         spill_ilp_t    *si = data;
839
840         if(!is_Block(irn) && !is_Phi(irn)) {
841                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
842                 get_remats_from_op(si, irn);
843         }
844 }
845
846 /**
847  * Inserts a copy of @p irn before @p pos
848  */
849 static ir_node *
850 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
851 {
852         ir_node     *bb;
853         ir_node     *copy;
854
855         bb = is_Block(pos)?pos:get_nodes_block(pos);
856         copy = exact_copy(irn);
857
858         _set_phi_class(copy, NULL);
859         set_nodes_block(copy, bb);
860         sched_put_before(si, pos, copy);
861
862         return copy;
863 }
864
865 /**
866  * Inserts a copy of @p irn after @p pos
867  */
868 static ir_node *
869 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
870 {
871         ir_node     *bb;
872         ir_node     *copy;
873
874         bb = is_Block(pos)?pos:get_nodes_block(pos);
875         copy = exact_copy(irn);
876
877         _set_phi_class(copy, NULL);
878         set_nodes_block(copy, bb);
879         sched_put_after(pos, copy);
880
881         return copy;
882 }
883
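/**
 * Inserts a copy of the remat op (and of its Proj, if any) after @p pos and
 * attaches a binary ILP variable whose cost is the remat cost weighted by the
 * execution frequency.  Returns NULL if the remat cannot be placed there.
 */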
884 static ir_node *
885 insert_remat_after(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
886 {
887         char     buf[256];
888
889         if(can_remat_after(si, remat, pos, live)) {
890                 ir_node         *copy,
891                                                 *proj_copy;
892                 op_t            *op;
893
894                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat2 %+F\n", remat->op));
895
896                 copy = insert_copy_after(si, remat->op, pos);
897
898                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
899                 op = obstack_alloc(si->obst, sizeof(*op));
900                 op->is_remat = 1;
901                 op->attr.remat.remat = remat;
902                 op->attr.remat.pre = 0;
903                 op->attr.remat.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos), 0.0);
904
905                 set_irn_link(copy, op);
906                 pset_insert_ptr(si->all_possible_remats, copy);
907                 if(remat->proj) {
908                         proj_copy = insert_copy_after(si, remat->proj, copy);
909                         set_irn_n(proj_copy, 0, copy);
910                         set_irn_link(proj_copy, op);
911                         pset_insert_ptr(si->all_possible_remats, proj_copy);
912                 } else {
913                         proj_copy = NULL;
914                 }
915
916                 return copy;
917         }
918
919         return NULL;
920 }
921
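/**
 * Like insert_remat_after(), but places the remat immediately before @p pos.
 */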
922 static ir_node *
923 insert_remat_before(spill_ilp_t * si, const remat_t * remat, ir_node * pos, const pset * live)
924 {
925         char     buf[256];
926
927         if(can_remat_before(si, remat, pos, live)) {
928                 ir_node         *copy,
929                                                 *proj_copy;
930                 op_t            *op;
931
932                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
933
934                 copy = insert_copy_before(si, remat->op, pos);
935
936                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
937                 op = obstack_alloc(si->obst, sizeof(*op));
938                 op->is_remat = 1;
939                 op->attr.remat.remat = remat;
940                 op->attr.remat.pre = 1;
941                 op->attr.remat.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos), 0.0);
942
943                 set_irn_link(copy, op);
944                 pset_insert_ptr(si->all_possible_remats, copy);
945                 if(remat->proj) {
946                         proj_copy = insert_copy_after(si, remat->proj, copy);
947                         set_irn_n(proj_copy, 0, copy);
948                         set_irn_link(proj_copy, op);
949                         pset_insert_ptr(si->all_possible_remats, proj_copy);
950                 } else {
951                         proj_copy = NULL;
952                 }
953
954                 return copy;
955         }
956
957         return NULL;
958 }
959
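/**
 * Returns the number of control flow successors of @p block, saturated at 2
 * (callers only distinguish "none", "one" and "more than one").
 */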
960 static int
961 get_block_n_succs(const ir_node *block) {
962         const ir_edge_t *edge;
963
964         assert(edges_activated(current_ir_graph));
965
966         edge = get_block_succ_first(block);
967         if (! edge)
968                 return 0;
969
970         edge = get_block_succ_next(block, edge);
971         return edge ? 2 : 1;
972 }
973
974 static int
975 is_start_block(const ir_node * bb)
976 {
977         return get_irg_start_block(get_irn_irg(bb)) == bb;
978 }
979
980 static int
981 is_before_frame(const ir_node * bb, const ir_node * irn)
982 {
983         const ir_node  *frame  = get_irg_frame(get_irn_irg(bb));
984
985         if(is_start_block(bb) && sched_get_time_step(frame) >= sched_get_time_step(irn))
986                 return 1;
987         else
988                 return 0;
989 }
990
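/**
 * With the Goodwin reduction enabled, the end of @p bb counts as a merge edge
 * iff the block has exactly one control flow successor; without the reduction
 * every non-start block qualifies.
 */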
991 static int
992 is_merge_edge(const ir_node * bb)
993 {
994         if(is_start_block(bb))
995                 return 0;
996
997         if(opt_goodwin)
998                 return get_block_n_succs(bb) == 1;
999         else
1000                 return 1;
1001 }
1002
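/**
 * Analogously, the start of @p bb counts as a diverge edge iff the block has
 * exactly one control flow predecessor (or always, without the Goodwin
 * reduction).
 */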
1003 static int
1004 is_diverge_edge(const ir_node * bb)
1005 {
1006         if(is_start_block(bb))
1007                 return 0;
1008
1009         if(opt_goodwin)
1010                 return get_Block_n_cfgpreds(bb) == 1;
1011         else
1012                 return 1;
1013 }
1014
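/**
 * Inserts a Copy of si->cls at the end of the corresponding predecessor block
 * for every Phi argument that does not belong to this register class, and
 * rewires the Phi to use the copy.
 */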
1015 static void
1016 walker_regclass_copy_insertor(ir_node * irn, void * data)
1017 {
1018         spill_ilp_t    *si = data;
1019
1020         if(is_Phi(irn) && has_reg_class(si, irn)) {
1021                 int n;
1022
1023                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
1024                         ir_node  *phi_arg = get_irn_n(irn, n);
1025                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
1026
1027                         if(!has_reg_class(si, phi_arg)) {
1028                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
1029                                 ir_node   *pos = sched_block_last_noncf(si, bb);
1030                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
1031
1032                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
1033                                 sched_add_after(pos, copy);
1034                                 set_irn_n(irn, n, copy);
1035
1036                                 op->is_remat = 0;
1037                                 op->attr.live_range.args.reloads = NULL;
1038                                 op->attr.live_range.ilp = ILP_UNDEF;
1039                                 set_irn_link(copy, op);
1040                         }
1041                 }
1042         }
1043 }
1044
1045
1046 /**
1047  * Insert (so far unused) remats into the irg to
1048  * recompute the potential liveness of all values
1049  */
1050 static void
1051 walker_remat_insertor(ir_node * bb, void * data)
1052 {
1053         spill_ilp_t    *si = data;
1054         spill_bb_t     *spill_bb;
1055         ir_node        *irn;
1056         int             n, i;
1057         pset           *live = pset_new_ptr_default();
1058
1059         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
1060
1061         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1062                 ir_node        *value = be_lv_get_irn(si->lv, bb, i);
1063
1064                 /* collect values live at the end of the block */
1065                 if (has_reg_class(si, value)) {
1066                         pset_insert_ptr(live, value);
1067                 }
1068         }
1069
1070         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
1071         set_irn_link(bb, spill_bb);
1072
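        /* walk the schedule backwards, maintaining the set of values live below
           the current position, and offer pre- and post-remats at every op */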
1073         irn = sched_last(bb);
1074         while(!sched_is_end(irn)) {
1075                 ir_node   *next;
1076                 op_t      *op;
1077                 pset      *args;
1078                 ir_node   *arg;
1079                 pset      *remat_args;
1080
1081                 next = sched_prev(irn);
1082
1083                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
1084
1085                 if(is_Phi(irn) || is_Proj(irn)) {
1086                         op_t      *op;
1087
1088                         if(has_reg_class(si, irn)) {
1089                                 pset_remove_ptr(live, irn);
1090                         }
1091
1092                         op = obstack_alloc(si->obst, sizeof(*op));
1093                         op->is_remat = 0;
1094                         op->attr.live_range.args.reloads = NULL;
1095                         op->attr.live_range.ilp = ILP_UNDEF;
1096                         set_irn_link(irn, op);
1097
1098                         irn = next;
1099                         continue;
1100                 }
1101
1102                 op = obstack_alloc(si->obst, sizeof(*op));
1103                 op->is_remat = 0;
1104                 op->attr.live_range.ilp = ILP_UNDEF;
1105                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
1106                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
1107                 set_irn_link(irn, op);
1108
1109                 args = pset_new_ptr_default();
1110
1111                 /* collect arguments of op */
1112                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
1113                         ir_node        *arg = get_irn_n(irn, n);
1114
1115                         pset_insert_ptr(args, arg);
1116                 }
1117
1118                 /* set args of op already live in epilog */
1119                 pset_foreach(args, arg) {
1120                         if(has_reg_class(si, arg)) {
1121                                 pset_insert_ptr(live, arg);
1122                         }
1123                 }
1124                 /* delete defined value from live set */
1125                 if(has_reg_class(si, irn)) {
1126                         pset_remove_ptr(live, irn);
1127                 }
1128
1129
1130                 remat_args = pset_new_ptr_default();
1131
1132                 /* insert all possible remats before irn */
1133                 pset_foreach(args, arg) {
1134                         remat_info_t   *remat_info,
1135                                                     query;
1136                         remat_t        *remat;
1137
1138                         /* continue if the operand has the wrong reg class
1139                          */
1140                         if(!has_reg_class(si, arg))
1141                                 continue;
1142
1143                         query.irn = arg;
1144                         query.remats = NULL;
1145                         query.remats_by_operand = NULL;
1146                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1147
1148                         if(!remat_info) {
1149                                 continue;
1150                         }
1151
1152                         if(remat_info->remats) {
1153                                 pset_foreach(remat_info->remats, remat) {
1154                                         ir_node  *remat_irn = NULL;
1155
1156                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
1157                                         if(opt_remat_while_live) {
1158                                                 if(pset_find_ptr(live, remat->value)) {
1159                                                         remat_irn = insert_remat_before(si, remat, irn, live);
1160                                                 }
1161                                         } else {
1162                                                 remat_irn = insert_remat_before(si, remat, irn, live);
1163                                         }
1164                                         if(remat_irn) {
1165                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1166                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1167
1168                                                         if(!has_reg_class(si, remat_arg)) continue;
1169
1170                                                         pset_insert_ptr(remat_args, remat_arg);
1171                                                 }
1172                                         }
1173                                 }
1174                         }
1175                 }
1176
1177                 /* now we add remat args to op's args because they could also die at this op */
1178                 pset_foreach(args,arg) {
1179                         if(pset_find_ptr(remat_args, arg)) {
1180                                 pset_remove_ptr(remat_args, arg);
1181                         }
1182                 }
1183                 pset_foreach(remat_args,arg) {
1184                         pset_insert_ptr(args, arg);
1185                 }
1186
1187                 /* insert all possible remats after irn */
1188                 pset_foreach(args, arg) {
1189                         remat_info_t   *remat_info,
1190                                                     query;
1191                         remat_t        *remat;
1192
1193                         /* continue if the operand has the wrong reg class */
1194                         if(!has_reg_class(si, arg))
1195                                 continue;
1196
1197                         query.irn = arg;
1198                         query.remats = NULL;
1199                         query.remats_by_operand = NULL;
1200                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1201
1202                         if(!remat_info) {
1203                                 continue;
1204                         }
1205
1206                         /* do not place post remats after jumps */
1207                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
1208
1209                         if(remat_info->remats_by_operand) {
1210                                 pset_foreach(remat_info->remats_by_operand, remat) {
1211                                         /* do not insert remats producing the same value as one of the operands */
1212                                         if(!pset_find_ptr(args, remat->value)) {
1213                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1214                                                 if(opt_remat_while_live) {
1215                                                         if(pset_find_ptr(live, remat->value)) {
1216                                                                 insert_remat_after(si, remat, irn, live);
1217                                                         }
1218                                                 } else {
1219                                                         insert_remat_after(si, remat, irn, live);
1220                                                 }
1221                                         }
1222                                 }
1223                         }
1224                 }
1225
1226                 del_pset(remat_args);
1227                 del_pset(args);
1228                 irn = next;
1229         }
1230
1231         /* add remats at end if successor has multiple predecessors */
1232         if(is_merge_edge(bb)) {
1233                 pset     *live_out = pset_new_ptr_default();
1234                 ir_node  *value;
1235
1236                 be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1237                         value = be_lv_get_irn(si->lv, bb, i);
1238
1239                         if (has_reg_class(si, value)) {
1240                                 pset_insert_ptr(live_out, value);
1241                         }
1242                 }
1243
1244                 /* add remats at end of block */
1245                 pset_foreach(live_out, value) {
1246                         remat_info_t   *remat_info,
1247                                                    query;
1248                         remat_t        *remat;
1249
1250                         query.irn = value;
1251                         query.remats = NULL;
1252                         query.remats_by_operand = NULL;
1253                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1254
1255                         if(remat_info && remat_info->remats) {
1256                                 pset_foreach(remat_info->remats, remat) {
1257                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1258
1259                                         insert_remat_before(si, remat, bb, live_out);
1260                                 }
1261                         }
1262                 }
1263                 del_pset(live_out);
1264         }
1265
1266         if(is_diverge_edge(bb)) {
1267                 pset     *live_in = pset_new_ptr_default();
1268                 ir_node  *value;
1269
1270                 be_lv_foreach(si->lv, bb, be_lv_state_in, i) {
1271                         value = be_lv_get_irn(si->lv, bb, i);
1272
1273                         if(has_reg_class(si, value)) {
1274                                 pset_insert_ptr(live_in, value);
1275                         }
1276                 }
1277                 sched_foreach(bb, value) {
1278                         if(!is_Phi(value)) break;
1279
1280                         if(has_reg_class(si, value)) {
1281                                 pset_insert_ptr(live_in, value);
1282                         }
1283                 }
1284
1285                 /* add remat2s at beginning of block */
1286                 pset_foreach(live_in, value) {
1287                         remat_info_t   *remat_info,
1288                                                    query;
1289                         remat_t        *remat;
1290
1291                         query.irn = value;
1292                         query.remats = NULL;
1293                         query.remats_by_operand = NULL;
1294                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1295
1296                         if(remat_info && remat_info->remats_by_operand) {
1297                                 pset_foreach(remat_info->remats_by_operand, remat) {
1298                                         DBG((si->dbg, LEVEL_4, "\t  considering remat2 %+F at beginning of block %+F\n", remat->op, bb));
1299
1300                                         /* put the remat here if all its args are available */
1301                                         insert_remat_after(si, remat, bb, live_in);
1302
1303                                 }
1304                         }
1305                 }
1306                 del_pset(live_in);
1307         }
1308 }
1309
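/**
 * Returns nonzero iff @p irn is used as a Phi argument along the single
 * control flow successor of @p bb (presumably so that a memcopy into that
 * Phi can take the place of a reload).
 */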
1310 int
1311 can_be_copied(const ir_node * bb, const ir_node * irn)
1312 {
1313         const ir_edge_t *edge    = get_block_succ_first(bb);
1314         const ir_node   *next_bb = edge->src;
1315         int              pos     = edge->pos;
1316         const ir_node   *phi;
1317
1318         assert(is_merge_edge(bb));
1319
1320         sched_foreach(next_bb, phi) {
1321                 const ir_node  *phi_arg;
1322
1323                 if(!is_Phi(phi)) break;
1324
1325                 phi_arg = get_irn_n(phi, pos);
1326
1327                 if(phi_arg == irn) {
1328                         return 1;
1329                 }
1330         }
1331         return 0;
1332 }
1333
1334 /**
1335  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1336  */
1337 static void
1338 luke_endwalker(ir_node * bb, void * data)
1339 {
1340         spill_ilp_t    *si = (spill_ilp_t*)data;
1341         pset           *live;
1342         pset           *use_end;
1343         char            buf[256];
1344         ilp_cst_t       cst;
1345         ir_node        *irn;
1346         spill_bb_t     *spill_bb = get_irn_link(bb);
1347         int             i;
1348
1349
1350         live = pset_new_ptr_default();
1351         use_end = pset_new_ptr_default();
1352
1353         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1354                 irn = be_lv_get_irn(si->lv, bb, i);
1355                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1356                         op_t      *op;
1357
1358                         pset_insert_ptr(live, irn);
1359                         op = get_irn_link(irn);
1360                         assert(!op->is_remat);
1361                 }
1362         }
1363
1364         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1365         /* their reg_out must always be set */
1366         sched_foreach_reverse(bb, irn) {
1367                 int   n;
1368
1369                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1370
1371                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1372                         ir_node        *irn_arg = get_irn_n(irn, n);
1373
1374                         if(has_reg_class(si, irn_arg)) {
1375                                 pset_insert_ptr(use_end, irn_arg);
1376                         }
1377                 }
1378         }
1379
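        /* register pressure constraint for the block end: at most n_regs values
           may have reg_out set; values in use_end are always live and therefore
           already subtracted from the right hand side */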
1380         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1381         //cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
1382         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1383
1384         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1385
1386         /* if this is a merge edge we can reload at the end of this block */
1387         if(is_merge_edge(bb)) {
1388                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1389         } else if(pset_count(use_end)){
1390                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1391         } else {
1392                 spill_bb->reloads = NULL;
1393         }
1394
1395         pset_foreach(live,irn) {
1396                 spill_t     query,
1397                                         *spill;
1398                 double      spill_cost;
1399                 int         default_spilled;
1400
1401
1402                 /* handle values used by control flow nodes later separately */
1403                 if(pset_find_ptr(use_end, irn)) continue;
1404
1405                 query.irn = irn;
1406                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1407
1408                 spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1409
1410                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1411                 spill->reg_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1412                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1413
1414                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1415                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1416
1417                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1418                 /* by default spill value right after definition */
1419                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1420                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1421
1422                 if(is_merge_edge(bb)) {
1423                         ilp_var_t   reload;
1424                         ilp_cst_t   rel_cst;
1425
1426                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1427                         reload = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), can_be_copied(bb, irn));
1428                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1429
1430                         /* reload <= mem_out */
1431                         rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1432                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1433                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1434                 }
1435
1436                 spill->reg_in = ILP_UNDEF;
1437                 spill->mem_in = ILP_UNDEF;
1438         }
1439
1440         pset_foreach(use_end,irn) {
1441                 spill_t     query,
1442                                         *spill;
1443                 double      spill_cost;
1444                 ilp_cst_t   end_use_req,
1445                                         rel_cst;
1446                 ilp_var_t   reload;
1447                 int         default_spilled;
1448
1449                 query.irn = irn;
1450                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1451
1452                 spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1453
1454                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1455                 spill->reg_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1456                 /* if irn is used at the end of the block, then it is live anyway */
1457                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1458
1459                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1460                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1461
1462                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1463                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1464                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1465
1466                 /* reload for use by a control flow op */
1467                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1468                 reload = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), 1.0);
1469                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1470
1471                 /* reload <= mem_out */
1472                 rel_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1473                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1474                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1475
1476                 spill->reg_in = ILP_UNDEF;
1477                 spill->mem_in = ILP_UNDEF;
1478
1479                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1480                 end_use_req = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1);
1481                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1482         }
1483
1484         del_pset(live);
1485         del_pset(use_end);
1486 }
1487
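/**
 * Returns the operation scheduled right after @p irn (or right after the Phis
 * of block @p irn) if it is a post-remat ("remat2"), NULL otherwise.
 */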
1488 static ir_node *
1489 next_post_remat(const ir_node * irn)
1490 {
1491         op_t      *op;
1492     ir_node   *next;
1493
1494         if(is_Block(irn)) {
1495                 next = sched_block_first_nonphi(irn);
1496         } else {
1497                 next = sched_next_op(irn);
1498         }
1499
1500         if(sched_is_end(next))
1501                 return NULL;
1502
1503         op = get_irn_link(next);
1504         if(op->is_remat && !op->attr.remat.pre) {
1505                 return next;
1506         }
1507
1508         return NULL;
1509 }
1510
1511
1512 static ir_node *
1513 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1514 {
1515         op_t      *op;
1516         ir_node   *ret;
1517
1518         if(is_Block(irn)) {
1519                 ret = sched_block_last_noncf(si, irn);
1520                 ret = sched_next(ret);
1521                 ret = sched_prev_op(ret);
1522         } else {
1523                 ret = sched_prev_op(irn);
1524         }
1525
1526         if(sched_is_end(ret) || is_Phi(ret))
1527                 return NULL;
1528
1529         op = (op_t*)get_irn_link(ret);
1530         if(op->is_remat && op->attr.remat.pre) {
1531                 return ret;
1532         }
1533
1534         return NULL;
1535 }
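/*
 * Note: by construction pre remats are scheduled directly before the op they
 * belong to and post remats directly after it (or, at a block end, around the
 * trailing control flow ops), so these helpers only need to inspect the
 * directly neighbouring scheduled node and its op_t link.
 */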
1536
1537 /**
1538  * Find a remat of value @p value in the epilog of @p pos
1539  */
1540 static ir_node *
1541 find_post_remat(const ir_node * value, const ir_node * pos)
1542 {
1543         while((pos = next_post_remat(pos)) != NULL) {
1544                 op_t   *op;
1545
1546                 op = get_irn_link(pos);
1547                 assert(op->is_remat && !op->attr.remat.pre);
1548
1549                 if(op->attr.remat.remat->value == value)
1550                         return (ir_node*)pos;
1551
1552 #if 0
1553         const ir_edge_t *edge;
1554                 foreach_out_edge(pos, edge) {
1555                         ir_node   *proj = get_edge_src_irn(edge);
1556                         assert(is_Proj(proj));
1557                 }
1558 #endif
1559
1560         }
1561
1562         return NULL;
1563 }
1564
1565 static spill_t *
1566 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1567 {
1568         spill_bb_t  *spill_bb = get_irn_link(bb);
1569         spill_t     *spill,
1570                                  query;
1571         char         buf[256];
1572         int          default_spilled;
1573
1574         query.irn = irn;
1575         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1576         if(!spill) {
1577                 double   spill_cost = is_Unknown(irn)?0.0001:opt_cost_spill*execution_frequency(si, bb);
1578
1579                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1580
1581                 spill->reg_out = ILP_UNDEF;
1582                 spill->reg_in  = ILP_UNDEF;
1583                 spill->mem_in  = ILP_UNDEF;
1584
1585                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1586                 spill->mem_out = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1587
1588                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1589                 default_spilled = be_is_live_in(si->lv, bb, irn) || is_Phi(irn);
1590                 spill->spill    = lpp_add_var_default(si->lpp, buf, lpp_binary, spill_cost, !default_spilled);
1591         }
1592
1593         return spill;
1594 }
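/*
 * add_to_spill_bb lazily creates the ILP entry for the pair (irn, bb).  The
 * last argument of lpp_add_var_default presumably gives the variable's value
 * in the solver's start solution: spill_ defaults to 1 only for values defined
 * locally in bb, while live-in values and Phis are instead marked as being in
 * memory via the mem_in default further below, which together resembles a
 * spill-everywhere start solution.
 */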
1595
1596 static void
1597 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1598 {
1599         ir_node        *irn;
1600         int i;
1601
1602         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1603                 irn = be_lv_get_irn(si->lv, bb, i);
1604
1605                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1606                         pset_insert_ptr(live, irn);
1607                 }
1608         }
1609
1610         irn = sched_last(bb);
1611
1612         /* all values eaten by control flow operations are also live until the end of the block */
1613         sched_foreach_reverse(bb, irn) {
1614                 int  i;
1615
1616                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1617
1618                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1619                         ir_node *arg = get_irn_n(irn,i);
1620
1621                         if(has_reg_class(si, arg)) {
1622                                 pset_insert_ptr(live, arg);
1623                         }
1624                 }
1625         }
1626 }
1627
1628 /**
1629  *  Inserts ILP-constraints and variables for memory copying before the given position
1630  */
1631 static void
1632 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1633 {
1634         const ir_node    *succ;
1635         const ir_edge_t  *edge;
1636         spill_bb_t       *spill_bb = get_irn_link(block);
1637         ir_node          *phi;
1638         int               pos;
1639         ilp_cst_t         cst;
1640         ilp_var_t         copyreg;
1641         char              buf[256];
1642         ir_node          *tmp;
1643
1644
1645         assert(edges_activated(current_ir_graph));
1646
1647         edge = get_block_succ_first(block);
1648         if(!edge) return;
1649
1650         succ = edge->src;
1651         pos = edge->pos;
1652
1653         edge = get_block_succ_next(block, edge);
1654         /* the successor block can only contain phis if this is a merge edge */
1655         if(edge) return;
1656
1657         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1658         copyreg = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
1659
1660         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1661         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
1662
1663         pset_foreach(live, tmp) {
1664                 spill_t  *spill;
1665 #if 0
1666                 op_t  *op = get_irn_link(irn);
1667                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1668 #endif
1669                 spill = set_find_spill(spill_bb->ilp, tmp);
1670                 assert(spill);
1671
1672                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1673         }
1674         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1675
1676         sched_foreach(succ, phi) {
1677                 const ir_node  *to_copy;
1678                 op_t           *to_copy_op;
1679                 spill_t        *to_copy_spill;
1680                 op_t           *phi_op = get_irn_link(phi);
1681                 ilp_var_t       reload = ILP_UNDEF;
1682
1683
1684                 if(!is_Phi(phi)) break;
1685                 if(!has_reg_class(si, phi)) continue;
1686
1687                 to_copy = get_irn_n(phi, pos);
1688
1689                 to_copy_op = get_irn_link(to_copy);
1690
1691                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1692                 assert(to_copy_spill);
1693
1694                 if(spill_bb->reloads) {
1695                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1696
1697                         if(keyval) {
1698                                 reload = PTR_TO_INT(keyval->val);
1699                         }
1700                 }
1701
1702                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N_%N", block, phi, to_copy);
1703                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1704
1705                 /* copy - reg_out - reload - remat - live_range <= 0 */
1706                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1707                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1708                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1709                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1710                 foreach_pre_remat(si, block, tmp) {
1711                         op_t     *remat_op = get_irn_link(tmp);
1712                         if(remat_op->attr.remat.remat->value == to_copy) {
1713                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1714                         }
1715                 }
1716
1717                 ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N_%N", block, phi, to_copy);
1718                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1719
1720                 /* copy - reg_out - copyreg <= 0 */
1721                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1722                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1723                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1724         }
1725 }
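/*
 * Illustrative reading of the memcopy constraints above, for a Phi p in the
 * single successor block whose argument a flows in over this edge:
 *   copy <= reg_out(a) + reload(a) + sum(pre remats of a) + lr(a)
 *       a can only be copied to memory if it is available in a register here
 *   copy <= reg_out(a) + copyreg
 *       if a dies at the block end, the copy occupies the spare copyreg slot
 *   sum_{v live at block end} reg_out(v) + copyreg <= n_regs
 *       the spare slot competes with the live-out values for a register
 */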
1726
1727
1728 /**
1729  * Walk all irg blocks and emit this ILP
1730  */
1731 static void
1732 luke_blockwalker(ir_node * bb, void * data)
1733 {
1734         spill_ilp_t    *si = (spill_ilp_t*)data;
1735         ir_node        *irn;
1736         pset           *live;
1737         char            buf[256];
1738         ilp_cst_t       cst;
1739         spill_bb_t     *spill_bb = get_irn_link(bb);
1740         ir_node        *tmp;
1741         spill_t        *spill;
1742         pset           *defs = pset_new_ptr_default();
1743         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
1744
1745
1746         live = pset_new_ptr_default();
1747
1748         /****************************************
1749          *      B A S I C  B L O C K  E N D
1750          ***************************************/
1751
1752
1753         /* init live values at end of block */
1754         get_live_end(si, bb, live);
1755
1756         pset_foreach(live, irn) {
1757                 op_t           *op;
1758                 ilp_var_t       reload = ILP_UNDEF;
1759
1760                 spill = set_find_spill(spill_bb->ilp, irn);
1761                 assert(spill);
1762
1763                 if(spill_bb->reloads) {
1764                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1765
1766                         if(keyval) {
1767                                 reload = PTR_TO_INT(keyval->val);
1768                         }
1769                 }
1770
1771                 op = get_irn_link(irn);
1772                 assert(!op->is_remat);
1773
1774                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1775                 op->attr.live_range.ilp = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1776                 op->attr.live_range.op = bb;
1777
1778                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1779                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
1780
1781                 /* reg_out - reload - remat - live_range <= 0 */
1782                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1783                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1784                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1785                 foreach_pre_remat(si, bb, tmp) {
1786                         op_t     *remat_op = get_irn_link(tmp);
1787                         if(remat_op->attr.remat.remat->value == irn) {
1788                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1789                         }
1790                 }
1791                 ir_snprintf(buf, sizeof(buf), "reg_out2_%N_%N", bb, irn);
1792                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_greater, 0.0);
1793
1794                 /* value may only die at bb end if it is used for a mem copy */
1795                 /* reg_out + \sum copy - reload - remat - live_range >= 0 */
1796                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1797                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1798                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1799                 foreach_pre_remat(si, bb, tmp) {
1800                         op_t     *remat_op = get_irn_link(tmp);
1801                         if(remat_op->attr.remat.remat->value == irn) {
1802                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1803                         }
1804                 }
1805                 if(is_merge_edge(bb)) {
1806                         const ir_edge_t *edge = get_block_succ_first(bb);
1807                         const ir_node   *next_bb = edge->src;
1808                         int              pos = edge->pos;
1809                         const ir_node   *phi;
1810
1811                         sched_foreach(next_bb, phi) {
1812                                 const ir_node  *phi_arg;
1813
1814                                 if(!is_Phi(phi)) break;
1815
1816                                 phi_arg = get_irn_n(phi, pos);
1817
1818                                 if(phi_arg == irn) {
1819                                         op_t      *phi_op = get_irn_link(phi);
1820                                         ilp_var_t  copy = phi_op->attr.live_range.args.copies[pos];
1821
1822                                         lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
1823                                 }
1824                         }
1825                 }
1826         }
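        /*
         * Informally: reg_out(v) <= reload(v) + pre remats of v + lr(v), so a value
         * can only leave the block in a register if it is live, reloaded or
         * rematerialised at the block end; the second (>=) constraint forces a value
         * that is available in a register at the block end to either be reg_out or
         * be consumed by a Phi memcopy on a merge edge.
         */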
1827
1828         if(opt_memcopies)
1829                 insert_mem_copy_position(si, live, bb);
1830
1831         /*
1832          * start new live ranges for values used by remats at end of block
1833          * and assure the remat args are available
1834          */
1835         foreach_pre_remat(si, bb, tmp) {
1836                 op_t     *remat_op = get_irn_link(tmp);
1837                 int       n;
1838
1839                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1840                         ir_node        *remat_arg = get_irn_n(tmp, n);
1841                         op_t           *arg_op = get_irn_link(remat_arg);
1842                         ilp_var_t       prev_lr;
1843
1844                         if(!has_reg_class(si, remat_arg)) continue;
1845
1846                         /* if value is becoming live through use by remat */
1847                         if(!pset_find_ptr(live, remat_arg)) {
1848                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1849                                 prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
1850
1851                                 arg_op->attr.live_range.ilp = prev_lr;
1852                                 arg_op->attr.live_range.op = bb;
1853
1854                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1855
1856                                 pset_insert_ptr(live, remat_arg);
1857                                 add_to_spill_bb(si, bb, remat_arg);
1858                         }
1859
1860                         /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1861                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1862                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1863
1864                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1865                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1866
1867                         /* use reload placed for this argument */
1868                         if(spill_bb->reloads) {
1869                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1870
1871                                 if(keyval) {
1872                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1873
1874                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1875                                 }
1876                         }
1877                 }
1878         }
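        /*
         * Pre remats at the block end need their arguments in a register:
         * remat <= lr(arg) + reload(arg).  Arguments that were not live before are
         * added to the live set here so the pressure checks below account for them.
         */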
1879         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1880
1881
1882
1883
1884         /**************************************
1885          *    B A S I C  B L O C K  B O D Y
1886          **************************************/
1887
1888         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1889                 op_t       *op;
1890                 op_t       *tmp_op;
1891                 int         n,
1892                                         u = 0,
1893                                         d = 0;
1894                 ilp_cst_t       check_pre,
1895                                         check_post;
1896                 set        *args;
1897                 pset       *used;
1898                 pset       *remat_defs;
1899                 keyval_t   *keyval;
1900                 ilp_cst_t   one_memoperand;
1901
1902                 /* iterate only until first phi */
1903                 if(is_Phi(irn))
1904                         break;
1905
1906                 op = get_irn_link(irn);
1907                 /* skip remats */
1908                 if(op->is_remat) continue;
1909                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1910
1911                 /* collect defined values */
1912                 if(has_reg_class(si, irn)) {
1913                         pset_insert_ptr(defs, irn);
1914                 }
1915
1916                 /* skip projs */
1917                 if(is_Proj(irn)) continue;
1918
1919                 /*
1920                  * init set of irn's arguments
1921                  * and all possibly used values around this op
1922                  * and values defined by post remats
1923                  */
1924                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1925                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1926                 remat_defs = pset_new_ptr(pset_count(live));
1927
1928                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1929                         ir_node        *irn_arg = get_irn_n(irn, n);
1930                         if(has_reg_class(si, irn_arg)) {
1931                                 set_insert_keyval(args, irn_arg, (void*)n);
1932                                 pset_insert_ptr(used, irn_arg);
1933                         }
1934                 }
1935                 foreach_post_remat(irn, tmp) {
1936                         op_t    *remat_op = get_irn_link(tmp);
1937
1938                         pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1939
1940                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1941                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1942                                 if(has_reg_class(si, remat_arg)) {
1943                                         pset_insert_ptr(used, remat_arg);
1944                                 }
1945                         }
1946                 }
1947                 foreach_pre_remat(si, irn, tmp) {
1948                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1949                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1950                                 if(has_reg_class(si, remat_arg)) {
1951                                         pset_insert_ptr(used, remat_arg);
1952                                 }
1953                         }
1954                 }
1955
1956                 /**********************************
1957                  *   I N  E P I L O G  O F  irn
1958                  **********************************/
1959
1960                 /* ensure each dying value is used by only one post remat */
1961                 pset_foreach(used, tmp) {
1962                         ir_node     *value = tmp;
1963                         op_t        *value_op = get_irn_link(value);
1964                         ir_node     *remat;
1965                         int          n_remats = 0;
1966
1967                         cst = ILP_UNDEF;
1968                         foreach_post_remat(irn, remat) {
1969                                 op_t  *remat_op = get_irn_link(remat);
1970
1971                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1972                                         ir_node   *remat_arg = get_irn_n(remat, n);
1973
1974                                         /* if value is used by this remat add it to constraint */
1975                                         if(remat_arg == value) {
1976                                                 if(n_remats == 0) {
1977                                                         /* sum remat2s <= 1 + n_remats*live_range */
1978                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1979                                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
1980                                                 }
1981
1982                                                 n_remats++;
1983                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1984                                                 break;
1985                                         }
1986                                 }
1987                         }
1988
1989                         if(pset_find_ptr(live, value) && cst != ILP_UNDEF) {
1990                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1991                         }
1992                 }
1993
1994                 /* ensure at least one value dies at post remat */
1995                 foreach_post_remat(irn, tmp) {
1996                         op_t     *remat_op = get_irn_link(tmp);
1997                         pset     *remat_args = pset_new_ptr(get_irn_arity(tmp));
1998                         ir_node  *remat_arg;
1999
2000                         for(n=get_irn_arity(tmp)-1; n>=0; --n) {
2001                                 remat_arg = get_irn_n(tmp, n);
2002
2003                                 if(has_reg_class(si, remat_arg)) {
2004
2005                                         /* does arg always die at this op? */
2006                                         if(!pset_find_ptr(live, remat_arg))
2007                                                 goto skip_one_must_die;
2008
2009                                         pset_insert_ptr(remat_args, remat_arg);
2010                                 }
2011                         }
2012
2013                         /* remat + \sum live_range(remat_arg) <= |args| */
2014                         ir_snprintf(buf, sizeof(buf), "one_must_die_%+F", tmp);
2015                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
2016                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2017
2018                         pset_foreach(remat_args, remat_arg) {
2019                                 op_t  *arg_op = get_irn_link(remat_arg);
2020
2021                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2022                         }
2023
2024 skip_one_must_die:
2025                         del_pset(remat_args);
2026                 }
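                /*
                 * Illustrative instance: for a post remat r with live arguments a and b
                 * this adds r + lr(a) + lr(b) <= 2, so if r is materialised at least
                 * one of its arguments must end its live range at this op.
                 */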
2027
2028                 /* new live ranges for values from L\U defined by post remats */
2029                 pset_foreach(live, tmp) {
2030                         ir_node     *value = tmp;
2031                         op_t        *value_op = get_irn_link(value);
2032
2033                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
2034                                 ilp_var_t    prev_lr = ILP_UNDEF;
2035                                 ir_node     *remat;
2036
2037                                 if(pset_find_ptr(remat_defs, value)) {
2038
2039                                         /* next_live_range <= prev_live_range + sum remat2s */
2040                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
2041                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2042
2043                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
2044                                         prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2045
2046                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
2047                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2048
2049                                         foreach_post_remat(irn, remat) {
2050                                                 op_t        *remat_op = get_irn_link(remat);
2051
2052                                                 /* if value is being rematerialized by this remat */
2053                                                 if(value == remat_op->attr.remat.remat->value) {
2054                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2055                                                 }
2056                                         }
2057
2058                                         value_op->attr.live_range.ilp = prev_lr;
2059                                         value_op->attr.live_range.op = irn;
2060                                 }
2061                         }
2062                 }
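                /*
                 * For values in L\U that some post remat recreates, the live range is
                 * split at irn: next_lr <= prev_lr + sum(remat2s producing the value),
                 * so a value can only be live in the epilog of irn if it is live in the
                 * prolog or recreated by one of those post remats.
                 */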
2063
2064                 /* requirements for post remats and start live ranges from L\U' for values dying here */
2065                 foreach_post_remat(irn, tmp) {
2066                         op_t        *remat_op = get_irn_link(tmp);
2067                         int          n;
2068
2069                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2070                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2071                                 op_t           *arg_op = get_irn_link(remat_arg);
2072
2073                                 if(!has_reg_class(si, remat_arg)) continue;
2074
2075                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
2076                                 if(!pset_find_ptr(used, remat_arg)) {
2077                                         /* remat <= live_range(remat_arg) */
2078                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
2079                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2080
2081                                         /* if value is becoming live through use by remat2 */
2082                                         if(!pset_find_ptr(live, remat_arg)) {
2083                                                 ilp_var_t     lr;
2084
2085                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
2086                                                 lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2087
2088                                                 arg_op->attr.live_range.ilp = lr;
2089                                                 arg_op->attr.live_range.op = irn;
2090
2091                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
2092
2093                                                 pset_insert_ptr(live, remat_arg);
2094                                                 add_to_spill_bb(si, bb, remat_arg);
2095                                         }
2096
2097                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2098                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2099                                 }
2100                         }
2101                 }
2102
2103                 d = pset_count(defs);
2104                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
2105
2106                 /* count how many regs irn needs for arguments */
2107                 u = set_count(args);
2108
2109
2110                 /* check the register pressure in the epilog */
2111                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
2112                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
2113                 check_post = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - d);
2114
2115                 /* add L\U' to check_post */
2116                 pset_foreach(live, tmp) {
2117                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
2118                                 /* if a live value is not used by irn */
2119                                 tmp_op = get_irn_link(tmp);
2120                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
2121                         }
2122                 }
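                /*
                 * Together with the post_use terms added in the loop below, check_post
                 * bounds the register pressure in the epilog of irn:
                 *   sum_{v live, neither used nor defined by irn} lr(v)
                 *     + sum_{v used around irn} post_use(v)  <=  n_regs - |D|
                 * where |D| registers are reserved for the values defined by irn.
                 */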
2123
2124                 /***********************************************************
2125                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
2126                  **********************************************************/
2127
2128
2129                 pset_foreach(used, tmp) {
2130                         ilp_var_t       prev_lr;
2131                         ilp_var_t       post_use;
2132                         int             p = 0;
2133                         spill_t        *spill;
2134                         ir_node        *arg = tmp;
2135                         op_t           *arg_op = get_irn_link(arg);
2136                         ir_node        *remat;
2137
2138                         spill = add_to_spill_bb(si, bb, arg);
2139
2140                         /* new live range for each used value */
2141                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
2142                         prev_lr = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, is_before_frame(bb, irn)?1.0:0.0);
2143
2144                         /* the epilog stuff - including post_use, check_post, check_post_remat */
2145                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
2146                         post_use = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, is_before_frame(bb, irn)?1.0:0.0);
2147
2148                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
2149
2150                         /* arg is live throughout epilog if the next live_range is in a register */
2151                         if(pset_find_ptr(live, arg)) {
2152                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
2153
2154                                 /* post_use >= next_lr + remat */
2155                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
2156                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2157                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
2158                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2159                         }
2160
2161                         /* forall post remat which use arg add a similar cst */
2162                         foreach_post_remat(irn, remat) {
2163                                 int      n;
2164
2165                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
2166                                         ir_node    *remat_arg = get_irn_n(remat, n);
2167                                         op_t       *remat_op = get_irn_link(remat);
2168
2169                                         if(remat_arg == arg) {
2170                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
2171
2172                                                 /* post_use >= remat */
2173                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
2174                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2175                                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
2176                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2177                                         }
2178                                 }
2179                         }
2180
2181                         /* if value is not an arg of op and not possibly defined by post remat
2182                          * then it may only die and not become live
2183                          */
2184                         if(!set_find_keyval(args, arg)) {
2185                                 /* post_use <= prev_lr */
2186                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
2187                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2188                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
2189                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2190
2191                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
2192                                         /* next_lr <= prev_lr */
2193                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
2194                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2195                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2196                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
2197                                 }
2198                         }
2199
2200                         if(opt_memoperands) {
2201                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2202                                         if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
2203                                                 ilp_var_t       memoperand;
2204
2205                                                 ir_snprintf(buf, sizeof(buf), "memoperand_%N_%d", irn, n);
2206                                                 memoperand = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_memoperand*execution_frequency(si, bb), 0.0);
2207                                                 set_insert_memoperand(si->memoperands, irn, n, memoperand);
2208
2209                                                 ir_snprintf(buf, sizeof(buf), "nolivepost_%N_%d", irn, n);
2210                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2211
2212                                                 lpp_set_factor_fast(si->lpp, cst, memoperand, 1.0);
2213                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
2214                                         }
2215                                 }
2216                         }
2217
2218                         /* new live range begins for each used value */
2219                         arg_op->attr.live_range.ilp = prev_lr;
2220                         arg_op->attr.live_range.op = irn;
2221
2222                         pset_insert_ptr(live, arg);
2223                 }
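                /*
                 * post_use(v) models whether v still occupies a register in the epilog
                 * of irn: it is pushed to 1 by the following live range and by any post
                 * remat consuming v, and for values that are not arguments of irn it is
                 * bounded above by the new live range prev_lr, so a dead value cannot
                 * spring back to life in the epilog.
                 */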
2224
2225                 /* just to be sure */
2226                 check_post = ILP_UNDEF;
2227
2228
2229
2230
2231                 /******************
2232                  *   P R O L O G
2233                  ******************/
2234
2235                 /* check the register pressure in the prolog */
2236                 /* sum_{L\U} lr <= k - |U| */
2237                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
2238                 check_pre = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs - u);
2239
2240                 /* for the prolog remove defined values from the live set */
2241                 pset_foreach(defs, tmp) {
2242                         pset_remove_ptr(live, tmp);
2243                 }
2244
2245                 if(opt_memoperands) {
2246                         ir_snprintf(buf, sizeof(buf), "one_memoperand_%N", irn);
2247                         one_memoperand = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2248                 }
2249
2250                 /***********************************************************
2251                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
2252                  **********************************************************/
2253
2254
2255                 set_foreach(args, keyval) {
2256                         spill_t          *spill;
2257                         const ir_node    *arg = keyval->key;
2258                         int               i = PTR_TO_INT(keyval->val);
2259                         op_t             *arg_op = get_irn_link(arg);
2260                         ilp_cst_t         requirements;
2261                         int               n_memoperands;
2262
2263                         spill = set_find_spill(spill_bb->ilp, arg);
2264                         assert(spill);
2265
2266                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
2267                         op->attr.live_range.args.reloads[i] = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_reload*execution_frequency(si, bb), is_before_frame(bb, irn)?0.0:1.0);
2268
2269                         /* reload <= mem_out */
2270                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
2271                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2272                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
2273                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2274
2275                         /* requirement: arg must be in register for use */
2276                         /* reload + remat + live_range == 1 */
2277                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
2278                         requirements = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 1.0);
2279
2280                         lpp_set_factor_fast(si->lpp, requirements, arg_op->attr.live_range.ilp, 1.0);
2281                         lpp_set_factor_fast(si->lpp, requirements, op->attr.live_range.args.reloads[i], 1.0);
2282                         foreach_pre_remat(si, irn, tmp) {
2283                                 op_t     *remat_op = get_irn_link(tmp);
2284                                 if(remat_op->attr.remat.remat->value == arg) {
2285                                         lpp_set_factor_fast(si->lpp, requirements, remat_op->attr.remat.ilp, 1.0);
2286                                 }
2287                         }
2288
2289                         if(opt_memoperands) {
2290                                 n_memoperands = 0;
2291                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2292                                         if(get_irn_n(irn, n) == arg) {
2293                                                 n_memoperands++;
2294                                         }
2295                                 }
2296                                 for(n = get_irn_arity(irn)-1; n>=0; --n) {
2297                                         if(get_irn_n(irn, n) == arg && arch_possible_memory_operand(arch_env, irn, n)) {
2298                                                 memoperand_t  *memoperand;
2299                                                 memoperand = set_find_memoperand(si->memoperands, irn, n);
2300
2301                                                 /* memoperand <= mem_out */
2302                                                 ir_snprintf(buf, sizeof(buf), "req_memoperand_%N_%d", irn, n);
2303                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2304                                                 lpp_set_factor_fast(si->lpp, cst, memoperand->ilp, 1.0);
2305                                                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2306
2307                                                 /* the memoperand can only satisfy the requirement if the op uses the argument exactly once */
2308                                                 if(n_memoperands == 1)
2309                                                         lpp_set_factor_fast(si->lpp, requirements, memoperand->ilp, 1.0);
2310
2311                                                 lpp_set_factor_fast(si->lpp, one_memoperand, memoperand->ilp, 1.0);
2312
2313                                                 /* we have one more free register if we use a memory operand */
2314                                                 lpp_set_factor_fast(si->lpp, check_pre, memoperand->ilp, -1.0);
2315                                         }
2316                                 }
2317                         }
2318                 }
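                /*
                 * Illustrative requirement for an argument a of irn:
                 *   lr(a) + reload(a) + sum(pre remats of a) [+ memoperand] == 1
                 * a must reach irn in exactly one way: it is already live in a register,
                 * reloaded, rematerialised directly before irn, or (if irn uses a exactly
                 * once and the architecture permits it) folded in as a memory operand.
                 */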
2319
2320                 /* iterate over L\U */
2321                 pset_foreach(live, tmp) {
2322                         if(!set_find_keyval(args, tmp)) {
2323                                 /* if a live value is not used by irn */
2324                                 tmp_op = get_irn_link(tmp);
2325                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
2326                         }
2327                 }
2328
2329
2330                 /* requirements for remats */
2331                 foreach_pre_remat(si, irn, tmp) {
2332                         op_t        *remat_op = get_irn_link(tmp);
2333                         int          n;
2334
2335                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2336                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2337                                 op_t           *arg_op = get_irn_link(remat_arg);
2338
2339                                 if(!has_reg_class(si, remat_arg)) continue;
2340
2341                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
2342                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2343                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2344
2345                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2346                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2347
2348                                 /* if remat arg is also used by current op then we can use reload placed for this argument */
2349                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2350                                         int    index = PTR_TO_INT(keyval->val);
2351
2352                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2353                                 }
2354                         }
2355                 }
2356
2357
2358
2359
2360                 /*************************
2361                  *  D O N E  W I T H  O P
2362                  *************************/
2363
2364                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2365
2366                 pset_foreach(live, tmp) {
2367                         assert(has_reg_class(si, tmp));
2368                 }
2369
2370                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2371                         ir_node        *arg = get_irn_n(irn, n);
2372
2373                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2374                 }
2375
2376                 del_pset(remat_defs);
2377                 del_pset(used);
2378                 del_set(args);
2379                 del_pset(defs);
2380                 defs = pset_new_ptr_default();
2381         }
2382
2383
2384
2385         /***************************************
2386          *   B E G I N N I N G  O F  B L O C K
2387          ***************************************/
2388
2389
2390         /* we are now at the beginning of the basic block; only Phis are in front of us */
2391         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2392
2393         pset_foreach(live, irn) {
2394                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2395         }
2396
2397         /* construct mem_outs for all values */
2398
2399         set_foreach(spill_bb->ilp, spill) {
2400                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2401                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2402
2403                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2404                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2405
2406                 if(pset_find_ptr(live, spill->irn)) {
2407                         int default_spilled;
2408                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2409
2410                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2411                         default_spilled = be_is_live_in(si->lv, bb, spill->irn) || is_Phi(spill->irn);
2412                         spill->mem_in   = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, default_spilled);
2413                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2414
2415                         if(opt_memcopies && is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2416                                 int   n;
2417                                 op_t *op = get_irn_link(spill->irn);
2418
2419                                 /* do we have to copy a phi argument? */
2420                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2421                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2422
2423                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2424                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2425                                         double          freq=0.0;
2426                                         int             m;
2427                                         ilp_var_t       var;
2428
2429
2430                                         /* argument already done? */
2431                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2432
2433                                         /* get sum of execution frequencies of blocks with the same phi argument */
2434                                         for(m=n; m>=0; --m) {
2435                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2436
2437                                                 if(arg==arg2) {
2438                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
2439                                                 }
2440                                         }
2441
2442                                         /* copies are not for free */
2443                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2444                                         var = lpp_add_var_default(si->lpp, buf, lpp_binary, opt_cost_spill * freq, 1.0);
2445
2446                                         for(m=n; m>=0; --m) {
2447                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2448
2449                                                 if(arg==arg2) {
2450                                                         op->attr.live_range.args.copies[m] = var;
2451                                                 }
2452                                         }
2453
2454 #if 0
2455                                         /* copy <= mem_in */
2456                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2457                                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2458                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2459                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2460 #endif
2461                                 }
2462                         }
2463                 }
2464         }
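        /*
         * mem_out(v) <= spill(v) + mem_in(v): a value is in memory at the block end
         * only if it is spilled in this block or already enters the block in memory.
         * For Phis defined here the memory side is later fed by the copy_ variables,
         * whose cost is weighted with the execution frequencies of the predecessor
         * blocks that deliver the same argument.
         */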
2465
2466         foreach_post_remat(bb, tmp) {
2467                 int         n;
2468                 pset       *remat_args = pset_new_ptr(get_irn_arity(tmp));
2469                 op_t       *remat_op = get_irn_link(tmp);
2470                 ir_node    *remat_arg;
2471
2472                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2473                         remat_arg = get_irn_n(tmp, n);
2474                         if(has_reg_class(si, remat_arg)) {
2475                                 pset_insert_ptr(remat_args, remat_arg);
2476                         }
2477                 }
2478                 assert(pset_count(remat_args) > 0 && "post remats should have at least one arg");
2479
2480                 /* remat + \sum live_range(remat_arg) <= |args| */
2481                 ir_snprintf(buf, sizeof(buf), "one_must_die_%N", tmp);
2482                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, pset_count(remat_args));
2483                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2484
2485                 pset_foreach(remat_args, remat_arg) {
2486                         /* if value is becoming live through use by remat2 */
2487                         if(!pset_find_ptr(live, remat_arg)) {
2488                                 op_t       *remat_arg_op = get_irn_link(remat_arg);
2489                                 ilp_cst_t   nomem;
2490
2491                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 at bb start %+F\n", remat_arg, tmp));
2492
2493                                 pset_insert_ptr(live, remat_arg);
2494                                 spill = add_to_spill_bb(si, bb, remat_arg);
2495                                 remat_arg_op->attr.live_range.ilp = ILP_UNDEF;
2496
2497                                 /* we need reg_in and mem_in for this value; they will be referenced later */
2498                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", remat_arg, bb);
2499                                 spill->reg_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2500                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", remat_arg, bb);
2501                                 spill->mem_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
2502
2503
2504                                 /* optimization: all memory stuff should be 0, for we do not want to insert reloads for remats */
2505                                 ir_snprintf(buf, sizeof(buf), "nomem_%N_%N", remat_arg, bb);
2506                                 nomem = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0.0);
2507
2508                                 lpp_set_factor_fast(si->lpp, nomem, spill->spill, 1.0);
2509                                 if(spill_bb->reloads) {
2510                                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
2511
2512                                         if(keyval) {
2513                                                 ilp_var_t reload = PTR_TO_INT(keyval->val);
2514                                                 lpp_set_factor_fast(si->lpp, nomem, reload, 1.0);
2515                                         }
2516                                 }
2517                         } else {
2518                                 op_t       *remat_arg_op = get_irn_link(remat_arg);
2519                                 lpp_set_factor_fast(si->lpp, cst, remat_arg_op->attr.live_range.ilp, 1.0);
2520                         }
2521                 }
2522                 del_pset(remat_args);
2523         }
2524
2525         /* L\U is empty at bb start */
2526         /* arg is live throughout epilog if it is reg_in into this block */
2527
2528         /* check the register pressure at the beginning of the block
2529          * including remats
2530          */
2531         /* reg_in corresponds to post_use */
2532
2533         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2534         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, si->n_regs);
2535
2536         pset_foreach(live, irn) {
2537                 ilp_cst_t  nospill;
2538
2539                 spill = set_find_spill(spill_bb->ilp, irn);
2540                 assert(spill);
2541
2542                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2543                 spill->reg_in = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 0.0);
2544
2545                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2546
2547                 /* spill + mem_in <= 1 */
2548                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2549                 nospill = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
2550
2551                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2552                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2553
2554         } /* post remats are NOT included in the register pressure check
2555            because they do not increase the register pressure */
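        /*
         * nospill: spill(v) + mem_in(v) <= 1 forbids spilling a value that already
         * enters the block in memory (the spill would be redundant), while
         * check_start above limits the values entering in registers to n_regs.
         */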
2556
2557         /* mem_in/reg_in for live_in values, especially phis and their arguments */
2558         pset_foreach(live, irn) {
2559                 int          p = 0,
2560                                          n;
2561
2562                 spill = set_find_spill(spill_bb->ilp, irn);
2563                 assert(spill && spill->irn == irn);
2564
2565                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2566                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2567                                 ilp_cst_t       mem_in,
2568                                                                 reg_in;
2569                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2570                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2571                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2572                                 spill_t        *spill_p;
2573                                 op_t           *op = get_irn_link(irn);
2574
2575                                 /* although the phi is in the right register class, one or more
2576                                  * of its arguments may be in a different one or may have to be
2577                                  * ignored
2578                                  */
2579                                 if(has_reg_class(si, phi_arg)) {
2580                                         /* mem_in <= mem_out_arg + copy */
2581                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2582                                         mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2583
2584                                         /* reg_in <= reg_out_arg */
2585                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2586                                         reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2587
2588                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2589                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2590
2591                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2592                                         assert(spill_p);
2593
2594                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2595                                         if(opt_memcopies)
2596                                                 lpp_set_factor_fast(si->lpp, mem_in, op->attr.live_range.args.copies[n], -1.0);
2597
2598                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2599                                 }
2600                         }
2601                 } else {
2602                         /* otherwise ensure the value arrives on all paths in the same resource */
2603
2604                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2605                                 ilp_cst_t       mem_in,
2606                                                                 reg_in;
2607                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2608                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2609                                 spill_t        *spill_p;
2610
2611                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2612                                 mem_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2613                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2614                                 reg_in = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2615
2616                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2617                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2618
2619                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2620                                 assert(spill_p);
2621
2622                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2623                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2624                         }
2625                 }
2626         }
2627
2628         foreach_post_remat(bb, tmp) {
2629                 int         n;
2630
2631                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2632                         ir_node    *remat_arg = get_irn_n(tmp, n);
2633                         op_t       *remat_op = get_irn_link(tmp);
2634
2635                         if(!has_reg_class(si, remat_arg)) continue;
2636
2637                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2638                         assert(spill);
2639
2640                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2641                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2642                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2643                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2644                 }
2645         }
2646
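        /*
         * Bound the remat2s which may still use a value here: as noted below,
         * sum of remat2s using the value <= 1 + n_remats * live_range, so if the
         * value's live range does not survive this point, at most one remat2 may
         * consume the dying value.
         */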
2647         pset_foreach(live, irn) {
2648                 const op_t      *op = get_irn_link(irn);
2649                 const ir_node   *remat;
2650                 int              n_remats = 0;
2651
2652                 cst = ILP_UNDEF;
2653
2654                 foreach_post_remat(bb, remat) {
2655                         int   n;
2656
2657                         for (n=get_irn_arity(remat)-1; n>=0; --n) {
2658                                 const ir_node  *arg = get_irn_n(remat, n);
2659
2660                                 if(arg == irn) {
2661                                         const op_t   *remat_op = get_irn_link(remat);
2662
2663                                         if(cst == ILP_UNDEF) {
2664                                                 /* sum remat2s <= 1 + n_remats*live_range */
2665                                                 ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", irn, bb);
2666                                                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1.0);
2667                                         }
2668                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2669                                         ++n_remats;
2670                                         break;
2671                                 }
2672                         }
2673                 }
2674                 if(cst != ILP_UNDEF && op->attr.live_range.ilp != ILP_UNDEF) {
2675                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -n_remats);
2676                 }
2677         }
2678
2679         /* first live ranges from reg_ins */
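        /*
         * A live range starting at the block entry requires the value to actually
         * be present in a register there:
         *     live_range <= reg_in + post remats recomputing the value
         */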
2680         pset_foreach(live, irn) {
2681                 op_t      *op = get_irn_link(irn);
2682
2683                 if(op->attr.live_range.ilp != ILP_UNDEF) {
2684
2685                         spill = set_find_spill(spill_bb->ilp, irn);
2686                         assert(spill && spill->irn == irn);
2687
2688                         ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2689                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2690                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2691                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2692
2693                         foreach_post_remat(bb, tmp) {
2694                                 op_t     *remat_op = get_irn_link(tmp);
2695
2696                                 if(remat_op->attr.remat.remat->value == irn) {
2697                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2698                                 }
2699                         }
2700                 }
2701         }
2702
2703         /* walk forward now and compute constraints for placing spills */
2704         /* this must only be done for values that are not defined in this block */
2705         /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
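        /*
         * In effect: a spill of a value not defined in this block requires the
         * value to be available in a register here, i.e.
         *     spill <= reg_in (on diverge edges) + remats of the value placed
         *              before its first real use in this block
         */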
2706         pset_foreach(live, irn) {
2707                 /*
2708                  * if the value is defined in this block we can always place the spill directly after the def
2709                  *    -> no constraint necessary
2710                  */
2711                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) continue;
2712
2713
2714                 spill = set_find_spill(spill_bb->ilp, irn);
2715                 assert(spill);
2716
2717                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2718                 cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0.0);
2719
2720                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2721                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2722
2723                 if(!is_Phi(irn)) {
2724                         sched_foreach_op(bb, tmp) {
2725                                 op_t   *op = get_irn_link(tmp);
2726
2727                                 if(is_Phi(tmp)) continue;
2728                                 assert(!is_Proj(tmp));
2729
2730                                 if(op->is_remat) {
2731                                         const ir_node   *value = op->attr.remat.remat->value;
2732
2733                                         if(value == irn) {
2734                                                 /* only collect remats up to the first real use of a value */
2735                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2736                                         }
2737                                 } else {
2738                                         int   n;
2739
2740                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2741                                                 ir_node    *arg = get_irn_n(tmp, n);
2742
2743                                                 if(arg == irn) {
2744                                                         /* if a value is used stop collecting remats */
2745                                                         goto next_live;
2746                                                 }
2747                                         }
2748                                 }
2749                         }
2750                 }
2751 next_live: ;
2752         }
2753
2754         del_pset(live);
2755 }
2756
2757 typedef struct _irnlist_t {
2758         struct list_head   list;
2759         ir_node           *irn;
2760 } irnlist_t;
2761
2762 typedef struct _interference_t {
2763         struct list_head    blocklist;
2764         ir_node            *a;
2765         ir_node            *b;
2766 } interference_t;
2767
2768 static int
2769 cmp_interference(const void *a, const void *b, size_t size)
2770 {
2771         const interference_t *p = a;
2772         const interference_t *q = b;
2773
2774         return !(p->a == q->a && p->b == q->b);
2775 }
2776
2777 static interference_t *
2778 set_find_interference(set * set, ir_node * a, ir_node * b)
2779 {
2780         interference_t     query;
2781
2782         query.a = (a>b)?a:b;
2783         query.b = (a>b)?b:a;
2784
2785         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2786 }
2787
2788 static interference_t *
2789 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2790 {
2791         interference_t     query,
2792                                           *result;
2793         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2794
2795         list->irn = bb;
2796
2797         result = set_find_interference(set, a, b);
2798         if(result) {
2799
2800                 list_add(&list->list, &result->blocklist);
2801                 return result;
2802         }
2803
2804         query.a = (a>b)?a:b;
2805         query.b = (a>b)?b:a;
2806
2807         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2808
2809         INIT_LIST_HEAD(&result->blocklist);
2810         list_add(&list->list, &result->blocklist);
2811
2812         return result;
2813 }
2814
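/**
 * Check whether the values a and b interfere inside block bb, i.e. whether
 * both are live at some point within bb: two live-in values always interfere;
 * otherwise, with a dominating b, they interfere iff a is live at the end of
 * bb or a has a user in bb which is dominated by b.
 */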
2815 static int
2816 values_interfere_in_block(const spill_ilp_t * si, const ir_node * bb, const ir_node * a, const ir_node * b)
2817 {
2818         const ir_edge_t *edge;
2819
2820         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2821                 /* both values are live in, so they interfere */
2822                 return 1;
2823         }
2824
2825         /* ensure a dominates b */
2826         if(value_dominates(b,a)) {
2827                 const ir_node * t;
2828                 t = b;
2829                 b = a;
2830                 a = t;
2831         }
2832         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2833
2834
2835         /* the following code is stolen from bera.c */
2836         if(be_is_live_end(si->lv, bb, a))
2837                 return 1;
2838
2839         foreach_out_edge(a, edge) {
2840                 const ir_node *user = edge->src;
2841                 if(get_nodes_block(user) == bb
2842                                 && !is_Phi(user)
2843                                 && b != user
2844                                 && !pset_find_ptr(si->inverse_ops, user)
2845                                 && value_dominates(b, user))
2846                         return 1;
2847         }
2848
2849         return 0;
2850 }
2851
2852 /**
2853  * Walk all irg blocks and collect interfering values inside of phi classes
2854  */
2855 static void
2856 luke_interferencewalker(ir_node * bb, void * data)
2857 {
2858         spill_ilp_t    *si = (spill_ilp_t*)data;
2859         int             l1, l2;
2860
2861         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
2862                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
2863                 op_t           *a_op = get_irn_link(a);
2864
2865
2866                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2867                 if (has_reg_class(si, a) && get_phi_class(a)) {
2868                         if(a_op->is_remat || pset_find_ptr(si->inverse_ops, a))
2869                                 continue;
2870
2871                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
2872                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
2873                                 op_t           *b_op = get_irn_link(b);
2874
2875
2876                                 /* a and b are only interesting if they are in the same phi class */
2877                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2878                                         if(b_op->is_remat || pset_find_ptr(si->inverse_ops, b))
2879                                                 continue;
2880
2881                                         if(values_interfere_in_block(si, bb, a, b)) {
2882                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2883                                                 set_insert_interference(si, si->interferences, a, b, bb);
2884                                         }
2885                                 }
2886                         }
2887                 }
2888         }
2889 }
2890
2891 static unsigned int copy_path_id = 0;
2892
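/**
 * Emit one copy-path constraint: any_interfere <= copy_1 + ... + copy_n,
 * i.e. whenever the two values interfere in memory, at least one of the
 * copies collected along the current path has to be materialized.
 */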
2893 static void
2894 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2895 {
2896         ilp_cst_t  cst;
2897         ilp_var_t  copy;
2898         char       buf[256];
2899         void      *ptr;
2900
2901         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2902         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
2903
2904         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2905
2906         pset_foreach(copies, ptr) {
2907                 copy = PTR_TO_INT(ptr);
2908                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2909         }
2910 }
2911
2912 /**
2913  * @param copies   contains a path of copies which leads us to irn
2914  * @param visited  contains a set of nodes already visited on this path
2915  */
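/*
 * Path enumeration is cut off after MAX_PATHS paths. If the limit is already
 * hit at the top level (no copies collected yet), the fallback constraint
 * always_copy forces the current copy variable to equal any_interfere, so a
 * copy is inserted whenever the two values interfere in memory.
 */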
2916 static int
2917 find_copy_path(spill_ilp_t * si, const ir_node * irn, const ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2918 {
2919         const ir_edge_t *edge;
2920         op_t            *op = get_irn_link(irn);
2921         pset            *visited_users = pset_new_ptr_default();
2922         int              paths = 0;
2923
2924         if(op->is_remat) return 0;
2925
2926         pset_insert_ptr(visited, irn);
2927
2928         if(is_Phi(irn)) {
2929                 int    n;
2930                 pset  *visited_operands = pset_new_ptr(get_irn_arity(irn));
2931
2932                 /* visit all operands */
2933                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2934                         ir_node  *arg = get_irn_n(irn, n);
2935                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2936
2937                         if(!has_reg_class(si, arg)) continue;
2938                         if(pset_find_ptr(visited_operands, arg)) continue;
2939                         pset_insert_ptr(visited_operands, arg);
2940
2941                         if(arg == target) {
2942                                 if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2943                                         del_pset(visited_operands);
2944                                         del_pset(visited_users);
2945                                         pset_remove_ptr(visited, irn);
2946                                         return paths;
2947                                 }
2948                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2949                                 write_copy_path_cst(si, copies, any_interfere);
2950                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2951                         } else if(!pset_find_ptr(visited, arg)) {
2952                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2953                                 paths += find_copy_path(si, arg, target, any_interfere, copies, visited);
2954                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2955
2956                 if(paths > MAX_PATHS) {
2957                     if(pset_count(copies) == 0) {
2958                         ilp_cst_t  cst;
2959                         char       buf[256];
2960
2961                         ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
2962                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
2963                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2964                         lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
2965                         DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
2966
2967                         paths = 0;
2968                     } else {
2969                         del_pset(visited_operands);
2970                         del_pset(visited_users);
2971                         pset_remove_ptr(visited, irn);
2972                         return paths;
2973                     }
2974                 } else if(pset_count(copies) == 0) {
2975                                         paths = 0;
2976                                 }
2977                         }
2978                 }
2979
2980         del_pset(visited_operands);
2981         }
2982
2983         /* visit all uses which are phis */
2984         foreach_out_edge(irn, edge) {
2985                 ir_node  *user = edge->src;
2986                 int       pos  = edge->pos;
2987                 op_t     *op = get_irn_link(user);
2988                 ilp_var_t copy;
2989
2990                 if(!is_Phi(user)) continue;
2991                 if(!has_reg_class(si, user)) continue;
2992                 if(pset_find_ptr(visited_users, user)) continue;
2993                 pset_insert_ptr(visited_users, user);
2994
2995                 copy = op->attr.live_range.args.copies[pos];
2996
2997                 if(user == target) {
2998                         if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2999                                 del_pset(visited_users);
3000                                 pset_remove_ptr(visited, irn);
3001                                 return paths;
3002                         }
3003                         pset_insert(copies, INT_TO_PTR(copy), copy);
3004                         write_copy_path_cst(si, copies, any_interfere);
3005                         pset_remove(copies, INT_TO_PTR(copy), copy);
3006                 } else if(!pset_find_ptr(visited, user)) {
3007                         pset_insert(copies, INT_TO_PTR(copy), copy);
3008                         paths += find_copy_path(si, user, target, any_interfere, copies, visited);
3009                         pset_remove(copies, INT_TO_PTR(copy), copy);
3010
3011             if(paths > MAX_PATHS) {
3012                 if(pset_count(copies) == 0) {
3013                     ilp_cst_t  cst;
3014                     char       buf[256];
3015
3016                     ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
3017                     cst = lpp_add_cst_uniq(si->lpp, buf, lpp_equal, 0);
3018                     lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
3019                     lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
3020                     DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
3021
3022                     paths = 0;
3023                 } else {
3024                     del_pset(visited_users);
3025                     pset_remove_ptr(visited, irn);
3026                     return paths;
3027                 }
3028             } else if(pset_count(copies) == 0) {
3029                                 paths = 0;
3030                         }
3031                 }
3032         }
3033
3034         del_pset(visited_users);
3035         pset_remove_ptr(visited, irn);
3036         return paths;
3037 }
3038
3039 static void
3040 gen_copy_constraints(spill_ilp_t * si, const ir_node * a, const ir_node * b, ilp_var_t any_interfere)
3041 {
3042         pset * copies = pset_new_ptr_default();
3043         pset * visited = pset_new_ptr_default();
3044
3045         find_copy_path(si, a, b, any_interfere, copies, visited);
3046
3047         del_pset(visited);
3048         del_pset(copies);
3049 }
3050
3051
3052 static void
3053 memcopyhandler(spill_ilp_t * si)
3054 {
3055         interference_t   *interference;
3056         char              buf[256];
3057         /* test memory values for interference */
3058
3059         /* analyze phi classes */
3060         phi_class_compute(si->chordal_env->irg);
3061
3062         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
3063         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
3064
3065         /* now let's emit the ILP inequalities for these interferences */
3066         set_foreach(si->interferences, interference) {
3067                 irnlist_t      *irnlist;
3068                 ilp_var_t       interfere,
3069                                                 any_interfere;
3070                 ilp_cst_t       any_interfere_cst,
3071                                                 cst;
3072                 const ir_node  *a  = interference->a;
3073                 const ir_node  *b  = interference->b;
3074
3075                 /* any_interf <= \sum interf */
3076                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
3077                 any_interfere_cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3078                 any_interfere = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
3079
3080                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
3081
3082                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
3083                         const ir_node  *bb = irnlist->irn;
3084                         spill_bb_t     *spill_bb = get_irn_link(bb);
3085                         spill_t        *spilla,
3086                                                    *spillb;
3087                         char           buf[256];
3088
3089                         spilla = set_find_spill(spill_bb->ilp, a);
3090                         assert(spilla);
3091
3092                         spillb = set_find_spill(spill_bb->ilp, b);
3093                         assert(spillb);
3094
3095                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
3096                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
3097                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
3098                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
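                        /* Constraint 1 forces interfere to 1 as soon as both values are
                         * held in memory in this block (live-in in memory or spilled here),
                         * constraints 2 and 3 force it back to 0 as soon as one of the two
                         * values is not in memory. */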
3099                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
3100                         interfere = lpp_add_var_default(si->lpp, buf, lpp_binary, 0.0, 1.0);
3101
3102                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
3103                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 1);
3104
3105                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
3106                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
3107                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
3108                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
3109                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
3110
3111                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
3112                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3113
3114                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3115                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
3116                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
3117
3118                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
3119                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3120
3121                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3122                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
3123                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
3124
3125
3126                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
3127
3128                         /* any_interfere >= interf */
3129                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
3130                         cst = lpp_add_cst_uniq(si->lpp, buf, lpp_less, 0);
3131
3132                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
3133                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
3134                 }
3135
3136                 /* now that we know whether the two values interfere in memory we can add the constraints which enforce copies */
3137                 gen_copy_constraints(si,a,b,any_interfere);
3138         }
3139 }
3140
3141
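/* ILP solvers return floating point values for the (binary/integer) variables;
 * treat anything within 1e-5 of zero as zero when reading back the solution. */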
3142 static INLINE int
3143 is_zero(double x)
3144 {
3145         return fabs(x) < 0.00001;
3146 }
3147
3148 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
3149 {
3150         spill_ilp_t *si = get_irg_link(current_ir_graph);
3151
3152         if(pset_find_ptr(si->all_possible_remats, n)) {
3153                 op_t   *op = (op_t*)get_irn_link(n);
3154                 assert(op && op->is_remat);
3155
3156                 if(!op->attr.remat.remat->inverse) {
3157                         if(op->attr.remat.pre) {
3158                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
3159                         } else {
3160                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
3161                         }
3162
3163                         return 1;
3164                 } else {
3165                         op_t   *op = (op_t*)get_irn_link(n);
3166                         assert(op && op->is_remat);
3167
3168                         if(op->attr.remat.pre) {
3169                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
3170                         } else {
3171                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
3172                         }
3173
3174                         return 1;
3175                 }
3176         }
3177
3178         return 0;
3179 }
3180
3181 static void
3182 dump_graph_with_remats(ir_graph * irg, const char * suffix)
3183 {
3184         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
3185         be_dump(irg, suffix, dump_ir_block_graph_sched);
3186         set_dump_node_vcgattr_hook(NULL);
3187 }
3188
3189 /**
3190  * Edge hook to dump the schedule edges with annotated register pressure.
3191  */
3192 static int
3193 sched_pressure_edge_hook(FILE *F, ir_node *irn)
3194 {
3195         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
3196                 ir_node *prev = sched_prev(irn);
3197                 fprintf(F, "edge:{sourcename:\"");
3198                 PRINT_NODEID(irn);
3199                 fprintf(F, "\" targetname:\"");
3200                 PRINT_NODEID(prev);
3201                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
3202                 fprintf(F, "\" color:magenta}\n");
3203         }
3204         return 1;
3205 }
3206
3207 static void
3208 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
3209 {
3210         DUMP_NODE_EDGE_FUNC old_edge_hook = get_dump_node_edge_hook();
3211
3212         dump_consts_local(0);
3213         set_dump_node_edge_hook(sched_pressure_edge_hook);
3214         dump_ir_block_graph(irg, suffix);
3215         set_dump_node_edge_hook(old_edge_hook);
3216 }
3217
3218 static void
3219 walker_pressure_annotator(ir_node * bb, void * data)
3220 {
3221         spill_ilp_t  *si = data;
3222         ir_node      *irn;
3223         int           n, i;
3224         pset         *live = pset_new_ptr_default();
3225         int           projs = 0;
3226
3227         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
3228                 irn = be_lv_get_irn(si->lv, bb, i);
3229
3230                 if (has_reg_class(si, irn)) {
3231                         pset_insert_ptr(live, irn);
3232                 }
3233         }
3234
3235         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
3236
3237         sched_foreach_reverse(bb, irn) {
3238                 if(is_Phi(irn)) {
3239                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
3240                         continue;
3241                 }
3242
3243                 if(has_reg_class(si, irn)) {
3244                         pset_remove_ptr(live, irn);
3245                         if(is_Proj(irn)) ++projs;
3246                 }
3247
3248                 if(!is_Proj(irn)) projs = 0;
3249
3250                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
3251                         ir_node    *arg = get_irn_n(irn, n);
3252
3253                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
3254                 }
3255                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
3256         }
3257
3258         del_pset(live);
3259 }
3260
3261 static void
3262 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
3263 {
3264         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
3265 }
3266
3267 static void
3268 connect_all_remats_with_keep(spill_ilp_t * si)
3269 {
3270         ir_node   *irn;
3271         ir_node  **ins,
3272                          **pos;
3273         int        n_remats;
3274
3275
3276         n_remats = pset_count(si->all_possible_remats);
3277         if(n_remats) {
3278                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
3279
3280                 pos = ins;
3281                 pset_foreach(si->all_possible_remats, irn) {
3282                         *pos = irn;
3283                         ++pos;
3284                 }
3285
3286                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
3287
3288                 obstack_free(si->obst, ins);
3289         }
3290 }
3291
3292 static void
3293 connect_all_spills_with_keep(spill_ilp_t * si)
3294 {
3295         ir_node   *irn;
3296         ir_node  **ins,
3297                          **pos;
3298         int        n_spills;
3299         ir_node   *keep;
3300
3301
3302         n_spills = pset_count(si->spills);
3303         if(n_spills) {
3304                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
3305
3306                 pos = ins;
3307                 pset_foreach(si->spills, irn) {
3308                         *pos = irn;
3309                         ++pos;
3310                 }
3311
3312                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
3313
3314                 obstack_free(si->obst, ins);
3315         }
3316 }
3317
3318 /** insert a spill at an arbitrary position */
3319 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert)
3320 {
3321         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
3322         ir_graph *irg   = get_irn_irg(bl);
3323         ir_node *frame  = get_irg_frame(irg);
3324         ir_node *spill;
3325         ir_node *next;
3326
3327         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
3328         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
3329
3330         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn);
3331
3332         /*
3333          * Search for the right insertion point. A spill of a Phi cannot be put
3334          * directly after the Phi if there are other Phis scheduled behind the
3335          * one being spilled. Also, a spill of a Proj must be placed after all
3336          * Projs of the same tuple node.
3337          *
3338          * One special case:
3339          * If the spill is in the start block, it must come after the frame
3340          * pointer has been set up; in that case the insertion point is moved
3341          * to the frame node (see below).
3342          */
3343
3344         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
3345                 insert = frame;
3346
3347         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
3348                 insert = next;
3349
3350         sched_add_after(insert, spill);
3351         return spill;
3352 }
3353
3354 static void
3355 delete_remat(spill_ilp_t * si, ir_node * remat) {
3356         int       n;
3357         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3358
3359         sched_remove(remat);
3360
3361         /* kill links to operands */
3362         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
3363                 set_irn_n(remat, n, bad);
3364         }
3365 }
3366
3367 static void
3368 clean_remat_info(spill_ilp_t * si)
3369 {
3370         int            n;
3371         remat_t       *remat;
3372         remat_info_t  *remat_info;
3373         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
3374
3375         set_foreach(si->remat_info, remat_info) {
3376                 if(!remat_info->remats) continue;
3377
3378                 pset_foreach(remat_info->remats, remat)
3379                 {
3380                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
3381                                 set_irn_n((ir_node*)remat->proj, -1, bad);
3382                                 set_irn_n((ir_node*)remat->proj, 0, bad);
3383                         }
3384
3385                         if(get_irn_n_edges(remat->op) == 0) {
3386                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
3387                                         set_irn_n((ir_node*)remat->op, n, bad);
3388                                 }
3389                         }
3390                 }
3391
3392                 if(remat_info->remats) del_pset(remat_info->remats);
3393                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
3394         }
3395 }
3396
3397 static void
3398 delete_unnecessary_remats(spill_ilp_t * si)
3399 {
3400         if(opt_keep_alive & KEEPALIVE_REMATS) {
3401                 int       n;
3402                 ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3403
3404                 if(si->keep) {
3405 //                      ir_node   *end = get_irg_end(si->chordal_env->irg);
3406 //                      ir_node  **keeps;
3407
3408                         for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
3409                                 ir_node        *keep_arg = get_irn_n(si->keep, n);
3410                                 op_t           *arg_op = get_irn_link(keep_arg);
3411                                 lpp_name_t     *name;
3412
3413                                 assert(arg_op->is_remat);
3414
3415                                 name = si->lpp->vars[arg_op->attr.remat.ilp];
3416
3417                                 if(is_zero(name->value)) {
3418                                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
3419                                         /* TODO check whether reload is preferred over remat (could be bug) */
3420                                         delete_remat(si, keep_arg);
3421                                 } else {
3422                                         if(!arg_op->attr.remat.remat->inverse) {
3423                                                 if(arg_op->attr.remat.pre) {
3424                                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
3425                                                 } else {
3426                                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
3427                                                 }
3428                                         } else {
3429                                                 if(arg_op->attr.remat.pre) {
3430                                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
3431                                                 } else {
3432                                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
3433                                                 }
3434                                         }
3435                                 }
3436
3437                                 set_irn_n(si->keep, n, bad);
3438                         }
3439 #if 0
3440                         for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
3441                                 ir_node        *end_arg = get_End_keepalive(end, i);
3442
3443                                 if(end_arg != si->keep) {
3444                                         obstack_grow(si->obst, &end_arg, sizeof(end_arg));
3445                                 }
3446                         }
3447                         keeps = obstack_finish(si->obst);
3448                         set_End_keepalives(end, n-1, keeps);
3449                         obstack_free(si->obst, keeps);
3450 #endif
3451                 } else {
3452                         DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
3453                 }
3454         } else {
3455                 ir_node  *remat;
3456
3457                 pset_foreach(si->all_possible_remats, remat) {
3458                         op_t           *remat_op = get_irn_link(remat);
3459                         lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
3460
3461                         if(is_zero(name->value)) {
3462                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
3463                                 /* TODO check whether reload is preferred over remat (could be bug) */
3464                                 delete_remat(si, remat);
3465                         } else {
3466                                 if(!remat_op->attr.remat.remat->inverse) {
3467                                         if(remat_op->attr.remat.pre) {
3468                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3469                                         } else {
3470                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3471                                         }
3472                                 } else {
3473                                         if(remat_op->attr.remat.pre) {
3474                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3475                                         } else {
3476                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3477                                         }
3478                                 }
3479                         }
3480                 }
3481         }
3482 }
3483
3484 static pset *
3485 get_spills_for_value(spill_ilp_t * si, const ir_node * value)
3486 {
3487         pset     *spills = pset_new_ptr_default();
3488
3489         const ir_node  *next;
3490         defs_t         *defs;
3491
3492         defs = set_find_def(si->values, value);
3493
3494         if(defs && defs->spills) {
3495                 for(next = defs->spills; next; next = get_irn_link(next)) {
3496                         pset_insert_ptr(spills, next);
3497                 }
3498         }
3499
3500         return spills;
3501 }
3502
3503 /**
3504  * @param before   The node after which the spill will be placed in the schedule
3505  */
3506 static ir_node *
3507 insert_spill(spill_ilp_t * si, ir_node * irn, const ir_node * value, ir_node * before)
3508 {
3509         defs_t   *defs;
3510         ir_node  *spill;
3511         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3512
3513         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3514
3515         spill = be_spill2(arch_env, irn, before);
3516
3517         defs = set_insert_def(si->values, value);
3518         assert(defs);
3519
3520         /* enter into the linked list */
3521         set_irn_link(spill, defs->spills);
3522         defs->spills = spill;
3523
3524         if(opt_keep_alive & KEEPALIVE_SPILLS)
3525                 pset_insert_ptr(si->spills, spill);
3526
3527         return spill;
3528 }
3529
3530 /**
3531  * @param phi   The Phi node which has to be spilled
3532  */
3533 static ir_node *
3534 insert_mem_phi(spill_ilp_t * si, ir_node * phi)
3535 {
3536         ir_node   *mem_phi;
3537         ir_node  **ins;
3538         defs_t    *defs;
3539         int        n;
3540
3541         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3542
3543         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3544                 ins[n] = si->m_unknown;
3545         }
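        /* the placeholder operands (si->m_unknown) are replaced with the actual
         * spills of the Phi arguments later on, in phim_fixer() */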
3546
3547         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3548
3549         defs = set_insert_def(si->values, phi);
3550         assert(defs);
3551
3552         /* enter into the linked list */
3553         set_irn_link(mem_phi, defs->spills);
3554         defs->spills = mem_phi;
3555
3556 #ifdef SCHEDULE_PHIM
3557         sched_add_after(phi, mem_phi);
3558 #endif
3559
3560         if(opt_keep_alive & KEEPALIVE_SPILLS)
3561                 pset_insert_ptr(si->spills, mem_phi);
3562
3563
3564         return mem_phi;
3565 }
3566
3567 /**
3568  * Add remat to list of defs, destroys link field!
3569  */
3570 static void
3571 insert_remat(spill_ilp_t * si, ir_node * remat)
3572 {
3573         defs_t   *defs;
3574         op_t     *remat_op = get_irn_link(remat);
3575
3576         assert(remat_op->is_remat);
3577
3578         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3579         assert(defs);
3580
3581         /* enter into the linked list */
3582         set_irn_link(remat, defs->remats);
3583         defs->remats = remat;
3584 }
3585
3586
3587 /**
3588  * Add reload before operation and add to list of defs
3589  */
3590 static ir_node *
3591 insert_reload(spill_ilp_t * si, const ir_node * value, ir_node * after)
3592 {
3593         defs_t   *defs;
3594         ir_node  *reload,
3595                          *spill;
3596         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3597
3598         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3599
3600         defs = set_find_def(si->values, value);
3601
3602         spill = defs->spills;
3603         assert(spill && "no spill placed before reload");
3604
3605         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3606
3607         /* enter into the linked list */
3608         set_irn_link(reload, defs->remats);
3609         defs->remats = reload;
3610
3611         return reload;
3612 }
3613
3614 void perform_memory_operand(spill_ilp_t * si, memoperand_t * memoperand)
3615 {
3616         defs_t           *defs;
3617         ir_node          *value = get_irn_n(memoperand->irn, memoperand->pos);
3618         ir_node          *spill;
3619         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3620
3621         DBG((si->dbg, LEVEL_2, "\t  inserting memory operand for value %+F at %+F\n", value, memoperand->irn));
3622
3623         defs = set_find_def(si->values, value);
3624
3625         spill = defs->spills;
3626         assert(spill && "no spill placed before reload");
3627
3628         arch_perform_memory_operand(arch_env, memoperand->irn, spill, memoperand->pos);
3629 }
3630
3631 void insert_memoperands(spill_ilp_t * si)
3632 {
3633         memoperand_t   *memoperand;
3634         lpp_name_t     *name;
3635
3636         set_foreach(si->memoperands, memoperand) {
3637                 name = si->lpp->vars[memoperand->ilp];
3638                 if(!is_zero(name->value)) {
3639                         perform_memory_operand(si, memoperand);
3640                 }
3641         }
3642 }
3643
3644 static void
3645 walker_spill_placer(ir_node * bb, void * data) {
3646         spill_ilp_t   *si = (spill_ilp_t*)data;
3647         ir_node       *irn;
3648         spill_bb_t    *spill_bb = get_irn_link(bb);
3649         pset          *spills_to_do = pset_new_ptr_default();
3650         spill_t       *spill;
3651
3652         set_foreach(spill_bb->ilp, spill) {
3653                 lpp_name_t    *name;
3654
3655                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3656                         name = si->lpp->vars[spill->mem_in];
3657                         if(!is_zero(name->value)) {
3658                                 ir_node   *mem_phi;
3659
3660                                 mem_phi = insert_mem_phi(si, spill->irn);
3661
3662                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3663                         }
3664                 }
3665
3666                 name = si->lpp->vars[spill->spill];
3667                 if(!is_zero(name->value)) {
3668                         /* place spill directly after definition */
3669                         if(get_nodes_block(spill->irn) == bb) {
3670                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3671                                 continue;
3672                         }
3673
3674                         /* place spill at bb start */
3675                         if(spill->reg_in > 0) {
3676                                 name = si->lpp->vars[spill->reg_in];
3677                                 if(!is_zero(name->value)) {
3678                                         insert_spill(si, spill->irn, spill->irn, bb);
3679                                         continue;
3680                                 }
3681                         }
3682                         /* place spill after a remat */
3683                         pset_insert_ptr(spills_to_do, spill->irn);
3684                 }
3685         }
3686         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3687
3688
3689         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3690                 op_t     *op = get_irn_link(irn);
3691
3692                 if(be_is_Spill(irn)) continue;
3693
3694                 if(op->is_remat) {
3695                         /* TODO fix this if we want to support remats with more than two nodes */
3696                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3697                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3698
3699                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3700                         }
3701                 } else {
3702                         if(pset_find_ptr(spills_to_do, irn)) {
3703                                 pset_remove_ptr(spills_to_do, irn);
3704
3705                                 insert_spill(si, irn, irn, irn);
3706                         }
3707                 }
3708
3709         }
3710
3711         assert(pset_count(spills_to_do) == 0);
3712
3713         /* afterwards free data in block */
3714         del_pset(spills_to_do);
3715 }
3716
3717 static ir_node *
3718 insert_mem_copy(spill_ilp_t * si, ir_node * bb, ir_node * value)
3719 {
3720         ir_node          *insert_pos = bb;
3721         ir_node          *spill;
3722         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3723
3724         /* find last definition of arg value in block */
3725         ir_node  *next;
3726         defs_t   *defs;
3727         int       last = 0;
3728
3729         defs = set_find_def(si->values, value);
3730
3731         if(defs && defs->remats) {
3732                 for(next = defs->remats; next; next = get_irn_link(next)) {
3733                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3734                                 last = sched_get_time_step(next);
3735                                 insert_pos = next;
3736                         }
3737                 }
3738         }
3739
3740         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3741                 last = sched_get_time_step(value);
3742                 insert_pos = value;
3743         }
3744
3745         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3746
3747         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos);
3748
3749         return spill;
3750 }
3751
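/**
 * Fix the operands of the memory Phis created by insert_mem_phi(): replace
 * each placeholder operand with the spill of the corresponding Phi argument,
 * or with a memory copy in the predecessor block where the ILP solution
 * requests one (opt_memcopies).
 */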
3752 static void
3753 phim_fixer(spill_ilp_t *si) {
3754         defs_t  *defs;
3755
3756         set_foreach(si->values, defs) {
3757                 const ir_node  *phi = defs->value;
3758                 op_t           *op = get_irn_link(phi);
3759                 ir_node        *phi_m = NULL;
3760                 ir_node        *next = defs->spills;
3761                 int             n;
3762
3763                 if(!is_Phi(phi)) continue;
3764
3765                 while(next) {
3766                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3767                                 phi_m = next;
3768                                 break;
3769                         } else {
3770                                 next = get_irn_link(next);
3771                         }
3772                 }
3773                 if(!phi_m) continue;
3774
3775                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3776                         ir_node        *value = get_irn_n(phi, n);
3777                         defs_t         *val_defs = set_find_def(si->values, value);
3778
3779                         /* a spill of this value */
3780                         ir_node      *spill;
3781
3782
3783                         if(opt_memcopies) {
3784                                 ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3785                                 lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3786
3787                                 if(!is_zero(name->value)) {
3788                                         spill = insert_mem_copy(si, pred, value);
3789                                 } else {
3790                                         spill = val_defs->spills;
3791                                 }
3792                         } else {
3793                                 spill = val_defs->spills;
3794                         }
3795
3796                         assert(spill && "no spill placed before PhiM");
3797                         set_irn_n(phi_m, n, spill);
3798                 }
3799         }
3800 }
3801
3802 static void
3803 walker_reload_placer(ir_node * bb, void * data) {
3804         spill_ilp_t   *si = (spill_ilp_t*)data;
3805         ir_node       *irn;
3806         spill_bb_t    *spill_bb = get_irn_link(bb);
3807
3808         /* reloads at end of block */
3809         if(spill_bb->reloads) {
3810                 keyval_t    *keyval;
3811
3812                 set_foreach(spill_bb->reloads, keyval) {
3813                         ir_node        *irn = (ir_node*)keyval->key;
3814                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3815                         lpp_name_t     *name;
3816
3817                         name = si->lpp->vars[reload];
3818                         if(!is_zero(name->value)) {
3819                                 ir_node    *reload;
3820                                 ir_node    *insert_pos = bb;
3821                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3822                                 op_t       *prev_op = get_irn_link(prev);
3823
3824                                 while(be_is_Spill(prev)) {
3825                                         prev = sched_prev(prev);
3826                                 }
3827
3828                                 prev_op = get_irn_link(prev);
3829
3830                                 /* insert reload before pre-remats */
3831                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3832                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3833                                         insert_pos = prev;
3834
3835                                         do {
3836                                                 prev = sched_prev(prev);
3837                                         } while(be_is_Spill(prev));
3838
3839                                         prev_op = get_irn_link(prev);
3840
3841                                 }
3842
3843                                 reload = insert_reload(si, irn, insert_pos);
3844
3845                                 if(opt_keep_alive & KEEPALIVE_RELOADS)
3846                                         pset_insert_ptr(si->spills, reload);
3847                         }
3848                 }
3849         }
3850
3851         /* walk and insert more reloads and collect remats */
3852         sched_foreach_reverse(bb, irn) {
3853                 op_t     *op = get_irn_link(irn);
3854
3855                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3856                 if(is_Phi(irn)) break;
3857
3858                 if(op->is_remat) {
3859                         if(get_irn_mode(irn) != mode_T) {
3860                                 insert_remat(si, irn);
3861                         }
3862                 } else {
3863                         int    n;
3864
3865                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3866                                 ir_node    *arg = get_irn_n(irn, n);
3867
3868                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3869                                         lpp_name_t    *name;
3870
3871                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3872                                         if(!is_zero(name->value)) {
3873                                                 ir_node    *reload;
3874                                                 ir_node    *insert_pos = irn;
3875                                                 ir_node    *prev = sched_prev(insert_pos);
3876                                                 op_t       *prev_op;
3877
3878                                                 while(be_is_Spill(prev)) {
3879                                                         prev = sched_prev(prev);
3880                                                 }
3881
3882                                                 prev_op = get_irn_link(prev);
3883
3884                                                 /* insert reload before pre-remats */
3885                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3886                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3887                                                         insert_pos = prev;
3888
3889                                                         do {
3890                                                                 prev = sched_prev(prev);
3891                                                         } while(be_is_Spill(prev));
3892
3893                                                         prev_op = get_irn_link(prev);
3894
3895                                                 }
3896
3897                                                 reload = insert_reload(si, arg, insert_pos);
3898
3899                                                 set_irn_n(irn, n, reload);
3900
3901                                                 if(opt_keep_alive & KEEPALIVE_RELOADS)
3902                                                         pset_insert_ptr(si->spills, reload);
3903                                         }
3904                                 }
3905                         }
3906                 }
3907         }
3908
3909         del_set(spill_bb->ilp);
3910         if(spill_bb->reloads) del_set(spill_bb->reloads);
3911 }
3912
3913 static void
3914 walker_collect_used(ir_node * irn, void * data)
3915 {
3916         bitset_t   *used = data;
3917
3918         bitset_set(used, get_irn_idx(irn));
3919 }
3920
3921 struct kill_helper {
3922         bitset_t  *used;
3923         spill_ilp_t  *si;
3924 };
3925
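/**
 * Block walker: remove every scheduled node that is not marked in kh->used
 * from the schedule and detach it from the graph (block and operands are set
 * to Bad). An unused Spill or Reload is reported, since it hints at a
 * suboptimal ILP solution.
 */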
3926 static void
3927 walker_kill_unused(ir_node * bb, void * data)
3928 {
3929         struct kill_helper *kh = data;
3930         ir_node            *bad = get_irg_bad(get_irn_irg(bb));
3931         ir_node            *irn;
3932
3933
3934         for(irn=sched_first(bb); !sched_is_end(irn);) {
3935                 ir_node     *next = sched_next(irn);
3936                 int          n;
3937
3938                 if(!bitset_is_set(kh->used, get_irn_idx(irn))) {
3939                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3940                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3941 #if 0
3942                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3943 #endif
3944                         }
3945
3946                         sched_remove(irn);
3947
3948                         set_nodes_block(irn, bad);
3949                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3950                                 set_irn_n(irn, n, bad);
3951                         }
3952                 }
3953                 irn = next;
3954         }
3955 }
3956
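/**
 * Remove all nodes from the schedule that are no longer reachable in the
 * graph, e.g. spills and reloads that lost all their users during rewiring.
 */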
3957 static void
3958 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3959 {
3960         struct kill_helper kh;
3961
3962         kh.used = bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3963         kh.si = si;
3964
3965         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3966         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3967
3968         bitset_free(kh.used);
3969 }
3970
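/**
 * Print all nodes of the given pset (debugging helper).
 */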
3971 void
3972 print_irn_pset(pset * p)
3973 {
3974         ir_node   *irn;
3975
3976         pset_foreach(p, irn) {
3977                 ir_printf("%+F\n", irn);
3978         }
3979 }
3980
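/**
 * Dump the given phi class as a dot graph to the given file: Phis are drawn
 * as boxes, their arguments as edges and recorded interferences within the
 * class as bold red edges.
 */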
3981 void
3982 dump_phi_class(spill_ilp_t * si, pset * phiclass, const char * file)
3983 {
3984     FILE           *f = fopen(file, "w");
3985     ir_node        *irn;
3986     interference_t *interference;
3987
3988     pset_break(phiclass);
3989     set_break(si->interferences);
3990
3991     ir_fprintf(f, "digraph phiclass {\n");
3992
3993     pset_foreach(phiclass, irn) {
3994         if(is_Phi(irn))
3995             ir_fprintf(f, "  %F%N [shape=box]\n",irn,irn);
3996     }
3997
3998     pset_foreach(phiclass, irn) {
3999         int n;
4000
4001         if(!is_Phi(irn)) continue;
4002
4003         for(n=get_irn_arity(irn)-1; n>=0; --n) {
4004             ir_node  *arg = get_irn_n(irn, n);
4005
4006             ir_fprintf(f, "  %F%N -> %F%N\n",irn,irn,arg,arg);
4007         }
4008     }
4009
4010     set_foreach(si->interferences, interference) {
4011         const ir_node  *a  = interference->a;
4012         const ir_node  *b  = interference->b;
4013         if(get_phi_class(a) == phiclass) {
4014             ir_fprintf(f, "  %F%N -> %F%N [color=red,dir=none,style=bold]\n",a,a,b,b);
4015         }
4016     }
4017
4018     ir_fprintf(f, "}\n");
4019     fclose(f);
4020 }
4021
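/**
 * Re-establish SSA form after spilling: if a value got several spills, SSA
 * construction is run over the set of spills; afterwards the uses of each
 * value are rewired to the dominating definition among the original value,
 * its remats and its reloads (dominance-frontier based SSA construction).
 */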
4022 static void
4023 rewire_uses(spill_ilp_t * si)
4024 {
4025         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
4026         defs_t               *defs;
4027         pset                 *ignore = pset_new_ptr(1);
4028
4029         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
4030
4031         /* first fix uses of spills */
4032         set_foreach(si->values, defs) {
4033                 pset           *reloads;
4034                 pset           *spills;
4035                 const ir_node  *next = defs->remats;
4036                 int remats = 0;
4037
4038                 reloads = pset_new_ptr_default();
4039
4040                 while(next) {
4041                         if(be_is_Reload(next)) {
4042                                 pset_insert_ptr(reloads, next);
4043                         } else {
4044                                 ++remats;
4045                         }
4046                         next = get_irn_link(next);
4047                 }
4048
4049                 spills = get_spills_for_value(si, defs->value);
4050                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
4051                 if(pset_count(spills) > 1) {
4052                         //assert(pset_count(reloads) > 0);
4053                         //print_irn_pset(spills);
4054                         //print_irn_pset(reloads);
4055
4056                         be_ssa_constr_set_ignore(dfi, si->lv, spills, ignore);
4057                 }
4058
4059                 del_pset(reloads);
4060                 del_pset(spills);
4061         }
4062
4063         /* then fix uses of remats and reloads */
4064         set_foreach(si->values, defs) {
4065                 pset           *nodes;
4066                 const ir_node  *next = defs->remats;
4067
4068                 if(next) {
4069                         nodes = pset_new_ptr_default();
4070                         pset_insert_ptr(nodes, defs->value);
4071
4072                         while(next) {
4073                                 pset_insert_ptr(nodes, next);
4074                                 next = get_irn_link(next);
4075                         }
4076
4077                         if(pset_count(nodes) > 1) {
4078                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
4079                                 be_ssa_constr_set(dfi, si->lv, nodes);
4080                         }
4081
4082                         del_pset(nodes);
4083                 }
4084         }
4085
4086 //      remove_unused_defs(si);
4087
4088         be_free_dominance_frontiers(dfi);
4089 }
4090
4091
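/**
 * Apply the ILP solution to the graph: delete remats the solution did not
 * select, place spills and reloads, insert memory operands if enabled, run
 * the PhiM fixer and finally rewire all uses.
 */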
4092 static void
4093 writeback_results(spill_ilp_t * si)
4094 {
4095         /* walk through the graph and collect all spills, reloads and remats for a value */
4096
4097         si->values = new_set(cmp_defs, 4096);
4098
4099         DBG((si->dbg, LEVEL_1, "Applying results\n"));
4100         delete_unnecessary_remats(si);
4101         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
4102         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
4103         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
4104         if(opt_memoperands)
4105                 insert_memoperands(si);
4106         phim_fixer(si);
4107
4108         /* clean the remat info! there are still back-edges leading there! */
4109         clean_remat_info(si);
4110
4111         rewire_uses(si);
4112
4113         connect_all_spills_with_keep(si);
4114
4115         del_set(si->values);
4116 }
4117
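/**
 * Return the number of registers of the current class that are available for
 * allocation, i.e. the non-ignore registers minus the registers reserved by
 * the ABI.
 */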
4118 static int
4119 get_n_regs(spill_ilp_t * si)
4120 {
4121         int       arch_n_regs = arch_register_class_n_regs(si->cls);
4122
4123         bitset_t *arch_regs = bitset_malloc(arch_n_regs);
4124         bitset_t *abi_regs = bitset_malloc(arch_n_regs);
4125
4126         arch_put_non_ignore_regs(si->chordal_env->birg->main_env->arch_env, si->cls, arch_regs);
4127         be_abi_put_ignore_regs(si->chordal_env->birg->abi, si->cls, abi_regs);
4128
4129         bitset_andnot(arch_regs, abi_regs);
4130         arch_n_regs = bitset_popcnt(arch_regs);
4131
4132         bitset_free(arch_regs);
4133         bitset_free(abi_regs);
4134
4135         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", arch_n_regs, si->cls->name));
4136         return arch_n_regs;
4137 }
4138
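/**
 * Block walker: move every reload of the current class upwards in the
 * schedule as long as the register pressure recorded in the node links stays
 * below the number of available registers. A reload is never moved across a
 * Phi, another reload of this class or its own memory operand (its spill).
 */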
4139 static void
4140 walker_reload_mover(ir_node * bb, void * data)
4141 {
4142         spill_ilp_t   *si = data;
4143         ir_node           *tmp;
4144
4145         sched_foreach(bb, tmp) {
4146                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
4147                         ir_node       *reload = tmp;
4148                         ir_node       *irn = tmp;
4149
4150                         /* move reload upwards */
4151
4152                         int pressure = (int)get_irn_link(reload);
4153                         if(pressure < si->n_regs) {
4154                                 irn = sched_prev(reload);
4155                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
4156                                 sched_remove(reload);
4157                                 pressure = (int)get_irn_link(irn);
4158
4159                                 while(pressure < si->n_regs) {
4160                                         if( sched_is_end(irn) ||
4161                                            (be_is_Reload(irn) && has_reg_class(si, irn)) ||
4162                                            /* do not move reload before its spill */
4163                                            (irn == be_get_Reload_mem(reload)) ||
4164                                            /* do not move before phi */
4165                                            is_Phi(irn)) break;
4166
4167                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
4168                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
4169                                         irn = sched_prev(irn);
4170
4171                                         pressure = (int)get_irn_link(irn);
4172                                 }
4173
4174                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
4175                                 sched_put_after(irn, reload);
4176                         }
4177                 }
4178         }
4179 }
4180
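/**
 * Move reloads upwards in all blocks as far as the register pressure permits
 * (see walker_reload_mover).
 */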
4181 static void
4182 move_reloads_upward(spill_ilp_t * si)
4183 {
4184         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
4185 }
4186
4187
4188 /**
4189  * Walk all irg blocks and check for interfering spills inside of phi classes
4190  */
4191 static void
4192 luke_meminterferencechecker(ir_node * bb, void * data)
4193 {
4194         spill_ilp_t    *si = (spill_ilp_t*)data;
4195         int             l1, l2;
4196
4197         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
4198                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
4199
4200                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_T)) continue;
4201
4202                 /* a is only interesting if it is in my register class and if it is inside a phi class */
4203                 if (has_reg_class(si, a) && get_phi_class(a)) {
4204                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
4205                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
4206
4207                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_T)) continue;
4208
4209                                 /* a and b are only interesting if they are in the same phi class */
4210                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
4211                                         if(values_interfere_in_block(si, bb, a, b)) {
4212                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
4213                                         }
4214                                 }
4215                         }
4216                 }
4217         }
4218 }
4219
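/**
 * Recompute the phi classes and run the memory interference checker on all
 * blocks.
 */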
4220 static void
4221 verify_phiclasses(spill_ilp_t * si)
4222 {
4223         /* analyze phi classes */
4224         phi_class_compute(si->chordal_env->irg);
4225
4226         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
4227         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
4228 }
4229
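/**
 * Main entry point of the ILP based spiller: collect rematerialization
 * candidates, build an ILP modelling spills, reloads and remats for the given
 * register class, solve it and write the solution back into the graph.
 */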
4230 void
4231 be_spill_remat(const be_chordal_env_t * chordal_env)
4232 {
4233         char            buf[256];
4234         char            problem_name[256];
4235         char            dump_suffix[256];
4236         char            dump_suffix2[256];
4237         struct obstack  obst;
4238         spill_ilp_t     si;
4239
4240         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
4241         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
4242         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
4243
4244         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
4245         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
4246
4247         if(opt_verify & VERIFY_DOMINANCE)
4248                 be_check_dominance(chordal_env->irg);
4249
4250         obstack_init(&obst);
4251         si.chordal_env = chordal_env;
4252         si.obst = &obst;
4253         si.cls = chordal_env->cls;
4254         si.lpp = new_lpp(problem_name, lpp_minimize);
4255         si.remat_info = new_set(cmp_remat_info, 4096);
4256         si.interferences = new_set(cmp_interference, 32);
4257         si.memoperands = new_set(cmp_memoperands, 128);
4258         si.all_possible_remats = pset_new_ptr_default();
4259         si.spills = pset_new_ptr_default();
4260         si.inverse_ops = pset_new_ptr_default();
4261         si.lv = chordal_env->lv;
4262         si.keep = NULL;
4263         si.n_regs = get_n_regs(&si);
4264
4265         set_irg_link(chordal_env->irg, &si);
4266         compute_doms(chordal_env->irg);
4267
4268         /* compute phi classes */
4269 //      phi_class_compute(chordal_env->irg);
4270
4271         be_analyze_regpressure(chordal_env, "-pre");
4272
4273         if(opt_remats) {
4274                 /* collect remats */
4275                 DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
4276                 irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
4277         }
4278
4279         /* insert possible remats */
4280         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
4281         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
4282         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
4283
4284         if(opt_keep_alive & KEEPALIVE_REMATS) {
4285                 DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
4286                 connect_all_remats_with_keep(&si);
4287                 /* dump graph with inserted remats */
4288                 dump_graph_with_remats(chordal_env->irg, dump_suffix);
4289         }
4290
4291         /* insert copies for phi arguments not in my regclass */
4292         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
4293
4294         /* recompute liveness */
4295         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
4296         be_liveness_recompute(si.lv);
4297
4298         /* build the ILP */
4299
4300         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
4301         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
4302         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
4303
4304         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
4305         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
4306
4307         if(opt_memcopies) {
4308                 DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
4309                 memcopyhandler(&si);
4310         }
4311
4312         if(opt_dump_flags & DUMP_PROBLEM) {
4313                 FILE           *f;
4314                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
4315                 if ((f = fopen(buf, "wt")) != NULL) {
4316                         lpp_dump_plain(si.lpp, f);
4317                         fclose(f);
4318                 }
4319         }
4320
4321         if(opt_dump_flags & DUMP_MPS) {
4322                 FILE *f;
4323
4324                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.mps", problem_name);
4325                 if((f = fopen(buf, "wt")) != NULL) {
4326                         mps_write_mps(si.lpp, s_mps_fixed, f);
4327                         fclose(f);
4328                 }
4329
4330                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.mst", problem_name);
4331                 if((f = fopen(buf, "wt")) != NULL) {
4332                         mps_write_mst(si.lpp, s_mps_fixed, f);
4333                         fclose(f);
4334                 }
4335         }
4336
4337         lpp_check_startvals(si.lpp);
4338
4339 #ifdef SOLVE
4340         DBG((si.dbg, LEVEL_1, "\tSolving %s (%d variables, %d constraints)\n", problem_name, si.lpp->var_next, si.lpp->cst_next));
4341         lpp_set_time_limit(si.lpp, opt_timeout);
4342
4343         if(opt_log)
4344                 lpp_set_log(si.lpp, stdout);
4345
4346 #ifdef SOLVE_LOCAL
4347         lpp_solve_cplex(si.lpp);
4348 #else
4349         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
4350 #endif
4351         assert(lpp_is_sol_valid(si.lpp)
4352                && "solution of ILP must be valid");
4353
4354         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
4355
4356         if(opt_dump_flags & DUMP_SOLUTION) {
4357                 FILE           *f;
4358                 char            buf[256];
4359
4360                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
4361                 if ((f = fopen(buf, "wt")) != NULL) {
4362                         int             i;
4363                         for (i = 0; i < si.lpp->var_next; ++i) {
4364                                 lpp_name_t     *name = si.lpp->vars[i];
4365                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
4366                         }
4367                         fclose(f);
4368                 }
4369         }
4370
4371         writeback_results(&si);
4372
4373 #endif                          /* SOLVE */
4374
4375         kill_all_unused_values_in_schedule(&si);
4376
4377         if(opt_keep_alive & (KEEPALIVE_SPILLS | KEEPALIVE_RELOADS))
4378                 be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
4379
4380         // move reloads upwards
4381         be_liveness_recompute(si.lv);
4382         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4383         move_reloads_upward(&si);
4384
4385         if(opt_memcopies) {
4386                 verify_phiclasses(&si);
4387         }
4388
4389         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4390
4391         dump_pressure_graph(&si, dump_suffix2);
4392
4393         be_analyze_regpressure(chordal_env, "-post");
4394
4395         if(opt_verify & VERIFY_DOMINANCE)
4396                 be_check_dominance(chordal_env->irg);
4397
4398         free_dom(chordal_env->irg);
4399         del_set(si.interferences);
4400         del_pset(si.inverse_ops);
4401         del_pset(si.all_possible_remats);
4402         del_set(si.memoperands);
4403         del_pset(si.spills);
4404         free_lpp(si.lpp);
4405         obstack_free(&obst, NULL);
4406         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
4407 }
4408
4409 #else                           /* WITH_ILP */
4410
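/**
 * Dummy so that this translation unit is not empty when WITH_ILP is not
 * defined.
 */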
4411 static void
4412 only_that_you_can_compile_without_WITH_ILP_defined(void)
4413 {
4414 }
4415
4416 #endif                          /* WITH_ILP */