1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35 #include "irvrfy.h"
36
37 #include <lpp/lpp.h>
38 #include <lpp/lpp_net.h>
39 #include <lpp/lpp_cplex.h>
40 //#include <lc_pset.h>
41 #include <libcore/lc_bitset.h>
42
43 #include "be_t.h"
44 #include "belive_t.h"
45 #include "besched_t.h"
46 #include "beirgmod.h"
47 #include "bearch.h"
48 #include "benode_t.h"
49 #include "beutil.h"
50 #include "bespillremat.h"
51 #include "bespill.h"
52 #include "bepressurestat.h"
53
54 #include "bechordal_t.h"
55
56 #define BIGM 100000.0
57
58 #define DUMP_SOLUTION
59 #define DUMP_ILP
60 //#define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
61 #define COLLECT_REMATS /* enable rematerialization */
62 #define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
63 #define REMAT_WHILE_LIVE /* only remat values that are live */
64 //#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
65 //#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
66 #define MAY_DIE_AT_REMAT /* allow values to die after a pre remat */
67 #define NO_SINGLE_USE_REMATS /* only rematerialize values with more than one (non-remat) use */
68 //#define KEEPALIVE_SPILLS
69 //#define KEEPALIVE_RELOADS
70 #define GOODWIN_REDUCTION
71 //#define NO_MEMCOPIES
72
73 #define  SOLVE
74 //#define  SOLVE_LOCAL
75 #define LPP_SERVER "i44pc52"
76 #define LPP_SOLVER "cplex"
77
78 #define COST_LOAD      8
79 #define COST_STORE     50
80 #define COST_REMAT     1
81
82 #define ILP_TIMEOUT    120
83
84 #define ILP_UNDEF               -1
85
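/*
 * Informal overview of the ILP built below: for every value and block the
 * model gets binary variables reg_in/mem_in/reg_out/mem_out/spill (see
 * spill_t), plus reload variables (at block ends and per operand), remat
 * variables and live-range variables.  Spill, reload and remat variables
 * are weighted with their cost times the execution frequency of their
 * block, and constraints such as "check_end_<block>" bound the number of
 * simultaneously register-live values by n_regs.
 */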
86 typedef struct _spill_ilp_t {
87         const arch_register_class_t  *cls;
88         int                           n_regs;
89         const be_chordal_env_t       *chordal_env;
90         lpp_t                        *lpp;
91         struct obstack               *obst;
92         set                          *remat_info;
93         pset                         *all_possible_remats;
94         pset                         *inverse_ops;
95 #ifdef KEEPALIVE
96         ir_node                      *keep;
97 #endif
98         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
99         pset                         *spills;
100         set                          *interferences;
101         ir_node                      *m_unknown;
102         DEBUG_ONLY(firm_dbg_module_t * dbg);
103 } spill_ilp_t;
104
105 typedef int ilp_var_t;
106 typedef int ilp_cst_t;
107
108 typedef struct _spill_bb_t {
109         set      *ilp;
110         set      *reloads;
111 } spill_bb_t;
112
113 typedef struct _remat_t {
114         const ir_node        *op;      /**< for copy_irn */
115         const ir_node        *value;   /**< the value which is being recomputed by this remat */
116         ir_node              *proj;    /**< not NULL if the above op produces a tuple */
117         int                   cost;    /**< cost of this remat */
118         int                   inverse; /**< nonzero if this is an inverse remat */
119 } remat_t;
120
121 /**
122  * Data to be attached to each IR node. For remats this contains the ilp_var
123  * for this remat and for normal ops this contains the ilp_vars for
124  * reloading each operand
125  */
126 typedef struct _op_t {
127         int             is_remat;
128         union {
129                 struct {
130                         ilp_var_t       ilp;
131                         remat_t        *remat; /**< the remat this op belongs to */
132                         int             pre; /**< 1, if this is a pressure-increasing remat */
133                 } remat;
134                 struct {
135                         ilp_var_t       ilp;
136                         ir_node        *op; /**< the operation this live range belongs to */
137                         union {
138                                 ilp_var_t      *reloads;
139                                 ilp_var_t      *copies;
140                         } args;
141                 } live_range;
142         } attr;
143 } op_t;
144
145 typedef struct _defs_t {
146         ir_node   *value;
147         ir_node   *spills;  /**< points to the first spill for this value (linked by link field) */
148         ir_node   *remats;  /**< points to the first remat for this value (linked by link field) */
149 } defs_t;
150
151 typedef struct _remat_info_t {
152         const ir_node       *irn; /**< the irn to which these remats belong */
153         pset                *remats; /**< possible remats for this value */
154         pset                *remats_by_operand; /**< remats with this value as operand */
155 } remat_info_t;
156
157 typedef struct _keyval_t {
158         const void          *key;
159         const void          *val;
160 } keyval_t;
161
162 typedef struct _spill_t {
163         ir_node      *irn;
164         ilp_var_t     reg_in;
165         ilp_var_t     mem_in;
166         ilp_var_t     reg_out;
167         ilp_var_t     mem_out;
168         ilp_var_t     spill;
169 } spill_t;
170
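/**
 * Returns nonzero iff @p irn belongs to the register class this spill run works on
 */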
171 static INLINE int
172 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
173 {
174         return chordal_has_class(si->chordal_env, irn);
175 }
176
177 #if 0
178 static int
179 cmp_remat(const void *a, const void *b)
180 {
181         const keyval_t *p = a;
182         const keyval_t *q = b;
183         const remat_t  *r = p->val;
184         const remat_t  *s = q->val;
185
186         assert(r && s);
187
188         return !(r == s || r->op == s->op);
189 }
190 #endif
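/**
 * set compare function for remats: returns 0 iff both entries denote the same
 * remat (identical struct or same op)
 */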
191 static int
192 cmp_remat(const void *a, const void *b)
193 {
194         const remat_t  *r = a;
195         const remat_t  *s = b;
196
197         return !(r == s || r->op == s->op);
198 }
199
200 static int
201 cmp_spill(const void *a, const void *b, size_t size)
202 {
203         const spill_t *p = a;
204         const spill_t *q = b;
205
206 //      return !(p->irn == q->irn && p->bb == q->bb);
207         return !(p->irn == q->irn);
208 }
209
210 static keyval_t *
211 set_find_keyval(set * set, void * key)
212 {
213         keyval_t     query;
214
215         query.key = key;
216         return set_find(set, &query, sizeof(query), HASH_PTR(key));
217 }
218
219 static keyval_t *
220 set_insert_keyval(set * set, void * key, void * val)
221 {
222         keyval_t     query;
223
224         query.key = key;
225         query.val = val;
226         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
227 }
228
229 static defs_t *
230 set_find_def(set * set, ir_node * value)
231 {
232         defs_t     query;
233
234         query.value = value;
235         return set_find(set, &query, sizeof(query), HASH_PTR(value));
236 }
237
238 static defs_t *
239 set_insert_def(set * set, ir_node * value)
240 {
241         defs_t     query;
242
243         query.value = value;
244         query.spills = NULL;
245         query.remats = NULL;
246         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
247 }
248
249 static spill_t *
250 set_find_spill(set * set, ir_node * value)
251 {
252         spill_t     query;
253
254         query.irn = value;
255         return set_find(set, &query, sizeof(query), HASH_PTR(value));
256 }
257
258 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
259 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
260 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
261 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
262 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
263
264 static int
265 cmp_remat_info(const void *a, const void *b, size_t size)
266 {
267         const remat_info_t *p = a;
268         const remat_info_t *q = b;
269
270         return !(p->irn == q->irn);
271 }
272
273 static int
274 cmp_defs(const void *a, const void *b, size_t size)
275 {
276         const defs_t *p = a;
277         const defs_t *q = b;
278
279         return !(p->value == q->value);
280 }
281
282 static int
283 cmp_keyval(const void *a, const void *b, size_t size)
284 {
285         const keyval_t *p = a;
286         const keyval_t *q = b;
287
288         return !(p->key == q->key);
289 }
290
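/**
 * Returns the execution frequency of the block containing @p irn (plus a small
 * FUDGE term); with EXECFREQ_LOOPDEPH defined it is estimated as 10^loopdepth instead
 */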
291 static double
292 execution_frequency(const spill_ilp_t *si, const ir_node * irn)
293 {
294 #define FUDGE 0.001
295 #ifndef EXECFREQ_LOOPDEPH
296         return get_block_execfreq(si->chordal_env->exec_freq, get_block(irn)) + FUDGE;
297 #else
298         if(is_Block(irn))
299                 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
300         else
301                 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
302 #endif
303 }
304
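/**
 * Returns the cost of @p irn: COST_STORE for Spills, COST_LOAD for Reloads,
 * otherwise the backend's estimated cost
 */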
305 static double
306 get_cost(const spill_ilp_t * si, const ir_node * irn)
307 {
308         if(be_is_Spill(irn)) {
309                 return COST_STORE;
310         } else if(be_is_Reload(irn)){
311                 return COST_LOAD;
312         } else {
313                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
314         }
315 }
316
317 /**
318  * Checks whether the node and its operands have suitable register classes
319  */
320 static INLINE int
321 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
322 {
323         int               n;
324         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
325         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
326
327 #if 0
328         if(!remat)
329                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
330 #endif
331
332         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
333                 ir_node        *op = get_irn_n(irn, n);
334                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
335
336 //              if(!remat)
337 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
338         }
339
340         return remat;
341 }
342
343 /**
344  * Try to create a remat from @p op with destination value @p dest_value
345  */
346 static INLINE remat_t *
347 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
348 {
349         remat_t  *remat = NULL;
350
351 //      if(!mode_is_datab(get_irn_mode(dest_value)))
352 //              return NULL;
353
354         if(dest_value == op) {
355                 const ir_node *proj = NULL;
356
357                 if(is_Proj(dest_value)) {
358                         op = get_irn_n(op, 0);
359                         proj = dest_value;
360                 }
361
362                 if(!is_rematerializable(si, op))
363                         return NULL;
364
365                 remat = obstack_alloc(si->obst, sizeof(*remat));
366                 remat->op = op;
367                 remat->cost = get_cost(si, op);
368                 remat->value = dest_value;
369                 remat->proj = proj;
370                 remat->inverse = 0;
371         } else {
372                 arch_inverse_t     inverse;
373                 int                n;
374
375                 /* get the index of the operand we want to retrieve by the inverse op */
376                 for (n = get_irn_arity(op)-1; n>=0; --n) {
377                         ir_node        *arg = get_irn_n(op, n);
378
379                         if(arg == dest_value) break;
380                 }
381                 if(n<0) return NULL;
382
383                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
384
385                 /* else ask the backend to give an inverse op */
386                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
387                         int   i;
388
389                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
390
391                         assert(inverse.n > 0 && "inverse op should have at least one node");
392
393                         for(i=inverse.n-1; i>=0; --i) {
394                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
395                         }
396
397                         if(inverse.n <= 2) {
398                                 remat = obstack_alloc(si->obst, sizeof(*remat));
399                                 remat->op = inverse.nodes[0];
400                                 remat->cost = inverse.costs;
401                                 remat->value = dest_value;
402                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
403                                 remat->inverse = 1;
404
405                                 assert(remat->proj == NULL || is_Proj(remat->proj));
406                         } else {
407                                 assert(0 && "I can not handle remats with more than 2 nodes");
408                         }
409                 }
410         }
411
412         if(remat) {
413                 if(remat->proj) {
414                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
415                 } else {
416                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
417                 }
418         }
419         return remat;
420 }
421
422
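/**
 * Registers @p remat in the remat_info of its value and in the
 * remats_by_operand set of each argument of the remat op
 */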
423 static INLINE void
424 add_remat(const spill_ilp_t * si, const remat_t * remat)
425 {
426         remat_info_t    *remat_info,
427                      query;
428         int              n;
429
430         assert(remat->op);
431         assert(remat->value);
432
433         query.irn = remat->value;
434         query.remats = NULL;
435         query.remats_by_operand = NULL;
436         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
437
438         if(remat_info->remats == NULL) {
439                 remat_info->remats = new_pset(cmp_remat, 4096);
440         }
441         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
442
443         /* insert the remat into the remats_by_operand set of each argument of the remat op */
444         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
445                 ir_node        *arg = get_irn_n(remat->op, n);
446
447                 query.irn = arg;
448                 query.remats = NULL;
449                 query.remats_by_operand = NULL;
450                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
451
452                 if(remat_info->remats_by_operand == NULL) {
453                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
454                 }
455                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
456         }
457 }
458
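/**
 * Counts the users (out edges) of @p irn that are not inverse remat operations
 */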
459 static int
460 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
461 {
462         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
463         int                i = 0;
464
465         while(edge) {
466                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
467                         ++i;
468                 }
469                 edge = get_irn_out_edge_next(irn, edge);
470         }
471
472         return i;
473 }
474
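/**
 * Collects all remats derivable from @p op: a direct remat of its own value
 * (with NO_SINGLE_USE_REMATS only if it has more than one non-remat use) and,
 * with COLLECT_INVERSE_REMATS, inverse remats recomputing each operand
 */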
475 static INLINE void
476 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
477 {
478         int      n;
479         remat_t *remat;
480
481 #ifdef NO_SINGLE_USE_REMATS
482         if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
483 #else
484         if(has_reg_class(si, op)) {
485 #endif
486                 remat = get_remat_from_op(si, op, op);
487                 if(remat) {
488                         add_remat(si, remat);
489                 }
490         }
491
492 #ifdef COLLECT_INVERSE_REMATS
493         /* additionally, for each argument arg of op, try to obtain an inverse remat
494            via get_remat_from_op(si, arg, op), i.e. a remat recomputing arg from op's result */
495         for (n = get_irn_arity(op)-1; n>=0; --n) {
496                 ir_node        *arg = get_irn_n(op, n);
497
498                 if(has_reg_class(si, arg)) {
499                         /* try to get an inverse remat */
500                         remat = get_remat_from_op(si, arg, op);
501                         if(remat) {
502                                 add_remat(si, remat);
503                         }
504                 }
505         }
506 #endif
507
508 }
509
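/**
 * Returns nonzero iff the definition of @p val is available at schedule position
 * @p pos (same block: decided via the schedule, other blocks: via dominance;
 * a Block as @p pos stands for its end)
 */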
510 static INLINE int
511 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
512 {
513         ir_node *block;
514         ir_node *def_block = get_nodes_block(val);
515         int      ret;
516
517         if(val == pos)
518                 return 0;
519
520         /* if pos is at end of a basic block */
521         if(is_Block(pos)) {
522                 ret = (pos == def_block || block_dominates(def_block, pos));
523 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
524                 return ret;
525         }
526
527         /* else if this is a normal operation */
528         block = get_nodes_block(pos);
529         if(block == def_block) {
530                 if(!sched_is_scheduled(val)) return 1;
531
532                 ret = sched_comes_after(val, pos);
533 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
534                 return ret;
535         }
536
537         ret = block_dominates(def_block, block);
538 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
539         return ret;
540 }
541
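/**
 * Returns the last non-control-flow node of block @p bb
 */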
542 static INLINE ir_node *
543 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
544 {
545         return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
546 }
547
548 /**
549  * Returns first non-Phi node of block @p bb
550  */
551 static INLINE ir_node *
552 sched_block_first_nonphi(const ir_node * bb)
553 {
554         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
555 }
556
557 static int
558 sched_skip_proj_predicator(const ir_node * irn, void * data)
559 {
560         return (is_Proj(irn));
561 }
562
563 static INLINE ir_node *
564 sched_next_nonproj(const ir_node * irn, int forward)
565 {
566         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
567 }
568
569 /**
570  * Returns next operation node (non-Proj) after @p irn
571  * or the basic block of this node
572  */
573 static INLINE ir_node *
574 sched_next_op(const ir_node * irn)
575 {
576         ir_node *next = sched_next(irn);
577
578         if(is_Block(next))
579                 return next;
580
581         return sched_next_nonproj(next, 1);
582 }
583
584 /**
585  * Returns previous operation node (non-Proj) before @p irn
586  * or the basic block of this node
587  */
588 static INLINE ir_node *
589 sched_prev_op(const ir_node * irn)
590 {
591         ir_node *prev = sched_prev(irn);
592
593         if(is_Block(prev))
594                 return prev;
595
596         return sched_next_nonproj(prev, 0);
597 }
598
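/**
 * Schedules @p irn directly after @p insert (and its Projs); if @p insert is a
 * Block, @p irn is placed right after the block's Phis
 */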
599 static void
600 sched_put_after(ir_node * insert, ir_node * irn)
601 {
602         if(is_Block(insert)) {
603                 insert = sched_block_first_nonphi(insert);
604         } else {
605                 insert = sched_next_op(insert);
606         }
607         sched_add_before(insert, irn);
608 }
609
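/**
 * Schedules @p irn directly before @p insert; if @p insert is a Block, @p irn is
 * placed before the block's trailing control flow nodes
 */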
610 static void
611 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
612 {
613         if(is_Block(insert)) {
614                 insert = sched_block_last_noncf(si, insert);
615         } else {
616                 insert = sched_next_nonproj(insert, 0);
617                 insert = sched_prev(insert);
618         }
619         sched_add_after(insert, irn);
620 }
621
622 /**
623  * Tells you whether a @p remat can be placed before the irn @p pos
624  */
625 static INLINE int
626 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
627 {
628         const ir_node   *op = remat->op;
629         const ir_node   *prev;
630         int        n,
631                            res = 1;
632
633         if(is_Block(pos)) {
634                 prev = sched_block_last_noncf(si, pos);
635                 prev = sched_next_nonproj(prev, 0);
636         } else {
637                 prev = sched_prev_op(pos);
638         }
639         /* do not remat if the rematted value is defined immediately before this op */
640         if(prev == remat->op) {
641                 return 0;
642         }
643
644 #if 0
645         /* this should be just fine, the following OP will be using this value, right? */
646
647         /* only remat AFTER the real definition of a value (?) */
648         if(!value_is_defined_before(si, pos, remat->value)) {
649 //              ir_fprintf(stderr, "error(not defined)");
650                 return 0;
651         }
652 #endif
653
654         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
655                 const ir_node   *arg = get_irn_n(op, n);
656
657 #ifdef NO_ENLARGE_L1V3N355
658                 if(has_reg_class(si, arg) && live) {
659                         res &= pset_find_ptr(live, arg)?1:0;
660                 } else {
661                         res &= value_is_defined_before(si, pos, arg);
662                 }
663 #else
664                 res &= value_is_defined_before(si, pos, arg);
665 #endif
666         }
667
668         return res;
669 }
670
671 /**
672  * Tells you whether a @p remat can be placed after the irn @p pos
673  */
674 static INLINE int
675 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
676 {
677         if(is_Block(pos)) {
678                 pos = sched_block_first_nonphi(pos);
679         } else {
680                 pos = sched_next_op(pos);
681         }
682
683         /* only remat AFTER the real definition of a value (?) */
684         if(!value_is_defined_before(si, pos, remat->value)) {
685                 return 0;
686         }
687
688         return can_remat_before(si, remat, pos, live);
689 }
690
691 /**
692  * Collect potentially rematerializable OPs
693  */
694 static void
695 walker_remat_collector(ir_node * irn, void * data)
696 {
697         spill_ilp_t    *si = data;
698
699         if(!is_Block(irn) && !is_Phi(irn)) {
700                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
701                 get_remats_from_op(si, irn);
702         }
703 }
704
705 /**
706  * Inserts a copy of @p irn before @p pos
707  */
708 static ir_node *
709 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
710 {
711         ir_node     *bb;
712         ir_node     *copy;
713
714         bb = is_Block(pos)?pos:get_nodes_block(pos);
715         copy = exact_copy(irn);
716
717         _set_phi_class(copy, NULL);
718         set_nodes_block(copy, bb);
719         sched_put_before(si, pos, copy);
720
721         return copy;
722 }
723
724 /**
725  * Inserts a copy of @p irn after @p pos
726  */
727 static ir_node *
728 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
729 {
730         ir_node     *bb;
731         ir_node     *copy;
732
733         bb = is_Block(pos)?pos:get_nodes_block(pos);
734         copy = exact_copy(irn);
735
736         _set_phi_class(copy, NULL);
737         set_nodes_block(copy, bb);
738         sched_put_after(pos, copy);
739
740         return copy;
741 }
742
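/**
 * If the remat may be placed after @p pos, insert a copy of its op (and Proj, if
 * any) there, attach a post-remat op_t and create a binary ILP variable weighted
 * with remat cost times execution frequency.  Returns the copy or NULL.
 */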
743 static ir_node *
744 insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
745 {
746         char     buf[256];
747
748         if(can_remat_after(si, remat, pos, live)) {
749                 ir_node         *copy,
750                                                 *proj_copy;
751                 op_t            *op;
752
753                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
754
755                 copy = insert_copy_after(si, remat->op, pos);
756
757                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
758                 op = obstack_alloc(si->obst, sizeof(*op));
759                 op->is_remat = 1;
760                 op->attr.remat.remat = remat;
761                 op->attr.remat.pre = 0;
762                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
763
764                 set_irn_link(copy, op);
765                 pset_insert_ptr(si->all_possible_remats, copy);
766                 if(remat->proj) {
767                         proj_copy = insert_copy_after(si, remat->proj, copy);
768                         set_irn_n(proj_copy, 0, copy);
769                         set_irn_link(proj_copy, op);
770                         pset_insert_ptr(si->all_possible_remats, proj_copy);
771                 } else {
772                         proj_copy = NULL;
773                 }
774
775                 return copy;
776         }
777
778         return NULL;
779 }
780
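/**
 * If the remat may be placed before @p pos, insert a copy of its op (and Proj, if
 * any) there, attach a pre-remat op_t and create a binary ILP variable weighted
 * with remat cost times execution frequency.  Returns the copy or NULL.
 */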
781 static ir_node *
782 insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
783 {
784         char     buf[256];
785
786         if(can_remat_before(si, remat, pos, live)) {
787                 ir_node         *copy,
788                                                 *proj_copy;
789                 op_t            *op;
790
791                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
792
793                 copy = insert_copy_before(si, remat->op, pos);
794
795                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
796                 op = obstack_alloc(si->obst, sizeof(*op));
797                 op->is_remat = 1;
798                 op->attr.remat.remat = remat;
799                 op->attr.remat.pre = 1;
800                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
801
802                 set_irn_link(copy, op);
803                 pset_insert_ptr(si->all_possible_remats, copy);
804                 if(remat->proj) {
805                         proj_copy = insert_copy_after(si, remat->proj, copy);
806                         set_irn_n(proj_copy, 0, copy);
807                         set_irn_link(proj_copy, op);
808                         pset_insert_ptr(si->all_possible_remats, proj_copy);
809                 } else {
810                         proj_copy = NULL;
811                 }
812
813                 return copy;
814         }
815
816         return NULL;
817 }
818
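/**
 * Returns the number of control flow successors of @p block, saturating at 2
 */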
819 static int
820 get_block_n_succs(const ir_node *block) {
821         const ir_edge_t *edge;
822
823         assert(edges_activated(current_ir_graph));
824
825         edge = get_block_succ_first(block);
826         if (! edge)
827                 return 0;
828
829         edge = get_block_succ_next(block, edge);
830         return edge ? 2 : 1;
831 }
832
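/**
 * With GOODWIN_REDUCTION only blocks with exactly one successor count as lying on
 * a merge edge (and thus allow reloads at their end); otherwise every block does
 */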
833 static int
834 is_merge_edge(const ir_node * bb)
835 {
836 #ifdef GOODWIN_REDUCTION
837         return get_block_n_succs(bb) == 1;
838 #else
839         return 1;
840 #endif
841 }
842
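/**
 * With GOODWIN_REDUCTION only blocks with exactly one predecessor count as lying on
 * a diverge edge (and thus allow remat2s at their beginning); otherwise every block does
 */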
843 static int
844 is_diverge_edge(const ir_node * bb)
845 {
846 #ifdef GOODWIN_REDUCTION
847         return get_Block_n_cfgpreds(bb) == 1;
848 #else
849         return 1;
850 #endif
851 }
852
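/**
 * Inserts a Copy of the spilled register class for every Phi argument that is not
 * in this class, so all Phi operands can be handled uniformly by the ILP
 */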
853 static void
854 walker_regclass_copy_insertor(ir_node * irn, void * data)
855 {
856         spill_ilp_t    *si = data;
857
858         if(is_Phi(irn) && has_reg_class(si, irn)) {
859                 int n;
860
861                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
862                         ir_node  *phi_arg = get_irn_n(irn, n);
863                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
864
865                         if(!has_reg_class(si, phi_arg)) {
866                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
867                                 ir_node   *pos = sched_block_last_noncf(si, bb);
868                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
869
870                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
871                                 sched_add_after(pos, copy);
872                                 set_irn_n(irn, n, copy);
873
874                                 op->is_remat = 0;
875                                 op->attr.live_range.args.reloads = NULL;
876                                 op->attr.live_range.ilp = ILP_UNDEF;
877                                 set_irn_link(copy, op);
878                         }
879                 }
880         }
881 }
882
883
884 /**
885  * Insert (so far unused) remats into the irg to
886  * recompute the potential liveness of all values
887  */
888 static void
889 walker_remat_insertor(ir_node * bb, void * data)
890 {
891         spill_ilp_t    *si = data;
892         spill_bb_t     *spill_bb;
893         ir_node        *irn;
894         int             n;
895         irn_live_t     *li;
896         pset           *live = pset_new_ptr_default();
897
898         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
899
900         live_foreach(bb, li) {
901                 ir_node        *value = (ir_node *) li->irn;
902
903                 /* add remats at end of block */
904                 if (live_is_end(li) && has_reg_class(si, value)) {
905                         pset_insert_ptr(live, value);
906                 }
907         }
908
909         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
910         set_irn_link(bb, spill_bb);
911
912         irn = sched_last(bb);
913         while(!sched_is_end(irn)) {
914                 ir_node   *next;
915                 op_t      *op;
916                 pset      *args;
917                 ir_node   *arg;
918                 pset      *remat_args;
919
920                 next = sched_prev(irn);
921
922                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
923
924                 if(is_Phi(irn) || is_Proj(irn)) {
925                         op_t      *op;
926
927                         if(has_reg_class(si, irn)) {
928                                 pset_remove_ptr(live, irn);
929                         }
930
931                         op = obstack_alloc(si->obst, sizeof(*op));
932                         op->is_remat = 0;
933                         op->attr.live_range.args.reloads = NULL;
934                         op->attr.live_range.ilp = ILP_UNDEF;
935                         set_irn_link(irn, op);
936
937                         irn = next;
938                         continue;
939                 }
940
941                 op = obstack_alloc(si->obst, sizeof(*op));
942                 op->is_remat = 0;
943                 op->attr.live_range.ilp = ILP_UNDEF;
944                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
945                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
946                 set_irn_link(irn, op);
947
948                 args = pset_new_ptr_default();
949
950                 /* collect arguments of op */
951                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
952                         ir_node        *arg = get_irn_n(irn, n);
953
954                         pset_insert_ptr(args, arg);
955                 }
956
957                 /* set args of op already live in epilog */
958                 pset_foreach(args, arg) {
959                         if(has_reg_class(si, arg)) {
960                                 pset_insert_ptr(live, arg);
961                         }
962                 }
963                 /* delete defined value from live set */
964                 if(has_reg_class(si, irn)) {
965                         pset_remove_ptr(live, irn);
966                 }
967
968
969                 remat_args = pset_new_ptr_default();
970
971                 /* insert all possible remats before irn */
972                 pset_foreach(args, arg) {
973                         remat_info_t   *remat_info,
974                                                     query;
975                         remat_t        *remat;
976
977                         /* continue if the operand has the wrong reg class
978                          */
979                         if(!has_reg_class(si, arg))
980                                 continue;
981
982                         query.irn = arg;
983                         query.remats = NULL;
984                         query.remats_by_operand = NULL;
985                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
986
987                         if(!remat_info) {
988                                 continue;
989                         }
990
991                         if(remat_info->remats) {
992                                 pset_foreach(remat_info->remats, remat) {
993                                         ir_node  *remat_irn = NULL;
994
995                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
996 #ifdef REMAT_WHILE_LIVE
997                                         if(pset_find_ptr(live, remat->value)) {
998                                                 remat_irn = insert_remat_before(si, remat, irn, live);
999                                         }
1000 #else
1001                                         remat_irn = insert_remat_before(si, remat, irn, live);
1002 #endif
1003                                         if(remat_irn) {
1004                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1005                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1006
1007                                                         if(!has_reg_class(si, remat_arg)) continue;
1008
1009                                                         pset_insert_ptr(remat_args, remat_arg);
1010                                                 }
1011                                         }
1012                                 }
1013                         }
1014                 }
1015
1016                 /* now we add remat args to op's args because they could also die at this op */
1017                 pset_foreach(args,arg) {
1018                         if(pset_find_ptr(remat_args, arg)) {
1019                                 pset_remove_ptr(remat_args, arg);
1020                         }
1021                 }
1022                 pset_foreach(remat_args,arg) {
1023                         pset_insert_ptr(args, arg);
1024                 }
1025
1026                 /* insert all possible remats after irn */
1027                 pset_foreach(args, arg) {
1028                         remat_info_t   *remat_info,
1029                                                     query;
1030                         remat_t        *remat;
1031
1032                         /* continue if the operand has the wrong reg class */
1033                         if(!has_reg_class(si, arg))
1034                                 continue;
1035
1036                         query.irn = arg;
1037                         query.remats = NULL;
1038                         query.remats_by_operand = NULL;
1039                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1040
1041                         if(!remat_info) {
1042                                 continue;
1043                         }
1044
1045                         /* do not place post remats after jumps */
1046                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
1047
1048                         if(remat_info->remats_by_operand) {
1049                                 pset_foreach(remat_info->remats_by_operand, remat) {
1050                                         /* do not insert remats producing the same value as one of the operands */
1051                                         if(!pset_find_ptr(args, remat->value)) {
1052                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1053 #ifdef REMAT_WHILE_LIVE
1054                                                 if(pset_find_ptr(live, remat->value)) {
1055                                                         insert_remat_after(si, remat, irn, live);
1056                                                 }
1057 #else
1058                                                 insert_remat_after(si, remat, irn, live);
1059 #endif
1060                                         }
1061                                 }
1062                         }
1063                 }
1064
1065                 del_pset(remat_args);
1066                 del_pset(args);
1067                 irn = next;
1068         }
1069
1070         live_foreach(bb, li) {
1071                 ir_node        *value = (ir_node *) li->irn;
1072
1073                 /* add remats at end if successor has multiple predecessors */
1074                 if(is_merge_edge(bb)) {
1075                         /* add remats at end of block */
1076                         if (live_is_end(li) && has_reg_class(si, value)) {
1077                                 remat_info_t   *remat_info,
1078                                                            query;
1079                                 remat_t        *remat;
1080
1081                                 query.irn = value;
1082                                 query.remats = NULL;
1083                                 query.remats_by_operand = NULL;
1084                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1085
1086                                 if(remat_info && remat_info->remats) {
1087                                         pset_foreach(remat_info->remats, remat) {
1088                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1089
1090                                                 insert_remat_before(si, remat, bb, NULL);
1091                                         }
1092                                 }
1093                         }
1094                 }
1095                 if(is_diverge_edge(bb)) {
1096                         /* add remat2s at beginning of block */
1097                         if ((live_is_in(li) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
1098                                 remat_info_t   *remat_info,
1099                                                            query;
1100                                 remat_t        *remat;
1101
1102                                 query.irn = value;
1103                                 query.remats = NULL;
1104                                 query.remats_by_operand = NULL;
1105                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1106
1107                                 if(remat_info && remat_info->remats) {
1108                                         pset_foreach(remat_info->remats, remat) {
1109                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at beginning of block %+F\n", remat->op, bb));
1110
1111                                                 /* put the remat here if all its args are available */
1112                                                 insert_remat_after(si, remat, bb, NULL);
1113
1114                                         }
1115                                 }
1116                         }
1117                 }
1118         }
1119 }
1120
1121 /**
1122  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1123  */
1124 static void
1125 luke_endwalker(ir_node * bb, void * data)
1126 {
1127         spill_ilp_t    *si = (spill_ilp_t*)data;
1128         irn_live_t     *li;
1129         pset           *live;
1130         pset           *use_end;
1131         char            buf[256];
1132         ilp_cst_t       cst;
1133         ir_node        *irn;
1134         spill_bb_t     *spill_bb = get_irn_link(bb);
1135
1136
1137         live = pset_new_ptr_default();
1138         use_end = pset_new_ptr_default();
1139
1140         live_foreach(bb, li) {
1141                 irn = (ir_node *) li->irn;
1142                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1143                         op_t      *op;
1144
1145                         pset_insert_ptr(live, irn);
1146                         op = get_irn_link(irn);
1147                         assert(!op->is_remat);
1148                 }
1149         }
1150
1151         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1152         /* their reg_out must always be set */
1153         sched_foreach_reverse(bb, irn) {
1154                 int   n;
1155
1156                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1157
1158                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1159                         ir_node        *irn_arg = get_irn_n(irn, n);
1160
1161                         if(has_reg_class(si, irn_arg)) {
1162                                 pset_insert_ptr(use_end, irn_arg);
1163                         }
1164                 }
1165         }
1166
1167         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1168         //cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1169         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1170
1171         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1172
1173         /* if this is a merge edge we can reload at the end of this block */
1174         if(is_merge_edge(bb)) {
1175                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1176         } else if(pset_count(use_end)){
1177                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1178         } else {
1179                 spill_bb->reloads = NULL;
1180         }
1181
1182         pset_foreach(live,irn) {
1183                 spill_t     query,
1184                                         *spill;
1185                 double      spill_cost;
1186
1187
1188                 /* handle values used by control flow nodes later separately */
1189                 if(pset_find_ptr(use_end, irn)) continue;
1190
1191                 query.irn = irn;
1192                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1193
1194                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1195
1196                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1197                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1198                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1199
1200                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1201                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1202
1203                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1204                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1205
1206                 if(is_merge_edge(bb)) {
1207                         ilp_var_t   reload;
1208                         ilp_cst_t   rel_cst;
1209
1210                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1211                         reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1212                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1213
1214                         /* reload <= mem_out */
1215                         rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1216                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1217                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1218                 }
1219
1220                 spill->reg_in = ILP_UNDEF;
1221                 spill->mem_in = ILP_UNDEF;
1222         }
1223
1224         pset_foreach(use_end,irn) {
1225                 spill_t     query,
1226                                         *spill;
1227                 double      spill_cost;
1228                 ilp_cst_t   end_use_req,
1229                                         rel_cst;
1230                 ilp_var_t   reload;
1231
1232                 query.irn = irn;
1233                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1234
1235                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1236
1237                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1238                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1239                 /* if irn is used at the end of the block, then it is live anyway */
1240                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1241
1242                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1243                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1244
1245                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1246                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1247
1248                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1249                 reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1250                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1251
1252                 /* reload <= mem_out */
1253                 rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1254                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1255                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1256
1257                 spill->reg_in = ILP_UNDEF;
1258                 spill->mem_in = ILP_UNDEF;
1259
1260                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1261                 end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
1262                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1263         }
1264
1265         del_pset(live);
1266         del_pset(use_end);
1267 }
1268
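/**
 * Returns the operation following @p irn (or the first non-Phi op of the block if
 * @p irn is a Block) iff it is a post remat, NULL otherwise
 */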
1269 static ir_node *
1270 next_post_remat(const ir_node * irn)
1271 {
1272         op_t      *op;
1273
1274         if(is_Block(irn)) {
1275                 irn = sched_block_first_nonphi(irn);
1276         } else {
1277                 irn = sched_next_op(irn);
1278         }
1279
1280         if(sched_is_end(irn))
1281                 return NULL;
1282
1283         op = (op_t*)get_irn_link(irn);
1284         if(op->is_remat && !op->attr.remat.pre) {
1285                 return irn;
1286         }
1287
1288         return NULL;
1289 }
1290
1291
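/**
 * Returns the operation preceding @p irn (or the last op before the control flow
 * nodes if @p irn is a Block) iff it is a pre remat, NULL otherwise
 */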
1292 static ir_node *
1293 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1294 {
1295         op_t      *op;
1296         ir_node   *ret;
1297
1298         if(is_Block(irn)) {
1299                 ret = sched_block_last_noncf(si, irn);
1300                 ret = sched_next(ret);
1301                 ret = sched_prev_op(ret);
1302         } else {
1303                 ret = sched_prev_op(irn);
1304         }
1305
1306         if(sched_is_end(ret) || is_Phi(ret))
1307                 return NULL;
1308
1309         op = (op_t*)get_irn_link(ret);
1310         if(op->is_remat && op->attr.remat.pre) {
1311                 return ret;
1312         }
1313
1314         return NULL;
1315 }
1316
1317 /**
1318  * Find a remat of value @p value in the epilog of @p pos
1319  */
1320 static ir_node *
1321 find_post_remat(const ir_node * value, const ir_node * pos)
1322 {
1323         while((pos = next_post_remat(pos)) != NULL) {
1324                 op_t   *op;
1325
1326                 op = get_irn_link(pos);
1327                 assert(op->is_remat && !op->attr.remat.pre);
1328
1329                 if(op->attr.remat.remat->value == value)
1330                         return (ir_node*)pos;
1331
1332 #if 0
1333         const ir_edge_t *edge;
1334                 foreach_out_edge(pos, edge) {
1335                         ir_node   *proj = get_edge_src_irn(edge);
1336                         assert(is_Proj(proj));
1337                 }
1338 #endif
1339
1340         }
1341
1342         return NULL;
1343 }
1344
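/**
 * Returns the spill_t entry for @p irn in the spill_bb of @p bb, creating it
 * (together with its mem_out and spill ILP variables) if it does not exist yet
 */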
1345 static spill_t *
1346 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1347 {
1348         spill_bb_t  *spill_bb = get_irn_link(bb);
1349         spill_t     *spill,
1350                                  query;
1351         char         buf[256];
1352
1353         query.irn = irn;
1354         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1355         if(!spill) {
1356                 double   spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1357
1358                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1359
1360                 spill->reg_out = ILP_UNDEF;
1361                 spill->reg_in  = ILP_UNDEF;
1362                 spill->mem_in  = ILP_UNDEF;
1363
1364                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1365                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1366
1367                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1368                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1369         }
1370
1371         return spill;
1372 }
1373
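/**
 * Collects into @p live all values of the spilled register class that are live at
 * the end of @p bb, including arguments of the block's trailing control flow nodes
 */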
1374 static void
1375 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1376 {
1377         irn_live_t     *li;
1378         ir_node        *irn;
1379
1380         live_foreach(bb, li) {
1381                 irn = (ir_node *) li->irn;
1382
1383                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1384                         pset_insert_ptr(live, irn);
1385                 }
1386         }
1387
1388         irn = sched_last(bb);
1389
1390         /* all values eaten by control flow operations are also live until the end of the block */
1391         sched_foreach_reverse(bb, irn) {
1392                 int  i;
1393
1394                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1395
1396                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1397                         ir_node *arg = get_irn_n(irn,i);
1398
1399                         if(has_reg_class(si, arg)) {
1400                                 pset_insert_ptr(live, arg);
1401                         }
1402                 }
1403         }
1404 }
1405
1406 /**
1407  *  Inserts ILP-constraints and variables for memory copying before the given position
1408  */
1409 static void
1410 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1411 {
1412         const ir_node    *succ;
1413         const ir_edge_t  *edge;
1414         spill_bb_t       *spill_bb = get_irn_link(block);
1415         ir_node          *phi;
1416         int               pos;
1417         ilp_cst_t         cst;
1418         ilp_var_t         copyreg;
1419         char              buf[256];
1420         ir_node          *tmp;
1421
1422
1423         assert(edges_activated(current_ir_graph));
1424
1425         edge = get_block_succ_first(block);
1426         if(!edge) return;
1427
1428         succ = edge->src;
1429         pos = edge->pos;
1430
1431         edge = get_block_succ_next(block, edge);
1432         /* next block can only contain phis, if this is a merge edge */
1433         if(edge) return;
1434
1435         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1436         copyreg = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1437
1438         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1439         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1440
1441         pset_foreach(live, tmp) {
1442                 spill_t  *spill;
1443 #if 0
1444                 op_t  *op = get_irn_link(irn);
1445                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1446 #endif
1447                 spill = set_find_spill(spill_bb->ilp, tmp);
1448                 assert(spill);
1449
1450                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1451         }
1452         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1453
1454         sched_foreach(succ, phi) {
1455                 const ir_node  *to_copy;
1456                 op_t           *to_copy_op;
1457                 spill_t        *to_copy_spill;
1458                 op_t           *phi_op = get_irn_link(phi);
1459                 ilp_var_t       reload = ILP_UNDEF;
1460
1461
1462                 if(!is_Phi(phi)) break;
1463                 if(!has_reg_class(si, phi)) continue;
1464
1465                 to_copy = get_irn_n(phi, pos);
1466
1467                 to_copy_op = get_irn_link(to_copy);
1468
1469                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1470                 assert(to_copy_spill);
1471
1472                 if(spill_bb->reloads) {
1473                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1474
1475                         if(keyval) {
1476                                 reload = PTR_TO_INT(keyval->val);
1477                         }
1478                 }
1479
1480                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N", block, to_copy);
1481                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1482
1483                 /* copy - reg_out - reload - remat - live_range <= 0 */
1484                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1485                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1486                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1487                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1488                 foreach_pre_remat(si, block, tmp) {
1489                         op_t     *remat_op = get_irn_link(tmp);
1490                         if(remat_op->attr.remat.remat->value == to_copy) {
1491                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1492                         }
1493                 }
1494
1495                 ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N", block, to_copy);
1496                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1497
1498                 /* copy - reg_out - copyreg <= 0 */
1499                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1500                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1501                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1502         }
1503 }
1504
1505
1506 /**
1507  * Walk all irg blocks and emit this ILP
1508  */
1509 static void
1510 luke_blockwalker(ir_node * bb, void * data)
1511 {
1512         spill_ilp_t    *si = (spill_ilp_t*)data;
1513         ir_node        *irn;
1514         pset           *live;
1515         char            buf[256];
1516         ilp_cst_t       cst;
1517         spill_bb_t     *spill_bb = get_irn_link(bb);
1518         ir_node        *tmp;
1519         spill_t        *spill;
1520         pset           *defs = pset_new_ptr_default();
1521
1522
1523         live = pset_new_ptr_default();
1524
1525         /****************************************
1526          *      B A S I C  B L O C K  E N D
1527          ***************************************/
1528
1529
1530         /* init live values at end of block */
1531         get_live_end(si, bb, live);
1532
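             /*
              * for every value live at the block end introduce a binary live
              * range variable and require that the value may only leave the
              * block in a register if it is kept live, reloaded or
              * rematerialized by a pre remat:
              *   reg_out <= live_range + reload + \sum pre_remat
              */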
1533         pset_foreach(live, irn) {
1534                 op_t           *op;
1535                 ilp_var_t       reload = ILP_UNDEF;
1536
1537                 spill = set_find_spill(spill_bb->ilp, irn);
1538                 assert(spill);
1539
1540                 if(spill_bb->reloads) {
1541                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1542
1543                         if(keyval) {
1544                                 reload = PTR_TO_INT(keyval->val);
1545                         }
1546                 }
1547
1548                 op = get_irn_link(irn);
1549                 assert(!op->is_remat);
1550
1551                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1552                 op->attr.live_range.ilp = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1553                 op->attr.live_range.op = bb;
1554
1555                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1556                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1557
1558                 /* reg_out - reload - remat - live_range <= 0 */
1559                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1560                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1561                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1562                 foreach_pre_remat(si, bb, tmp) {
1563                         op_t     *remat_op = get_irn_link(tmp);
1564                         if(remat_op->attr.remat.remat->value == irn) {
1565                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1566                         }
1567                 }
1568                 /* maybe we should also assure that reg_out >= live_range etc. */
1569         }
1570
1571 #ifndef NO_MEMCOPIES
1572         insert_mem_copy_position(si, live, bb);
1573 #endif
1574
1575         /*
1576          * start new live ranges for values used by remats at end of block
1577          * and assure the remat args are available
1578          */
1579         foreach_pre_remat(si, bb, tmp) {
1580                 op_t     *remat_op = get_irn_link(tmp);
1581                 int       n;
1582
1583                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1584                         ir_node        *remat_arg = get_irn_n(tmp, n);
1585                         op_t           *arg_op = get_irn_link(remat_arg);
1586                         ilp_var_t       prev_lr;
1587
1588                         if(!has_reg_class(si, remat_arg)) continue;
1589
1590                         /* if value is becoming live through use by remat */
1591                         if(!pset_find_ptr(live, remat_arg)) {
1592                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1593                                 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1594
1595                                 arg_op->attr.live_range.ilp = prev_lr;
1596                                 arg_op->attr.live_range.op = bb;
1597
1598                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1599
1600                                 pset_insert_ptr(live, remat_arg);
1601                                 add_to_spill_bb(si, bb, remat_arg);
1602                         }
1603
1604                         /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1605                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1606                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1607
1608                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1609                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1610
1611                         /* use reload placed for this argument */
1612                         if(spill_bb->reloads) {
1613                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1614
1615                                 if(keyval) {
1616                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1617
1618                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1619                                 }
1620                         }
1621                 }
1622         }
1623         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1624
1625
1626
1627
1628         /**************************************
1629          *    B A S I C  B L O C K  B O D Y
1630          **************************************/
1631
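             /*
              * walk the block body bottom-up; for every real op the ILP models
              * two points: the epilog (directly after the op), where used
              * values may die or stay live and post remats may be placed, and
              * the prolog (directly before the op), where every argument must
              * be available via live range, reload or pre remat; register
              * pressure is checked at both points
              */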
1632         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1633                 op_t       *op;
1634                 op_t       *tmp_op;
1635                 int         n,
1636                                         u = 0,
1637                                         d = 0;
1638                 ilp_cst_t       check_pre,
1639                                         check_post;
1640                 set        *args;
1641                 pset       *used;
1642                 pset       *remat_defs;
1643                 keyval_t   *keyval;
1644
1645                 /* iterate only until first phi */
1646                 if(is_Phi(irn))
1647                         break;
1648
1649                 op = get_irn_link(irn);
1650                 /* skip remats */
1651                 if(op->is_remat) continue;
1652                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1653
1654                 /* collect defined values */
1655                 if(has_reg_class(si, irn)) {
1656                         pset_insert_ptr(defs, irn);
1657                 }
1658
1659                 /* skip projs */
1660                 if(is_Proj(irn)) continue;
1661
1662                 /*
1663                  * init set of irn's arguments
1664                  * and all possibly used values around this op
1665                  * and values defined by post remats
1666                  */
1667                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1668                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1669                 remat_defs = pset_new_ptr(pset_count(live));
1670
1671                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1672                         ir_node        *irn_arg = get_irn_n(irn, n);
1673                         if(has_reg_class(si, irn_arg)) {
1674                                 set_insert_keyval(args, irn_arg, (void*)n);
1675                                 pset_insert_ptr(used, irn_arg);
1676                         }
1677                 }
1678                 foreach_post_remat(irn, tmp) {
1679                         op_t    *remat_op = get_irn_link(tmp);
1680
1681                         pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1682
1683                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1684                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1685                                 if(has_reg_class(si, remat_arg)) {
1686                                         pset_insert_ptr(used, remat_arg);
1687                                 }
1688                         }
1689                 }
1690                 foreach_pre_remat(si, irn, tmp) {
1691                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1692                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1693                                 if(has_reg_class(si, remat_arg)) {
1694                                         pset_insert_ptr(used, remat_arg);
1695                                 }
1696                         }
1697                 }
1698
1699                 /**********************************
1700                  *   I N  E P I L O G  O F  irn
1701                  **********************************/
1702
1703                 /* ensure each dying value is used by only one post remat */
1704                 pset_foreach(used, tmp) {
1705                         ir_node     *value = tmp;
1706                         op_t        *value_op = get_irn_link(value);
1707                         ir_node     *remat;
1708                         int          n_remats = 0;
1709
1710                         cst = ILP_UNDEF;
1711                         foreach_post_remat(irn, remat) {
1712                                 op_t  *remat_op = get_irn_link(remat);
1713
1714                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1715                                         ir_node   *remat_arg = get_irn_n(remat, n);
1716
1717                                         /* if value is used by this remat add it to constraint */
1718                                         if(remat_arg == value) {
1719                                                 if(n_remats == 0) {
1720                                                         /* sum remat2s <= 1 + n_remats*live_range */
1721                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1722                                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1.0);
1723                                                 }
1724
1725                                                 n_remats++;
1726                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1727                                                 break;
1728                                         }
1729                                 }
1730                         }
1731
1732                         /* value_op->attr.live_range.ilp != ILP_UNDEF */
1733                         if(pset_find_ptr(live, value) && cst != ILP_UNDEF) {
1734                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1735                         }
1736                 }
1737
1738                 /* ensure at least one value dies at post remat */
1739                 foreach_post_remat(irn, tmp) {
1740                         op_t     *remat_op = get_irn_link(tmp);
1741                         pset     *remat_args = pset_new_ptr(get_irn_arity(tmp));
1742                         ir_node  *remat_arg;
1743
1744                         for(n=get_irn_arity(tmp)-1; n>=0; --n) {
1745                                 remat_arg = get_irn_n(tmp, n);
1746
1747                                 if(has_reg_class(si, remat_arg)) {
1748
1749                                         /* does arg always die at this op? */
1750                                         if(!pset_find_ptr(live, remat_arg))
1751                                                 goto skip_one_must_die;
1752
1753                                         pset_insert_ptr(remat_args, remat_arg);
1754                                 }
1755                         }
1756
1757                         /* remat + \sum live_range(remat_arg) <= |args| */
1758                         ir_snprintf(buf, sizeof(buf), "one_must_die_%+F", tmp);
1759                         cst = lpp_add_cst(si->lpp, buf, lpp_less, pset_count(remat_args));
1760                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1761
1762                         pset_foreach(remat_args, remat_arg) {
1763                                 op_t  *arg_op = get_irn_link(remat_arg);
1764
1765                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1766                         }
1767
1768 skip_one_must_die:
1769                         del_pset(remat_args);
1770                 }
1771
1772                 /* new live ranges for values from L\U defined by post remats */
1773                 pset_foreach(live, tmp) {
1774                         ir_node     *value = tmp;
1775                         op_t        *value_op = get_irn_link(value);
1776
1777                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
1778                                 ilp_var_t    prev_lr = ILP_UNDEF;
1779                                 ir_node     *remat;
1780
1781                                 if(pset_find_ptr(remat_defs, value)) {
1782
1783                                         /* next_live_range <= prev_live_range + sum remat2s */
1784                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1785                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1786
1787                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1788                                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1789
1790                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1791                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1792
1793                                         foreach_post_remat(irn, remat) {
1794                                                 op_t        *remat_op = get_irn_link(remat);
1795
1796                                                 /* if value is being rematerialized by this remat */
1797                                                 if(value == remat_op->attr.remat.remat->value) {
1798                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1799                                                 }
1800                                         }
1801
1802                                         value_op->attr.live_range.ilp = prev_lr;
1803                                         value_op->attr.live_range.op = irn;
1804                                 }
1805                         }
1806                 }
1807
1808                 /* requirements for post remats and start live ranges from L\U' for values dying here */
1809                 foreach_post_remat(irn, tmp) {
1810                         op_t        *remat_op = get_irn_link(tmp);
1811                         int          n;
1812
1813                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1814                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1815                                 op_t           *arg_op = get_irn_link(remat_arg);
1816
1817                                 if(!has_reg_class(si, remat_arg)) continue;
1818
1819                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
1820                                 if(!pset_find_ptr(used, remat_arg)) {
1821                                         /* remat <= live_range(remat_arg) */
1822                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
1823                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1824
1825                                         /* if value is becoming live through use by remat2 */
1826                                         if(!pset_find_ptr(live, remat_arg)) {
1827                                                 ilp_var_t     lr;
1828
1829                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1830                                                 lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1831
1832                                                 arg_op->attr.live_range.ilp = lr;
1833                                                 arg_op->attr.live_range.op = irn;
1834
1835                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
1836
1837                                                 pset_insert_ptr(live, remat_arg);
1838                                                 add_to_spill_bb(si, bb, remat_arg);
1839                                         }
1840
1841                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1842                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1843                                 }
1844                         }
1845                 }
1846
1847                 d = pset_count(defs);
1848                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
1849
1850                 /* count how many regs irn needs for arguments */
1851                 u = set_count(args);
1852
1853
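                     /*
                      * e.g. with k = 6 registers and |D| = 2 values defined by
                      * irn, at most 4 other values (live-through lrs plus
                      * post_uses of dying arguments) may occupy a register in
                      * the epilog
                      */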
1854                 /* check the register pressure in the epilog */
1855                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
1856                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
1857                 check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
1858
1859                 /* add L\U' to check_post */
1860                 pset_foreach(live, tmp) {
1861                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
1862                                 /* if a live value is not used by irn */
1863                                 tmp_op = get_irn_link(tmp);
1864                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
1865                         }
1866                 }
1867
1868                 /***********************************************************
1869                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
1870                  **********************************************************/
1871
1872
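                     /*
                      * every value used around irn gets a binary post_use
                      * variable stating whether it still occupies a register in
                      * the epilog; post_use is forced to 1 if the next live
                      * range stays in a register or a post remat consumes the
                      * value, and it is counted against check_post
                      */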
1873                 pset_foreach(used, tmp) {
1874                         ilp_var_t       prev_lr;
1875                         ilp_var_t       post_use;
1876                         int             p = 0;
1877                         spill_t        *spill;
1878                         ir_node        *arg = tmp;
1879                         op_t           *arg_op = get_irn_link(arg);
1880                         ir_node        *remat;
1881
1882                         spill = add_to_spill_bb(si, bb, arg);
1883
1884                         /* new live range for each used value */
1885                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
1886                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1887
1888                         /* the epilog stuff - including post_use, check_post, check_post_remat */
1889                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
1890                         post_use = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1891
1892                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
1893
1894                         /* arg is live throughout epilog if the next live_range is in a register */
1895                         if(pset_find_ptr(live, arg)) {
1896                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
1897
1898                                 /* post_use >= next_lr + remat */
1899                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1900                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1901                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
1902                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1903
1904                         }
1905
1906                         /* if value is not an arg of op and not possibly defined by post remat
1907                          * then it may only die and not become live
1908                          */
1909                         if(!set_find_keyval(args, arg)) {
1910                                 /* post_use <= prev_lr */
1911                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
1912                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1913                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1914                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1915
1916                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
1917                                         /* next_lr <= prev_lr */
1918                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
1919                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1920                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1921                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1922                                 }
1923                         }
1924
1925
1926
1927                         /* forall post remats which use arg add a similar cst */
1928                         foreach_post_remat(irn, remat) {
1929                                 int      n;
1930
1931                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
1932                                         ir_node    *remat_arg = get_irn_n(remat, n);
1933                                         op_t       *remat_op = get_irn_link(remat);
1934
1935                                         if(remat_arg == arg) {
1936                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
1937
1938                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1939                                                 cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
1940                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1941                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1942                                         }
1943                                 }
1944                         }
1945
1946                         /* new live range begins for each used value */
1947                         arg_op->attr.live_range.ilp = prev_lr;
1948                         arg_op->attr.live_range.op = irn;
1949
1950                         /*if(!pset_find_ptr(live, arg)) {
1951                                 pset_insert_ptr(live, arg);
1952                                 add_to_spill_bb(si, bb, arg);
1953                         }*/
1954                         pset_insert_ptr(live, arg);
1955
1956                 }
1957
1958                 /* just to be sure */
1959                 check_post = ILP_UNDEF;
1960
1961
1962
1963
1964                 /******************
1965                  *   P R O L O G
1966                  ******************/
1967
1968                 /* check the register pressure in the prolog */
1969                 /* sum_{L\U} lr <= k - |U| */
1970                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
1971                 check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - u);
1972
1973                 /* for the prolog remove defined values from the live set */
1974                 pset_foreach(defs, tmp) {
1975                         pset_remove_ptr(live, tmp);
1976                 }
1977
1978                 /***********************************************************
1979                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
1980                  **********************************************************/
1981
1982
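                     /*
                      * every argument gets a reload variable weighted with
                      * COST_LOAD * execution_frequency(bb) in the objective;
                      * a reload is only possible if the value is in memory
                      * (reload <= mem_out) and the argument must reach irn in
                      * exactly one way:
                      *   live_range + reload + \sum pre_remat == 1
                      */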
1983                 set_foreach(args, keyval) {
1984                         spill_t        *spill;
1985                         ir_node        *arg = keyval->key;
1986                         int             i = PTR_TO_INT(keyval->val);
1987                         op_t           *arg_op = get_irn_link(arg);
1988
1989                         spill = set_find_spill(spill_bb->ilp, arg);
1990                         assert(spill);
1991
1992                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
1993                         op->attr.live_range.args.reloads[i] = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1994
1995                         /* reload <= mem_out */
1996                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
1997                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1998                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1999                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2000
2001                         /* requirement: arg must be in register for use */
2002                         /* reload + remat + live_range == 1 */
2003                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
2004                         cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
2005
2006                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2007                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
2008                         foreach_pre_remat(si, irn, tmp) {
2009                                 op_t     *remat_op = get_irn_link(tmp);
2010                                 if(remat_op->attr.remat.remat->value == arg) {
2011                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2012                                 }
2013                         }
2014                 }
2015
2016                 /* iterate over L\U */
2017                 pset_foreach(live, tmp) {
2018                         if(!set_find_keyval(args, tmp)) {
2019                                 /* if a live value is not used by irn */
2020                                 tmp_op = get_irn_link(tmp);
2021                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
2022                         }
2023                 }
2024
2025
2026                 /* requirements for remats */
2027                 /* start new live ranges for values used by remats */
2028                 foreach_pre_remat(si, irn, tmp) {
2029                         op_t        *remat_op = get_irn_link(tmp);
2030                         int          n;
2031
2032                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2033                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2034                                 op_t           *arg_op = get_irn_link(remat_arg);
2035                                 ilp_var_t       prev_lr;
2036
2037                                 if(!has_reg_class(si, remat_arg)) continue;
2038
2039                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
2040                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2041                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2042
2043                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2044                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2045
2046                                 /* if remat arg is also used by current op then we can use reload placed for this argument */
2047                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2048                                         int    index = PTR_TO_INT(keyval->val);
2049
2050                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2051                                 }
2052                         }
2053                 }
2054
2055
2056
2057
2058                 /*************************
2059                  *  D O N E  W I T H  O P
2060                  *************************/
2061
2062                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2063
2064                 pset_foreach(live, tmp) {
2065                         assert(has_reg_class(si, tmp));
2066                 }
2067
2068                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2069                         ir_node        *arg = get_irn_n(irn, n);
2070
2071                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2072                 }
2073
2074                 del_pset(remat_defs);
2075                 del_pset(used);
2076                 del_set(args);
2077                 del_pset(defs);
2078                 defs = pset_new_ptr_default();
2079         }
2080
2081
2082
2083         /***************************************
2084          *   B E G I N N I N G  O F  B L O C K
2085          ***************************************/
2086
2087
2088         /* we are now at the beginning of the basic block, there are only \Phis in front of us */
2089         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2090
2091         pset_foreach(live, irn) {
2092                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2093         }
2094
2095         /* construct mem_outs for all values */
2096
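             /*
              * a value may only be in memory at the block end if it was
              * spilled in this block or (for values live at the block start)
              * was already in memory on entry:
              *   mem_out <= spill + mem_in
              */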
2097         set_foreach(spill_bb->ilp, spill) {
2098                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2099                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2100
2101                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2102                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2103
2104                 if(pset_find_ptr(live, spill->irn)) {
2105                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2106
2107                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2108                         spill->mem_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2109                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2110
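                             /*
                              * for a phi defined in this block a memcopy of an
                              * argument may be needed; identical arguments on
                              * several incoming edges share one copy variable
                              * whose cost is COST_STORE weighted with the summed
                              * execution frequency of those predecessors, and a
                              * copy is only allowed if the phi is in memory at
                              * the block entry (copy <= mem_in)
                              */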
2111                         if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2112                                 int   n;
2113                                 op_t *op = get_irn_link(spill->irn);
2114
2115                                 /* do we have to copy a phi argument? */
2116                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2117                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2118
2119                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2120                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2121                                         double          freq=0.0;
2122                                         int             m;
2123                                         ilp_var_t       var;
2124
2125
2126                                         /* argument already done? */
2127                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2128
2129                                         /* get sum of execution frequencies of blocks with the same phi argument */
2130                                         for(m=n; m>=0; --m) {
2131                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2132
2133                                                 if(arg==arg2) {
2134                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
2135                                                 }
2136                                         }
2137
2138                                         /* copies are not for free */
2139                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2140                                         var = lpp_add_var(si->lpp, buf, lpp_binary, COST_STORE * freq);
2141
2142                                         for(m=n; m>=0; --m) {
2143                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2144
2145                                                 if(arg==arg2) {
2146                                                         op->attr.live_range.args.copies[m] = var;
2147                                                 }
2148                                         }
2149
2150                                         /* copy <= mem_in */
2151                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2152                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2153                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2154                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2155                                 }
2156                         }
2157                 }
2158         }
2159
2160
2161         /* L\U is empty at bb start */
2162         /* arg is live throughout epilog if it is reg_in into this block */
2163
2164         /* check the register pressure at the beginning of the block
2165          * including remats
2166          */
2167         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2168         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
2169
2170         pset_foreach(live, irn) {
2171                 ilp_cst_t  nospill;
2172
2173                 spill = set_find_spill(spill_bb->ilp, irn);
2174                 assert(spill);
2175
2176                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2177                 spill->reg_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2178
2179                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2180
2181                 /* spill + mem_in <= 1 */
2182                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2183                 nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2184
2185                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2186                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2187
2188         }
2189         foreach_post_remat(bb, irn) {
2190                 op_t     *remat_op = get_irn_link(irn);
2191
2192                 DBG((si->dbg, LEVEL_4, "\t  next post remat: %+F\n", irn));
2193                 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
2194
2195                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2196         }
2197
2198         /* forall post remats add requirements */
2199         foreach_post_remat(bb, tmp) {
2200                 int         n;
2201
2202                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2203                         ir_node    *remat_arg = get_irn_n(tmp, n);
2204                         op_t       *remat_op = get_irn_link(tmp);
2205
2206                         if(!has_reg_class(si, remat_arg)) continue;
2207
2208                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2209                         assert(spill);
2210
2211                         /* remat <= reg_in_argument */
2212                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2213                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2214                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2215                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2216                 }
2217         }
2218
2219         /* mem_in/reg_in for live_in values, especially phis and their arguments */
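             /*
              * for a phi defined here mem_in/reg_in is bounded by the
              * mem_out/reg_out of the corresponding argument in each
              * predecessor block; for every other live-in value it is bounded
              * by the value's own mem_out/reg_out in every predecessor, so the
              * value arrives in the same resource on all paths:
              *   mem_in <= mem_out(pred),  reg_in <= reg_out(pred)
              */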
2220         pset_foreach(live, irn) {
2221                 int          p = 0,
2222                                          n;
2223
2224                 spill = set_find_spill(spill_bb->ilp, irn);
2225                 assert(spill && spill->irn == irn);
2226
2227                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2228                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2229                                 ilp_cst_t       mem_in,
2230                                                                 reg_in;
2231                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2232                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2233                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2234                                 spill_t        *spill_p;
2235
2236                                 /* although the phi is in the right register class, one or
2237                                  * more of its arguments can be in a different one or may
2238                                  * have to be ignored altogether
2239                                  */
2240                                 if(has_reg_class(si, phi_arg)) {
2241                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2242                                         mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2243                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2244                                         reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2245
2246                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2247                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2248
2249                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2250                                         assert(spill_p);
2251
2252                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2253                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2254                                 }
2255                         }
2256                 } else {
2257                         /* else assure the value arrives on all paths in the same resource */
2258
2259                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2260                                 ilp_cst_t       mem_in,
2261                                                                 reg_in;
2262                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2263                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2264                                 spill_t        *spill_p;
2265
2266                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2267                                 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2268                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2269                                 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2270
2271                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2272                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2273
2274                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2275                                 assert(spill_p);
2276
2277                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2278                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2279                         }
2280                 }
2281         }
2282
2283         /* first live ranges from reg_ins */
2284         pset_foreach(live, irn) {
2285                 op_t      *op = get_irn_link(irn);
2286
2287                 spill = set_find_spill(spill_bb->ilp, irn);
2288                 assert(spill && spill->irn == irn);
2289
2290                 ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2291                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2292                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2293                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2294
2295                 foreach_post_remat(bb, tmp) {
2296                         op_t     *remat_op = get_irn_link(tmp);
2297
2298                         if(remat_op->attr.remat.remat->value == irn) {
2299                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2300                         }
2301                 }
2302         }
2303
2304         /* walk forward now and compute constraints for placing spills */
2305         /* this must only be done for values that are not defined in this block */
2306         /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
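             /*
              * such a spill is only allowed if the value actually reaches the
              * spill position in a register: via reg_in (on diverge edges) or
              * via a remat of the value placed before its first use in this
              * block:
              *   spill <= reg_in + \sum remat
              */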
2307         pset_foreach(live, irn) {
2308                 /*
2309                  * if value is defined in this block we can always place the spill directly after the def
2310                  *    -> no constraint necessary
2311                  */
2312                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) continue;
2313
2314
2315                 spill = set_find_spill(spill_bb->ilp, irn);
2316                 assert(spill);
2317
2318                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2319                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2320
2321                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2322                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2323
2324                 if(!is_Phi(irn)) {
2325                         sched_foreach_op(bb, tmp) {
2326                                 op_t   *op = get_irn_link(tmp);
2327
2328                                 if(is_Phi(tmp)) continue;
2329                                 assert(!is_Proj(tmp));
2330
2331                                 if(op->is_remat) {
2332                                         ir_node   *value = op->attr.remat.remat->value;
2333
2334                                         if(value == irn) {
2335                                                 /* only collect remats up to the first use of a value */
2336                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2337                                         }
2338                                 } else {
2339                                         int   n;
2340
2341                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2342                                                 ir_node    *arg = get_irn_n(tmp, n);
2343
2344                                                 if(arg == irn) {
2345                                                         /* if the value is used stop collecting remats */
2346                                                         cst = ILP_UNDEF;
2347                                                         break;
2348                                                 }
2349                                         }
2350                                 }
2351                                 if(cst == ILP_UNDEF) break;
2352                         }
2353                 }
2354         }
2355
2356         del_pset(live);
2357 }
2358
2359 typedef struct _irnlist_t {
2360         struct list_head   list;
2361         ir_node           *irn;
2362 } irnlist_t;
2363
2364 typedef struct _interference_t {
2365         struct list_head    blocklist;
2366         ir_node            *a;
2367         ir_node            *b;
2368 } interference_t;
2369
2370 static int
2371 cmp_interference(const void *a, const void *b, size_t size)
2372 {
2373         const interference_t *p = a;
2374         const interference_t *q = b;
2375
2376         return !(p->a == q->a && p->b == q->b);
2377 }
2378
2379 static interference_t *
2380 set_find_interference(set * set, ir_node * a, ir_node * b)
2381 {
2382         interference_t     query;
2383
2384         query.a = (a>b)?a:b;
2385         query.b = (a>b)?b:a;
2386
2387         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2388 }
2389
2390 static interference_t *
2391 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2392 {
2393         interference_t     query,
2394                                           *result;
2395         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2396
2397         list->irn = bb;
2398
2399         result = set_find_interference(set, a, b);
2400         if(result) {
2401
2402                 list_add(&list->list, &result->blocklist);
2403                 return result;
2404         }
2405
2406         query.a = (a>b)?a:b;
2407         query.b = (a>b)?b:a;
2408
2409         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2410
2411         INIT_LIST_HEAD(&result->blocklist);
2412         list_add(&list->list, &result->blocklist);
2413
2414         return result;
2415 }
2416
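     /**
      * Check whether two values of the same phi class interfere within block bb:
      * if both are live-in they interfere; otherwise let a be the dominating
      * value, then they interfere iff a is still live at the end of bb or some
      * non-phi user of a inside bb comes after (is dominated by) b.
      */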
2417 static int
2418 values_interfere_in_block(ir_node * bb, ir_node * a, ir_node * b)
2419 {
2420         const ir_edge_t *edge;
2421
2422         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2423                 /* both values are live in, so they interfere */
2424                 return 1;
2425         }
2426
2427         /* ensure a dominates b */
2428         if(value_dominates(b,a)) {
2429                 const ir_node * t;
2430                 t = b;
2431                 b = a;
2432                 a = t;
2433         }
2434         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2435
2436
2437         /* the following code is stolen from bera.c */
2438         if(is_live_end(bb, a))
2439                 return 1;
2440
2441         foreach_out_edge(a, edge) {
2442                 const ir_node *user = edge->src;
2443                 if(get_nodes_block(user) == bb
2444                                 && !is_Phi(user)
2445                                 && b != user
2446                                 && value_dominates(b, user))
2447                         return 1;
2448         }
2449
2450         return 0;
2451 }
2452
2453 /**
2454  * Walk all irg blocks and collect interfering values inside of phi classes
2455  */
2456 static void
2457 luke_interferencewalker(ir_node * bb, void * data)
2458 {
2459         spill_ilp_t    *si = (spill_ilp_t*)data;
2460         irn_live_t     *li1,
2461                        *li2;
2462
2463         live_foreach(bb, li1) {
2464                 ir_node        *a = (ir_node *) li1->irn;
2465                 op_t           *a_op = get_irn_link(a);
2466
2467                 if(a_op->is_remat) continue;
2468
2469                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2470                 if (has_reg_class(si, a) && get_phi_class(a)) {
2471                         for(li2=li1->next; li2; li2 = li2->next) {
2472                                 ir_node        *b = (ir_node *) li2->irn;
2473                                 op_t           *b_op = get_irn_link(b);
2474
2475                                 if(b_op->is_remat) continue;
2476
2477                                 /* a and b are only interesting if they are in the same phi class */
2478                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2479                                         if(values_interfere_in_block(bb, a, b)) {
2480                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2481                                                 set_insert_interference(si, si->interferences, a, b, bb);
2482                                         }
2483                                 }
2484                         }
2485                 }
2486         }
2487 }
2488
2489 static unsigned int copy_path_id = 0;
2490
2491 static void
2492 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2493 {
2494         ilp_cst_t  cst;
2495         ilp_var_t  copy;
2496         char       buf[256];
2497         void      *ptr;
2498
2499         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2500         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2501
2502         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2503
2504         pset_foreach(copies, ptr) {
2505                 copy = PTR_TO_INT(ptr);
2506                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2507         }
2508 }
2509
2510 /**
2511  * @param copies   contains a path of copies which leads us to irn
2512  * @param visited  contains a set of nodes already visited on this path
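      *
      * Performs a DFS from irn over phi arguments and phi users; whenever
      * target is reached, the copy variables collected along the path are
      * emitted as one constraint (write_copy_path_cst):
      *   any_interfere <= \sum copies
      * i.e. if the two values interfere in memory, at least one copy on every
      * such path must be realized.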
2513  */
2514 static void
2515 find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2516 {
2517         ir_edge_t *edge;
2518         op_t      *op = get_irn_link(irn);
2519
2520         if(op->is_remat) return;
2521
2522         pset_insert_ptr(visited, irn);
2523
2524         if(is_Phi(irn)) {
2525                 int    n;
2526
2527                 /* visit all operands */
2528                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2529                         ir_node  *arg = get_irn_n(irn, n);
2530                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2531
2532                         if(!has_reg_class(si, arg)) continue;
2533
2534                         if(arg == target) {
2535                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2536                                 write_copy_path_cst(si, copies, any_interfere);
2537                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2538                         } else {
2539                                 if(!pset_find_ptr(visited, arg)) {
2540                                         pset_insert(copies, INT_TO_PTR(copy), copy);
2541                                         find_copy_path(si, arg, target, any_interfere, copies, visited);
2542                                         pset_remove(copies, INT_TO_PTR(copy), copy);
2543                                 }
2544                         }
2545                 }
2546         }
2547
2548         /* visit all uses which are phis */
2549         foreach_out_edge(irn, edge) {
2550                 ir_node  *user = edge->src;
2551                 int       pos  = edge->pos;
2552                 op_t     *op = get_irn_link(user);
2553                 ilp_var_t copy;
2554
2555                 if(!is_Phi(user)) continue;
2556                 if(!has_reg_class(si, user)) continue;
2557
2558                 copy = op->attr.live_range.args.copies[pos];
2559
2560                 if(user == target) {
2561                         pset_insert(copies, INT_TO_PTR(copy), copy);
2562                         write_copy_path_cst(si, copies, any_interfere);
2563                         pset_remove(copies, INT_TO_PTR(copy), copy);
2564                 } else {
2565                         if(!pset_find_ptr(visited, user)) {
2566                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2567                                 find_copy_path(si, user, target, any_interfere, copies, visited);
2568                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2569                         }
2570                 }
2571         }
2572
2573         pset_remove_ptr(visited, irn);
2574 }
2575
2576 static void
2577 gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
2578 {
2579         pset * copies = pset_new_ptr_default();
2580         pset * visited = pset_new_ptr_default();
2581
2582         find_copy_path(si, a, b, any_interfere, copies, visited);
2583
2584         del_pset(visited);
2585         del_pset(copies);
2586 }
2587
2588
2589 static void
2590 memcopyhandler(spill_ilp_t * si)
2591 {
2592         interference_t   *interference;
2593         char              buf[256];
2594         /* test memory values for interference */
2595
2596         /* analyze phi classes */
2597         phi_class_compute(si->chordal_env->irg);
2598
2599         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
2600         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
2601
2602 //      phi_class_free(si->chordal_env->irg);
2603
2604         /* now let's emit the ILP inequalities for these interferences */
2605         set_foreach(si->interferences, interference) {
2606                 irnlist_t      *irnlist;
2607                 ilp_var_t       interfere,
2608                                                 any_interfere;
2609                 ilp_cst_t       any_interfere_cst,
2610                                                 cst;
2611                 const ir_node  *a  = interference->a;
2612                 const ir_node  *b  = interference->b;
2613
2614                 /* any_interf <= \sum interf */
2615                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
2616                 any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2617                 any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2618
2619                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
2620
2621                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
2622                         const ir_node  *bb = irnlist->irn;
2623                         spill_bb_t     *spill_bb = get_irn_link(bb);
2624                         spill_t        *spilla,
2625                                                    *spillb,
2626                                                    query;
2627                         char           buf[256];
2628
2629                         query.irn = a;
2630                         spilla = set_find_spill(spill_bb->ilp, a);
2631                         assert(spilla);
2632
2633                         query.irn = b;
2634                         spillb = set_find_spill(spill_bb->ilp, b);
2635                         assert(spillb);
2636
2637                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
2638                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
2639                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
2640                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
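                             /*
                              * this is the usual linearization of
                              *   interfere = (mem_in_a | spill_a) & (mem_in_b | spill_b)
                              * over binary variables: (1) forces interfere to 1
                              * when both values are in memory, (2) and (3) force
                              * it back to 0 when one of them is not
                              */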
2641                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
2642                         interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2643
2644                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
2645                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2646
2647                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
2648                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
2649                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
2650                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
2651                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
2652
2653                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
2654                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2655
2656                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2657                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
2658                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
2659
2660                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
2661                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2662
2663                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2664                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
2665                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
2666
2667
2668                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
2669
2670                         /* any_interfere >= interf */
2671                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
2672                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2673
2674                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2675                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2676                 }
2677
2678                 /* now that we know whether the two values interfere in memory we can add the constraints that enforce copies */
2679                 gen_copy_constraints(si,a,b,any_interfere);
2680         }
2681 }
2682
2683
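/**
 * Check an ILP solution value for zero, tolerating a small amount of
 * solver round-off.
 */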
2684 static INLINE int
2685 is_zero(double x)
2686 {
2687         return fabs(x) < 0.00001;
2688 }
2689
2690 #ifdef KEEPALIVE
2691 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
2692 {
2693         spill_ilp_t *si = get_irg_link(current_ir_graph);
2694
2695         if(pset_find_ptr(si->all_possible_remats, n)) {
2696                 op_t   *op = (op_t*)get_irn_link(n);
2697                 assert(op && op->is_remat);
2698
2699                 if(!op->attr.remat.remat->inverse) {
2700                         if(op->attr.remat.pre) {
2701                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
2702                         } else {
2703                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
2704                         }
2705
2706                         return 1;
2707                 } else {
2711                         if(op->attr.remat.pre) {
2712                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
2713                         } else {
2714                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
2715                         }
2716
2717                         return 1;
2718                 }
2719         }
2720
2721         return 0;
2722 }
2723
2724 static void
2725 dump_graph_with_remats(ir_graph * irg, const char * suffix)
2726 {
2727         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
2728         be_dump(irg, suffix, dump_ir_block_graph_sched);
2729         set_dump_node_vcgattr_hook(NULL);
2730 }
2731 #endif
2732
2733 /**
2734  * Edge hook to dump the schedule edges with annotated register pressure.
2735  */
2736 static int
2737 sched_pressure_edge_hook(FILE *F, ir_node *irn)
2738 {
2739         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
2740                 ir_node *prev = sched_prev(irn);
2741                 fprintf(F, "edge:{sourcename:\"");
2742                 PRINT_NODEID(irn);
2743                 fprintf(F, "\" targetname:\"");
2744                 PRINT_NODEID(prev);
2745                 fprintf(F, "\" label:\"%d", (int)PTR_TO_INT(get_irn_link(irn)));
2746                 fprintf(F, "\" color:magenta}\n");
2747         }
2748         return 1;
2749 }
2750
2751 static void
2752 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
2753 {
2754         DUMP_NODE_EDGE_FUNC old_edge_hook = get_dump_node_edge_hook();
2755
2756         dump_consts_local(0);
2757         set_dump_node_edge_hook(sched_pressure_edge_hook);
2758         dump_ir_block_graph(irg, suffix);
2759         set_dump_node_edge_hook(old_edge_hook);
2760 }
2761
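/**
 * Block walker: annotate the block and every scheduled node (via the link
 * field) with the number of values of the current register class live at
 * that point. Used for the pressure dump and for moving reloads upwards.
 */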
2762 static void
2763 walker_pressure_annotator(ir_node * bb, void * data)
2764 {
2765         spill_ilp_t  *si = data;
2766         ir_node      *irn;
2767         irn_live_t   *li;
2768         int           n;
2769         pset         *live = pset_new_ptr_default();
2770         int           projs = 0;
2771
2772         live_foreach(bb, li) {
2773                 irn = (ir_node *) li->irn;
2774
2775                 if (live_is_end(li) && has_reg_class(si, irn)) {
2776                         pset_insert_ptr(live, irn);
2777                 }
2778         }
2779
2780         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
2781
2782         sched_foreach_reverse(bb, irn) {
2783                 if(is_Phi(irn)) {
2784                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
2785                         continue;
2786                 }
2787
2788                 if(has_reg_class(si, irn)) {
2789                         pset_remove_ptr(live, irn);
2790                         if(is_Proj(irn)) ++projs;
2791                 }
2792
2793                 if(!is_Proj(irn)) projs = 0;
2794
2795                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2796                         ir_node    *arg = get_irn_n(irn, n);
2797
2798                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
2799                 }
2800                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
2801         }
2802
2803         del_pset(live);
2804 }
2805
2806 static void
2807 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
2808 {
2809         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
2810 }
2811
2812 #ifdef KEEPALIVE
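/**
 * Connect all inserted remat candidates to a single be_Keep node (stored in
 * si->keep) so that they stay alive in the graph; delete_unnecessary_remats()
 * later walks this keep to remove the remats not selected by the ILP.
 */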
2813 static void
2814 connect_all_remats_with_keep(spill_ilp_t * si)
2815 {
2816         ir_node   *irn;
2817         ir_node  **ins,
2818                          **pos;
2819         int        n_remats;
2820
2821
2822         n_remats = pset_count(si->all_possible_remats);
2823         if(n_remats) {
2824                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
2825
2826                 pos = ins;
2827                 pset_foreach(si->all_possible_remats, irn) {
2828                         *pos = irn;
2829                         ++pos;
2830                 }
2831
2832                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
2833
2834                 obstack_free(si->obst, ins);
2835         }
2836 }
2837 #endif
2838
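/**
 * Connect all nodes collected in si->spills to a be_Keep node so that they
 * are kept alive (si->spills is only populated when KEEPALIVE_SPILLS or
 * KEEPALIVE_RELOADS is defined).
 */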
2839 static void
2840 connect_all_spills_with_keep(spill_ilp_t * si)
2841 {
2842         ir_node   *irn;
2843         ir_node  **ins,
2844                          **pos;
2845         int        n_spills;
2846         ir_node   *keep;
2847
2848
2849         n_spills = pset_count(si->spills);
2850         if(n_spills) {
2851                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
2852
2853                 pos = ins;
2854                 pset_foreach(si->spills, irn) {
2855                         *pos = irn;
2856                         ++pos;
2857                 }
2858
2859                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
2860
2861                 obstack_free(si->obst, ins);
2862         }
2863 }
2864
2865 /** insert a spill at an arbitrary position */
2866 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
2867 {
2868         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
2869         ir_graph *irg   = get_irn_irg(bl);
2870         ir_node *frame  = get_irg_frame(irg);
2871         ir_node *spill;
2872         ir_node *next;
2873
2874         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
2875         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
2876
2877         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
2878
2879         /*
2880          * search the right insertion point. a spill of a phi cannot be put
2881          * directly after the phi, if there are some phis behind the one which
2882          * is spilled. Also, a spill of a Proj must be after all Projs of the
2883          * same tuple node.
2884          *
2885          * Here's one special case:
2886          * If the spill is in the start block, the spill must be placed after the
2887          * frame pointer is set up. If the given insertion point comes before the
2888          * frame node, it is therefore moved right behind the frame node below.
2889          */
2890
2891         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
2892                 insert = frame;
2893
2894         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
2895                 insert = next;
2896
2897         sched_add_after(insert, spill);
2898         return spill;
2899 }
2900
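/**
 * Remove a rematerialisation node from the schedule and route all of its
 * operands (including the block input) to Bad, so it becomes dead code.
 */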
2901 static void
2902 delete_remat(spill_ilp_t * si, ir_node * remat) {
2903         int       n;
2904         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2905
2906         sched_remove(remat);
2907
2908         /* kill links to operands */
2909         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
2910                 set_irn_n(remat, n, bad);
2911         }
2912 }
2913
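/**
 * Disconnect all remat candidates that ended up unused (their Proj and/or
 * their operator have no users any more) and free the per-value remat sets.
 */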
2914 static void
2915 clean_remat_info(spill_ilp_t * si)
2916 {
2917         int            n;
2918         remat_t       *remat;
2919         remat_info_t  *remat_info;
2920         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
2921
2922         set_foreach(si->remat_info, remat_info) {
2923                 if(!remat_info->remats) continue;
2924
2925                 pset_foreach(remat_info->remats, remat)
2926                 {
2927                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
2928                                 set_irn_n(remat->proj, -1, bad);
2929                                 set_irn_n(remat->proj, 0, bad);
2930                         }
2931
2932                         if(get_irn_n_edges(remat->op) == 0) {
2933                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
2934                                         set_irn_n(remat->op, n, bad);
2935                                 }
2936                         }
2937                 }
2938
2939                 if(remat_info->remats) del_pset(remat_info->remats);
2940                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
2941         }
2942 }
2943
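/**
 * Using the ILP solution, delete every inserted remat whose corresponding
 * ILP variable is (numerically) zero; the remats chosen by the solver stay
 * in the schedule.
 */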
2944 static void
2945 delete_unnecessary_remats(spill_ilp_t * si)
2946 {
2947 #ifdef KEEPALIVE
2948         int       n;
2949         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2950
2951         if(si->keep) {
2952                 ir_node   *end = get_irg_end(si->chordal_env->irg);
2953                 ir_node  **keeps;
2954
2955                 for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
2956                         ir_node        *keep_arg = get_irn_n(si->keep, n);
2957                         op_t           *arg_op = get_irn_link(keep_arg);
2958                         lpp_name_t     *name;
2959
2960                         assert(arg_op->is_remat);
2961
2962                         name = si->lpp->vars[arg_op->attr.remat.ilp];
2963
2964                         if(is_zero(name->value)) {
2965                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
2966                                 /* TODO check whether reload is preferred over remat (could be bug) */
2967                                 delete_remat(si, keep_arg);
2968                         } else {
2969                                 if(!arg_op->attr.remat.remat->inverse) {
2970                                         if(arg_op->attr.remat.pre) {
2971                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
2972                                         } else {
2973                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
2974                                         }
2975                                 } else {
2976                                         if(arg_op->attr.remat.pre) {
2977                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
2978                                         } else {
2979                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
2980                                         }
2981                                 }
2982                         }
2983
2984                         set_irn_n(si->keep, n, bad);
2985                 }
2986 #if 0
2987                 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
2988                         ir_node        *end_arg = get_End_keepalive(end, i);
2989
2990                         if(end_arg != si->keep) {
2991                                 obstack_grow(si->obst, &end_arg, sizeof(end_arg));
2992                         }
2993                 }
2994                 keeps = obstack_finish(si->obst);
2995                 set_End_keepalives(end, n-1, keeps);
2996                 obstack_free(si->obst, keeps);
2997 #endif
2998         } else {
2999                 DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
3000         }
3001 #else
3002         ir_node  *remat;
3003
3004         pset_foreach(si->all_possible_remats, remat) {
3005                 op_t           *remat_op = get_irn_link(remat);
3006                 lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
3007
3008                 if(is_zero(name->value)) {
3009                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
3010                         /* TODO check whether reload is preferred over remat (could be bug) */
3011                         delete_remat(si, remat);
3012                 } else {
3013                         if(!remat_op->attr.remat.remat->inverse) {
3014                                 if(remat_op->attr.remat.pre) {
3015                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3016                                 } else {
3017                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3018                                 }
3019                         } else {
3020                                 if(remat_op->attr.remat.pre) {
3021                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3022                                 } else {
3023                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3024                                 }
3025                         }
3026                 }
3027         }
3028 #endif
3029 }
3030
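/**
 * Return a newly allocated pset containing all spills that have been
 * recorded for the given value.
 */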
3031 static pset *
3032 get_spills_for_value(spill_ilp_t * si, ir_node * value)
3033 {
3034         pset     *spills = pset_new_ptr_default();
3035
3036         ir_node  *next;
3037         defs_t   *defs;
3038
3039         defs = set_find_def(si->values, value);
3040
3041         if(defs && defs->spills) {
3042                 for(next = defs->spills; next; next = get_irn_link(next)) {
3043                         pset_insert_ptr(spills, next);
3044                 }
3045         }
3046
3047         return spills;
3048 }
3049
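/**
 * Return a newly allocated pset containing the value itself and all new
 * definitions (remats and reloads) that have been recorded for it.
 */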
3050 static pset *
3051 get_remats_for_value(spill_ilp_t * si, ir_node * value)
3052 {
3053         pset     *remats = pset_new_ptr_default();
3054
3055         ir_node  *next;
3056         defs_t   *defs;
3057
3058         pset_insert_ptr(remats, value);
3059         defs = set_find_def(si->values, value);
3060
3061         if(defs && defs->remats) {
3062                 for(next = defs->remats; next; next = get_irn_link(next)) {
3063                         pset_insert_ptr(remats, next);
3064                 }
3065         }
3066
3067         return remats;
3068 }
3069
3070
3071 /**
3072  * @param before   The node after which the spill will be placed in the schedule
3073  */
3074 /* TODO set context properly */
3075 static ir_node *
3076 insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
3077 {
3078         defs_t   *defs;
3079         ir_node  *spill;
3080         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3081
3082         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3083
3084         spill = be_spill2(arch_env, irn, before, irn);
3085
3086         defs = set_insert_def(si->values, value);
3087         assert(defs);
3088
3089         /* enter into the linked list */
3090         set_irn_link(spill, defs->spills);
3091         defs->spills = spill;
3092
3093 #ifdef KEEPALIVE_SPILLS
3094         pset_insert_ptr(si->spills, spill);
3095 #endif
3096
3097         return spill;
3098 }
3099
3100 /**
3101  * @param phi      The Phi node which has to be spilled
3102  */
3103 static ir_node *
3104 insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
3105 {
3106         ir_node   *mem_phi;
3107         ir_node  **ins;
3108         defs_t    *defs;
3109         int        n;
3110         op_t      *op = get_irn_link(phi);
3111
3112         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3113
3114         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3115                 ins[n] = si->m_unknown;
3116         }
3117
3118         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3119
3120         defs = set_insert_def(si->values, phi);
3121         assert(defs);
3122
3123         /* enter into the linked list */
3124         set_irn_link(mem_phi, defs->spills);
3125         defs->spills = mem_phi;
3126
3127         sched_add_after(phi, mem_phi);
3128
3129 #ifdef KEEPALIVE_SPILLS
3130         pset_insert_ptr(si->spills, mem_phi);
3131 #endif
3132
3133
3134         return mem_phi;
3135 }
3136
3137 /**
3138  * Add remat to list of defs, destroys link field!
3139  */
3140 static void
3141 insert_remat(spill_ilp_t * si, ir_node * remat)
3142 {
3143         defs_t   *defs;
3144         op_t     *remat_op = get_irn_link(remat);
3145
3146         assert(remat_op->is_remat);
3147
3148         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3149         assert(defs);
3150
3151         /* enter into the linked list */
3152         set_irn_link(remat, defs->remats);
3153         defs->remats = remat;
3154 }
3155
3156
3157 /**
3158  * Add reload before operation and add to list of defs
3159  */
3160 static ir_node *
3161 insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
3162 {
3163         defs_t   *defs;
3164         ir_node  *reload,
3165                          *spill;
3166         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3167
3168         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3169
3170         defs = set_find_def(si->values, value);
3171
3172         spill = defs->spills;
3173         assert(spill && "no spill placed before reload");
3174
3175         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3176
3177         /* enter into the linked list */
3178         set_irn_link(reload, defs->remats);
3179         defs->remats = reload;
3180
3181         return reload;
3182 }
3183
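/**
 * Block walker: turn the spill-related part of the ILP solution into nodes.
 * Spilled Phis get a memory Phi, values defined in this block are spilled
 * right after their definition, values entering in a register are spilled at
 * the block start, and the remaining spills are placed after the remat that
 * recomputes the value.
 */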
3184 static void
3185 walker_spill_placer(ir_node * bb, void * data) {
3186         spill_ilp_t   *si = (spill_ilp_t*)data;
3187         ir_node       *irn;
3188         spill_bb_t    *spill_bb = get_irn_link(bb);
3189         pset          *spills_to_do = pset_new_ptr_default();
3190         spill_t       *spill;
3191
3192         set_foreach(spill_bb->ilp, spill) {
3193                 lpp_name_t    *name;
3194
3195                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3196                         name = si->lpp->vars[spill->mem_in];
3197                         if(!is_zero(name->value)) {
3198                                 ir_node   *mem_phi;
3199
3200                                 mem_phi = insert_mem_phi(si, spill->irn);
3201
3202                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3203                         }
3204                 }
3205
3206                 name = si->lpp->vars[spill->spill];
3207                 if(!is_zero(name->value)) {
3208                         /* place spill directly after definition */
3209                         if(get_nodes_block(spill->irn) == bb) {
3210                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3211                                 continue;
3212                         }
3213
3214                         /* place spill at bb start */
3215                         if(spill->reg_in > 0) {
3216                                 name = si->lpp->vars[spill->reg_in];
3217                                 if(!is_zero(name->value)) {
3218                                         insert_spill(si, spill->irn, spill->irn, bb);
3219                                         continue;
3220                                 }
3221                         }
3222                         /* place spill after a remat */
3223                         pset_insert_ptr(spills_to_do, spill->irn);
3224                 }
3225         }
3226         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3227
3228
3229         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3230                 op_t     *op = get_irn_link(irn);
3231
3232                 if(be_is_Spill(irn)) continue;
3233
3234                 if(op->is_remat) {
3235                         /* TODO fix this if we want to support remats with more than two nodes */
3236                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3237                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3238
3239                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3240                         }
3241                 } else {
3242                         if(pset_find_ptr(spills_to_do, irn)) {
3243                                 pset_remove_ptr(spills_to_do, irn);
3244
3245                                 insert_spill(si, irn, irn, irn);
3246                         }
3247                 }
3248
3249         }
3250
3251         assert(pset_count(spills_to_do) == 0);
3252
3253         /* afterwards free data in block */
3254         del_pset(spills_to_do);
3255 }
3256
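/**
 * Insert a memory copy (implemented as a spill) of the given value in block
 * bb, after the last definition or remat of the value in that block (or at
 * the block start if there is none).
 */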
3257 static ir_node *
3258 insert_mem_copy(spill_ilp_t * si, const ir_node * bb, const ir_node * value)
3259 {
3260         ir_node          *insert_pos = bb;
3261         ir_node          *spill;
3262         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3263
3264         /* find last definition of arg value in block */
3265         ir_node  *next;
3266         defs_t   *defs;
3267         int       last = 0;
3268
3269         defs = set_find_def(si->values, value);
3270
3271         if(defs && defs->remats) {
3272                 for(next = defs->remats; next; next = get_irn_link(next)) {
3273                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3274                                 last = sched_get_time_step(next);
3275                                 insert_pos = next;
3276                         }
3277                 }
3278         }
3279
3280         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3281                 last = sched_get_time_step(value);
3282                 insert_pos = value;
3283         }
3284
3285         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3286
3287         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos, value);
3288
3289         return spill;
3290 }
3291
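/**
 * Fix the operands of the memory Phis: each PhiM argument is wired to a
 * spill of the corresponding Phi argument, inserting a memory copy in the
 * predecessor block whenever the ILP solution asks for one.
 */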
3292 static void
3293 phim_fixer(spill_ilp_t *si) {
3294         defs_t  *defs;
3295
3296         set_foreach(si->values, defs) {
3297                 const ir_node  *phi = defs->value;
3298                 op_t           *op = get_irn_link(phi);
3299                 ir_node        *phi_m = NULL;
3300                 ir_node        *next = defs->spills;
3301                 int             n;
3302
3303                 if(!is_Phi(phi)) continue;
3304
3305                 while(next) {
3306                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3307                                 phi_m = next;
3308                                 break;
3309                         } else {
3310                                 next = get_irn_link(next);
3311                         }
3312                 }
3313                 if(!phi_m) continue;
3314
3315                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3316                         const ir_node  *value = get_irn_n(phi, n);
3317                         defs_t         *val_defs = set_find_def(si->values, value);
3319
3320                         /* get a spill of this value */
3321                         ir_node      *spill = val_defs->spills;
3322
3323
3324 #ifndef NO_MEMCOPIES
3325                         ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3326                         lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3327
3328                         if(!is_zero(name->value)) {
3329                                 spill = insert_mem_copy(si, pred, value);
3330                         } else {
3331                                 assert(spill && "no spill placed before PhiM");
3332                         }
3333 #else
3334                         assert(spill && "no spill placed before PhiM");
3335 #endif
3336                         set_irn_n(phi_m, n, spill);
3337                 }
3338         }
3339 }
3340
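/**
 * Block walker: turn the reload-related part of the ILP solution into nodes.
 * Reloads are placed at the block end and in front of uses whose reload
 * variables are set (always before any pre-remats), the kept remats are
 * registered as new definitions, and the per-block ILP data is freed.
 */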
3341 static void
3342 walker_reload_placer(ir_node * bb, void * data) {
3343         spill_ilp_t   *si = (spill_ilp_t*)data;
3344         ir_node       *irn;
3345         spill_bb_t    *spill_bb = get_irn_link(bb);
3348
3349         /* reloads at end of block */
3350         if(spill_bb->reloads) {
3351                 keyval_t    *keyval;
3352
3353                 set_foreach(spill_bb->reloads, keyval) {
3354                         ir_node        *irn = (ir_node*)keyval->key;
3355                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3356                         lpp_name_t     *name;
3357
3358                         name = si->lpp->vars[reload];
3359                         if(!is_zero(name->value)) {
3360                                 ir_node    *reload;
3361                                 ir_node    *insert_pos = bb;
3362                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3363                                 op_t       *prev_op = get_irn_link(prev);
3364
3365                                 while(be_is_Spill(prev)) {
3366                                         prev = sched_prev(prev);
3367                                 }
3368
3369                                 prev_op = get_irn_link(prev);
3370
3371                                 /* insert reload before pre-remats */
3372                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3373                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3374                                         insert_pos = prev;
3375
3376                                         do {
3377                                                 prev = sched_prev(prev);
3378                                         } while(be_is_Spill(prev));
3379
3380                                         prev_op = get_irn_link(prev);
3381
3382                                 }
3383
3384                                 reload = insert_reload(si, irn, insert_pos);
3385
3386 #ifdef KEEPALIVE_RELOADS
3387                                 pset_insert_ptr(si->spills, reload);
3388 #endif
3389                         }
3390                 }
3391         }
3392
3393         /* walk and insert more reloads and collect remats */
3394         sched_foreach_reverse(bb, irn) {
3395                 op_t     *op = get_irn_link(irn);
3396
3397                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3398                 if(is_Phi(irn)) break;
3399
3400                 if(op->is_remat) {
3401                         if(get_irn_mode(irn) != mode_T) {
3402                                 insert_remat(si, irn);
3403                         }
3404                 } else {
3405                         int    n;
3406
3407                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3408                                 ir_node    *arg = get_irn_n(irn, n);
3409
3410                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3411                                         lpp_name_t    *name;
3412
3413                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3414                                         if(!is_zero(name->value)) {
3415                                                 ir_node    *reload;
3416                                                 ir_node    *insert_pos = irn;
3417                                                 ir_node    *prev = sched_prev(insert_pos);
3418                                                 op_t       *prev_op;
3419
3420                                                 while(be_is_Spill(prev)) {
3421                                                         prev = sched_prev(prev);
3422                                                 }
3423
3424                                                 prev_op = get_irn_link(prev);
3425
3426                                                 /* insert reload before pre-remats */
3427                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3428                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3429                                                         insert_pos = prev;
3430
3431                                                         do {
3432                                                                 prev = sched_prev(prev);
3433                                                         } while(be_is_Spill(prev));
3434
3435                                                         prev_op = get_irn_link(prev);
3436
3437                                                 }
3438
3439                                                 reload = insert_reload(si, arg, insert_pos);
3440
3441                                                 set_irn_n(irn, n, reload);
3442
3443 #ifdef KEEPALIVE_RELOADS
3444                                                 pset_insert_ptr(si->spills, reload);
3445 #endif
3446                                         }
3447                                 }
3448                         }
3449                 }
3450         }
3451
3452         del_set(spill_bb->ilp);
3453         if(spill_bb->reloads) del_set(spill_bb->reloads);
3454 }
3455
3456 static void
3457 walker_collect_used(ir_node * irn, void * data)
3458 {
3459         lc_bitset_t   *used = data;
3460
3461         lc_bitset_set(used, get_irn_idx(irn));
3462 }
3463
3464 struct kill_helper {
3465         lc_bitset_t  *used;
3466         spill_ilp_t  *si;
3467 };
3468
3469 static void
3470 walker_kill_unused(ir_node * bb, void * data)
3471 {
3472         struct kill_helper *kh = data;
3473         const ir_node      *bad = get_irg_bad(get_irn_irg(bb));
3474         ir_node            *irn;
3475
3476
3477         for(irn=sched_first(bb); !sched_is_end(irn);) {
3478                 ir_node     *next = sched_next(irn);
3479                 int          n;
3480
3481                 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3482                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3483                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3484 #if 0
3485                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3486 #endif
3487                         }
3488
3489                         sched_remove(irn);
3490
3491                         set_nodes_block(irn, bad);
3492                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3493                                 set_irn_n(irn, n, bad);
3494                         }
3495                 }
3496                 irn = next;
3497         }
3498 }
3499
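/**
 * Remove every scheduled node that is no longer reachable in the graph
 * (typically spills or reloads made redundant by rewiring) from the schedule
 * and route its edges to Bad.
 */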
3500 static void
3501 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3502 {
3503         struct kill_helper kh;
3504
3505         kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3506         kh.si = si;
3507
3508         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3509         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3510
3511         lc_bitset_free(kh.used);
3512 }
3513
3514 static void
3515 print_irn_pset(pset * p)
3516 {
3517         ir_node   *irn;
3518
3519         pset_foreach(p, irn) {
3520                 ir_printf("%+F\n", irn);
3521         }
3522 }
3523
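/**
 * Re-establish SSA form for the newly created definitions: values with more
 * than one spill get their memory uses rewired via SSA construction, and
 * values with remats or reloads get their register uses rewired likewise.
 */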
3524 static void
3525 rewire_uses(spill_ilp_t * si)
3526 {
3527         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3528         defs_t               *defs;
3529         pset                 *ignore = pset_new_ptr(1);
3530
3531         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3532
3533         /* first fix uses of spills */
3534         set_foreach(si->values, defs) {
3535                 pset     *reloads;
3536                 pset     *spills;
3537                 ir_node  *next = defs->remats;
3538                 int remats = 0;
3539
3540                 reloads = pset_new_ptr_default();
3541
3542                 while(next) {
3543                         if(be_is_Reload(next)) {
3544                                 pset_insert_ptr(reloads, next);
3545                         } else {
3546                                 ++remats;
3547                         }
3548                         next = get_irn_link(next);
3549                 }
3550
3551                 spills = get_spills_for_value(si, defs->value);
3552                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
3553                 if(pset_count(spills) > 1) {
3554                         //assert(pset_count(reloads) > 0);
3555                         //                              print_irn_pset(spills);
3556                         //                              print_irn_pset(reloads);
3557
3558                         be_ssa_constr_set_ignore(dfi, spills, ignore);
3559                 }
3560
3561                 del_pset(reloads);
3562                 del_pset(spills);
3563         }
3564
3565         /* then fix uses of remats and reloads */
3566         set_foreach(si->values, defs) {
3567                 pset     *nodes;
3568                 ir_node  *next = defs->remats;
3569
3570                 if(next) {
3571                         nodes = pset_new_ptr_default();
3572                         pset_insert_ptr(nodes, defs->value);
3573
3574                         while(next) {
3575                                 pset_insert_ptr(nodes, next);
3576                                 next = get_irn_link(next);
3577                         }
3578
3579                         if(pset_count(nodes) > 1) {
3580                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
3581                                 be_ssa_constr_set(dfi, nodes);
3582                         }
3583
3584                         del_pset(nodes);
3585                 }
3586         }
3587
3588 //      remove_unused_defs(si);
3589
3590         be_free_dominance_frontiers(dfi);
3591 }
3592
3593
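/**
 * Transfer the ILP solution back into the graph: delete unselected remats,
 * place spills, memory Phis and reloads, fix the PhiM operands, clean up the
 * remat bookkeeping and restore SSA form.
 */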
3594 static void
3595 writeback_results(spill_ilp_t * si)
3596 {
3597         /* walk through the graph and collect all spills, reloads and remats for a value */
3598
3599         si->values = new_set(cmp_defs, 4096);
3600
3601         DBG((si->dbg, LEVEL_1, "Applying results\n"));
3602         delete_unnecessary_remats(si);
3603         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
3604         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
3605         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
3606         phim_fixer(si);
3607
3608         /* clean the remat info! there are still back-edges leading there! */
3609         clean_remat_info(si);
3610
3611         rewire_uses(si);
3612
3613         connect_all_spills_with_keep(si);
3614
3615         del_set(si->values);
3616 }
3617
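/**
 * Count the registers of the current class that are actually available for
 * allocation (i.e. not marked as ignore).
 */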
3618 static int
3619 get_n_regs(spill_ilp_t * si)
3620 {
3621         int     arch_n_regs = arch_register_class_n_regs(si->cls);
3622         int     free = 0;
3623         int     i;
3624
3625         for(i=0; i<arch_n_regs; i++) {
3626                 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
3627                         free++;
3628                 }
3629         }
3630
3631         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
3632         return free;
3633 }
3634
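/**
 * Block walker: move each reload upwards in the schedule as long as the
 * annotated register pressure stays below the number of available registers,
 * updating the pressure annotations on the way.
 */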
3635 static void
3636 walker_reload_mover(ir_node * bb, void * data)
3637 {
3638         spill_ilp_t   *si = data;
3639         ir_node           *tmp;
3640
3641         sched_foreach(bb, tmp) {
3642                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
3643                         ir_node       *reload = tmp;
3644                         ir_node       *irn = tmp;
3645
3646                         /* move reload upwards */
3647
3648                         int pressure = PTR_TO_INT(get_irn_link(reload));
3649                         if(pressure < si->n_regs) {
3650                                 irn = sched_prev(reload);
3651                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
3652                                 sched_remove(reload);
3653                                 pressure = PTR_TO_INT(get_irn_link(irn));
3654
3655                                 while(pressure < si->n_regs) {
3656                                         if(sched_is_end(irn) || (be_is_Reload(irn) && has_reg_class(si, irn))) break;
3657
3658                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
3659                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
3660                                         irn = sched_prev(irn);
3661
3662                                         pressure = PTR_TO_INT(get_irn_link(irn));
3663                                 }
3664
3665                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
3666                                 sched_put_after(irn, reload);
3667                         }
3668                 }
3669         }
3670 }
3671
3672 static void
3673 move_reloads_upward(spill_ilp_t * si)
3674 {
3675         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
3676 }
3677
3678
3679 /**
3680  * Walk all irg blocks and check for interfering spills inside of phi classes
3681  */
3682 static void
3683 luke_meminterferencechecker(ir_node * bb, void * data)
3684 {
3685         spill_ilp_t    *si = (spill_ilp_t*)data;
3686         irn_live_t     *li1,
3687                        *li2;
3688
3689         live_foreach(bb, li1) {
3690                 ir_node        *a = (ir_node *) li1->irn;
3691
3692                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_M)) continue;
3693
3694                 /* a is only interesting if it is inside a phi class */
3695                 if (get_phi_class(a)) {
3696                         for(li2=li1->next; li2; li2 = li2->next) {
3697                                 ir_node        *b = (ir_node *) li2->irn;
3698
3699                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_M)) continue;
3700
3701                                 /* a and b are only interesting if they are in the same phi class */
3702                                 if(get_phi_class(a) == get_phi_class(b)) {
3703                                         if(values_interfere_in_block(bb, a, b)) {
3704                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
3705                                         }
3706                                 }
3707                         }
3708                 }
3709         }
3710 }
3711
3712 static void
3713 verify_phiclasses(spill_ilp_t * si)
3714 {
3715         /* analyze phi classes */
3716         phi_class_compute(si->chordal_env->irg);
3717
3718         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
3719         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
3720 }
3721
3722 static void
3723 walker_spillslotassigner(ir_node * irn, void * data)
3724 {
3725         spill_ilp_t            *si = (spill_ilp_t*)data;
3726         void                   *cls;
3727
3728         if(!be_is_Spill(irn)) return;
3729
3730         /* set spill context to phi class if it has one ;) */
3731
3732         cls = get_phi_class(irn);
3733         if(cls)
3734                 be_set_Spill_context(irn, cls);
3735         else
3736                 be_set_Spill_context(irn, irn);
3737 }
3738
3739
3740 static void
3741 assign_spillslots(spill_ilp_t * si)
3742 {
3743         DBG((si->dbg, LEVEL_2, "\t calling spill slot assigner\n"));
3744         irg_walk_graph(si->chordal_env->irg, walker_spillslotassigner, NULL, si);
3745 }
3746
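/**
 * Main entry point of the ILP spiller: collect and insert remat candidates,
 * build the spill/reload/remat ILP for the given register class, solve it
 * (locally or via the LPP server) and write the solution back into the graph.
 */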
3747 void
3748 be_spill_remat(const be_chordal_env_t * chordal_env)
3749 {
3750         char            problem_name[256];
3751         char            dump_suffix[256];
3752         char            dump_suffix2[256];
3753         struct obstack  obst;
3754         spill_ilp_t     si;
3755
3756         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
3757         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
3758         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
3759
3760         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
3761         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
3762
3763 	be_check_dominance(chordal_env->irg);
3764
3765         obstack_init(&obst);
3766         si.chordal_env = chordal_env;
3767         si.obst = &obst;
3768         si.cls = chordal_env->cls;
3769         si.lpp = new_lpp(problem_name, lpp_minimize);
3770         si.remat_info = new_set(cmp_remat_info, 4096);
3771         si.interferences = new_set(cmp_interference, 32);
3772         si.all_possible_remats = pset_new_ptr_default();
3773         si.spills = pset_new_ptr_default();
3774         si.inverse_ops = pset_new_ptr_default();
3775 #ifdef KEEPALIVE
3776         si.keep = NULL;
3777 #endif
3778         si.n_regs = get_n_regs(&si);
3779
3780         set_irg_link(chordal_env->irg, &si);
3781         compute_doms(chordal_env->irg);
3782
3783         /* compute phi classes */
3784 //      phi_class_compute(chordal_env->irg);
3785
3786         be_analyze_regpressure(chordal_env, "-pre");
3787
3788 #ifdef COLLECT_REMATS
3789         /* collect remats */
3790         DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
3791         irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
3792 #endif
3793
3794         /* insert possible remats */
3795         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
3796         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
3797         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
3798
3799 #ifdef KEEPALIVE
3800         DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
3801         connect_all_remats_with_keep(&si);
3802         /* dump graph with inserted remats */
3803         dump_graph_with_remats(chordal_env->irg, dump_suffix);
3804 #endif
3805
3806         /* insert copies for phi arguments not in my regclass */
3807         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
3808
3809         /* recompute liveness */
3810         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
3811         be_liveness(chordal_env->irg);
3812
3813         /* build the ILP */
3814
3815         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
3816         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
3817         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
3818
3819         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
3820         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
3821
3822 #ifndef NO_MEMCOPIES
3823         DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
3824         memcopyhandler(&si);
3825 #endif
3826
3827 #ifdef DUMP_ILP
3828         {
3829                 FILE           *f;
3830                 char            buf[256];
3831
3832                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
3833                 if ((f = fopen(buf, "wt")) != NULL) {
3834                         lpp_dump_plain(si.lpp, f);
3835                         fclose(f);
3836                 }
3837         }
3838 #endif
3839
3840 #ifdef SOLVE
3841         DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
3842 #ifdef ILP_TIMEOUT
3843         lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
3844 #endif
3845
3846 #ifdef SOLVE_LOCAL
3847         lpp_solve_cplex(si.lpp);
3848 #else
3849         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
3850 #endif
3851         assert(lpp_is_sol_valid(si.lpp)
3852                && "solution of ILP must be valid");
3853
3854         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
3855
3856 #ifdef DUMP_SOLUTION
3857         {
3858                 FILE           *f;
3859                 char            buf[256];
3860
3861                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
3862                 if ((f = fopen(buf, "wt")) != NULL) {
3863                         int             i;
3864                         for (i = 0; i < si.lpp->var_next; ++i) {
3865                                 lpp_name_t     *name = si.lpp->vars[i];
3866                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
3867                         }
3868                         fclose(f);
3869                 }
3870         }
3871 #endif
3872
3873         writeback_results(&si);
3874
3875 #endif                          /* SOLVE */
3876
3877         kill_all_unused_values_in_schedule(&si);
3878
3879 #if defined(KEEPALIVE_SPILLS) || defined(KEEPALIVE_RELOADS)
3880         be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
3881 #endif
3882
3883         // move reloads upwards
3884         be_liveness(chordal_env->irg);
3885         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3886         move_reloads_upward(&si);
3887
3888 #ifndef NO_MEMCOPIES
3889         verify_phiclasses(&si);
3890         assign_spillslots(&si);
3891 #endif
3892
3893         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3894
3895         dump_pressure_graph(&si, dump_suffix2);
3896
3897         be_analyze_regpressure(chordal_env, "-post");
3898
3899         be_check_dominance(chordal_env->irg);
3900
3901         free_dom(chordal_env->irg);
3902         del_set(si.interferences);
3903         del_pset(si.inverse_ops);
3904         del_pset(si.all_possible_remats);
3905         del_pset(si.spills);
3906         free_lpp(si.lpp);
3907         obstack_free(&obst, NULL);
3908         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
3909 }
3910
3911 #else                           /* WITH_ILP */
3912
3913 static void
3914 only_that_you_can_compile_without_WITH_ILP_defined(void)
3915 {
3916 }
3917
3918 #endif                          /* WITH_ILP */