1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35
36 #include <lpp/lpp.h>
37 #include <lpp/lpp_net.h>
38 #include <lpp/lpp_cplex.h>
39 //#include <lc_pset.h>
40 #include <libcore/lc_bitset.h>
41
42 #include "be_t.h"
43 #include "belive_t.h"
44 #include "besched_t.h"
45 #include "beirgmod.h"
46 #include "bearch.h"
47 #include "benode_t.h"
48 #include "beutil.h"
49 #include "bespillremat.h"
50 #include "bespill.h"
51 #include "bepressurestat.h"
52
53 #include "bechordal_t.h"
54
55 #define BIGM 100000.0
56
57 #define DUMP_SOLUTION
58 #define DUMP_ILP
59 //#define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
60 #define COLLECT_REMATS /* enable rematerialization */
61 #define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
62 #define REMAT_WHILE_LIVE /* only remat values that are live */
63 //#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
64 //#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
65 #define MAY_DIE_AT_REMAT /* allow values to die after a pre remat */
66 #define NO_SINGLE_USE_REMATS /* do not create remats for values that have only a single use */
67 //#define KEEPALIVE_SPILLS
68 //#define KEEPALIVE_RELOADS
69 #define GOODWIN_REDUCTION
70 //#define NO_MEMCOPIES
71
72 #define  SOLVE
73 //#define  SOLVE_LOCAL
74 #define LPP_SERVER "i44pc52"
75 #define LPP_SOLVER "cplex"
76
77 #define COST_LOAD      10
78 #define COST_STORE     50
79 #define COST_REMAT     1
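/* Note: these static costs only become ILP objective coefficients after being
 * scaled by the execution frequency of the block in question (see the
 * lpp_add_var() calls below), so a reload in a hot loop is penalized more
 * heavily than one on a cold path. */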
80
81 #define ILP_TIMEOUT    120
82
83 #define ILP_UNDEF               -1
84
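/**
 * Environment of the ILP spiller. One instance is set up per register class
 * and carries the LPP problem, the collected (inverse) remat candidates and
 * the book-keeping sets used while emitting ILP variables and constraints.
 */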
85 typedef struct _spill_ilp_t {
86         const arch_register_class_t  *cls;
87         int                           n_regs;
88         const be_chordal_env_t       *chordal_env;
89         lpp_t                        *lpp;
90         struct obstack               *obst;
91         set                          *remat_info;
92         pset                         *all_possible_remats;
93         pset                         *inverse_ops;
94 #ifdef KEEPALIVE
95         ir_node                      *keep;
96 #endif
97         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
98         set                          *execfreqs;
99         pset                         *spills;
100         set                          *interferences;
101         ir_node                      *m_unknown;
102         DEBUG_ONLY(firm_dbg_module_t * dbg);
103 } spill_ilp_t;
104
105 typedef int ilp_var_t;
106 typedef int ilp_cst_t;
107
108 typedef struct _spill_bb_t {
109         set      *ilp;
110         set      *reloads;
111 } spill_bb_t;
112
113 typedef struct _remat_t {
114         const ir_node        *op;      /**< for copy_irn */
115         const ir_node        *value;   /**< the value which is being recomputed by this remat */
116         ir_node              *proj;    /**< not NULL if the above op produces a tuple */
117         int                   cost;    /**< cost of this remat */
118         int                   inverse; /**< nonzero if this is an inverse remat */
119 } remat_t;
120
121 /**
122  * Data to be attached to each IR node. For remats this contains the ilp_var
123  * for this remat and for normal ops this contains the ilp_vars for
124  * reloading each operand
125  */
126 typedef struct _op_t {
127         int             is_remat;
128         union {
129                 struct {
130                         ilp_var_t       ilp;
131                         remat_t        *remat; /**< the remat this op belongs to */
132                         int             pre; /**< 1, if this is a pressure-increasing remat */
133                 } remat;
134                 struct {
135                         ilp_var_t       ilp;
136                         ir_node        *op; /**< the operation this live range belongs to */
137                         union {
138                                 ilp_var_t      *reloads;
139                                 ilp_var_t      *copies;
140                         } args;
141                 } live_range;
142         } attr;
143 } op_t;
144
145 typedef struct _defs_t {
146         ir_node   *value;
147         ir_node   *spills;  /**< points to the first spill for this value (linked by link field) */
148         ir_node   *remats;  /**< points to the first definition for this value (linked by link field) */
149 } defs_t;
150
151 typedef struct _remat_info_t {
152         const ir_node       *irn; /**< the irn to which these remats belong */
153         pset                *remats; /**< possible remats for this value */
154         pset                *remats_by_operand; /**< remats with this value as operand */
155 } remat_info_t;
156
157 typedef struct _keyval_t {
158         const void          *key;
159         const void          *val;
160 } keyval_t;
161
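/**
 * Per-value, per-block ILP decision variables: whether the value is held in a
 * register or in memory at block entry (reg_in/mem_in) and at block exit
 * (reg_out/mem_out), and whether a spill of it is placed inside this block.
 */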
162 typedef struct _spill_t {
163         ir_node      *irn;
164         ilp_var_t     reg_in;
165         ilp_var_t     mem_in;
166         ilp_var_t     reg_out;
167         ilp_var_t     mem_out;
168         ilp_var_t     spill;
169 } spill_t;
170
171 static INLINE int
172 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
173 {
174         return chordal_has_class(si->chordal_env, irn);
175 }
176
177 #if 0
178 static int
179 cmp_remat(const void *a, const void *b)
180 {
181         const keyval_t *p = a;
182         const keyval_t *q = b;
183         const remat_t  *r = p->val;
184         const remat_t  *s = q->val;
185
186         assert(r && s);
187
188         return !(r == s || r->op == s->op);
189 }
190 #endif
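/* set/pset comparison callbacks in libFirm return 0 to signal equality, hence
 * the negated expressions in the comparators below. */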
191 static int
192 cmp_remat(const void *a, const void *b)
193 {
194         const remat_t  *r = a;
195         const remat_t  *s = b;
196
197         return !(r == s || r->op == s->op);
198 }
199
200 static int
201 cmp_spill(const void *a, const void *b, size_t size)
202 {
203         const spill_t *p = a;
204         const spill_t *q = b;
205
206 //      return !(p->irn == q->irn && p->bb == q->bb);
207         return !(p->irn == q->irn);
208 }
209
210 static keyval_t *
211 set_find_keyval(set * set, void * key)
212 {
213         keyval_t     query;
214
215         query.key = key;
216         return set_find(set, &query, sizeof(query), HASH_PTR(key));
217 }
218
219 static keyval_t *
220 set_insert_keyval(set * set, void * key, void * val)
221 {
222         keyval_t     query;
223
224         query.key = key;
225         query.val = val;
226         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
227 }
228
229 static defs_t *
230 set_find_def(set * set, ir_node * value)
231 {
232         defs_t     query;
233
234         query.value = value;
235         return set_find(set, &query, sizeof(query), HASH_PTR(value));
236 }
237
238 static defs_t *
239 set_insert_def(set * set, ir_node * value)
240 {
241         defs_t     query;
242
243         query.value = value;
244         query.spills = NULL;
245         query.remats = NULL;
246         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
247 }
248
249 static spill_t *
250 set_find_spill(set * set, ir_node * value)
251 {
252         spill_t     query;
253
254         query.irn = value;
255         return set_find(set, &query, sizeof(query), HASH_PTR(value));
256 }
257
258 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
259 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
260 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
261 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
262 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
263
264 static int
265 cmp_remat_info(const void *a, const void *b, size_t size)
266 {
267         const remat_info_t *p = a;
268         const remat_info_t *q = b;
269
270         return !(p->irn == q->irn);
271 }
272
273 static int
274 cmp_defs(const void *a, const void *b, size_t size)
275 {
276         const defs_t *p = a;
277         const defs_t *q = b;
278
279         return !(p->value == q->value);
280 }
281
282 static int
283 cmp_keyval(const void *a, const void *b, size_t size)
284 {
285         const keyval_t *p = a;
286         const keyval_t *q = b;
287
288         return !(p->key == q->key);
289 }
290
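/**
 * Estimated execution frequency of @p irn's block: taken from the precomputed
 * execfreqs if available, otherwise approximated as 10^loop_depth. FUDGE keeps
 * the result strictly positive so that ILP cost coefficients never become zero.
 */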
291 static double
292 execution_frequency(const spill_ilp_t * si, const ir_node * irn)
293 {
294 #define FUDGE 0.001
295         if(si->execfreqs) {
296                 if(is_Block(irn)) {
297                         return get_block_execfreq(si->execfreqs, irn) + FUDGE;
298                 } else {
299                         return get_block_execfreq(si->execfreqs, get_nodes_block(irn)) + FUDGE;
300                 }
301         } else {
302                 if(is_Block(irn))
303                         return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
304                 else
305                         return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
306         }
307 }
308
309 static double
310 get_cost(const spill_ilp_t * si, const ir_node * irn)
311 {
312         if(be_is_Spill(irn)) {
313                 return COST_STORE;
314         } else if(be_is_Reload(irn)){
315                 return COST_LOAD;
316         } else {
317                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
318         }
319
320 }
321
322 /**
323  * Checks whether the node and its operands have suitable register classes
324  */
325 static INLINE int
326 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
327 {
328         int               n;
329         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
330         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
331
332 #if 0
333         if(!remat)
334                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
335 #endif
336
337         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
338                 ir_node        *op = get_irn_n(irn, n);
339                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
340
341 //              if(!remat)
342 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
343         }
344
345         return remat;
346 }
347
348 /**
349  * Try to create a remat from @p op with destination value @p dest_value
350  */
351 static INLINE remat_t *
352 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
353 {
354         remat_t  *remat = NULL;
355
356 //      if(!mode_is_datab(get_irn_mode(dest_value)))
357 //              return NULL;
358
359         if(dest_value == op) {
360                 const ir_node *proj = NULL;
361
362                 if(is_Proj(dest_value)) {
363                         op = get_irn_n(op, 0);
364                         proj = dest_value;
365                 }
366
367                 if(!is_rematerializable(si, op))
368                         return NULL;
369
370                 remat = obstack_alloc(si->obst, sizeof(*remat));
371                 remat->op = op;
372                 remat->cost = get_cost(si, op);
373                 remat->value = dest_value;
374                 remat->proj = proj;
375                 remat->inverse = 0;
376         } else {
377                 arch_inverse_t     inverse;
378                 int                n;
379
380                 /* get the index of the operand we want to retrieve by the inverse op */
381                 for (n = get_irn_arity(op)-1; n>=0; --n) {
382                         ir_node        *arg = get_irn_n(op, n);
383
384                         if(arg == dest_value) break;
385                 }
386                 if(n<0) return NULL;
387
388                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
389
390                 /* else ask the backend to give an inverse op */
391                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
392                         int   i;
393
394                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
395
396                         assert(inverse.n > 0 && "inverse op should have at least one node");
397
398                         for(i=inverse.n-1; i>=0; --i) {
399                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
400                         }
401
402                         if(inverse.n <= 2) {
403                                 remat = obstack_alloc(si->obst, sizeof(*remat));
404                                 remat->op = inverse.nodes[0];
405                                 remat->cost = inverse.costs;
406                                 remat->value = dest_value;
407                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
408                                 remat->inverse = 1;
409
410                                 assert(inverse.n == 1 || is_Proj(remat->proj));
411                         } else {
412                                 assert(0 && "I can not handle remats with more than 2 nodes");
413                         }
414                 }
415         }
416
417         if(remat) {
418                 if(remat->proj) {
419                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
420                 } else {
421                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
422                 }
423         }
424         return remat;
425 }
426
427
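/**
 * Registers @p remat with the remat_info of the value it recomputes
 * (remat_info->remats) and with the remat_info of each operand of the remat
 * op (remat_info->remats_by_operand).
 */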
428 static INLINE void
429 add_remat(const spill_ilp_t * si, const remat_t * remat)
430 {
431         remat_info_t    *remat_info,
432                      query;
433         int              n;
434
435         assert(remat->op);
436         assert(remat->value);
437
438         query.irn = remat->value;
439         query.remats = NULL;
440         query.remats_by_operand = NULL;
441         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
442
443         if(remat_info->remats == NULL) {
444                 remat_info->remats = new_pset(cmp_remat, 4096);
445         }
446         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
447
448         /* insert the remat into the remats_by_operand set of each argument of the remat op */
449         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
450                 ir_node        *arg = get_irn_n(remat->op, n);
451
452                 query.irn = arg;
453                 query.remats = NULL;
454                 query.remats_by_operand = NULL;
455                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
456
457                 if(remat_info->remats_by_operand == NULL) {
458                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
459                 }
460                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
461         }
462 }
463
464 static int
465 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
466 {
467         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
468         int                i = 0;
469
470         while(edge) {
471                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
472                         ++i;
473                 }
474                 edge = get_irn_out_edge_next(irn, edge);
475         }
476
477         return i;
478 }
479
480 static INLINE void
481 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
482 {
483         int      n;
484         remat_t *remat;
485
486 #ifdef NO_SINGLE_USE_REMATS
487         if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
488 #else
489         if(has_reg_class(si, op)) {
490 #endif
491                 remat = get_remat_from_op(si, op, op);
492                 if(remat) {
493                         add_remat(si, remat);
494                 }
495         }
496
497 #ifdef COLLECT_INVERSE_REMATS
498         /* additionally, for each argument arg of op, try to obtain an inverse remat
499            via get_remat_from_op(si, arg, op) */
500         for (n = get_irn_arity(op)-1; n>=0; --n) {
501                 ir_node        *arg = get_irn_n(op, n);
502
503                 if(has_reg_class(si, arg)) {
504                         /* try to get an inverse remat */
505                         remat = get_remat_from_op(si, arg, op);
506                         if(remat) {
507                                 add_remat(si, remat);
508                         }
509                 }
510         }
511 #endif
512
513 }
514
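/**
 * Returns nonzero if the definition of @p val is available at position @p pos,
 * i.e. it dominates @p pos or is scheduled before it in the same block.
 * A Block passed as @p pos stands for the end of that block.
 */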
515 static INLINE int
516 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
517 {
518         ir_node *block;
519         ir_node *def_block = get_nodes_block(val);
520         int      ret;
521
522         if(val == pos)
523                 return 0;
524
525         /* if pos is at end of a basic block */
526         if(is_Block(pos)) {
527                 ret = (pos == def_block || block_dominates(def_block, pos));
528 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
529                 return ret;
530         }
531
532         /* else if this is a normal operation */
533         block = get_nodes_block(pos);
534         if(block == def_block) {
535                 if(!sched_is_scheduled(val)) return 1;
536
537                 ret = sched_comes_after(val, pos);
538 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
539                 return ret;
540         }
541
542         ret = block_dominates(def_block, block);
543 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
544         return ret;
545 }
546
547 static INLINE ir_node *
548 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
549 {
550     return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
551 }
552
553 /**
554  * Returns first non-Phi node of block @p bb
555  */
556 static INLINE ir_node *
557 sched_block_first_nonphi(const ir_node * bb)
558 {
559         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
560 }
561
562 static int
563 sched_skip_proj_predicator(const ir_node * irn, void * data)
564 {
565         return (is_Proj(irn));
566 }
567
568 static INLINE ir_node *
569 sched_next_nonproj(const ir_node * irn, int forward)
570 {
571         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
572 }
573
574 /**
575  * Returns next operation node (non-Proj) after @p irn
576  * or the basic block of this node
577  */
578 static INLINE ir_node *
579 sched_next_op(const ir_node * irn)
580 {
581         ir_node *next = sched_next(irn);
582
583         if(is_Block(next))
584                 return next;
585
586         return sched_next_nonproj(next, 1);
587 }
588
589 /**
590  * Returns previous operation node (non-Proj) before @p irn
591  * or the basic block of this node
592  */
593 static INLINE ir_node *
594 sched_prev_op(const ir_node * irn)
595 {
596         ir_node *prev = sched_prev(irn);
597
598         if(is_Block(prev))
599                 return prev;
600
601         return sched_next_nonproj(prev, 0);
602 }
603
604 static void
605 sched_put_after(ir_node * insert, ir_node * irn)
606 {
607         if(is_Block(insert)) {
608                 insert = sched_block_first_nonphi(insert);
609         } else {
610                 insert = sched_next_op(insert);
611         }
612         sched_add_before(insert, irn);
613 }
614
615 static void
616 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
617 {
618         if(is_Block(insert)) {
619                 insert = sched_block_last_noncf(si, insert);
620         } else {
621                 insert = sched_next_nonproj(insert, 0);
622                 insert = sched_prev(insert);
623         }
624         sched_add_after(insert, irn);
625 }
626
627 /**
628  * Tells you whether a @p remat can be placed before the irn @p pos
629  */
630 static INLINE int
631 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
632 {
633         const ir_node   *op = remat->op;
634         const ir_node   *prev;
635         int        n,
636                            res = 1;
637
638         if(is_Block(pos)) {
639                 prev = sched_block_last_noncf(si, pos);
640                 prev = sched_next_nonproj(prev, 0);
641         } else {
642                 prev = sched_prev_op(pos);
643         }
644         /* do not remat if the rematted value is defined immediately before this op */
645         if(prev == remat->op) {
646                 return 0;
647         }
648
649 #if 0
650         /* this should be just fine, the following OP will be using this value, right? */
651
652         /* only remat AFTER the real definition of a value (?) */
653         if(!value_is_defined_before(si, pos, remat->value)) {
654 //              ir_fprintf(stderr, "error(not defined)");
655                 return 0;
656         }
657 #endif
658
659         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
660                 const ir_node   *arg = get_irn_n(op, n);
661
662 #ifdef NO_ENLARGE_L1V3N355
663                 if(has_reg_class(si, arg) && live) {
664                         res &= pset_find_ptr(live, arg)?1:0;
665                 } else {
666                         res &= value_is_defined_before(si, pos, arg);
667                 }
668 #else
669                 res &= value_is_defined_before(si, pos, arg);
670 #endif
671         }
672
673         return res;
674 }
675
676 /**
677  * Tells you whether a @p remat can be placed after the irn @p pos
678  */
679 static INLINE int
680 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
681 {
682         if(is_Block(pos)) {
683                 pos = sched_block_first_nonphi(pos);
684         } else {
685                 pos = sched_next_op(pos);
686         }
687
688         /* only remat AFTER the real definition of a value (?) */
689         if(!value_is_defined_before(si, pos, remat->value)) {
690                 return 0;
691         }
692
693         return can_remat_before(si, remat, pos, live);
694 }
695
696 /**
697  * Collect potentially rematerializable OPs
698  */
699 static void
700 walker_remat_collector(ir_node * irn, void * data)
701 {
702         spill_ilp_t    *si = data;
703
704         if(!is_Block(irn) && !is_Phi(irn)) {
705                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
706                 get_remats_from_op(si, irn);
707         }
708 }
709
710 /**
711  * Inserts a copy of @p irn before @p pos
712  */
713 static ir_node *
714 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
715 {
716         ir_node     *bb;
717         ir_node     *copy;
718
719         bb = is_Block(pos)?pos:get_nodes_block(pos);
720         copy = exact_copy(irn);
721
722         _set_phi_class(copy, NULL);
723         set_nodes_block(copy, bb);
724         sched_put_before(si, pos, copy);
725
726         return copy;
727 }
728
729 /**
730  * Inserts a copy of @p irn after @p pos
731  */
732 static ir_node *
733 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
734 {
735         ir_node     *bb;
736         ir_node     *copy;
737
738         bb = is_Block(pos)?pos:get_nodes_block(pos);
739         copy = exact_copy(irn);
740
741         _set_phi_class(copy, NULL);
742         set_nodes_block(copy, bb);
743         sched_put_after(pos, copy);
744
745         return copy;
746 }
747
748 static ir_node *
749 insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
750 {
751         char     buf[256];
752
753         if(can_remat_after(si, remat, pos, live)) {
754                 ir_node         *copy,
755                                                 *proj_copy;
756                 op_t            *op;
757
758                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
759
760                 copy = insert_copy_after(si, remat->op, pos);
761
762                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
763                 op = obstack_alloc(si->obst, sizeof(*op));
764                 op->is_remat = 1;
765                 op->attr.remat.remat = remat;
766                 op->attr.remat.pre = 0;
767                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
768
769                 set_irn_link(copy, op);
770                 pset_insert_ptr(si->all_possible_remats, copy);
771                 if(remat->proj) {
772                         proj_copy = insert_copy_after(si, remat->proj, copy);
773                         set_irn_n(proj_copy, 0, copy);
774                         set_irn_link(proj_copy, op);
775                         pset_insert_ptr(si->all_possible_remats, proj_copy);
776                 } else {
777                         proj_copy = NULL;
778                 }
779
780                 return copy;
781         }
782
783         return NULL;
784 }
785
786 static ir_node *
787 insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
788 {
789         char     buf[256];
790
791         if(can_remat_before(si, remat, pos, live)) {
792                 ir_node         *copy,
793                                                 *proj_copy;
794                 op_t            *op;
795
796                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
797
798                 copy = insert_copy_before(si, remat->op, pos);
799
800                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
801                 op = obstack_alloc(si->obst, sizeof(*op));
802                 op->is_remat = 1;
803                 op->attr.remat.remat = remat;
804                 op->attr.remat.pre = 1;
805                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
806
807                 set_irn_link(copy, op);
808                 pset_insert_ptr(si->all_possible_remats, copy);
809                 if(remat->proj) {
810                         proj_copy = insert_copy_after(si, remat->proj, copy);
811                         set_irn_n(proj_copy, 0, copy);
812                         set_irn_link(proj_copy, op);
813                         pset_insert_ptr(si->all_possible_remats, proj_copy);
814                 } else {
815                         proj_copy = NULL;
816                 }
817
818                 return copy;
819         }
820
821         return NULL;
822 }
823
824 static int
825 get_block_n_succs(const ir_node *block) {
826         const ir_edge_t *edge;
827
828         assert(edges_activated(current_ir_graph));
829
830         edge = get_block_succ_first(block);
831         if (! edge)
832                 return 0;
833
834         edge = get_block_succ_next(block, edge);
835         return edge ? 2 : 1;
836 }
837
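/* is_merge_edge(): the block has exactly one successor; is_diverge_edge(): it
 * has exactly one predecessor. With GOODWIN_REDUCTION these predicates restrict
 * where block-boundary reloads (end of merge blocks) and block-start remats
 * (diverge blocks) are considered at all -- presumably the problem-size
 * reduction from Goodwin's spilling formulation. Without the define every
 * block boundary qualifies. */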
838 static int
839 is_merge_edge(const ir_node * bb)
840 {
841 #ifdef GOODWIN_REDUCTION
842         return get_block_n_succs(bb) == 1;
843 #else
844         return 1;
845 #endif
846 }
847
848 static int
849 is_diverge_edge(const ir_node * bb)
850 {
851 #ifdef GOODWIN_REDUCTION
852         return get_Block_n_cfgpreds(bb) == 1;
853 #else
854         return 1;
855 #endif
856 }
857
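/**
 * For every Phi of the register class under consideration, arguments that do
 * not belong to this class get a Copy into the class inserted at the end of
 * the corresponding predecessor block, and the Phi is rewired to use the copy;
 * presumably so the ILP only has to reason about values of a single class.
 */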
858 static void
859 walker_regclass_copy_insertor(ir_node * irn, void * data)
860 {
861         spill_ilp_t    *si = data;
862
863         if(is_Phi(irn) && has_reg_class(si, irn)) {
864                 int n;
865
866                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
867                         ir_node  *phi_arg = get_irn_n(irn, n);
868                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
869
870                         if(!has_reg_class(si, phi_arg)) {
871                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
872                                 ir_node   *pos = sched_block_last_noncf(si, bb);
873                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
874
875                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
876                                 sched_add_after(pos, copy);
877                                 set_irn_n(irn, n, copy);
878
879                                 op->is_remat = 0;
880                                 op->attr.live_range.args.reloads = NULL;
881                                 op->attr.live_range.ilp = ILP_UNDEF;
882                                 set_irn_link(copy, op);
883                         }
884                 }
885         }
886 }
887
888
889 /**
890  * Insert (so far unused) remats into the irg to
891  * recompute the potential liveness of all values
892  */
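/*
 * Remats inserted before an op get attr.remat.pre = 1 (ILP variables named
 * "remat_*"), remats inserted after an op get pre = 0 ("remat2_*"). All of
 * them are only candidates: the ILP solution later decides which ones to keep.
 */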
893 static void
894 walker_remat_insertor(ir_node * bb, void * data)
895 {
896         spill_ilp_t    *si = data;
897         spill_bb_t     *spill_bb;
898         ir_node        *irn;
899         int             n;
900         irn_live_t     *li;
901         pset           *live = pset_new_ptr_default();
902
903         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
904
905         live_foreach(bb, li) {
906                 ir_node        *value = (ir_node *) li->irn;
907
908                 /* add remats at end of block */
909                 if (live_is_end(li) && has_reg_class(si, value)) {
910                         pset_insert_ptr(live, value);
911                 }
912         }
913
914         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
915         set_irn_link(bb, spill_bb);
916
917         irn = sched_last(bb);
918         while(!sched_is_end(irn)) {
919                 ir_node   *next;
920                 op_t      *op;
921                 pset      *args;
922                 ir_node   *arg;
923                 pset      *remat_args;
924
925                 next = sched_prev(irn);
926
927                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
928
929                 if(is_Phi(irn) || is_Proj(irn)) {
930                         op_t      *op;
931
932                         if(has_reg_class(si, irn)) {
933                                 pset_remove_ptr(live, irn);
934                         }
935
936                         op = obstack_alloc(si->obst, sizeof(*op));
937                         op->is_remat = 0;
938                         op->attr.live_range.args.reloads = NULL;
939                         op->attr.live_range.ilp = ILP_UNDEF;
940                         set_irn_link(irn, op);
941
942                         irn = next;
943                         continue;
944                 }
945
946                 op = obstack_alloc(si->obst, sizeof(*op));
947                 op->is_remat = 0;
948                 op->attr.live_range.ilp = ILP_UNDEF;
949                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
950                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
951                 set_irn_link(irn, op);
952
953                 args = pset_new_ptr_default();
954
955                 /* collect arguments of op */
956                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
957                         ir_node        *arg = get_irn_n(irn, n);
958
959                         pset_insert_ptr(args, arg);
960                 }
961
962                 /* set args of op already live in epilog */
963                 pset_foreach(args, arg) {
964                         if(has_reg_class(si, arg)) {
965                                 pset_insert_ptr(live, arg);
966                         }
967                 }
968                 /* delete defined value from live set */
969                 if(has_reg_class(si, irn)) {
970                         pset_remove_ptr(live, irn);
971                 }
972
973
974                 remat_args = pset_new_ptr_default();
975
976                 /* insert all possible remats before irn */
977                 pset_foreach(args, arg) {
978                         remat_info_t   *remat_info,
979                                                     query;
980                         remat_t        *remat;
981
982                         /* continue if the operand has the wrong reg class
983                          */
984                         if(!has_reg_class(si, arg))
985                                 continue;
986
987                         query.irn = arg;
988                         query.remats = NULL;
989                         query.remats_by_operand = NULL;
990                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
991
992                         if(!remat_info) {
993                                 continue;
994                         }
995
996                         if(remat_info->remats) {
997                                 pset_foreach(remat_info->remats, remat) {
998                                         ir_node  *remat_irn = NULL;
999
1000                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
1001 #ifdef REMAT_WHILE_LIVE
1002                                         if(pset_find_ptr(live, remat->value)) {
1003                                                 remat_irn = insert_remat_before(si, remat, irn, live);
1004                                         }
1005 #else
1006                                         remat_irn = insert_remat_before(si, remat, irn, live);
1007 #endif
1008                                         if(remat_irn) {
1009                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1010                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1011
1012                                                         if(!has_reg_class(si, remat_arg)) continue;
1013
1014                                                         pset_insert_ptr(remat_args, remat_arg);
1015                                                 }
1016                                         }
1017                                 }
1018                         }
1019                 }
1020
1021                 /* now we add remat args to op's args because they could also die at this op */
1022                 pset_foreach(args,arg) {
1023                         if(pset_find_ptr(remat_args, arg)) {
1024                                 pset_remove_ptr(remat_args, arg);
1025                         }
1026                 }
1027                 pset_foreach(remat_args,arg) {
1028                         pset_insert_ptr(args, arg);
1029                 }
1030
1031                 /* insert all possible remats after irn */
1032                 pset_foreach(args, arg) {
1033                         remat_info_t   *remat_info,
1034                                                     query;
1035                         remat_t        *remat;
1036
1037                         /* continue if the operand has the wrong reg class */
1038                         if(!has_reg_class(si, arg))
1039                                 continue;
1040
1041                         query.irn = arg;
1042                         query.remats = NULL;
1043                         query.remats_by_operand = NULL;
1044                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1045
1046                         if(!remat_info) {
1047                                 continue;
1048                         }
1049
1050                         /* do not place post remats after jumps */
1051                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
1052
1053                         if(remat_info->remats_by_operand) {
1054                                 pset_foreach(remat_info->remats_by_operand, remat) {
1055                                         /* do not insert remats producing the same value as one of the operands */
1056                                         if(!pset_find_ptr(args, remat->value)) {
1057                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1058 #ifdef REMAT_WHILE_LIVE
1059                                                 if(pset_find_ptr(live, remat->value)) {
1060                                                         insert_remat_after(si, remat, irn, live);
1061                                                 }
1062 #else
1063                                                 insert_remat_after(si, remat, irn, live);
1064 #endif
1065                                         }
1066                                 }
1067                         }
1068                 }
1069
1070                 del_pset(remat_args);
1071                 del_pset(args);
1072                 irn = next;
1073         }
1074
1075         live_foreach(bb, li) {
1076                 ir_node        *value = (ir_node *) li->irn;
1077
1078                 /* add remats at end if successor has multiple predecessors */
1079                 if(is_merge_edge(bb)) {
1080                         /* add remats at end of block */
1081                         if (live_is_end(li) && has_reg_class(si, value)) {
1082                                 remat_info_t   *remat_info,
1083                                                            query;
1084                                 remat_t        *remat;
1085
1086                                 query.irn = value;
1087                                 query.remats = NULL;
1088                                 query.remats_by_operand = NULL;
1089                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1090
1091                                 if(remat_info && remat_info->remats) {
1092                                         pset_foreach(remat_info->remats, remat) {
1093                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1094
1095                                                 insert_remat_before(si, remat, bb, NULL);
1096                                         }
1097                                 }
1098                         }
1099                 }
1100                 if(is_diverge_edge(bb)) {
1101                         /* add remat2s at beginning of block */
1102                         if ((live_is_in(li) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
1103                                 remat_info_t   *remat_info,
1104                                                            query;
1105                                 remat_t        *remat;
1106
1107                                 query.irn = value;
1108                                 query.remats = NULL;
1109                                 query.remats_by_operand = NULL;
1110                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1111
1112                                 if(remat_info && remat_info->remats) {
1113                                         pset_foreach(remat_info->remats, remat) {
1114                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at beginning of block %+F\n", remat->op, bb));
1115
1116                                                 /* put the remat here if all its args are available */
1117                                                 insert_remat_after(si, remat, bb, NULL);
1118
1119                                         }
1120                                 }
1121                         }
1122                 }
1123         }
1124 }
1125
1126 /**
1127  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1128  */
1129 static void
1130 luke_endwalker(ir_node * bb, void * data)
1131 {
1132         spill_ilp_t    *si = (spill_ilp_t*)data;
1133         irn_live_t     *li;
1134         pset           *live;
1135         pset           *use_end;
1136         char            buf[256];
1137         ilp_cst_t       cst;
1138         ir_node        *irn;
1139         spill_bb_t     *spill_bb = get_irn_link(bb);
1140
1141
1142         live = pset_new_ptr_default();
1143         use_end = pset_new_ptr_default();
1144
1145         live_foreach(bb, li) {
1146                 irn = (ir_node *) li->irn;
1147                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1148                         op_t      *op;
1149
1150                         pset_insert_ptr(live, irn);
1151                         op = get_irn_link(irn);
1152                         assert(!op->is_remat);
1153                 }
1154         }
1155
1156         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1157         /* their reg_out must always be set */
1158         sched_foreach_reverse(bb, irn) {
1159                 int   n;
1160
1161                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1162
1163                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1164                         ir_node        *irn_arg = get_irn_n(irn, n);
1165
1166                         if(has_reg_class(si, irn_arg)) {
1167                                 pset_insert_ptr(use_end, irn_arg);
1168                         }
1169                 }
1170         }
1171
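	/* register pressure at the block end: at most n_regs of the live values may
	 * have reg_out = 1; values consumed by the control flow node (use_end) are
	 * forced into registers anyway (their reg_out is constrained to 1 below),
	 * so the bound is reduced by their count up front */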
1172         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1173         //cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1174         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1175
1176         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1177
1178         /* if this is a merge edge we can reload at the end of this block */
1179         if(is_merge_edge(bb)) {
1180                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1181         } else if(pset_count(use_end)){
1182                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1183         } else {
1184                 spill_bb->reloads = NULL;
1185         }
1186
1187         pset_foreach(live,irn) {
1188                 spill_t     query,
1189                                         *spill;
1190                 double      spill_cost;
1191
1192
1193                 /* handle values used by control flow nodes later separately */
1194                 if(pset_find_ptr(use_end, irn)) continue;
1195
1196                 query.irn = irn;
1197                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1198
1199                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1200
1201                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1202                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1203                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1204
1205                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1206                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1207
1208                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1209                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1210
1211                 if(is_merge_edge(bb)) {
1212                         ilp_var_t   reload;
1213                         ilp_cst_t   rel_cst;
1214
1215                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1216                         reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1217                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1218
1219                         /* reload <= mem_out */
1220                         rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1221                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1222                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1223                 }
1224
1225                 spill->reg_in = ILP_UNDEF;
1226                 spill->mem_in = ILP_UNDEF;
1227         }
1228
1229         pset_foreach(use_end,irn) {
1230                 spill_t     query,
1231                                         *spill;
1232                 double      spill_cost;
1233                 ilp_cst_t   end_use_req,
1234                                         rel_cst;
1235                 ilp_var_t   reload;
1236
1237                 query.irn = irn;
1238                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1239
1240                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1241
1242                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1243                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1244                 /* if irn is used at the end of the block, then it is live anyway */
1245                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1246
1247                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1248                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1249
1250                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1251                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1252
1253                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1254                 reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1255                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1256
1257                 /* reload <= mem_out */
1258                 rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1259                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1260                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1261
1262                 spill->reg_in = ILP_UNDEF;
1263                 spill->mem_in = ILP_UNDEF;
1264
1265                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1266                 end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
1267                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1268         }
1269
1270         del_pset(live);
1271         del_pset(use_end);
1272 }
1273
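/**
 * Returns the operation scheduled right after @p irn (or the first non-Phi op
 * of the block, if @p irn is a Block) iff it is a post remat ("remat2"),
 * otherwise NULL. Together with foreach_post_remat() this walks all post
 * remats of a program point.
 */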
1274 static ir_node *
1275 next_post_remat(const ir_node * irn)
1276 {
1277         op_t      *op;
1278
1279         if(is_Block(irn)) {
1280                 irn = sched_block_first_nonphi(irn);
1281         } else {
1282                 irn = sched_next_op(irn);
1283         }
1284
1285         if(sched_is_end(irn))
1286                 return NULL;
1287
1288         op = (op_t*)get_irn_link(irn);
1289         if(op->is_remat && !op->attr.remat.pre) {
1290                 return irn;
1291         }
1292
1293         return NULL;
1294 }
1295
1296
1297 static ir_node *
1298 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1299 {
1300         op_t      *op;
1301         ir_node   *ret;
1302
1303         if(is_Block(irn)) {
1304                 ret = sched_block_last_noncf(si, irn);
1305                 ret = sched_next(ret);
1306                 ret = sched_prev_op(ret);
1307         } else {
1308                 ret = sched_prev_op(irn);
1309         }
1310
1311         if(sched_is_end(ret) || is_Phi(ret))
1312                 return NULL;
1313
1314         op = (op_t*)get_irn_link(ret);
1315         if(op->is_remat && op->attr.remat.pre) {
1316                 return ret;
1317         }
1318
1319         return NULL;
1320 }
1321
1322 /**
1323  * Find a remat of value @p value in the epilog of @p pos
1324  */
1325 static ir_node *
1326 find_post_remat(const ir_node * value, const ir_node * pos)
1327 {
1328         while((pos = next_post_remat(pos)) != NULL) {
1329                 op_t   *op;
1330
1331                 op = get_irn_link(pos);
1332                 assert(op->is_remat && !op->attr.remat.pre);
1333
1334                 if(op->attr.remat.remat->value == value)
1335                         return (ir_node*)pos;
1336
1337 #if 0
1338         const ir_edge_t *edge;
1339                 foreach_out_edge(pos, edge) {
1340                         ir_node   *proj = get_edge_src_irn(edge);
1341                         assert(is_Proj(proj));
1342                 }
1343 #endif
1344
1345         }
1346
1347         return NULL;
1348 }
1349
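/**
 * Returns the spill_t record for @p irn in @p bb, creating it on demand with
 * fresh mem_out and spill ILP variables; reg_in, reg_out and mem_in are left
 * ILP_UNDEF until someone needs them.
 */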
1350 static spill_t *
1351 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1352 {
1353         spill_bb_t  *spill_bb = get_irn_link(bb);
1354         spill_t     *spill,
1355                                  query;
1356         char         buf[256];
1357
1358         query.irn = irn;
1359         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1360         if(!spill) {
1361                 double   spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1362
1363                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1364
1365                 spill->reg_out = ILP_UNDEF;
1366                 spill->reg_in  = ILP_UNDEF;
1367                 spill->mem_in  = ILP_UNDEF;
1368
1369                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1370                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1371
1372                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1373                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1374         }
1375
1376         return spill;
1377 }
1378
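/**
 * Collects into @p live all values of the register class that are live at the
 * end of @p bb, including the operands of the control flow operations at the
 * block end (those are kept in registers explicitly, see luke_endwalker).
 */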
1379 static void
1380 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1381 {
1382         irn_live_t     *li;
1383         ir_node        *irn;
1384
1385         live_foreach(bb, li) {
1386                 irn = (ir_node *) li->irn;
1387
1388                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1389                         pset_insert_ptr(live, irn);
1390                 }
1391         }
1392
1393         irn = sched_last(bb);
1394
1395         /* all values eaten by control flow operations are also live until the end of the block */
1396         sched_foreach_reverse(bb, irn) {
1397                 int  i;
1398
1399                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1400
1401                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1402                         ir_node *arg = get_irn_n(irn,i);
1403
1404                         if(has_reg_class(si, arg)) {
1405                                 pset_insert_ptr(live, arg);
1406                         }
1407                 }
1408         }
1409 }
1410
1411 /**
1412  *  Inserts ILP-constraints and variables for memory copying before the given position
1413  */
1414 static void
1415 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1416 {
1417         const ir_node    *succ;
1418         const ir_edge_t  *edge;
1419         spill_bb_t       *spill_bb = get_irn_link(block);
1420         ir_node          *phi;
1421         int               pos;
1422         ilp_cst_t         cst;
1423         ilp_var_t         copyreg;
1424         char              buf[256];
1425         ir_node          *tmp;
1426
1427
1428         assert(edges_activated(current_ir_graph));
1429
1430         edge = get_block_succ_first(block);
1431         if(!edge) return;
1432
1433         succ = edge->src;
1434         pos = edge->pos;
1435
1436         edge = get_block_succ_next(block, edge);
1437         /* the successor can only contain Phis if this is a merge edge */
1438         if(edge) return;
1439
1440         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1441         copyreg = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1442
1443         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1444         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1445
1446         pset_foreach(live, tmp) {
1447                 spill_t  *spill;
1448 #if 0
1449                 op_t  *op = get_irn_link(irn);
1450                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1451 #endif
1452                 spill = set_find_spill(spill_bb->ilp, tmp);
1453                 assert(spill);
1454
1455                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1456         }
1457         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1458
1459         sched_foreach(succ, phi) {
1460                 const ir_node  *to_copy;
1461                 op_t           *to_copy_op;
1462                 spill_t        *to_copy_spill;
1463                 op_t           *phi_op = get_irn_link(phi);
1464                 ilp_var_t       reload = ILP_UNDEF;
1465
1466
1467                 if(!is_Phi(phi)) break;
1468                 if(!has_reg_class(si, phi)) continue;
1469
1470                 to_copy = get_irn_n(phi, pos);
1471
1472                 to_copy_op = get_irn_link(to_copy);
1473
1474                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1475                 assert(to_copy_spill);
1476
1477                 if(spill_bb->reloads) {
1478                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1479
1480                         if(keyval) {
1481                                 reload = PTR_TO_INT(keyval->val);
1482                         }
1483                 }
1484
1485                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N", block, to_copy);
1486                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1487
1488                 /* copy - reg_out - reload - remat - live_range <= 0 */
1489                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1490                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1491                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1492                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1493                 foreach_pre_remat(si, block, tmp) {
1494                         op_t     *remat_op = get_irn_link(tmp);
1495                         if(remat_op->attr.remat.remat->value == to_copy) {
1496                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1497                         }
1498                 }
1499
1500                 ir_snprintf(buf, sizeof(buf), "copyreq_%N_%N", block, to_copy);
1501                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1502
1503                 /* copy - reg_out - copyreg <= 0 */
1504                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1505                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1506                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1507         }
1508 }
1509
1510
1511 /**
1512  * Walk all irg blocks and emit this ILP
1513  */
1514 static void
1515 luke_blockwalker(ir_node * bb, void * data)
1516 {
1517         spill_ilp_t    *si = (spill_ilp_t*)data;
1518         ir_node        *irn;
1519         pset           *live;
1520         char            buf[256];
1521         ilp_cst_t       cst;
1522         spill_bb_t     *spill_bb = get_irn_link(bb);
1523         ir_node        *tmp;
1524         spill_t        *spill;
1525         pset           *defs = pset_new_ptr_default();
1526
1527
1528         live = pset_new_ptr_default();
1529
1530         /****************************************
1531          *      B A S I C  B L O C K  E N D
1532          ***************************************/
1533
1534
1535         /* init live values at end of block */
1536         get_live_end(si, bb, live);
1537
1538         pset_foreach(live, irn) {
1539                 op_t           *op;
1540                 ilp_var_t       reload = ILP_UNDEF;
1541
1542                 spill = set_find_spill(spill_bb->ilp, irn);
1543                 assert(spill);
1544
1545                 if(spill_bb->reloads) {
1546                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1547
1548                         if(keyval) {
1549                                 reload = PTR_TO_INT(keyval->val);
1550                         }
1551                 }
1552
1553                 op = get_irn_link(irn);
1554                 assert(!op->is_remat);
1555
1556                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1557                 op->attr.live_range.ilp = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1558                 op->attr.live_range.op = bb;
1559
1560                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1561                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1562
1563                 /* reg_out - reload - remat - live_range <= 0 */
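                     /* i.e. the value may only leave the block in a register if it is reloaded
                      * here, recomputed by a pre remat at the block end, or its live range
                      * reaches the end of the block */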
1564                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1565                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1566                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1567                 foreach_pre_remat(si, bb, tmp) {
1568                         op_t     *remat_op = get_irn_link(tmp);
1569                         if(remat_op->attr.remat.remat->value == irn) {
1570                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1571                         }
1572                 }
1573                 /* maybe we should also assure that reg_out >= live_range etc. */
1574         }
1575
1576 #ifndef NO_MEMCOPIES
1577         insert_mem_copy_position(si, live, bb);
1578 #endif
1579
1580         /*
1581          * start new live ranges for values used by remats at end of block
1582          * and assure the remat args are available
1583          */
1584         foreach_pre_remat(si, bb, tmp) {
1585                 op_t     *remat_op = get_irn_link(tmp);
1586                 int       n;
1587
1588                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1589                         ir_node        *remat_arg = get_irn_n(tmp, n);
1590                         op_t           *arg_op = get_irn_link(remat_arg);
1591                         ilp_var_t       prev_lr;
1592
1593                         if(!has_reg_class(si, remat_arg)) continue;
1594
1595                         /* if value is becoming live through use by remat */
1596                         if(!pset_find_ptr(live, remat_arg)) {
1597                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1598                                 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1599
1600                                 arg_op->attr.live_range.ilp = prev_lr;
1601                                 arg_op->attr.live_range.op = bb;
1602
1603                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1604
1605                                 pset_insert_ptr(live, remat_arg);
1606                                 add_to_spill_bb(si, bb, remat_arg);
1607                         }
1608
1609                         /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1610                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1611                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1612
1613                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1614                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1615
1616                         /* use reload placed for this argument */
1617                         if(spill_bb->reloads) {
1618                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1619
1620                                 if(keyval) {
1621                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1622
1623                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1624                                 }
1625                         }
1626                 }
1627         }
1628         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1629
1630
1631
1632
1633         /**************************************
1634          *    B A S I C  B L O C K  B O D Y
1635          **************************************/
1636
1637         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1638                 op_t       *op;
1639                 op_t       *tmp_op;
1640                 int         n,
1641                                         u = 0,
1642                                         d = 0;
1643                 ilp_cst_t       check_pre,
1644                                         check_post;
1645                 set        *args;
1646                 pset       *used;
1647                 pset       *remat_defs;
1648                 keyval_t   *keyval;
1649
1650                 /* iterate only until first phi */
1651                 if(is_Phi(irn))
1652                         break;
1653
1654                 op = get_irn_link(irn);
1655                 /* skip remats */
1656                 if(op->is_remat) continue;
1657                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1658
1659                 /* collect defined values */
1660                 if(has_reg_class(si, irn)) {
1661                         pset_insert_ptr(defs, irn);
1662                 }
1663
1664                 /* skip projs */
1665                 if(is_Proj(irn)) continue;
1666
1667                 /*
1668                  * init set of irn's arguments
1669                  * and all possibly used values around this op
1670                  * and values defined by post remats
1671                  */
1672                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1673                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1674                 remat_defs = pset_new_ptr(pset_count(live));
1675
1676                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1677                         ir_node        *irn_arg = get_irn_n(irn, n);
1678                         if(has_reg_class(si, irn_arg)) {
1679                                 set_insert_keyval(args, irn_arg, (void*)n);
1680                                 pset_insert_ptr(used, irn_arg);
1681                         }
1682                 }
1683                 foreach_post_remat(irn, tmp) {
1684                         op_t    *remat_op = get_irn_link(tmp);
1685
1686                         pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1687
1688                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1689                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1690                                 if(has_reg_class(si, remat_arg)) {
1691                                         pset_insert_ptr(used, remat_arg);
1692                                 }
1693                         }
1694                 }
1695                 foreach_pre_remat(si, irn, tmp) {
1696                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1697                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1698                                 if(has_reg_class(si, remat_arg)) {
1699                                         pset_insert_ptr(used, remat_arg);
1700                                 }
1701                         }
1702                 }
1703
1704                 /**********************************
1705                  *   I N  E P I L O G  O F  irn
1706                  **********************************/
1707
1708                 /* ensure each dying value is used by only one post remat */
1709                 pset_foreach(live, tmp) {
1710                         ir_node     *value = tmp;
1711                         op_t        *value_op = get_irn_link(value);
1712                         ir_node     *remat;
1713                         int          n_remats = 0;
1714
1715                         cst = ILP_UNDEF;
1716                         foreach_post_remat(irn, remat) {
1717                                 op_t  *remat_op = get_irn_link(remat);
1718
1719                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1720                                         ir_node   *remat_arg = get_irn_n(remat, n);
1721
1722                                         /* if value is used by this remat add it to constraint */
1723                                         if(remat_arg == value) {
1724                                                 if(n_remats == 0) {
1725                                                         /* sum remat2s <= 1 + n_remats*live_range */
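                                                             /* i.e. if the value's live range does not continue (it dies at irn),
                                                              * at most one post remat may consume it; if it stays live the bound
                                                              * becomes 1 + n_remats and is never binding */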
1726                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1727                                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1.0);
1728                                                 }
1729
1730                                                 n_remats++;
1731                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1732                                                 break;
1733                                         }
1734                                 }
1735                         }
1736
1737                         if(value_op->attr.live_range.ilp != ILP_UNDEF && cst != ILP_UNDEF) {
1738                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1739                         }
1740                 }
1741
1742
1743
1744                 /* new live ranges for values from L\U defined by post remats */
1745                 pset_foreach(live, tmp) {
1746                         ir_node     *value = tmp;
1747                         op_t        *value_op = get_irn_link(value);
1748
1749                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
1750                                 ilp_var_t    prev_lr = ILP_UNDEF;
1751                                 ir_node     *remat;
1752
1753                                 if(pset_find_ptr(remat_defs, value)) {
1754
1755                                         /* next_live_range <= prev_live_range + sum remat2s */
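                                             /* i.e. the live range below irn can only be active if the value is
                                              * also live above irn or one of the post remats at irn recomputes it */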
1756                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1757                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1758
1759                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1760                                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1761
1762                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1763                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1764
1765                                         foreach_post_remat(irn, remat) {
1766                                                 op_t        *remat_op = get_irn_link(remat);
1767
1768                                                 /* if value is being rematerialized by this remat */
1769                                                 if(value == remat_op->attr.remat.remat->value) {
1770                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1771                                                 }
1772                                         }
1773
1774                                         value_op->attr.live_range.ilp = prev_lr;
1775                                         value_op->attr.live_range.op = irn;
1776                                 }
1777                         }
1778                 }
1779
1780                 /* requirements for post remats and start live ranges from L\U' for values dying here */
1781                 foreach_post_remat(irn, tmp) {
1782                         op_t        *remat_op = get_irn_link(tmp);
1783                         int          n;
1784
1785                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1786                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1787                                 op_t           *arg_op = get_irn_link(remat_arg);
1788
1789                                 if(!has_reg_class(si, remat_arg)) continue;
1790
1791                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
1792                                 if(!pset_find_ptr(used, remat_arg)) {
1793                                         /* remat <= live_range(remat_arg) */
1794                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
1795                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1796
1797                                         /* if value is becoming live through use by remat2 */
1798                                         if(!pset_find_ptr(live, remat_arg)) {
1799                                                 ilp_var_t     lr;
1800
1801                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1802                                                 lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1803
1804                                                 arg_op->attr.live_range.ilp = lr;
1805                                                 arg_op->attr.live_range.op = irn;
1806
1807                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
1808
1809                                                 pset_insert_ptr(live, remat_arg);
1810                                                 add_to_spill_bb(si, bb, remat_arg);
1811                                         }
1812
1813                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1814                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1815                                 }
1816                         }
1817                 }
1818
1819                 d = pset_count(defs);
1820                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
1821
1822                 /* count how many regs irn needs for arguments */
1823                 u = set_count(args);
1824
1825
1826                 /* check the register pressure in the epilog */
1827                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
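                     /* register pressure directly after irn: the values live across irn plus
                      * the used values that are still needed afterwards must fit into the
                      * registers not taken by irn's |D| results */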
1828                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
1829                 check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
1830
1831                 /* add L\U' to check_post */
1832                 pset_foreach(live, tmp) {
1833                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
1834                                 /* if a live value is not used by irn */
1835                                 tmp_op = get_irn_link(tmp);
1836                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
1837                         }
1838                 }
1839
1840                 /***********************************************************
1841                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
1842                  **********************************************************/
1843
1844
1845                 pset_foreach(used, tmp) {
1846                         ilp_var_t       prev_lr;
1847                         ilp_var_t       post_use;
1848                         int             p = 0;
1849                         spill_t        *spill;
1850                         ir_node        *arg = tmp;
1851                         op_t           *arg_op = get_irn_link(arg);
1852                         ir_node        *remat;
1853
1854                         spill = add_to_spill_bb(si, bb, arg);
1855
1856                         /* new live range for each used value */
1857                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
1858                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1859
1860                         /* the epilog stuff - including post_use, check_post, check_post_remat */
1861                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
1862                         post_use = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1863
1864                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
1865
1866                         /* arg is live throughout epilog if the next live_range is in a register */
1867                         if(pset_find_ptr(live, arg)) {
1868                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
1869
1870                                 /* post_use >= next_lr; the post remat uses are handled below */
1871                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1872                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1873                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
1874                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1875
1876                         }
1877
1878                         /* if value is not an arg of op and not possibly defined by post remat
1879                          * then it may only die and not become live
1880                          */
1881                         if(!set_find_keyval(args, arg)) {
1882                                 /* post_use <= prev_lr */
1883                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
1884                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1885                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1886                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1887
1888                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
1889                                         /* next_lr <= prev_lr */
1890                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
1891                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1892                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1893                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1894                                 }
1895                         }
1896
1897
1898
1899                         /* forall post remat which use arg add a similar cst */
1900                         foreach_post_remat(irn, remat) {
1901                                 int      n;
1902
1903                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
1904                                         ir_node    *remat_arg = get_irn_n(remat, n);
1905                                         op_t       *remat_op = get_irn_link(remat);
1906
1907                                         if(remat_arg == arg) {
1908                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
1909
1910                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1911                                                 cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
1912                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1913                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1914                                         }
1915                                 }
1916                         }
1917
1918                         /* new live range begins for each used value */
1919                         arg_op->attr.live_range.ilp = prev_lr;
1920                         arg_op->attr.live_range.op = irn;
1921
1922                         /*if(!pset_find_ptr(live, arg)) {
1923                                 pset_insert_ptr(live, arg);
1924                                 add_to_spill_bb(si, bb, arg);
1925                         }*/
1926                         pset_insert_ptr(live, arg);
1927
1928                 }
1929
1930                 /* just to be sure */
1931                 check_post = ILP_UNDEF;
1932
1933
1934
1935
1936                 /******************
1937                  *   P R O L O G
1938                  ******************/
1939
1940                 /* check the register pressure in the prolog */
1941                 /* sum_{L\U} lr <= k - |U| */
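                     /* register pressure directly before irn: the values live across irn (not
                      * among its arguments) together with the |U| argument registers must fit
                      * into the k available registers */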
1942                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
1943                 check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - u);
1944
1945                 /* for the prolog remove defined values from the live set */
1946                 pset_foreach(defs, tmp) {
1947                         pset_remove_ptr(live, tmp);
1948                 }
1949
1950                 /***********************************************************
1951                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
1952                  **********************************************************/
1953
1954
1955                 set_foreach(args, keyval) {
1956                         spill_t        *spill;
1957                         ir_node        *arg = keyval->key;
1958                         int             i = PTR_TO_INT(keyval->val);
1959                         op_t           *arg_op = get_irn_link(arg);
1960
1961                         spill = set_find_spill(spill_bb->ilp, arg);
1962                         assert(spill);
1963
1964                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
1965                         op->attr.live_range.args.reloads[i] = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1966
1967                         /* reload <= mem_out */
1968                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
1969                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1970                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1971                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
1972
1973                         /* requirement: arg must be in register for use */
1974                         /* reload + remat + live_range == 1 */
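                             /* i.e. the argument must be made available in exactly one way: by an
                              * unbroken live range, by a reload, or by a pre remat right before irn */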
1975                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
1976                         cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
1977
1978                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1979                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1980                         foreach_pre_remat(si, irn, tmp) {
1981                                 op_t     *remat_op = get_irn_link(tmp);
1982                                 if(remat_op->attr.remat.remat->value == arg) {
1983                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1984                                 }
1985                         }
1986                 }
1987
1988                 /* iterate over L\U */
1989                 pset_foreach(live, tmp) {
1990                         if(!set_find_keyval(args, tmp)) {
1991                                 /* if a live value is not used by irn */
1992                                 tmp_op = get_irn_link(tmp);
1993                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
1994                         }
1995                 }
1996
1997
1998                 /* requirements for remats */
1999                 /* start new live ranges for values used by remats */
2000                 foreach_pre_remat(si, irn, tmp) {
2001                         op_t        *remat_op = get_irn_link(tmp);
2002                         int          n;
2003
2004                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2005                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2006                                 op_t           *arg_op = get_irn_link(remat_arg);
2007                                 ilp_var_t       prev_lr;
2008
2009                                 if(!has_reg_class(si, remat_arg)) continue;
2010
2011                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
2012                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2013                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2014
2015                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2016                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2017
2018                                 /* if remat arg is also used by current op then we can use reload placed for this argument */
2019                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2020                                         int    index = PTR_TO_INT(keyval->val);
2021
2022                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2023                                 }
2024                         }
2025                 }
2026
2027
2028
2029
2030                 /*************************
2031                  *  D O N E  W I T H  O P
2032                  *************************/
2033
2034                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2035
2036                 pset_foreach(live, tmp) {
2037                         assert(has_reg_class(si, tmp));
2038                 }
2039
2040                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2041                         ir_node        *arg = get_irn_n(irn, n);
2042
2043                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2044                 }
2045
2046                 del_pset(remat_defs);
2047                 del_pset(used);
2048                 del_set(args);
2049                 del_pset(defs);
2050                 defs = pset_new_ptr_default();
2051         }
2052
2053
2054
2055         /***************************************
2056          *   B E G I N N I N G  O F  B L O C K
2057          ***************************************/
2058
2059
2060         /* we are now at the beginning of the basic block, there are only \Phis in front of us */
2061         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2062
2063         pset_foreach(live, irn) {
2064                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2065         }
2066
2067         /* construct mem_outs for all values */
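             /* mem_out <= spill [+ mem_in]: a value is only in memory at the end of the
              * block if it is spilled here or (if live-in) already enters the block in memory */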
2068
2069         set_foreach(spill_bb->ilp, spill) {
2070                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2071                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2072
2073                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2074                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2075
2076                 if(pset_find_ptr(live, spill->irn)) {
2077                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2078
2079                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2080                         spill->mem_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2081                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2082
2083                         if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2084                                 int   n;
2085                                 op_t *op = get_irn_link(spill->irn);
2086
2087                                 /* do we have to copy a phi argument? */
2088                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2089                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2090
2091                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2092                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2093                                         double          freq=0.0;
2094                                         int             m;
2095                                         ilp_var_t       var;
2096
2097
2098                                         /* argument already done? */
2099                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2100
2101                                         /* get sum of execution frequencies of blocks with the same phi argument */
2102                                         for(m=n; m>=0; --m) {
2103                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2104
2105                                                 if(arg==arg2) {
2106                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
2107                                                 }
2108                                         }
2109
2110                                         /* copies are not for free */
2111                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2112                                         var = lpp_add_var(si->lpp, buf, lpp_binary, COST_STORE * freq);
2113
2114                                         for(m=n; m>=0; --m) {
2115                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2116
2117                                                 if(arg==arg2) {
2118                                                         op->attr.live_range.args.copies[m] = var;
2119                                                 }
2120                                         }
2121
2122                                         /* copy <= mem_in */
2123                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2124                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2125                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2126                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2127                                 }
2128                         }
2129                 }
2130         }
2131
2132
2133         /* L\U is empty at bb start */
2134         /* arg is live throughout epilog if it is reg_in into this block */
2135
2136         /* check the register pressure at the beginning of the block
2137          * including remats
2138          */
2139         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2140         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
2141
2142         pset_foreach(live, irn) {
2143                 ilp_cst_t       nospill;
2144
2145                 spill = set_find_spill(spill_bb->ilp, irn);
2146                 assert(spill);
2147
2148                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2149                 spill->reg_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2150
2151                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2152
2153                 /* spill + mem_in <= 1 */
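                     /* i.e. a value that already enters the block in memory must not be
                      * spilled again in this block */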
2154                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2155                 nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2156
2157                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2158                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2159
2160         }
2161         foreach_post_remat(bb, irn) {
2162                 op_t     *remat_op = get_irn_link(irn);
2163
2164                 DBG((si->dbg, LEVEL_4, "\t  next post remat: %+F\n", irn));
2165                 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
2166
2167                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2168         }
2169
2170         /* forall post remats add requirements */
2171         foreach_post_remat(bb, tmp) {
2172                 int         n;
2173
2174                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2175                         ir_node    *remat_arg = get_irn_n(tmp, n);
2176                         op_t       *remat_op = get_irn_link(tmp);
2177
2178                         if(!has_reg_class(si, remat_arg)) continue;
2179
2180                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2181                         assert(spill);
2182
2183                         /* remat <= reg_in_argument */
2184                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2185                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2186                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2187                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2188                 }
2189         }
2190
2191         /* mem_in/reg_in for live_in values, especially phis and their arguments */
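             /* a value (or, for a phi, its argument on the respective edge) may only enter
              * the block in memory/in a register if it leaves the corresponding predecessor
              * in memory/in a register */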
2192         pset_foreach(live, irn) {
2193                 int          p = 0,
2194                                          n;
2195
2196                 spill = set_find_spill(spill_bb->ilp, irn);
2197                 assert(spill && spill->irn == irn);
2198
2199                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2200                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2201                                 ilp_cst_t       mem_in,
2202                                                                 reg_in;
2203                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2204                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2205                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2206                                 spill_t        *spill_p;
2207
2208                                 /* although the phi is in the right register class, one or more
2209                                  * of its arguments can be in a different one or may have to be
2210                                  * ignored
2211                                  */
2212                                 if(has_reg_class(si, phi_arg)) {
2213                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2214                                         mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2215                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2216                                         reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2217
2218                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2219                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2220
2221                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2222                                         assert(spill_p);
2223
2224                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2225                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2226                                 }
2227                         }
2228                 } else {
2229                         /* else assure the value arrives on all paths in the same resource */
2230
2231                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2232                                 ilp_cst_t       mem_in,
2233                                                                 reg_in;
2234                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2235                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2236                                 spill_t        *spill_p;
2237
2238                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2239                                 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2240                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2241                                 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2242
2243                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2244                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2245
2246                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2247                                 assert(spill_p);
2248
2249                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2250                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2251                         }
2252                 }
2253         }
2254
2255         /* first live ranges from reg_ins */
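             /* i.e. the first live range in the block can only be active if the value enters
              * the block in a register or is recomputed by a post remat at the block start */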
2256         pset_foreach(live, irn) {
2257                 op_t      *op = get_irn_link(irn);
2258
2259                 spill = set_find_spill(spill_bb->ilp, irn);
2260                 assert(spill && spill->irn == irn);
2261
2262                 ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2263                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2264                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2265                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2266
2267                 foreach_post_remat(bb, tmp) {
2268                         op_t     *remat_op = get_irn_link(tmp);
2269
2270                         if(remat_op->attr.remat.remat->value == irn) {
2271                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2272                         }
2273                 }
2274         }
2275
2276         /* walk forward now and compute constraints for placing spills */
2277         /* this must only be done for values that are not defined in this block */
2278         /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
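             /* a spill of a live-through value can only be placed while the value is in a
              * register: it must enter in a register (only counted on diverge edges) or be
              * recomputed by a remat before its first use in this block */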
2279         pset_foreach(live, irn) {
2280                 /*
2281                  * if value is defined in this block we can always place the spill directly after the def
2282                  *    -> no constraint necessary
2283                  */
2284                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) continue;
2285
2286
2287                 spill = set_find_spill(spill_bb->ilp, irn);
2288                 assert(spill);
2289
2290                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2291                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2292
2293                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2294                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2295
2296                 if(!is_Phi(irn)) {
2297                         sched_foreach_op(bb, tmp) {
2298                                 op_t   *op = get_irn_link(tmp);
2299
2300                                 if(is_Phi(tmp)) continue;
2301                                 assert(!is_Proj(tmp));
2302
2303                                 if(op->is_remat) {
2304                                         ir_node   *value = op->attr.remat.remat->value;
2305
2306                                         if(value == irn) {
2307                                                 /* only collect remats up to the first use of a value */
2308                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2309                                         }
2310                                 } else {
2311                                         int   n;
2312
2313                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2314                                                 ir_node    *arg = get_irn_n(tmp, n);
2315
2316                                                 if(arg == irn) {
2317                                                         /* if a value is used stop collecting remats */
2318                                                         cst = ILP_UNDEF;
2319                                                         break;
2320                                                 }
2321                                         }
2322                                 }
2323                                 if(cst == ILP_UNDEF) break;
2324                         }
2325                 }
2326         }
2327
2328         del_pset(live);
2329 }
2330
2331 typedef struct _irnlist_t {
2332         struct list_head   list;
2333         ir_node           *irn;
2334 } irnlist_t;
2335
2336 typedef struct _interference_t {
2337         struct list_head    blocklist;
2338         ir_node            *a;
2339         ir_node            *b;
2340 } interference_t;
2341
2342 static int
2343 cmp_interference(const void *a, const void *b, size_t size)
2344 {
2345         const interference_t *p = a;
2346         const interference_t *q = b;
2347
2348         return !(p->a == q->a && p->b == q->b);
2349 }
2350
2351 static interference_t *
2352 set_find_interference(set * set, ir_node * a, ir_node * b)
2353 {
2354         interference_t     query;
2355
2356         query.a = (a>b)?a:b;
2357         query.b = (a>b)?b:a;
2358
2359         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2360 }
2361
2362 static interference_t *
2363 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2364 {
2365         interference_t     query,
2366                                           *result;
2367         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2368
2369         list->irn = bb;
2370
2371         result = set_find_interference(set, a, b);
2372         if(result) {
2373
2374                 list_add(&list->list, &result->blocklist);
2375                 return result;
2376         }
2377
2378         query.a = (a>b)?a:b;
2379         query.b = (a>b)?b:a;
2380
2381         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2382
2383         INIT_LIST_HEAD(&result->blocklist);
2384         list_add(&list->list, &result->blocklist);
2385
2386         return result;
2387 }
2388
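/**
 * Check whether the values a and b interfere within block bb: they do if both
 * are live-in, or if the dominating value is still live at the block end or at
 * a use that comes after the definition of the dominated one.
 */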
2389 static int
2390 values_interfere_in_block(ir_node * bb, ir_node * a, ir_node * b)
2391 {
2392         const ir_edge_t *edge;
2393
2394         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2395                 /* both values are live in, so they interfere */
2396                 return 1;
2397         }
2398
2399         /* ensure a dominates b */
2400         if(value_dominates(b,a)) {
2401                 const ir_node * t;
2402                 t = b;
2403                 b = a;
2404                 a = t;
2405         }
2406         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2407
2408
2409         /* the following code is stolen from bera.c */
2410         if(is_live_end(bb, a))
2411                 return 1;
2412
2413         foreach_out_edge(a, edge) {
2414                 const ir_node *user = edge->src;
2415                 if(get_nodes_block(user) == bb
2416                                 && !is_Phi(user)
2417                                 && b != user
2418                                 && value_dominates(b, user))
2419                         return 1;
2420         }
2421
2422         return 0;
2423 }
2424
2425 /**
2426  * Walk all irg blocks and collect interfering values inside of phi classes
2427  */
2428 static void
2429 luke_interferencewalker(ir_node * bb, void * data)
2430 {
2431         spill_ilp_t    *si = (spill_ilp_t*)data;
2432         irn_live_t     *li1,
2433                        *li2;
2434
2435         live_foreach(bb, li1) {
2436                 ir_node        *a = (ir_node *) li1->irn;
2437                 op_t           *a_op = get_irn_link(a);
2438
2439                 if(a_op->is_remat) continue;
2440
2441                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2442                 if (has_reg_class(si, a) && get_phi_class(a)) {
2443                         for(li2=li1->next; li2; li2 = li2->next) {
2444                                 ir_node        *b = (ir_node *) li2->irn;
2445                                 op_t           *b_op = get_irn_link(b);
2446
2447                                 if(b_op->is_remat) continue;
2448
2449                                 /* a and b are only interesting if they are in the same phi class */
2450                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2451                                         if(values_interfere_in_block(bb, a, b)) {
2452                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2453                                                 set_insert_interference(si, si->interferences, a, b, bb);
2454                                         }
2455                                 }
2456                         }
2457                 }
2458         }
2459 }
2460
2461 static unsigned int copy_path_id = 0;
2462
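/**
 * Emit the constraint any_interfere <= sum of the copies on the current path:
 * if the two values interfere in memory, at least one copy along every phi
 * path connecting them has to be placed.
 */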
2463 static void
2464 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2465 {
2466         ilp_cst_t  cst;
2467         ilp_var_t  copy;
2468         char       buf[256];
2469         void      *ptr;
2470
2471         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2472         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2473
2474         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2475
2476         pset_foreach(copies, ptr) {
2477                 copy = PTR_TO_INT(ptr);
2478                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2479         }
2480 }
2481
2482 /**
2483  * @param copies   contains a path of copies which leads us to irn
2484  * @param visited  contains a set of nodes already visited on this path
2485  */
2486 static void
2487 find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2488 {
2489         ir_edge_t *edge;
2490         op_t      *op = get_irn_link(irn);
2491
2492         if(op->is_remat) return;
2493
2494         pset_insert_ptr(visited, irn);
2495
2496         if(is_Phi(irn)) {
2497                 int    n;
2498
2499                 /* visit all operands */
2500                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2501                         ir_node  *arg = get_irn_n(irn, n);
2502                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2503
2504                         if(!has_reg_class(si, arg)) continue;
2505
2506                         if(arg == target) {
2507                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2508                                 write_copy_path_cst(si, copies, any_interfere);
2509                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2510                         } else {
2511                                 if(!pset_find_ptr(visited, arg)) {
2512                                         pset_insert(copies, INT_TO_PTR(copy), copy);
2513                                         find_copy_path(si, arg, target, any_interfere, copies, visited);
2514                                         pset_remove(copies, INT_TO_PTR(copy), copy);
2515                                 }
2516                         }
2517                 }
2518         }
2519
2520         /* visit all uses which are phis */
2521         foreach_out_edge(irn, edge) {
2522                 ir_node  *user = edge->src;
2523                 int       pos  = edge->pos;
2524                 op_t     *op = get_irn_link(user);
2525                 ilp_var_t copy;
2526
2527                 if(!is_Phi(user)) continue;
2528                 if(!has_reg_class(si, user)) continue;
2529
2530                 copy = op->attr.live_range.args.copies[pos];
2531
2532                 if(user == target) {
2533                         pset_insert(copies, INT_TO_PTR(copy), copy);
2534                         write_copy_path_cst(si, copies, any_interfere);
2535                         pset_remove(copies, INT_TO_PTR(copy), copy);
2536                 } else {
2537                         if(!pset_find_ptr(visited, user)) {
2538                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2539                                 find_copy_path(si, user, target, any_interfere, copies, visited);
2540                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2541                         }
2542                 }
2543         }
2544
2545         pset_remove_ptr(visited, irn);
2546 }
2547
2548 static void
2549 gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
2550 {
2551         pset * copies = pset_new_ptr_default();
2552         pset * visited = pset_new_ptr_default();
2553
2554         find_copy_path(si, a, b, any_interfere, copies, visited);
2555
2556         del_pset(visited);
2557         del_pset(copies);
2558 }
2559
2560
2561 static void
2562 memcopyhandler(spill_ilp_t * si)
2563 {
2564         interference_t   *interference;
2565         char              buf[256];
2566         /* test memory values for interference */
2567
2568         /* analyze phi classes */
2569         phi_class_compute(si->chordal_env->irg);
2570
2571         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
2572         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
2573
2574 //      phi_class_free(si->chordal_env->irg);
2575
2576         /* now let's emit the ILP inequalities for these interferences */
2577         set_foreach(si->interferences, interference) {
2578                 irnlist_t      *irnlist;
2579                 ilp_var_t       interfere,
2580                                                 any_interfere;
2581                 ilp_cst_t       any_interfere_cst,
2582                                                 cst;
2583                 const ir_node  *a  = interference->a;
2584                 const ir_node  *b  = interference->b;
2585
2586                 /* any_interf <= \sum interf */
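                     /* together with the per-block 'any_interfere >= interfere' constraints
                      * below this models any_interfere as the logical OR of the per-block
                      * interferences */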
2587                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
2588                 any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2589                 any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2590
2591                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
2592
2593                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
2594                         const ir_node  *bb = irnlist->irn;
2595                         spill_bb_t     *spill_bb = get_irn_link(bb);
2596                         spill_t        *spilla,
2597                                                    *spillb,
2598                                                    query;
2599                         char           buf[256];
2600
2601                         query.irn = a;
2602                         spilla = set_find_spill(spill_bb->ilp, a);
2603                         assert(spilla);
2604
2605                         query.irn = b;
2606                         spillb = set_find_spill(spill_bb->ilp, b);
2607                         assert(spillb);
2608
2609                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
2610                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
2611                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
2612                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
2613                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
2614                         interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2615
2616                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
2617                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2618
2619                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
2620                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
2621                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
2622                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
2623                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
2624
2625                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
2626                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2627
2628                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2629                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
2630                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
2631
2632                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
2633                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2634
2635                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2636                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
2637                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
2638
2639
2640                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
2641
2642                         /* any_interfere >= interf */
2643                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
2644                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2645
2646                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2647                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2648                 }
2649
2650                 /* now that we know whether the two values interfere in memory we can emit the constraints which enforce copies */
2651                 gen_copy_constraints(si,a,b,any_interfere);
2652         }
2653 }
2654
2655
2656
2657 static void
2658 memcopyinsertor(spill_ilp_t * si)
2659 {
2660         /* assign spill contexts. For phis make sure that identical
2661          * contexts are merged (operands and result share the same
2662          * context)
2663          */
2664
2665
2666
2667
2668
2669 }
2670
2671
2672
2673
2674 static INLINE int
2675 is_zero(double x)
2676 {
2677         return fabs(x) < 0.00001;
2678 }
2679
2680 #ifdef KEEPALIVE
2681 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
2682 {
2683         spill_ilp_t *si = get_irg_link(current_ir_graph);
2684
2685         if(pset_find_ptr(si->all_possible_remats, n)) {
2686                 op_t   *op = (op_t*)get_irn_link(n);
2687                 assert(op && op->is_remat);
2688
2689                 if(!op->attr.remat.remat->inverse) {
2690                         if(op->attr.remat.pre) {
2691                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
2692                         } else {
2693                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
2694                         }
2695
2696                         return 1;
2697                 } else {
2701                         if(op->attr.remat.pre) {
2702                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
2703                         } else {
2704                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
2705                         }
2706
2707                         return 1;
2708                 }
2709         }
2710
2711         return 0;
2712 }
2713
2714 static void
2715 dump_graph_with_remats(ir_graph * irg, const char * suffix)
2716 {
2717         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
2718         be_dump(irg, suffix, dump_ir_block_graph_sched);
2719         set_dump_node_vcgattr_hook(NULL);
2720 }
2721 #endif
2722
2723 /**
2724  * Edge hook to dump the schedule edges with annotated register pressure.
2725  */
2726 static int
2727 sched_pressure_edge_hook(FILE *F, ir_node *irn)
2728 {
2729         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
2730                 ir_node *prev = sched_prev(irn);
2731                 fprintf(F, "edge:{sourcename:\"");
2732                 PRINT_NODEID(irn);
2733                 fprintf(F, "\" targetname:\"");
2734                 PRINT_NODEID(prev);
2735                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
2736                 fprintf(F, "\" color:magenta}\n");
2737         }
2738         return 1;
2739 }
2740
2741 static void
2742 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
2743 {
2744         DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
2745
2746         dump_consts_local(0);
2747         set_dump_node_edge_hook(sched_pressure_edge_hook);
2748         dump_ir_block_graph(irg, suffix);
2749         set_dump_node_edge_hook(old);
2750 }
2751
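/**
 * Block walker: annotate every scheduled node (via its link field) with the
 * register pressure before it. Used by the pressure dumper and reload mover.
 */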
2752 static void
2753 walker_pressure_annotator(ir_node * bb, void * data)
2754 {
2755         spill_ilp_t  *si = data;
2756         ir_node      *irn;
2757         irn_live_t   *li;
2758         int           n;
2759         pset         *live = pset_new_ptr_default();
2760         int           projs = 0;
2761
2762         live_foreach(bb, li) {
2763                 irn = (ir_node *) li->irn;
2764
2765                 if (live_is_end(li) && has_reg_class(si, irn)) {
2766                         pset_insert_ptr(live, irn);
2767                 }
2768         }
2769
2770         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
2771
2772         sched_foreach_reverse(bb, irn) {
2773                 if(is_Phi(irn)) {
2774                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
2775                         continue;
2776                 }
2777
2778                 if(has_reg_class(si, irn)) {
2779                         pset_remove_ptr(live, irn);
2780                         if(is_Proj(irn)) ++projs;
2781                 }
2782
2783                 if(!is_Proj(irn)) projs = 0;
2784
2785                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2786                         ir_node    *arg = get_irn_n(irn, n);
2787
2788                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
2789                 }
2790                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
2791         }
2792
2793         del_pset(live);
2794 }
2795
2796 static void
2797 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
2798 {
2799         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
2800 }
2801
2802 #ifdef KEEPALIVE
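/**
 * Create a Keep node holding all possible remats so they survive until the
 * ILP solution is known and show up in the dumped graph.
 */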
2803 static void
2804 connect_all_remats_with_keep(spill_ilp_t * si)
2805 {
2806         ir_node   *irn;
2807         ir_node  **ins,
2808                          **pos;
2809         int        n_remats;
2810
2811
2812         n_remats = pset_count(si->all_possible_remats);
2813         if(n_remats) {
2814                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
2815
2816                 pos = ins;
2817                 pset_foreach(si->all_possible_remats, irn) {
2818                         *pos = irn;
2819                         ++pos;
2820                 }
2821
2822                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
2823
2824                 obstack_free(si->obst, ins);
2825         }
2826 }
2827 #endif
2828
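/**
 * Create a Keep node for all collected spills/reloads (see KEEPALIVE_SPILLS
 * and KEEPALIVE_RELOADS) so they are not considered dead.
 */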
2829 static void
2830 connect_all_spills_with_keep(spill_ilp_t * si)
2831 {
2832         ir_node   *irn;
2833         ir_node  **ins,
2834                          **pos;
2835         int        n_spills;
2836         ir_node   *keep;
2837
2838
2839         n_spills = pset_count(si->spills);
2840         if(n_spills) {
2841                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
2842
2843                 pos = ins;
2844                 pset_foreach(si->spills, irn) {
2845                         *pos = irn;
2846                         ++pos;
2847                 }
2848
2849                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
2850
2851                 obstack_free(si->obst, ins);
2852         }
2853 }
2854
2855 /** insert a spill at an arbitrary position */
2856 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
2857 {
2858         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
2859         ir_graph *irg   = get_irn_irg(bl);
2860         ir_node *frame  = get_irg_frame(irg);
2861         ir_node *spill;
2862         ir_node *next;
2863
2864         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
2865         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
2866
2867         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
2868
2869         /*
2870          * search the right insertion point. a spill of a phi cannot be put
2871          * directly after the phi, if there are some phis behind the one which
2872          * is spilled. Also, a spill of a Proj must be after all Projs of the
2873          * same tuple node.
2874          *
2875          * Here's one special case:
2876          * If the spill is in the start block, the spill must be placed after the
2877          * frame pointer is set up. This is done by moving the insertion point
2878          * behind the frame node if necessary (see below).
2879          */
2880
2881         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
2882                 insert = frame;
2883
2884         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
2885                 insert = next;
2886
2887         sched_add_after(insert, spill);
2888         return spill;
2889 }
2890
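/**
 * Remove a remat from the schedule and disconnect it (including its block,
 * index -1) from the graph by rewiring all its ins to Bad.
 */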
2891 static void
2892 delete_remat(spill_ilp_t * si, ir_node * remat) {
2893         int       n;
2894         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2895
2896         sched_remove(remat);
2897
2898         /* kill links to operands */
2899         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
2900                 set_irn_n(remat, n, bad);
2901         }
2902 }
2903
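/**
 * Disconnect all unused remat operations (and their Projs) from the graph
 * and free the per-value remat sets.
 */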
2904 static void
2905 clean_remat_info(spill_ilp_t * si)
2906 {
2907         int            n;
2908         remat_t       *remat;
2909         remat_info_t  *remat_info;
2910         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
2911
2912         set_foreach(si->remat_info, remat_info) {
2913                 if(!remat_info->remats) continue;
2914
2915                 pset_foreach(remat_info->remats, remat)
2916                 {
2917                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
2918                                 set_irn_n(remat->proj, -1, bad);
2919                                 set_irn_n(remat->proj, 0, bad);
2920                         }
2921
2922                         if(get_irn_n_edges(remat->op) == 0) {
2923                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
2924                                         set_irn_n(remat->op, n, bad);
2925                                 }
2926                         }
2927                 }
2928
2929                 if(remat_info->remats) del_pset(remat_info->remats);
2930                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
2931         }
2932 }
2933
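/**
 * Delete all inserted remats whose ILP variable is zero in the solution;
 * remats that are kept are only reported in the debug output.
 */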
2934 static void
2935 delete_unnecessary_remats(spill_ilp_t * si)
2936 {
2937 #ifdef KEEPALIVE
2938         int       n;
2939         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2940
2941         if(si->keep) {
2942                 ir_node   *end = get_irg_end(si->chordal_env->irg);
2943                 ir_node  **keeps;
2944
2945                 for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
2946                         ir_node        *keep_arg = get_irn_n(si->keep, n);
2947                         op_t           *arg_op = get_irn_link(keep_arg);
2948                         lpp_name_t     *name;
2949
2950                         assert(arg_op->is_remat);
2951
2952                         name = si->lpp->vars[arg_op->attr.remat.ilp];
2953
2954                         if(is_zero(name->value)) {
2955                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
2956                                 /* TODO check whether reload is preferred over remat (could be bug) */
2957                                 delete_remat(si, keep_arg);
2958                         } else {
2959                                 if(!arg_op->attr.remat.remat->inverse) {
2960                                         if(arg_op->attr.remat.pre) {
2961                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
2962                                         } else {
2963                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
2964                                         }
2965                                 } else {
2966                                         if(arg_op->attr.remat.pre) {
2967                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
2968                                         } else {
2969                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
2970                                         }
2971                                 }
2972                         }
2973
2974                         set_irn_n(si->keep, n, bad);
2975                 }
2976 #if 0
2977                 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
2978                         ir_node        *end_arg = get_End_keepalive(end, i);
2979
2980                         if(end_arg != si->keep) {
2981                                 obstack_grow(si->obst, &end_arg, sizeof(end_arg));
2982                         }
2983                 }
2984                 keeps = obstack_finish(si->obst);
2985                 set_End_keepalives(end, n-1, keeps);
2986                 obstack_free(si->obst, keeps);
2987 #endif
2988         } else {
2989                 DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
2990         }
2991 #else
2992         ir_node  *remat;
2993
2994         pset_foreach(si->all_possible_remats, remat) {
2995                 op_t           *remat_op = get_irn_link(remat);
2996                 lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
2997
2998                 if(is_zero(name->value)) {
2999                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
3000                         /* TODO check whether reload is preferred over remat (could be bug) */
3001                         delete_remat(si, remat);
3002                 } else {
3003                         if(!remat_op->attr.remat.remat->inverse) {
3004                                 if(remat_op->attr.remat.pre) {
3005                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3006                                 } else {
3007                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3008                                 }
3009                         } else {
3010                                 if(remat_op->attr.remat.pre) {
3011                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3012                                 } else {
3013                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3014                                 }
3015                         }
3016                 }
3017         }
3018 #endif
3019 }
3020
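/**
 * Collect all spills recorded for @p value into a new pset.
 */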
3021 static pset *
3022 get_spills_for_value(spill_ilp_t * si, ir_node * value)
3023 {
3024         pset     *spills = pset_new_ptr_default();
3025
3026         ir_node  *next;
3027         defs_t   *defs;
3028
3029         defs = set_find_def(si->values, value);
3030
3031         if(defs && defs->spills) {
3032                 for(next = defs->spills; next; next = get_irn_link(next)) {
3033                         pset_insert_ptr(spills, next);
3034                 }
3035         }
3036
3037         return spills;
3038 }
3039
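/**
 * Collect @p value itself and all remats recorded for it into a new pset.
 */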
3040 static pset *
3041 get_remats_for_value(spill_ilp_t * si, ir_node * value)
3042 {
3043         pset     *remats = pset_new_ptr_default();
3044
3045         ir_node  *next;
3046         defs_t   *defs;
3047
3048         pset_insert_ptr(remats, value);
3049         defs = set_find_def(si->values, value);
3050
3051         if(defs && defs->remats) {
3052                 for(next = defs->remats; next; next = get_irn_link(next)) {
3053                         pset_insert_ptr(remats, next);
3054                 }
3055         }
3056
3057         return remats;
3058 }
3059
3060
3061 /**
3062  * @param before   The node after which the spill will be placed in the schedule
3063  */
3064 /* TODO set context properly */
3065 static ir_node *
3066 insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
3067 {
3068         defs_t   *defs;
3069         ir_node  *spill;
3070         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3071
3072         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3073
3074         spill = be_spill2(arch_env, irn, before, irn);
3075
3076         defs = set_insert_def(si->values, value);
3077         assert(defs);
3078
3079         /* enter into the linked list */
3080         set_irn_link(spill, defs->spills);
3081         defs->spills = spill;
3082
3083 #ifdef KEEPALIVE_SPILLS
3084         pset_insert_ptr(si->spills, spill);
3085 #endif
3086
3087         return spill;
3088 }
3089
3090 /**
3091  * @param phi   The Phi node which has to be spilled
3092  */
3093 static ir_node *
3094 insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
3095 {
3096         ir_node   *mem_phi;
3097         ir_node  **ins;
3098         defs_t    *defs;
3099         int        n;
3100         op_t      *op = get_irn_link(phi);
3101
3102         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3103
3104         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3105                 ins[n] = si->m_unknown;
3106         }
3107
3108         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3109
3110         defs = set_insert_def(si->values, phi);
3111         assert(defs);
3112
3113         /* enter into the linked list */
3114         set_irn_link(mem_phi, defs->spills);
3115         defs->spills = mem_phi;
3116
3117         sched_add_after(phi, mem_phi);
3118
3119 #ifdef KEEPALIVE_SPILLS
3120         pset_insert_ptr(si->spills, mem_phi);
3121 #endif
3122
3123
3124         return mem_phi;
3125 }
3126
3127 /**
3128  * Add remat to list of defs, destroys link field!
3129  */
3130 static void
3131 insert_remat(spill_ilp_t * si, ir_node * remat)
3132 {
3133         defs_t   *defs;
3134         op_t     *remat_op = get_irn_link(remat);
3135
3136         assert(remat_op->is_remat);
3137
3138         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3139         assert(defs);
3140
3141         /* enter into the linked list */
3142         set_irn_link(remat, defs->remats);
3143         defs->remats = remat;
3144 }
3145
3146
3147 /**
3148  * Add reload before operation and add to list of defs
3149  */
3150 static ir_node *
3151 insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
3152 {
3153         defs_t   *defs;
3154         ir_node  *reload,
3155                          *spill;
3156         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3157
3158         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3159
3160         defs = set_find_def(si->values, value);
3161
3162         spill = defs->spills;
3163         assert(spill && "no spill placed before reload");
3164
3165         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3166
3167         /* enter into the linked list */
3168         set_irn_link(reload, defs->remats);
3169         defs->remats = reload;
3170
3171         return reload;
3172 }
3173
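/**
 * Block walker: place the spills selected by the ILP, either directly after
 * the definition, at the block start (if the value is in a register there),
 * or after a remat of the value inside the block. Spilled Phis additionally
 * get a memory Phi.
 */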
3174 static void
3175 walker_spill_placer(ir_node * bb, void * data) {
3176         spill_ilp_t   *si = (spill_ilp_t*)data;
3177         ir_node       *irn;
3178         spill_bb_t    *spill_bb = get_irn_link(bb);
3179         pset          *spills_to_do = pset_new_ptr_default();
3180         spill_t       *spill;
3181
3182         set_foreach(spill_bb->ilp, spill) {
3183                 lpp_name_t    *name;
3184
3185                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3186                         name = si->lpp->vars[spill->mem_in];
3187                         if(!is_zero(name->value)) {
3188                                 ir_node   *mem_phi;
3189
3190                                 mem_phi = insert_mem_phi(si, spill->irn);
3191
3192                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3193                         }
3194                 }
3195
3196                 name = si->lpp->vars[spill->spill];
3197                 if(!is_zero(name->value)) {
3198                         /* place spill directly after definition */
3199                         if(get_nodes_block(spill->irn) == bb) {
3200                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3201                                 continue;
3202                         }
3203
3204                         /* place spill at bb start */
3205                         if(spill->reg_in > 0) {
3206                                 name = si->lpp->vars[spill->reg_in];
3207                                 if(!is_zero(name->value)) {
3208                                         insert_spill(si, spill->irn, spill->irn, bb);
3209                                         continue;
3210                                 }
3211                         }
3212                         /* place spill after a remat */
3213                         pset_insert_ptr(spills_to_do, spill->irn);
3214                 }
3215         }
3216         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3217
3218
3219         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3220                 op_t     *op = get_irn_link(irn);
3221
3222                 if(be_is_Spill(irn)) continue;
3223
3224                 if(op->is_remat) {
3225                         /* TODO fix this if we want to support remats with more than two nodes */
3226                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3227                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3228
3229                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3230                         }
3231                 } else {
3232                         if(pset_find_ptr(spills_to_do, irn)) {
3233                                 pset_remove_ptr(spills_to_do, irn);
3234
3235                                 insert_spill(si, irn, irn, irn);
3236                         }
3237                 }
3238
3239         }
3240
3241         assert(pset_count(spills_to_do) == 0);
3242
3243         /* afterwards free data in block */
3244         del_pset(spills_to_do);
3245 }
3246
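/**
 * Insert a memory copy (an extra spill) of @p value at the end of block @p bb,
 * after the last definition or remat of the value in that block.
 */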
3247 static ir_node *
3248 insert_mem_copy(spill_ilp_t * si, const ir_node * bb, const ir_node * value)
3249 {
3250         ir_node          *insert_pos = bb;
3251         ir_node          *spill;
3252         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3253
3254         /* find last definition of arg value in block */
3255         ir_node  *next;
3256         defs_t   *defs;
3257         int       last = 0;
3258
3259         defs = set_find_def(si->values, value);
3260
3261         if(defs && defs->remats) {
3262                 for(next = defs->remats; next; next = get_irn_link(next)) {
3263                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3264                                 last = sched_get_time_step(next);
3265                                 insert_pos = next;
3266                         }
3267                 }
3268         }
3269
3270         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3271                 last = sched_get_time_step(value);
3272                 insert_pos = value;
3273         }
3274
3275         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3276
3277         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos, value);
3278
3279         return spill;
3280 }
3281
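/**
 * Fix the arguments of every memory Phi: replace the Unknown operands by the
 * spill of the corresponding Phi argument (or by a fresh memory copy if the
 * ILP selected one for that edge).
 */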
3282 static void
3283 phim_fixer(spill_ilp_t *si) {
3284         defs_t  *defs;
3285
3286         set_foreach(si->values, defs) {
3287                 const ir_node  *phi = defs->value;
3288                 op_t           *op = get_irn_link(phi);
3289                 ir_node        *phi_m = NULL;
3290                 ir_node        *next = defs->spills;
3291                 int             n;
3292
3293                 if(!is_Phi(phi)) continue;
3294
3295                 while(next) {
3296                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3297                                 phi_m = next;
3298                                 break;
3299                         } else {
3300                                 next = get_irn_link(next);
3301                         }
3302                 }
3303                 if(!phi_m) continue;
3304
3305                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3306                         const ir_node  *value = get_irn_n(phi, n);
3307                         defs_t         *val_defs = set_find_def(si->values, value);
3308                         ir_node        *arg = get_irn_n(phi_m, n);
3309
3310                         /* get a spill of this value */
3311                         ir_node      *spill = val_defs->spills;
3312
3313
3314 #ifndef NO_MEMCOPIES
3315                         ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3316                         lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3317
3318                         if(!is_zero(name->value)) {
3319                                 spill = insert_mem_copy(si, pred, value);
3320                         } else {
3321                                 assert(spill && "no spill placed before PhiM");
3322                         }
3323 #else
3324                         assert(spill && "no spill placed before PhiM");
3325 #endif
3326                         set_irn_n(phi_m, n, spill);
3327                 }
3328         }
3329 }
3330
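/**
 * Block walker: insert the reloads selected by the ILP (at the block end and
 * before uses), register remats with their values and free the per-block
 * ILP data.
 */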
3331 static void
3332 walker_reload_placer(ir_node * bb, void * data) {
3333         spill_ilp_t   *si = (spill_ilp_t*)data;
3334         ir_node       *irn;
3335         spill_bb_t    *spill_bb = get_irn_link(bb);
3336         int            i;
3337         irn_live_t    *li;
3338
3339         /* reloads at end of block */
3340         if(spill_bb->reloads) {
3341                 keyval_t    *keyval;
3342
3343                 set_foreach(spill_bb->reloads, keyval) {
3344                         ir_node        *irn = (ir_node*)keyval->key;
3345                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3346                         lpp_name_t     *name;
3347
3348                         name = si->lpp->vars[reload];
3349                         if(!is_zero(name->value)) {
3350                                 ir_node    *reload;
3351                                 ir_node    *insert_pos = bb;
3352                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3353                                 op_t       *prev_op;
3354
3355                                 while(be_is_Spill(prev)) {
3356                                         prev = sched_prev(prev);
3357                                 }
3358
3359                                 prev_op = get_irn_link(prev);
3360
3361                                 /* insert reload before pre-remats */
3362                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3363                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3364                                         insert_pos = prev;
3365
3366                                         do {
3367                                                 prev = sched_prev(prev);
3368                                         } while(be_is_Spill(prev));
3369
3370                                         prev_op = get_irn_link(prev);
3371
3372                                 }
3373
3374                                 reload = insert_reload(si, irn, insert_pos);
3375
3376 #ifdef KEEPALIVE_RELOADS
3377                                 pset_insert_ptr(si->spills, reload);
3378 #endif
3379                         }
3380                 }
3381         }
3382
3383         /* walk and insert more reloads and collect remats */
3384         sched_foreach_reverse(bb, irn) {
3385                 op_t     *op = get_irn_link(irn);
3386
3387                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3388                 if(is_Phi(irn)) break;
3389
3390                 if(op->is_remat) {
3391                         if(get_irn_mode(irn) != mode_T) {
3392                                 insert_remat(si, irn);
3393                         }
3394                 } else {
3395                         int    n;
3396
3397                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3398                                 ir_node    *arg = get_irn_n(irn, n);
3399
3400                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3401                                         lpp_name_t    *name;
3402
3403                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3404                                         if(!is_zero(name->value)) {
3405                                                 ir_node    *reload;
3406                                                 ir_node    *insert_pos = irn;
3407                                                 ir_node    *prev = sched_prev(insert_pos);
3408                                                 op_t       *prev_op;
3409
3410                                                 while(be_is_Spill(prev)) {
3411                                                         prev = sched_prev(prev);
3412                                                 }
3413
3414                                                 prev_op = get_irn_link(prev);
3415
3416                                                 /* insert reload before pre-remats */
3417                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3418                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3419                                                         insert_pos = prev;
3420
3421                                                         do {
3422                                                                 prev = sched_prev(prev);
3423                                                         } while(be_is_Spill(prev));
3424
3425                                                         prev_op = get_irn_link(prev);
3426
3427                                                 }
3428
3429                                                 reload = insert_reload(si, arg, insert_pos);
3430
3431                                                 set_irn_n(irn, n, reload);
3432
3433 #ifdef KEEPALIVE_RELOADS
3434                                                 pset_insert_ptr(si->spills, reload);
3435 #endif
3436                                         }
3437                                 }
3438                         }
3439                 }
3440         }
3441
3442         del_set(spill_bb->ilp);
3443         if(spill_bb->reloads) del_set(spill_bb->reloads);
3444 }
3445
3446 static void
3447 walker_collect_used(ir_node * irn, void * data)
3448 {
3449         lc_bitset_t   *used = data;
3450
3451         lc_bitset_set(used, get_irn_idx(irn));
3452 }
3453
3454 struct kill_helper {
3455         lc_bitset_t  *used;
3456         spill_ilp_t  *si;
3457 };
3458
3459 static void
3460 walker_kill_unused(ir_node * bb, void * data)
3461 {
3462         struct kill_helper *kh = data;
3463         const ir_node      *bad = get_irg_bad(get_irn_irg(bb));
3464         ir_node            *irn;
3465
3466
3467         for(irn=sched_first(bb); !sched_is_end(irn);) {
3468                 ir_node     *next = sched_next(irn);
3469                 int          n;
3470
3471                 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3472                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3473                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3474 #if 0
3475                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3476 #endif
3477                         }
3478
3479                         sched_remove(irn);
3480
3481                         set_nodes_block(irn, bad);
3482                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3483                                 set_irn_n(irn, n, bad);
3484                         }
3485                 }
3486                 irn = next;
3487         }
3488 }
3489
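/**
 * Remove every scheduled node that is no longer reachable in the graph,
 * e.g. spills and reloads that lost all their users.
 */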
3490 static void
3491 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3492 {
3493         struct kill_helper kh;
3494
3495         kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3496         kh.si = si;
3497
3498         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3499         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3500
3501         lc_bitset_free(kh.used);
3502 }
3503
3504 static void
3505 print_irn_pset(pset * p)
3506 {
3507         ir_node   *irn;
3508
3509         pset_foreach(p, irn) {
3510                 ir_printf("%+F\n", irn);
3511         }
3512 }
3513
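/**
 * Re-establish SSA form: reconstruct definitions for values with more than
 * one spill and rewire all uses of a value to the nearest definition among
 * the original value, its remats and its reloads (via dominance frontiers).
 */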
3514 static void
3515 rewire_uses(spill_ilp_t * si)
3516 {
3517         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3518         defs_t               *defs;
3519         pset                 *ignore = pset_new_ptr(1);
3520
3521         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3522
3523         /* first fix uses of spills */
3524         set_foreach(si->values, defs) {
3525                 pset     *reloads;
3526                 pset     *spills;
3527                 ir_node  *next = defs->remats;
3528                 int remats = 0;
3529
3530                 reloads = pset_new_ptr_default();
3531
3532                 while(next) {
3533                         if(be_is_Reload(next)) {
3534                                 pset_insert_ptr(reloads, next);
3535                         } else {
3536                                 ++remats;
3537                         }
3538                         next = get_irn_link(next);
3539                 }
3540
3541                 spills = get_spills_for_value(si, defs->value);
3542                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
3543                 if(pset_count(spills) > 1) {
3544                         //assert(pset_count(reloads) > 0);
3545                         //                              print_irn_pset(spills);
3546                         //                              print_irn_pset(reloads);
3547
3548                         be_ssa_constr_set_ignore(dfi, spills, ignore);
3549                 }
3550
3551                 del_pset(reloads);
3552                 del_pset(spills);
3553         }
3554
3555         /* then fix uses of remats and reloads */
3556         set_foreach(si->values, defs) {
3557                 pset     *nodes;
3558                 ir_node  *next = defs->remats;
3559
3560                 if(next) {
3561                         nodes = pset_new_ptr_default();
3562                         pset_insert_ptr(nodes, defs->value);
3563
3564                         while(next) {
3565                                 pset_insert_ptr(nodes, next);
3566                                 next = get_irn_link(next);
3567                         }
3568
3569                         if(pset_count(nodes) > 1) {
3570                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
3571                                 be_ssa_constr_set(dfi, nodes);
3572                         }
3573
3574                         del_pset(nodes);
3575                 }
3576         }
3577
3578 //      remove_unused_defs(si);
3579
3580         be_free_dominance_frontiers(dfi);
3581 }
3582
3583
3584 static void
3585 writeback_results(spill_ilp_t * si)
3586 {
3587         /* walk through the graph and collect all spills, reloads and remats for a value */
3588
3589         si->values = new_set(cmp_defs, 4096);
3590
3591         DBG((si->dbg, LEVEL_1, "Applying results\n"));
3592         delete_unnecessary_remats(si);
3593         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
3594         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
3595         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
3596         phim_fixer(si);
3597
3598         /* clean the remat info! there are still back-edges leading there! */
3599         clean_remat_info(si);
3600
3601         rewire_uses(si);
3602
3603         connect_all_spills_with_keep(si);
3604
3605         del_set(si->values);
3606 }
3607
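/**
 * Return the number of registers of the spill class that are actually
 * available, i.e. all registers not marked as ignore.
 */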
3608 static int
3609 get_n_regs(spill_ilp_t * si)
3610 {
3611         int     arch_n_regs = arch_register_class_n_regs(si->cls);
3612         int     free = 0;
3613         int     i;
3614
3615         for(i=0; i<arch_n_regs; i++) {
3616                 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
3617                         free++;
3618                 }
3619         }
3620
3621         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
3622         return free;
3623 }
3624
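/**
 * Block walker: move each reload upwards in the schedule as long as the
 * annotated register pressure stays below the number of available registers.
 */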
3625 static void
3626 walker_reload_mover(ir_node * bb, void * data)
3627 {
3628         spill_ilp_t   *si = data;
3629         ir_node           *tmp;
3630
3631         sched_foreach(bb, tmp) {
3632                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
3633                         ir_node       *reload = tmp;
3634                         ir_node       *irn = tmp;
3635
3636                         /* move reload upwards */
3637
3638                         int pressure = (int)get_irn_link(reload);
3639                         if(pressure < si->n_regs) {
3640                                 irn = sched_prev(reload);
3641                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
3642                                 sched_remove(reload);
3643                                 pressure = (int)get_irn_link(irn);
3644
3645                                 while(pressure < si->n_regs) {
3646                                         if(sched_is_end(irn) || (be_is_Reload(irn) && has_reg_class(si, irn))) break;
3647
3648                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
3649                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
3650                                         irn = sched_prev(irn);
3651
3652                                         pressure = (int)get_irn_link(irn);
3653                                 }
3654
3655                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
3656                                 sched_put_after(irn, reload);
3657                         }
3658                 }
3659         }
3660 }
3661
3662 static void
3663 move_reloads_upward(spill_ilp_t * si)
3664 {
3665         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
3666 }
3667
3668
3669 /**
3670  * Walk all irg blocks and check for interfering spills inside of phi classes
3671  */
3672 static void
3673 luke_meminterferencechecker(ir_node * bb, void * data)
3674 {
3675         spill_ilp_t    *si = (spill_ilp_t*)data;
3676         irn_live_t     *li1,
3677                        *li2;
3678
3679         live_foreach(bb, li1) {
3680                 ir_node        *a = (ir_node *) li1->irn;
3681
3682                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_M)) continue;
3683
3684                 /* a is only interesting if it is inside a phi class */
3685                 if (get_phi_class(a)) {
3686                         for(li2=li1->next; li2; li2 = li2->next) {
3687                                 ir_node        *b = (ir_node *) li2->irn;
3688
3689                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_M)) continue;
3690
3691                                 /* a and b are only interesting if they are in the same phi class */
3692                                 if(get_phi_class(a) == get_phi_class(b)) {
3693                                         if(values_interfere_in_block(bb, a, b)) {
3694                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
3695                                         }
3696                                 }
3697                         }
3698                 }
3699         }
3700 }
3701
3702 static void
3703 verify_phiclasses(spill_ilp_t * si)
3704 {
3705         /* analyze phi classes */
3706         phi_class_compute(si->chordal_env->irg);
3707
3708         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
3709         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
3710 }
3711
3712 static void
3713 walker_spillslotassigner(ir_node * irn, void * data)
3714 {
3715         spill_ilp_t            *si = (spill_ilp_t*)data;
3716         void                   *cls;
3717
3718         if(!be_is_Spill(irn)) return;
3719
3720         /* set spill context to phi class if it has one ;) */
3721
3722         cls = get_phi_class(irn);
3723         if(cls)
3724                 be_set_Spill_context(irn, cls);
3725         else
3726                 be_set_Spill_context(irn, irn);
3727 }
3728
3729
3730 static void
3731 assign_spillslots(spill_ilp_t * si)
3732 {
3733         DBG((si->dbg, LEVEL_2, "\t calling spill slot assigner\n"));
3734         irg_walk_graph(si->chordal_env->irg, walker_spillslotassigner, NULL, si);
3735 }
3736
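/**
 * Main entry point of the ILP spiller: insert possible remats, build and
 * solve the ILP and write the spills, reloads and kept remats back into
 * the schedule.
 */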
3737 void
3738 be_spill_remat(const be_chordal_env_t * chordal_env)
3739 {
3740         char            problem_name[256];
3741         char            dump_suffix[256];
3742         char            dump_suffix2[256];
3744         struct obstack  obst;
3745         spill_ilp_t     si;
3746
3747         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
3748         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
3749         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
3750
3751         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
3752         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
3753
3754         obstack_init(&obst);
3755         si.chordal_env = chordal_env;
3756         si.obst = &obst;
3757         si.cls = chordal_env->cls;
3758         si.lpp = new_lpp(problem_name, lpp_minimize);
3759         si.remat_info = new_set(cmp_remat_info, 4096);
3760         si.interferences = new_set(cmp_interference, 32);
3761         si.all_possible_remats = pset_new_ptr_default();
3762         si.spills = pset_new_ptr_default();
3763         si.inverse_ops = pset_new_ptr_default();
3764 #ifndef EXECFREQ_LOOPDEPH
3765         si.execfreqs = compute_execfreq(chordal_env->irg);
3766 #else
3767         si.execfreqs = NULL;
3768 #endif
3769 #ifdef KEEPALIVE
3770         si.keep = NULL;
3771 #endif
3772         si.n_regs = get_n_regs(&si);
3773
3774         set_irg_link(chordal_env->irg, &si);
3775         compute_doms(chordal_env->irg);
3776
3777         /* compute phi classes */
3778 //      phi_class_compute(chordal_env->irg);
3779
3780         be_analyze_regpressure(chordal_env, "-pre");
3781
3782 #ifdef COLLECT_REMATS
3783         /* collect remats */
3784         DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
3785         irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
3786 #endif
3787
3788         /* insert possible remats */
3789         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
3790         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
3791         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
3792
3793 #ifdef KEEPALIVE
3794         DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
3795         connect_all_remats_with_keep(&si);
3796         /* dump graph with inserted remats */
3797         dump_graph_with_remats(chordal_env->irg, dump_suffix);
3798 #endif
3799
3800         /* insert copies for phi arguments not in my regclass */
3801         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
3802
3803         /* recompute liveness */
3804         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
3805         be_liveness(chordal_env->irg);
3806
3807         /* build the ILP */
3808
3809         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
3810         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
3811         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
3812
3813         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
3814         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
3815
3816 #ifndef NO_MEMCOPIES
3817         DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
3818         memcopyhandler(&si);
3819 #endif
3820
3821 #ifdef DUMP_ILP
3822         {
3823                 FILE           *f;
3824                 char            buf[256];
3825
3826                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
3827                 if ((f = fopen(buf, "wt")) != NULL) {
3828                         lpp_dump_plain(si.lpp, f);
3829                         fclose(f);
3830                 }
3831         }
3832 #endif
3833
3834 #ifdef SOLVE
3835         DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
3836 #ifdef ILP_TIMEOUT
3837         lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
3838 #endif
3839
3840 #ifdef SOLVE_LOCAL
3841         lpp_solve_cplex(si.lpp);
3842 #else
3843         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
3844 #endif
3845         assert(lpp_is_sol_valid(si.lpp)
3846                && "solution of ILP must be valid");
3847
3848         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
3849
3850 #ifdef DUMP_SOLUTION
3851         {
3852                 FILE           *f;
3853                 char            buf[256];
3854
3855                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
3856                 if ((f = fopen(buf, "wt")) != NULL) {
3857                         int             i;
3858                         for (i = 0; i < si.lpp->var_next; ++i) {
3859                                 lpp_name_t     *name = si.lpp->vars[i];
3860                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
3861                         }
3862                         fclose(f);
3863                 }
3864         }
3865 #endif
3866
3867         writeback_results(&si);
3868
3869 #endif                          /* SOLVE */
3870
3871         kill_all_unused_values_in_schedule(&si);
3872
3873 #if defined(KEEPALIVE_SPILLS) || defined(KEEPALIVE_RELOADS)
3874         be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
3875 #endif
3876
3877         // move reloads upwards (currently disabled; only liveness is recomputed)
3878         be_liveness(chordal_env->irg);
3879         //irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3880         //move_reloads_upward(&si);
3881
3882 #ifndef NO_MEMCOPIES
3883         verify_phiclasses(&si);
3884         assign_spillslots(&si);
3885 #endif
3886
3887         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3888
3889         dump_pressure_graph(&si, dump_suffix2);
3890
3891         be_analyze_regpressure(chordal_env, "-post");
3892
3893         free_dom(chordal_env->irg);
3894         del_set(si.interferences);
3895         del_pset(si.inverse_ops);
3896         del_pset(si.all_possible_remats);
3897         del_pset(si.spills);
3898 #ifndef EXECFREQ_LOOPDEPH
3899         free_execfreq(si.execfreqs);
3900 #endif
3901         free_lpp(si.lpp);
3902         obstack_free(&obst, NULL);
3903         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
3904 }
3905
3906 #else                           /* WITH_ILP */
3907
3908 static void
3909 only_that_you_can_compile_without_WITH_ILP_defined(void)
3910 {
3911 }
3912
3913 #endif                          /* WITH_ILP */