7de9864911a2238cec48f307e574a920fd9a7473
[libfirm] / ir / be / bespillremat.c
1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35
36 #include <lpp/lpp.h>
37 #include <lpp/lpp_net.h>
38 #include <lpp/lpp_cplex.h>
39 //#include <lc_pset.h>
40 #include <libcore/lc_bitset.h>
41
42 #include "be_t.h"
43 #include "belive_t.h"
44 #include "besched_t.h"
45 #include "beirgmod.h"
46 #include "bearch.h"
47 #include "benode_t.h"
48 #include "beutil.h"
49 #include "bespillremat.h"
50 #include "bespill.h"
51 #include "bepressurestat.h"
52
53 #include "bechordal_t.h"
54
55 #define BIGM 100000.0
56
57 #define DUMP_SOLUTION
58 #define DUMP_ILP
59 //#define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
60 #define COLLECT_REMATS /* enable rematerialization */
61 #define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
62 #define REMAT_WHILE_LIVE /* only remat values that are live */
63 //#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
64 //#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
65 #define MAY_DIE_AT_REMAT /* allow values to die after a pre remat */
66 #define NO_SINGLE_USE_REMATS /* do not repair schedule */
67 //#define KEEPALIVE_SPILLS
68 //#define KEEPALIVE_RELOADS
69 #define GOODWIN_REDUCTION
70 //#define NO_MEMCOPIES
71
72 #define  SOLVE
73 //#define  SOLVE_LOCAL
74 #define LPP_SERVER "i44pc52"
75 #define LPP_SOLVER "cplex"
76
77 #define COST_LOAD      8
78 #define COST_STORE     50
79 #define COST_REMAT     1
80
81 #define LOOP_WEIGHT    12
82
83 #define ILP_TIMEOUT    120
84
85 #define ILP_UNDEF               -1
86
87 typedef struct _spill_ilp_t {
88         const arch_register_class_t  *cls;
89         int                           n_regs;
90         const be_chordal_env_t       *chordal_env;
91         lpp_t                        *lpp;
92         struct obstack               *obst;
93         set                          *remat_info;
94         pset                         *all_possible_remats;
95         pset                         *inverse_ops;
96 #ifdef KEEPALIVE
97         ir_node                      *keep;
98 #endif
99         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
100         set                          *execfreqs;
101         pset                         *spills;
102         set                          *interferences;
103         ir_node                      *m_unknown;
104         DEBUG_ONLY(firm_dbg_module_t * dbg);
105 } spill_ilp_t;
106
107 typedef int ilp_var_t;
108 typedef int ilp_cst_t;
109
110 typedef struct _spill_bb_t {
111         set      *ilp;
112         set      *reloads;
113 } spill_bb_t;
114
115 typedef struct _remat_t {
116         const ir_node        *op;      /**< for copy_irn */
117         const ir_node        *value;   /**< the value which is being recomputed by this remat */
118         ir_node              *proj;    /**< not NULL if the above op produces a tuple */
119         int                   cost;    /**< cost of this remat */
120         int                   inverse; /**< nonzero if this is an inverse remat */
121 } remat_t;
122
123 /**
124  * Data to be attached to each IR node. For remats this contains the ilp_var
125  * for this remat and for normal ops this contains the ilp_vars for
126  * reloading each operand
127  */
128 typedef struct _op_t {
129         int             is_remat;
130         union {
131                 struct {
132                         ilp_var_t       ilp;
133                         remat_t        *remat; /** the remat this op belongs to */
134                         int             pre; /** 1, if this is a pressure-increasing remat */
135                 } remat;
136                 struct {
137                         ilp_var_t       ilp;
138                         ir_node        *op; /** the operation this live range belongs to */
139                         union {
140                                 ilp_var_t      *reloads;
141                                 ilp_var_t      *copies;
142                         } args;
143                 } live_range;
144         } attr;
145 } op_t;
146
147 typedef struct _defs_t {
148         ir_node   *value;
149         ir_node   *spills;  /**< points to the first spill for this value (linked by link field) */
150         ir_node   *remats;  /**< points to the first definition for this value (linked by link field) */
151 } defs_t;
152
153 typedef struct _remat_info_t {
154         const ir_node       *irn; /**< the irn to which these remats belong */
155         pset                *remats; /**< possible remats for this value */
156         pset                *remats_by_operand; /**< remats with this value as operand */
157 } remat_info_t;
158
159 typedef struct _keyval_t {
160         const void          *key;
161         const void          *val;
162 } keyval_t;
163
164 typedef struct _spill_t {
165         ir_node      *irn;
166         ilp_var_t     reg_in;
167         ilp_var_t     mem_in;
168         ilp_var_t     reg_out;
169         ilp_var_t     mem_out;
170         ilp_var_t     spill;
171 } spill_t;
172
173 static INLINE int
174 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
175 {
176         return chordal_has_class(si->chordal_env, irn);
177 }
178
179 #if 0
180 static int
181 cmp_remat(const void *a, const void *b)
182 {
183         const keyval_t *p = a;
184         const keyval_t *q = b;
185         const remat_t  *r = p->val;
186         const remat_t  *s = q->val;
187
188         assert(r && s);
189
190         return !(r == s || r->op == s->op);
191 }
192 #endif
193 static int
194 cmp_remat(const void *a, const void *b)
195 {
196         const remat_t  *r = a;
197         const remat_t  *s = a;
198
199         return !(r == s || r->op == s->op);
200 }
201
202 static int
203 cmp_spill(const void *a, const void *b, size_t size)
204 {
205         const spill_t *p = a;
206         const spill_t *q = b;
207
208 //      return !(p->irn == q->irn && p->bb == q->bb);
209         return !(p->irn == q->irn);
210 }
211
212 static keyval_t *
213 set_find_keyval(set * set, void * key)
214 {
215         keyval_t     query;
216
217         query.key = key;
218         return set_find(set, &query, sizeof(query), HASH_PTR(key));
219 }
220
221 static keyval_t *
222 set_insert_keyval(set * set, void * key, void * val)
223 {
224         keyval_t     query;
225
226         query.key = key;
227         query.val = val;
228         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
229 }
230
231 static defs_t *
232 set_find_def(set * set, ir_node * value)
233 {
234         defs_t     query;
235
236         query.value = value;
237         return set_find(set, &query, sizeof(query), HASH_PTR(value));
238 }
239
240 static defs_t *
241 set_insert_def(set * set, ir_node * value)
242 {
243         defs_t     query;
244
245         query.value = value;
246         query.spills = NULL;
247         query.remats = NULL;
248         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
249 }
250
251 static spill_t *
252 set_find_spill(set * set, ir_node * value)
253 {
254         spill_t     query;
255
256         query.irn = value;
257         return set_find(set, &query, sizeof(query), HASH_PTR(value));
258 }
259
260 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
261 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
262 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
263 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
264 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
265
266 static int
267 cmp_remat_info(const void *a, const void *b, size_t size)
268 {
269         const remat_info_t *p = a;
270         const remat_info_t *q = b;
271
272         return !(p->irn == q->irn);
273 }
274
275 static int
276 cmp_defs(const void *a, const void *b, size_t size)
277 {
278         const defs_t *p = a;
279         const defs_t *q = b;
280
281         return !(p->value == q->value);
282 }
283
284 static int
285 cmp_keyval(const void *a, const void *b, size_t size)
286 {
287         const keyval_t *p = a;
288         const keyval_t *q = b;
289
290         return !(p->key == q->key);
291 }
292
293 static double
294 execution_frequency(const ir_node * irn)
295 {
296 #define FUDGE 0.001
297 #ifndef EXECFREQ_LOOPDEPH
298         if(is_Block(irn)) {
299                 return get_block_execfreq(irn) + FUDGE;
300         } else {
301                 return get_block_execfreq(get_nodes_block(irn)) + FUDGE;
302         }
303 #else
304         if(is_Block(irn))
305                 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
306         else
307                 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
308 #endif
309 }
310
311 static double
312 get_cost(const spill_ilp_t * si, const ir_node * irn)
313 {
314         if(be_is_Spill(irn)) {
315                 return COST_STORE;
316         } else if(be_is_Reload(irn)){
317                 return COST_LOAD;
318         } else {
319                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
320         }
321
322 }
323
324 /**
325  * Checks, whether node and its operands have suitable reg classes
326  */
327 static INLINE int
328 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
329 {
330         int               n;
331         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
332         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
333
334 #if 0
335         if(!remat)
336                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
337 #endif
338
339         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
340                 ir_node        *op = get_irn_n(irn, n);
341                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
342
343 //              if(!remat)
344 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
345         }
346
347         return remat;
348 }
349
350 /**
351  * Try to create a remat from @p op with destination value @p dest_value
352  */
353 static INLINE remat_t *
354 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
355 {
356         remat_t  *remat = NULL;
357
358 //      if(!mode_is_datab(get_irn_mode(dest_value)))
359 //              return NULL;
360
361         if(dest_value == op) {
362                 const ir_node *proj = NULL;
363
364                 if(is_Proj(dest_value)) {
365                         op = get_irn_n(op, 0);
366                         proj = dest_value;
367                 }
368
369                 if(!is_rematerializable(si, op))
370                         return NULL;
371
372                 remat = obstack_alloc(si->obst, sizeof(*remat));
373                 remat->op = op;
374                 remat->cost = get_cost(si, op);
375                 remat->value = dest_value;
376                 remat->proj = proj;
377                 remat->inverse = 0;
378         } else {
379                 arch_inverse_t     inverse;
380                 int                n;
381
382                 /* get the index of the operand we want to retrieve by the inverse op */
383                 for (n = get_irn_arity(op)-1; n>=0; --n) {
384                         ir_node        *arg = get_irn_n(op, n);
385
386                         if(arg == dest_value) break;
387                 }
388                 if(n<0) return NULL;
389
390                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
391
392                 /* else ask the backend to give an inverse op */
393                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
394                         int   i;
395
396                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
397
398                         assert(inverse.n > 0 && "inverse op should have at least one node");
399
400                         for(i=inverse.n-1; i>=0; --i) {
401                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
402                         }
403
404                         if(inverse.n <= 2) {
405                                 remat = obstack_alloc(si->obst, sizeof(*remat));
406                                 remat->op = inverse.nodes[0];
407                                 remat->cost = inverse.costs;
408                                 remat->value = dest_value;
409                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
410                                 remat->inverse = 1;
411
412                                 assert(is_Proj(remat->proj));
413                         } else {
414                                 assert(0 && "I can not handle remats with more than 2 nodes");
415                         }
416                 }
417         }
418
419         if(remat) {
420                 if(remat->proj) {
421                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
422                 } else {
423                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
424                 }
425         }
426         return remat;
427 }
428
429
430 static INLINE void
431 add_remat(const spill_ilp_t * si, const remat_t * remat)
432 {
433         remat_info_t    *remat_info,
434                      query;
435         int              n;
436
437         assert(remat->op);
438         assert(remat->value);
439
440         query.irn = remat->value;
441         query.remats = NULL;
442         query.remats_by_operand = NULL;
443         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
444
445         if(remat_info->remats == NULL) {
446                 remat_info->remats = new_pset(cmp_remat, 4096);
447         }
448         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
449
450         /* insert the remat into the remats_be_operand set of each argument of the remat op */
451         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
452                 ir_node        *arg = get_irn_n(remat->op, n);
453
454                 query.irn = arg;
455                 query.remats = NULL;
456                 query.remats_by_operand = NULL;
457                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
458
459                 if(remat_info->remats_by_operand == NULL) {
460                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
461                 }
462                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
463         }
464 }
465
466 static int
467 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
468 {
469         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
470         int                i = 0;
471
472         while(edge) {
473                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
474                         ++i;
475                 }
476                 edge = get_irn_out_edge_next(irn, edge);
477         }
478
479         return i;
480 }
481
482 static INLINE void
483 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
484 {
485         int      n;
486         remat_t *remat;
487
488 #ifdef NO_SINGLE_USE_REMATS
489         if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
490 #else
491         if(has_reg_class(si, op)) {
492 #endif
493                 remat = get_remat_from_op(si, op, op);
494                 if(remat) {
495                         add_remat(si, remat);
496                 }
497         }
498
499 #ifdef COLLECT_INVERSE_REMATS
500         /* repeat the whole stuff for each remat retrieved by get_remat_from_op(op, arg)
501            for each arg */
502         for (n = get_irn_arity(op)-1; n>=0; --n) {
503                 ir_node        *arg = get_irn_n(op, n);
504
505                 if(has_reg_class(si, arg)) {
506                         /* try to get an inverse remat */
507                         remat = get_remat_from_op(si, arg, op);
508                         if(remat) {
509                                 add_remat(si, remat);
510                         }
511                 }
512         }
513 #endif
514
515 }
516
517 static INLINE int
518 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
519 {
520         ir_node *block;
521         ir_node *def_block = get_nodes_block(val);
522         int      ret;
523
524         if(val == pos)
525                 return 0;
526
527         /* if pos is at end of a basic block */
528         if(is_Block(pos)) {
529                 ret = (pos == def_block || block_dominates(def_block, pos));
530 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
531                 return ret;
532         }
533
534         /* else if this is a normal operation */
535         block = get_nodes_block(pos);
536         if(block == def_block) {
537                 if(!sched_is_scheduled(val)) return 1;
538
539                 ret = sched_comes_after(val, pos);
540 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
541                 return ret;
542         }
543
544         ret = block_dominates(def_block, block);
545 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
546         return ret;
547 }
548
549 static INLINE ir_node *
550 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
551 {
552     return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
553 }
554
555 /**
556  * Returns first non-Phi node of block @p bb
557  */
558 static INLINE ir_node *
559 sched_block_first_nonphi(const ir_node * bb)
560 {
561         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
562 }
563
564 static int
565 sched_skip_proj_predicator(const ir_node * irn, void * data)
566 {
567         return (is_Proj(irn));
568 }
569
570 static INLINE ir_node *
571 sched_next_nonproj(const ir_node * irn, int forward)
572 {
573         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
574 }
575
576 /**
577  * Returns next operation node (non-Proj) after @p irn
578  * or the basic block of this node
579  */
580 static INLINE ir_node *
581 sched_next_op(const ir_node * irn)
582 {
583         ir_node *next = sched_next(irn);
584
585         if(is_Block(next))
586                 return next;
587
588         return sched_next_nonproj(next, 1);
589 }
590
591 /**
592  * Returns previous operation node (non-Proj) before @p irn
593  * or the basic block of this node
594  */
595 static INLINE ir_node *
596 sched_prev_op(const ir_node * irn)
597 {
598         ir_node *prev = sched_prev(irn);
599
600         if(is_Block(prev))
601                 return prev;
602
603         return sched_next_nonproj(prev, 0);
604 }
605
606 static void
607 sched_put_after(ir_node * insert, ir_node * irn)
608 {
609         if(is_Block(insert)) {
610                 insert = sched_block_first_nonphi(insert);
611         } else {
612                 insert = sched_next_op(insert);
613         }
614         sched_add_before(insert, irn);
615 }
616
617 static void
618 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
619 {
620   if(is_Block(insert)) {
621           insert = sched_block_last_noncf(si, insert);
622   } else {
623           insert = sched_next_nonproj(insert, 0);
624           insert = sched_prev(insert);
625   }
626   sched_add_after(insert, irn);
627 }
628
629 /**
630  * Tells you whether a @p remat can be placed before the irn @p pos
631  */
632 static INLINE int
633 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
634 {
635         const ir_node   *op = remat->op;
636         const ir_node   *prev;
637         int        n,
638                            res = 1;
639
640         if(is_Block(pos)) {
641                 prev = sched_block_last_noncf(si, pos);
642                 prev = sched_next_nonproj(prev, 0);
643         } else {
644                 prev = sched_prev_op(pos);
645         }
646         /* do not remat if the rematted value is defined immediately before this op */
647         if(prev == remat->op) {
648                 return 0;
649         }
650
651 #if 0
652         /* this should be just fine, the following OP will be using this value, right? */
653
654         /* only remat AFTER the real definition of a value (?) */
655         if(!value_is_defined_before(si, pos, remat->value)) {
656 //              ir_fprintf(stderr, "error(not defined)");
657                 return 0;
658         }
659 #endif
660
661         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
662                 const ir_node   *arg = get_irn_n(op, n);
663
664 #ifdef NO_ENLARGE_L1V3N355
665                 if(has_reg_class(si, arg) && live) {
666                         res &= pset_find_ptr(live, arg)?1:0;
667                 } else {
668                         res &= value_is_defined_before(si, pos, arg);
669                 }
670 #else
671                 res &= value_is_defined_before(si, pos, arg);
672 #endif
673         }
674
675         return res;
676 }
677
678 /**
679  * Tells you whether a @p remat can be placed after the irn @p pos
680  */
681 static INLINE int
682 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
683 {
684         if(is_Block(pos)) {
685                 pos = sched_block_first_nonphi(pos);
686         } else {
687                 pos = sched_next_op(pos);
688         }
689
690         /* only remat AFTER the real definition of a value (?) */
691         if(!value_is_defined_before(si, pos, remat->value)) {
692                 return 0;
693         }
694
695         return can_remat_before(si, remat, pos, live);
696 }
697
698 /**
699  * Collect potetially rematerializable OPs
700  */
701 static void
702 walker_remat_collector(ir_node * irn, void * data)
703 {
704         spill_ilp_t    *si = data;
705
706         if(!is_Block(irn) && !is_Phi(irn)) {
707                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
708                 get_remats_from_op(si, irn);
709         }
710 }
711
712 /**
713  * Inserts a copy of @p irn before @p pos
714  */
715 static ir_node *
716 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
717 {
718         ir_node     *bb;
719         ir_node     *copy;
720
721         bb = is_Block(pos)?pos:get_nodes_block(pos);
722         copy = exact_copy(irn);
723
724         _set_phi_class(copy, NULL);
725         set_nodes_block(copy, bb);
726         sched_put_before(si, pos, copy);
727
728         return copy;
729 }
730
731 /**
732  * Inserts a copy of @p irn after @p pos
733  */
734 static ir_node *
735 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
736 {
737         ir_node     *bb;
738         ir_node     *copy;
739
740         bb = is_Block(pos)?pos:get_nodes_block(pos);
741         copy = exact_copy(irn);
742
743         _set_phi_class(copy, NULL);
744         set_nodes_block(copy, bb);
745         sched_put_after(pos, copy);
746
747         return copy;
748 }
749
750 static ir_node *
751 insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
752 {
753         char     buf[256];
754
755         if(can_remat_after(si, remat, pos, live)) {
756                 ir_node         *copy,
757                                                 *proj_copy;
758                 op_t            *op;
759
760                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
761
762                 copy = insert_copy_after(si, remat->op, pos);
763
764                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
765                 op = obstack_alloc(si->obst, sizeof(*op));
766                 op->is_remat = 1;
767                 op->attr.remat.remat = remat;
768                 op->attr.remat.pre = 0;
769                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(pos));
770
771                 set_irn_link(copy, op);
772                 pset_insert_ptr(si->all_possible_remats, copy);
773                 if(remat->proj) {
774                         proj_copy = insert_copy_after(si, remat->proj, copy);
775                         set_irn_n(proj_copy, 0, copy);
776                         set_irn_link(proj_copy, op);
777                         pset_insert_ptr(si->all_possible_remats, proj_copy);
778                 } else {
779                         proj_copy = NULL;
780                 }
781
782                 return copy;
783         }
784
785         return NULL;
786 }
787
788 static ir_node *
789 insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
790 {
791         char     buf[256];
792
793         if(can_remat_before(si, remat, pos, live)) {
794                 ir_node         *copy,
795                                                 *proj_copy;
796                 op_t            *op;
797
798                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
799
800                 copy = insert_copy_before(si, remat->op, pos);
801
802                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
803                 op = obstack_alloc(si->obst, sizeof(*op));
804                 op->is_remat = 1;
805                 op->attr.remat.remat = remat;
806                 op->attr.remat.pre = 1;
807                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(pos));
808
809                 set_irn_link(copy, op);
810                 pset_insert_ptr(si->all_possible_remats, copy);
811                 if(remat->proj) {
812                         proj_copy = insert_copy_after(si, remat->proj, copy);
813                         set_irn_n(proj_copy, 0, copy);
814                         set_irn_link(proj_copy, op);
815                         pset_insert_ptr(si->all_possible_remats, proj_copy);
816                 } else {
817                         proj_copy = NULL;
818                 }
819
820                 return copy;
821         }
822
823         return NULL;
824 }
825
826 static int
827 get_block_n_succs(const ir_node *block) {
828         const ir_edge_t *edge;
829
830         assert(edges_activated(current_ir_graph));
831
832         edge = get_block_succ_first(block);
833         if (! edge)
834                 return 0;
835
836         edge = get_block_succ_next(block, edge);
837         return edge ? 2 : 1;
838 }
839
840 static int
841 is_merge_edge(const ir_node * bb)
842 {
843 #ifdef GOODWIN_REDUCTION
844         return get_block_n_succs(bb) == 1;
845 #else
846         return 1;
847 #endif
848 }
849
850 static int
851 is_diverge_edge(const ir_node * bb)
852 {
853 #ifdef GOODWIN_REDUCTION
854         return get_Block_n_cfgpreds(bb) == 1;
855 #else
856         return 1;
857 #endif
858 }
859
860 static void
861 walker_regclass_copy_insertor(ir_node * irn, void * data)
862 {
863         spill_ilp_t    *si = data;
864
865         if(is_Phi(irn) && has_reg_class(si, irn)) {
866                 int n;
867
868                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
869                         ir_node  *phi_arg = get_irn_n(irn, n);
870                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
871
872                         if(!has_reg_class(si, phi_arg)) {
873                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
874                                 ir_node   *pos = sched_block_last_noncf(si, bb);
875                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
876
877                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
878                                 sched_add_after(pos, copy);
879                                 set_irn_n(irn, n, copy);
880
881                                 op->is_remat = 0;
882                                 op->attr.live_range.args.reloads = NULL;
883                                 op->attr.live_range.ilp = ILP_UNDEF;
884                                 set_irn_link(copy, op);
885                         }
886                 }
887         }
888 }
889
890
891 /**
892  * Insert (so far unused) remats into the irg to
893  * recompute the potential liveness of all values
894  */
895 static void
896 walker_remat_insertor(ir_node * bb, void * data)
897 {
898         spill_ilp_t    *si = data;
899         spill_bb_t     *spill_bb;
900         ir_node        *irn;
901         int             n;
902         irn_live_t     *li;
903         pset           *live = pset_new_ptr_default();
904
905         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
906
907         live_foreach(bb, li) {
908                 ir_node        *value = (ir_node *) li->irn;
909
910                 /* add remats at end of block */
911                 if (live_is_end(li) && has_reg_class(si, value)) {
912                         pset_insert_ptr(live, value);
913                 }
914         }
915
916         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
917         set_irn_link(bb, spill_bb);
918
919         irn = sched_last(bb);
920         while(!sched_is_end(irn)) {
921                 ir_node   *next;
922                 op_t      *op;
923                 pset      *args;
924                 ir_node   *arg;
925                 pset      *remat_args;
926
927                 next = sched_prev(irn);
928
929                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
930
931                 if(is_Phi(irn) || is_Proj(irn)) {
932                         op_t      *op;
933
934                         if(has_reg_class(si, irn)) {
935                                 pset_remove_ptr(live, irn);
936                         }
937
938                         op = obstack_alloc(si->obst, sizeof(*op));
939                         op->is_remat = 0;
940                         op->attr.live_range.args.reloads = NULL;
941                         op->attr.live_range.ilp = ILP_UNDEF;
942                         set_irn_link(irn, op);
943
944                         irn = next;
945                         continue;
946                 }
947
948                 op = obstack_alloc(si->obst, sizeof(*op));
949                 op->is_remat = 0;
950                 op->attr.live_range.ilp = ILP_UNDEF;
951                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
952                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
953                 set_irn_link(irn, op);
954
955                 args = pset_new_ptr_default();
956
957                 /* collect arguments of op */
958                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
959                         ir_node        *arg = get_irn_n(irn, n);
960
961                         pset_insert_ptr(args, arg);
962                 }
963
964                 /* set args of op already live in epilog */
965                 pset_foreach(args, arg) {
966                         if(has_reg_class(si, arg)) {
967                                 pset_insert_ptr(live, arg);
968                         }
969                 }
970                 /* delete defined value from live set */
971                 if(has_reg_class(si, irn)) {
972                         pset_remove_ptr(live, irn);
973                 }
974
975
976                 remat_args = pset_new_ptr_default();
977
978                 /* insert all possible remats before irn */
979                 pset_foreach(args, arg) {
980                         remat_info_t   *remat_info,
981                                                     query;
982                         remat_t        *remat;
983
984                         /* continue if the operand has the wrong reg class
985                          */
986                         if(!has_reg_class(si, arg))
987                                 continue;
988
989                         query.irn = arg;
990                         query.remats = NULL;
991                         query.remats_by_operand = NULL;
992                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
993
994                         if(!remat_info) {
995                                 continue;
996                         }
997
998                         if(remat_info->remats) {
999                                 pset_foreach(remat_info->remats, remat) {
1000                                         ir_node  *remat_irn = NULL;
1001
1002                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
1003 #ifdef REMAT_WHILE_LIVE
1004                                         if(pset_find_ptr(live, remat->value)) {
1005                                                 remat_irn = insert_remat_before(si, remat, irn, live);
1006                                         }
1007 #else
1008                                         remat_irn = insert_remat_before(si, remat, irn, live);
1009 #endif
1010                                         if(remat_irn) {
1011                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1012                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1013
1014                                                         if(!has_reg_class(si, remat_arg)) continue;
1015
1016                                                         pset_insert_ptr(remat_args, remat_arg);
1017                                                 }
1018                                         }
1019                                 }
1020                         }
1021                 }
1022
1023                 /* now we add remat args to op's args because they could also die at this op */
1024                 pset_foreach(args,arg) {
1025                         if(pset_find_ptr(remat_args, arg)) {
1026                                 pset_remove_ptr(remat_args, arg);
1027                         }
1028                 }
1029                 pset_foreach(remat_args,arg) {
1030                         pset_insert_ptr(args, arg);
1031                 }
1032
1033                 /* insert all possible remats after irn */
1034                 pset_foreach(args, arg) {
1035                         remat_info_t   *remat_info,
1036                                                     query;
1037                         remat_t        *remat;
1038
1039                         /* continue if the operand has the wrong reg class */
1040                         if(!has_reg_class(si, arg))
1041                                 continue;
1042
1043                         query.irn = arg;
1044                         query.remats = NULL;
1045                         query.remats_by_operand = NULL;
1046                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1047
1048                         if(!remat_info) {
1049                                 continue;
1050                         }
1051
1052                         /* do not place post remats after jumps */
1053                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
1054
1055                         if(remat_info->remats_by_operand) {
1056                                 pset_foreach(remat_info->remats_by_operand, remat) {
1057                                         /* do not insert remats producing the same value as one of the operands */
1058                                         if(!pset_find_ptr(args, remat->value)) {
1059                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1060 #ifdef REMAT_WHILE_LIVE
1061                                                 if(pset_find_ptr(live, remat->value)) {
1062                                                         insert_remat_after(si, remat, irn, live);
1063                                                 }
1064 #else
1065                                                 insert_remat_after(si, remat, irn, live);
1066 #endif
1067                                         }
1068                                 }
1069                         }
1070                 }
1071
1072                 del_pset(remat_args);
1073                 del_pset(args);
1074                 irn = next;
1075         }
1076
1077         live_foreach(bb, li) {
1078                 ir_node        *value = (ir_node *) li->irn;
1079
1080                 /* add remats at end if successor has multiple predecessors */
1081                 if(is_merge_edge(bb)) {
1082                         /* add remats at end of block */
1083                         if (live_is_end(li) && has_reg_class(si, value)) {
1084                                 remat_info_t   *remat_info,
1085                                                            query;
1086                                 remat_t        *remat;
1087
1088                                 query.irn = value;
1089                                 query.remats = NULL;
1090                                 query.remats_by_operand = NULL;
1091                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1092
1093                                 if(remat_info && remat_info->remats) {
1094                                         pset_foreach(remat_info->remats, remat) {
1095                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1096
1097                                                 insert_remat_before(si, remat, bb, NULL);
1098                                         }
1099                                 }
1100                         }
1101                 }
1102                 if(is_diverge_edge(bb)) {
1103                         /* add remat2s at beginning of block */
1104                         if ((live_is_in(li) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
1105                                 remat_info_t   *remat_info,
1106                                                            query;
1107                                 remat_t        *remat;
1108
1109                                 query.irn = value;
1110                                 query.remats = NULL;
1111                                 query.remats_by_operand = NULL;
1112                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1113
1114                                 if(remat_info && remat_info->remats) {
1115                                         pset_foreach(remat_info->remats, remat) {
1116                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at beginning of block %+F\n", remat->op, bb));
1117
1118                                                 /* put the remat here if all its args are available */
1119                                                 insert_remat_after(si, remat, bb, NULL);
1120
1121                                         }
1122                                 }
1123                         }
1124                 }
1125         }
1126 }
1127
1128 /**
1129  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1130  */
1131 static void
1132 luke_endwalker(ir_node * bb, void * data)
1133 {
1134         spill_ilp_t    *si = (spill_ilp_t*)data;
1135         irn_live_t     *li;
1136         pset           *live;
1137         pset           *use_end;
1138         char            buf[256];
1139         ilp_cst_t       cst;
1140         ir_node        *irn;
1141         spill_bb_t     *spill_bb = get_irn_link(bb);
1142
1143
1144         live = pset_new_ptr_default();
1145         use_end = pset_new_ptr_default();
1146
1147         live_foreach(bb, li) {
1148                 irn = (ir_node *) li->irn;
1149                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1150                         op_t      *op;
1151
1152                         pset_insert_ptr(live, irn);
1153                         op = get_irn_link(irn);
1154                         assert(!op->is_remat);
1155                 }
1156         }
1157
1158         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1159         /* their reg_out must always be set */
1160         sched_foreach_reverse(bb, irn) {
1161                 int   n;
1162
1163                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1164
1165                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1166                         ir_node        *irn_arg = get_irn_n(irn, n);
1167
1168                         if(has_reg_class(si, irn_arg)) {
1169                                 pset_insert_ptr(use_end, irn_arg);
1170                         }
1171                 }
1172         }
1173
1174         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1175         //cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1176         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1177
1178         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1179
1180         /* if this is a merge edge we can reload at the end of this block */
1181         if(is_merge_edge(bb)) {
1182                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1183         } else if(pset_count(use_end)){
1184                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1185         } else {
1186                 spill_bb->reloads = NULL;
1187         }
1188
1189         pset_foreach(live,irn) {
1190                 spill_t     query,
1191                                         *spill;
1192                 double      spill_cost;
1193
1194
1195                 /* handle values used by control flow nodes later separately */
1196                 if(pset_find_ptr(use_end, irn)) continue;
1197
1198                 query.irn = irn;
1199                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1200
1201                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(bb);
1202
1203                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1204                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1205                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1206
1207                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1208                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1209
1210                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1211                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1212
1213                 if(is_merge_edge(bb)) {
1214                         ilp_var_t   reload;
1215                         ilp_cst_t   rel_cst;
1216
1217                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1218                         reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(bb));
1219                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1220
1221                         /* reload <= mem_out */
1222                         rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1223                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1224                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1225         }
1226
1227                 spill->reg_in = ILP_UNDEF;
1228                 spill->mem_in = ILP_UNDEF;
1229         }
1230
1231         pset_foreach(use_end,irn) {
1232                 spill_t     query,
1233                                         *spill;
1234                 double      spill_cost;
1235                 ilp_cst_t   end_use_req,
1236                                         rel_cst;
1237                 ilp_var_t   reload;
1238
1239                 query.irn = irn;
1240                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1241
1242                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(bb);
1243
1244                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1245                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1246                 /* if irn is used at the end of the block, then it is live anyway */
1247                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1248
1249                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1250                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1251
1252                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1253                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1254
1255                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1256                 reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(bb));
1257                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1258
1259                 /* reload <= mem_out */
1260                 rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1261                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1262                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1263
1264                 spill->reg_in = ILP_UNDEF;
1265                 spill->mem_in = ILP_UNDEF;
1266
1267                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1268                 end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
1269                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1270         }
1271
1272         del_pset(live);
1273         del_pset(use_end);
1274 }
1275
1276 static ir_node *
1277 next_post_remat(const ir_node * irn)
1278 {
1279         op_t      *op;
1280
1281         if(is_Block(irn)) {
1282                 irn = sched_block_first_nonphi(irn);
1283         } else {
1284                 irn = sched_next_op(irn);
1285         }
1286
1287         if(sched_is_end(irn))
1288                 return NULL;
1289
1290         op = (op_t*)get_irn_link(irn);
1291         if(op->is_remat && !op->attr.remat.pre) {
1292                 return irn;
1293         }
1294
1295         return NULL;
1296 }
1297
1298
1299 static ir_node *
1300 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1301 {
1302         op_t      *op;
1303         ir_node   *ret;
1304
1305         if(is_Block(irn)) {
1306                 ret = sched_block_last_noncf(si, irn);
1307                 ret = sched_next(ret);
1308                 ret = sched_prev_op(ret);
1309         } else {
1310                 ret = sched_prev_op(irn);
1311         }
1312
1313         if(sched_is_end(ret) || is_Phi(ret))
1314                 return NULL;
1315
1316         op = (op_t*)get_irn_link(ret);
1317         if(op->is_remat && op->attr.remat.pre) {
1318                 return ret;
1319         }
1320
1321         return NULL;
1322 }
1323
1324 /**
1325  * Find a remat of value @p value in the epilog of @p pos
1326  */
1327 static ir_node *
1328 find_post_remat(const ir_node * value, const ir_node * pos)
1329 {
1330         while((pos = next_post_remat(pos)) != NULL) {
1331                 op_t   *op;
1332
1333                 op = get_irn_link(pos);
1334                 assert(op->is_remat && !op->attr.remat.pre);
1335
1336                 if(op->attr.remat.remat->value == value)
1337                         return (ir_node*)pos;
1338
1339 #if 0
1340         const ir_edge_t *edge;
1341                 foreach_out_edge(pos, edge) {
1342                         ir_node   *proj = get_edge_src_irn(edge);
1343                         assert(is_Proj(proj));
1344                 }
1345 #endif
1346
1347         }
1348
1349         return NULL;
1350 }
1351
1352 static spill_t *
1353 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1354 {
1355         spill_bb_t  *spill_bb = get_irn_link(bb);
1356         spill_t     *spill,
1357                                  query;
1358         char         buf[256];
1359
1360         query.irn = irn;
1361         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1362         if(!spill) {
1363                 double   spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(bb);
1364
1365                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1366
1367                 spill->reg_out = ILP_UNDEF;
1368                 spill->reg_in  = ILP_UNDEF;
1369                 spill->mem_in  = ILP_UNDEF;
1370
1371                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1372                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1373
1374                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1375                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1376         }
1377
1378         return spill;
1379 }
1380
1381 static void
1382 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1383 {
1384         irn_live_t     *li;
1385         ir_node        *irn;
1386
1387         live_foreach(bb, li) {
1388                 irn = (ir_node *) li->irn;
1389
1390                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1391                         pset_insert_ptr(live, irn);
1392                 }
1393         }
1394
1395         irn = sched_last(bb);
1396
1397         /* all values eaten by control flow operations are also live until the end of the block */
1398         sched_foreach_reverse(bb, irn) {
1399                 int  i;
1400
1401                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1402
1403                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1404                         ir_node *arg = get_irn_n(irn,i);
1405
1406                         if(has_reg_class(si, arg)) {
1407                                 pset_insert_ptr(live, arg);
1408                         }
1409                 }
1410         }
1411 }
1412
1413 /**
1414  *  Inserts ILP-constraints and variables for memory copying before the given position
1415  */
1416 static void
1417 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1418 {
1419         const ir_node    *succ;
1420         const ir_edge_t  *edge;
1421         spill_bb_t       *spill_bb = get_irn_link(block);
1422         ir_node          *phi;
1423         int               pos;
1424         ilp_cst_t         cst;
1425         ilp_var_t         copyreg;
1426         char              buf[256];
1427         ir_node          *tmp;
1428
1429
1430         assert(edges_activated(current_ir_graph));
1431
1432         edge = get_block_succ_first(block);
1433         if(!edge) return;
1434
1435         succ = edge->src;
1436         pos = edge->pos;
1437
1438         edge = get_block_succ_next(block, edge);
1439         /* next block can only contain phis, if this is a merge edge */
1440         if(edge) return;
1441
1442         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1443         copyreg = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1444
1445         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1446         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1447
1448         pset_foreach(live, tmp) {
1449                 spill_t  *spill;
1450 #if 0
1451                 op_t  *op = get_irn_link(irn);
1452                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1453 #endif
1454                 spill = set_find_spill(spill_bb->ilp, tmp);
1455                 assert(spill);
1456
1457                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1458         }
1459         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1460
1461         sched_foreach(succ, phi) {
1462                 const ir_node  *to_copy;
1463                 op_t           *to_copy_op;
1464                 spill_t        *to_copy_spill;
1465                 op_t           *phi_op = get_irn_link(phi);
1466                 ilp_var_t       reload = ILP_UNDEF;
1467
1468
1469                 if(!is_Phi(phi)) break;
1470                 if(!has_reg_class(si, phi)) continue;
1471
1472                 to_copy = get_irn_n(phi, pos);
1473
1474                 to_copy_op = get_irn_link(to_copy);
1475
1476                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1477                 assert(to_copy_spill);
1478
1479                 if(spill_bb->reloads) {
1480                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1481
1482                         if(keyval) {
1483                                 reload = PTR_TO_INT(keyval->val);
1484                         }
1485                 }
1486
1487                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N", block, to_copy);
1488                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1489
1490                 /* copy - reg_out - reload - remat - live_range <= 0 */
1491                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1492                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1493                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1494                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1495                 foreach_pre_remat(si, block, tmp) {
1496                         op_t     *remat_op = get_irn_link(tmp);
1497                         if(remat_op->attr.remat.remat->value == to_copy) {
1498                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1499                         }
1500                 }
1501
1502                 ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N", block, to_copy);
1503                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1504
1505                 /* copy - reg_out - copyreg <= 0 */
1506                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1507                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1508                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1509         }
1510 }
1511
1512
1513 /**
1514  * Walk all irg blocks and emit this ILP
1515  */
1516 static void
1517 luke_blockwalker(ir_node * bb, void * data)
1518 {
1519         spill_ilp_t    *si = (spill_ilp_t*)data;
1520         ir_node        *irn;
1521         pset           *live;
1522         char            buf[256];
1523         ilp_cst_t       cst;
1524         spill_bb_t     *spill_bb = get_irn_link(bb);
1525         ir_node        *tmp;
1526         spill_t        *spill;
1527         pset           *defs = pset_new_ptr_default();
1528
1529
1530         live = pset_new_ptr_default();
1531
1532         /****************************************
1533          *      B A S I C  B L O C K  E N D
1534          ***************************************/
1535
1536
1537         /* init live values at end of block */
1538         get_live_end(si, bb, live);
1539
1540         pset_foreach(live, irn) {
1541                 op_t           *op;
1542                 ilp_var_t       reload = ILP_UNDEF;
1543
1544                 spill = set_find_spill(spill_bb->ilp, irn);
1545                 assert(spill);
1546
1547                 if(spill_bb->reloads) {
1548                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1549
1550                         if(keyval) {
1551                                 reload = PTR_TO_INT(keyval->val);
1552                         }
1553                 }
1554
1555                 op = get_irn_link(irn);
1556                 assert(!op->is_remat);
1557
1558                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1559                 op->attr.live_range.ilp = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1560                 op->attr.live_range.op = bb;
1561
1562                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1563                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1564
1565                 /* reg_out - reload - remat - live_range <= 0 */
1566                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1567                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1568                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1569                 foreach_pre_remat(si, bb, tmp) {
1570                         op_t     *remat_op = get_irn_link(tmp);
1571                         if(remat_op->attr.remat.remat->value == irn) {
1572                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1573                         }
1574                 }
1575                 /* maybe we should also assure that reg_out >= live_range etc. */
1576         }
1577
1578 #ifndef NO_MEMCOPIES
1579         insert_mem_copy_position(si, live, bb);
1580 #endif
1581
1582         /*
1583          * start new live ranges for values used by remats at end of block
1584          * and assure the remat args are available
1585          */
1586         foreach_pre_remat(si, bb, tmp) {
1587                 op_t     *remat_op = get_irn_link(tmp);
1588                 int       n;
1589
1590                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1591                         ir_node        *remat_arg = get_irn_n(tmp, n);
1592                         op_t           *arg_op = get_irn_link(remat_arg);
1593                         ilp_var_t       prev_lr;
1594
1595                         if(!has_reg_class(si, remat_arg)) continue;
1596
1597                         /* if value is becoming live through use by remat */
1598                         if(!pset_find_ptr(live, remat_arg)) {
1599                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1600                                 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1601
1602                                 arg_op->attr.live_range.ilp = prev_lr;
1603                                 arg_op->attr.live_range.op = bb;
1604
1605                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1606
1607                                 pset_insert_ptr(live, remat_arg);
1608                                 add_to_spill_bb(si, bb, remat_arg);
1609                         }
1610
1611                         /* remat <= live_rang(remat_arg) [ + reload(remat_arg) ] */
1612                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1613                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1614
1615                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1616                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1617
1618                         /* use reload placed for this argument */
1619                         if(spill_bb->reloads) {
1620                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1621
1622                                 if(keyval) {
1623                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1624
1625                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1626                                 }
1627                         }
1628                 }
1629         }
1630         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1631
1632
1633
1634
1635         /**************************************
1636          *    B A S I C  B L O C K  B O D Y
1637          **************************************/
1638
1639         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1640                 op_t       *op;
1641                 op_t       *tmp_op;
1642                 int         n,
1643                                         u = 0,
1644                                         d = 0;
1645                 ilp_cst_t       check_pre,
1646                                         check_post;
1647                 set        *args;
1648                 pset       *used;
1649                 pset       *remat_defs;
1650                 keyval_t   *keyval;
1651
1652                 /* iterate only until first phi */
1653                 if(is_Phi(irn))
1654                         break;
1655
1656                 op = get_irn_link(irn);
1657                 /* skip remats */
1658                 if(op->is_remat) continue;
1659                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1660
1661                 /* collect defined values */
1662                 if(has_reg_class(si, irn)) {
1663                         pset_insert_ptr(defs, irn);
1664                 }
1665
1666                 /* skip projs */
1667                 if(is_Proj(irn)) continue;
1668
1669                 /*
1670                  * init set of irn's arguments
1671                  * and all possibly used values around this op
1672                  * and values defined by post remats
1673                  */
1674                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1675                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1676                 remat_defs = pset_new_ptr(pset_count(live));
1677
1678                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1679                         ir_node        *irn_arg = get_irn_n(irn, n);
1680                         if(has_reg_class(si, irn_arg)) {
1681                                 set_insert_keyval(args, irn_arg, (void*)n);
1682                                 pset_insert_ptr(used, irn_arg);
1683                         }
1684                 }
1685                 foreach_post_remat(irn, tmp) {
1686                         op_t    *remat_op = get_irn_link(tmp);
1687
1688                         pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1689
1690                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1691                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1692                                 if(has_reg_class(si, remat_arg)) {
1693                                         pset_insert_ptr(used, remat_arg);
1694                                 }
1695                         }
1696                 }
1697                 foreach_pre_remat(si, irn, tmp) {
1698                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1699                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1700                                 if(has_reg_class(si, remat_arg)) {
1701                                         pset_insert_ptr(used, remat_arg);
1702                                 }
1703                         }
1704                 }
1705
1706                 /**********************************
1707                  *   I N  E P I L O G  O F  irn
1708                  **********************************/
1709
1710                 /* ensure each dying value is used by only one post remat */
1711                 pset_foreach(live, tmp) {
1712                         ir_node     *value = tmp;
1713                         op_t        *value_op = get_irn_link(value);
1714                         ir_node     *remat;
1715                         int          n_remats = 0;
1716
1717                         cst = ILP_UNDEF;
1718                         foreach_post_remat(irn, remat) {
1719                                 op_t  *remat_op = get_irn_link(remat);
1720
1721                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1722                                         ir_node   *remat_arg = get_irn_n(remat, n);
1723
1724                                         /* if value is used by this remat add it to constraint */
1725                                         if(remat_arg == value) {
1726                                                 if(n_remats == 0) {
1727                                                         /* sum remat2s <= 1 + n_remats*live_range */
1728                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1729                                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1.0);
1730                                                 }
1731
1732                                                 n_remats++;
1733                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1734                                                 break;
1735                                         }
1736                                 }
1737                         }
1738
1739                         if(value_op->attr.live_range.ilp != ILP_UNDEF && cst != ILP_UNDEF) {
1740                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1741                         }
1742                 }
1743
1744
1745
1746                 /* new live ranges for values from L\U defined by post remats */
1747                 pset_foreach(live, tmp) {
1748                         ir_node     *value = tmp;
1749                         op_t        *value_op = get_irn_link(value);
1750
1751                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
1752                                 ilp_var_t    prev_lr = ILP_UNDEF;
1753                                 ir_node     *remat;
1754
1755                                 if(pset_find_ptr(remat_defs, value)) {
1756
1757                                         /* next_live_range <= prev_live_range + sum remat2s */
1758                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1759                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1760
1761                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1762                                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1763
1764                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1765                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1766
1767                                         foreach_post_remat(irn, remat) {
1768                                                 op_t        *remat_op = get_irn_link(remat);
1769
1770                                                 /* if value is being rematerialized by this remat */
1771                                                 if(value == remat_op->attr.remat.remat->value) {
1772                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1773                                                 }
1774                                         }
1775
1776                                         value_op->attr.live_range.ilp = prev_lr;
1777                                         value_op->attr.live_range.op = irn;
1778                                 }
1779                         }
1780                 }
1781
1782                 /* requirements for post remats and start live ranges from L/U' for values dying here */
1783                 foreach_post_remat(irn, tmp) {
1784                         op_t        *remat_op = get_irn_link(tmp);
1785                         int          n;
1786
1787                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1788                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1789                                 op_t           *arg_op = get_irn_link(remat_arg);
1790
1791                                 if(!has_reg_class(si, remat_arg)) continue;
1792
1793                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
1794                                 if(!pset_find_ptr(used, remat_arg)) {
1795                                         /* remat <= live_range(remat_arg) */
1796                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
1797                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1798
1799                                         /* if value is becoming live through use by remat2 */
1800                                         if(!pset_find_ptr(live, remat_arg)) {
1801                                                 ilp_var_t     lr;
1802
1803                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1804                                                 lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1805
1806                                                 arg_op->attr.live_range.ilp = lr;
1807                                                 arg_op->attr.live_range.op = irn;
1808
1809                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
1810
1811                                                 pset_insert_ptr(live, remat_arg);
1812                                                 add_to_spill_bb(si, bb, remat_arg);
1813                                         }
1814
1815                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1816                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1817                                 }
1818                         }
1819                 }
1820
1821                 d = pset_count(defs);
1822                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
1823
1824                 /* count how many regs irn needs for arguments */
1825                 u = set_count(args);
1826
1827
1828                 /* check the register pressure in the epilog */
1829                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
1830                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
1831                 check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
1832
1833                 /* add L\U' to check_post */
1834                 pset_foreach(live, tmp) {
1835                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
1836                                 /* if a live value is not used by irn */
1837                                 tmp_op = get_irn_link(tmp);
1838                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
1839                         }
1840                 }
1841
1842                 /***********************************************************
1843                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
1844                  **********************************************************/
1845
1846
1847                 pset_foreach(used, tmp) {
1848                         ilp_var_t       prev_lr;
1849                         ilp_var_t       post_use;
1850                         int             p = 0;
1851                         spill_t        *spill;
1852                         ir_node        *arg = tmp;
1853                         op_t           *arg_op = get_irn_link(arg);
1854                         ir_node        *remat;
1855
1856                         spill = add_to_spill_bb(si, bb, arg);
1857
1858                         /* new live range for each used value */
1859                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
1860                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1861
1862                         /* the epilog stuff - including post_use, check_post, check_post_remat */
1863                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
1864                         post_use = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1865
1866                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
1867
1868                         /* arg is live throughout epilog if the next live_range is in a register */
1869                         if(pset_find_ptr(live, arg)) {
1870                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
1871
1872                                 /* post_use >= next_lr + remat */
1873                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1874                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1875                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
1876                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1877
1878                         }
1879
1880                         /* if value is not an arg of op and not possibly defined by post remat
1881                          * then it may only die and not become live
1882                          */
1883                         if(!set_find_keyval(args, arg)) {
1884                                 /* post_use <= prev_lr */
1885                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
1886                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1887                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1888                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1889
1890                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
1891                                         /* next_lr <= prev_lr */
1892                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
1893                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1894                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1895                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1896                                 }
1897                         }
1898
1899
1900
1901                         /* forall post remat which use arg add a similar cst */
1902                         foreach_post_remat(irn, remat) {
1903                                 int      n;
1904
1905                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
1906                                         ir_node    *remat_arg = get_irn_n(remat, n);
1907                                         op_t       *remat_op = get_irn_link(remat);
1908
1909                                         if(remat_arg == arg) {
1910                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
1911
1912                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1913                                                 cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
1914                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1915                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1916                                         }
1917                                 }
1918                         }
1919
1920                         /* new live range begins for each used value */
1921                         arg_op->attr.live_range.ilp = prev_lr;
1922                         arg_op->attr.live_range.op = irn;
1923
1924                         /*if(!pset_find_ptr(live, arg)) {
1925                                 pset_insert_ptr(live, arg);
1926                                 add_to_spill_bb(si, bb, arg);
1927                         }*/
1928                         pset_insert_ptr(live, arg);
1929
1930                 }
1931
1932                 /* just to be sure */
1933                 check_post = ILP_UNDEF;
1934
1935
1936
1937
1938                 /******************
1939                  *   P R O L O G
1940                  ******************/
1941
1942                 /* check the register pressure in the prolog */
1943                 /* sum_{L\U} lr <= k - |U| */
1944                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
1945                 check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - u);
1946
1947                 /* for the prolog remove defined values from the live set */
1948                 pset_foreach(defs, tmp) {
1949                         pset_remove_ptr(live, tmp);
1950                 }
1951
1952                 /***********************************************************
1953                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
1954                  **********************************************************/
1955
1956
1957                 set_foreach(args, keyval) {
1958                         spill_t        *spill;
1959                         ir_node        *arg = keyval->key;
1960                         int             i = PTR_TO_INT(keyval->val);
1961                         op_t           *arg_op = get_irn_link(arg);
1962
1963                         spill = set_find_spill(spill_bb->ilp, arg);
1964                         assert(spill);
1965
1966                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
1967                         op->attr.live_range.args.reloads[i] = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(bb));
1968
1969                         /* reload <= mem_out */
1970                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
1971                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1972                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1973                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
1974
1975                         /* requirement: arg must be in register for use */
1976                         /* reload + remat + live_range == 1 */
1977                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
1978                         cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
1979
1980                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1981                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1982                         foreach_pre_remat(si, irn, tmp) {
1983                                 op_t     *remat_op = get_irn_link(tmp);
1984                                 if(remat_op->attr.remat.remat->value == arg) {
1985                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1986                                 }
1987                         }
1988                 }
1989
1990                 /* iterate over L\U */
1991                 pset_foreach(live, tmp) {
1992                         if(!set_find_keyval(args, tmp)) {
1993                                 /* if a live value is not used by irn */
1994                                 tmp_op = get_irn_link(tmp);
1995                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
1996                         }
1997                 }
1998
1999
2000                 /* requirements for remats */
2001                 /* start new live ranges for values used by remats */
2002                 foreach_pre_remat(si, irn, tmp) {
2003                         op_t        *remat_op = get_irn_link(tmp);
2004                         int          n;
2005
2006                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2007                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2008                                 op_t           *arg_op = get_irn_link(remat_arg);
2009                                 ilp_var_t       prev_lr;
2010
2011                                 if(!has_reg_class(si, remat_arg)) continue;
2012
2013                                 /* remat <= live_rang(remat_arg) [ + reload(remat_arg) ] */
2014                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2015                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2016
2017                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2018                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2019
2020                                 /* if remat arg is also used by current op then we can use reload placed for this argument */
2021                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2022                                         int    index = (int)keyval->val;
2023
2024                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2025                                 }
2026                         }
2027                 }
2028
2029
2030
2031
2032                 /*************************
2033                  *  D O N E  W I T H  O P
2034                  *************************/
2035
2036                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2037
2038                 pset_foreach(live, tmp) {
2039                         assert(has_reg_class(si, tmp));
2040                 }
2041
2042                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2043                         ir_node        *arg = get_irn_n(irn, n);
2044
2045                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2046                 }
2047
2048                 del_pset(remat_defs);
2049                 del_pset(used);
2050                 del_set(args);
2051                 del_pset(defs);
2052                 defs = pset_new_ptr_default();
2053         }
2054
2055
2056
2057         /***************************************
2058          *   B E G I N N I N G  O F  B L O C K
2059          ***************************************/
2060
2061
2062         /* we are now at the beginning of the basic block, there are only \Phis in front of us */
2063         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2064
2065         pset_foreach(live, irn) {
2066                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2067         }
2068
2069         /* construct mem_outs for all values */
2070
2071         set_foreach(spill_bb->ilp, spill) {
2072                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2073                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2074
2075                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2076                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2077
2078                 if(pset_find_ptr(live, spill->irn)) {
2079                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2080
2081                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2082                         spill->mem_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2083                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2084
2085                         if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2086                                 int   n;
2087                                 op_t *op = get_irn_link(spill->irn);
2088
2089                                 /* do we have to copy a phi argument? */
2090                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2091                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2092
2093                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2094                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2095                                         double          freq=0.0;
2096                                         int             m;
2097                                         ilp_var_t       var;
2098
2099
2100                                         /* argument already done? */
2101                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2102
2103                                         /* get sum of execution frequencies of blocks with the same phi argument */
2104                                         for(m=n; m>=0; --m) {
2105                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2106
2107                                                 if(arg==arg2) {
2108                                                         freq += execution_frequency(get_Block_cfgpred_block(bb, m));
2109                                                 }
2110                                         }
2111
2112                                         /* copies are not for free */
2113                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2114                                         var = lpp_add_var(si->lpp, buf, lpp_binary, COST_STORE * freq);
2115
2116                                         for(m=n; m>=0; --m) {
2117                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2118
2119                                                 if(arg==arg2) {
2120                                                         op->attr.live_range.args.copies[m] = var;
2121                                                 }
2122                                         }
2123
2124                                         /* copy <= mem_in */
2125                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2126                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2127                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2128                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2129                                 }
2130                         }
2131                 }
2132         }
2133
2134
2135         /* L\U is empty at bb start */
2136         /* arg is live throughout epilog if it is reg_in into this block */
2137
2138         /* check the register pressure at the beginning of the block
2139          * including remats
2140          */
2141         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2142         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
2143
2144         pset_foreach(live, irn) {
2145         ilp_cst_t  nospill;
2146
2147                 spill = set_find_spill(spill_bb->ilp, irn);
2148                 assert(spill);
2149
2150                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2151                 spill->reg_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2152
2153                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2154
2155                 /* spill + mem_in <= 1 */
2156                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2157                 nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2158
2159                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2160                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2161
2162         }
2163         foreach_post_remat(bb, irn) {
2164                 op_t     *remat_op = get_irn_link(irn);
2165
2166                 DBG((si->dbg, LEVEL_4, "\t  next post remat: %+F\n", irn));
2167                 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
2168
2169                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2170         }
2171
2172         /* forall post remats add requirements */
2173         foreach_post_remat(bb, tmp) {
2174                 int         n;
2175
2176                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2177                         ir_node    *remat_arg = get_irn_n(tmp, n);
2178                         op_t       *remat_op = get_irn_link(tmp);
2179
2180                         if(!has_reg_class(si, remat_arg)) continue;
2181
2182                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2183                         assert(spill);
2184
2185                         /* remat <= reg_in_argument */
2186                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2187                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2188                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2189                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2190                 }
2191         }
2192
2193         /* mem_in/reg_in for live_in values, especially phis and their arguments */
2194         pset_foreach(live, irn) {
2195                 int          p = 0,
2196                                          n;
2197
2198                 spill = set_find_spill(spill_bb->ilp, irn);
2199                 assert(spill && spill->irn == irn);
2200
2201                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2202                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2203                                 ilp_cst_t       mem_in,
2204                                                                 reg_in;
2205                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2206                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2207                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2208                                 spill_t        *spill_p;
2209
2210                                 /* although the phi is in the right regclass one or more of
2211                                  * its arguments can be in a different one or at least to
2212                                  * ignore
2213                                  */
2214                                 if(has_reg_class(si, phi_arg)) {
2215                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2216                                         mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2217                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2218                                         reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2219
2220                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2221                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2222
2223                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2224                                         assert(spill_p);
2225
2226                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2227                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2228                                 }
2229                         }
2230                 } else {
2231                         /* else assure the value arrives on all paths in the same resource */
2232
2233                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2234                                 ilp_cst_t       mem_in,
2235                                                                 reg_in;
2236                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2237                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2238                                 spill_t        *spill_p;
2239
2240                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2241                                 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2242                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2243                                 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2244
2245                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2246                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2247
2248                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2249                                 assert(spill_p);
2250
2251                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2252                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2253                         }
2254                 }
2255         }
2256
2257         /* first live ranges from reg_ins */
2258         pset_foreach(live, irn) {
2259                 op_t      *op = get_irn_link(irn);
2260
2261                 spill = set_find_spill(spill_bb->ilp, irn);
2262                 assert(spill && spill->irn == irn);
2263
2264                 ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2265                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2266                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2267                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2268
2269                 foreach_post_remat(bb, tmp) {
2270                         op_t     *remat_op = get_irn_link(tmp);
2271
2272                         if(remat_op->attr.remat.remat->value == irn) {
2273                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2274                         }
2275                 }
2276         }
2277
2278         /* walk forward now and compute constraints for placing spills */
2279         /* this must only be done for values that are not defined in this block */
2280         /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
2281         pset_foreach(live, irn) {
2282                 /*
2283                  * if value is defined in this block we can anways place the spill directly after the def
2284                  *    -> no constraint necessary
2285                  */
2286                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) continue;
2287
2288
2289                 spill = set_find_spill(spill_bb->ilp, irn);
2290                 assert(spill);
2291
2292                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2293                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2294
2295                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2296                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2297
2298                 if(!is_Phi(irn)) {
2299                         sched_foreach_op(bb, tmp) {
2300                                 op_t   *op = get_irn_link(tmp);
2301
2302                                 if(is_Phi(tmp)) continue;
2303                                 assert(!is_Proj(tmp));
2304
2305                                 if(op->is_remat) {
2306                                         ir_node   *value = op->attr.remat.remat->value;
2307
2308                                         if(value == irn) {
2309                                                 /* only collect remats up to the first use of a value */
2310                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2311                                         }
2312                                 } else {
2313                                         int   n;
2314
2315                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2316                                                 ir_node    *arg = get_irn_n(tmp, n);
2317
2318                                                 if(arg == irn) {
2319                                                         /* if a value is used stop collecting remats */
2320                                                         cst = ILP_UNDEF;
2321                                                 }
2322                                                 break;
2323                                         }
2324                                 }
2325                                 if(cst == ILP_UNDEF) break;
2326                         }
2327                 }
2328         }
2329
2330         del_pset(live);
2331 }
2332
2333 typedef struct _irnlist_t {
2334         struct list_head   list;
2335         ir_node           *irn;
2336 } irnlist_t;
2337
2338 typedef struct _interference_t {
2339         struct list_head    blocklist;
2340         ir_node            *a;
2341         ir_node            *b;
2342 } interference_t;
2343
2344 static int
2345 cmp_interference(const void *a, const void *b, size_t size)
2346 {
2347         const interference_t *p = a;
2348         const interference_t *q = b;
2349
2350         return !(p->a == q->a && p->b == q->b);
2351 }
2352
2353 static interference_t *
2354 set_find_interference(set * set, ir_node * a, ir_node * b)
2355 {
2356         interference_t     query;
2357
2358         query.a = (a>b)?a:b;
2359         query.b = (a>b)?b:a;
2360
2361         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2362 }
2363
2364 static interference_t *
2365 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2366 {
2367         interference_t     query,
2368                                           *result;
2369         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2370
2371         list->irn = bb;
2372
2373         result = set_find_interference(set, a, b);
2374         if(result) {
2375
2376                 list_add(&list->list, &result->blocklist);
2377                 return result;
2378         }
2379
2380         query.a = (a>b)?a:b;
2381         query.b = (a>b)?b:a;
2382
2383         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2384
2385         INIT_LIST_HEAD(&result->blocklist);
2386         list_add(&list->list, &result->blocklist);
2387
2388         return result;
2389 }
2390
2391 static int
2392 values_interfere_in_block(ir_node * bb, ir_node * a, ir_node * b)
2393 {
2394         const ir_edge_t *edge;
2395
2396         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2397                 /* both values are live in, so they interfere */
2398                 return 1;
2399         }
2400
2401         /* ensure a dominates b */
2402         if(value_dominates(b,a)) {
2403                 const ir_node * t;
2404                 t = b;
2405                 b = a;
2406                 a = t;
2407         }
2408         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2409
2410
2411         /* the following code is stolen from bera.c */
2412         if(is_live_end(bb, a))
2413                 return 1;
2414
2415         foreach_out_edge(a, edge) {
2416                 const ir_node *user = edge->src;
2417                 if(get_nodes_block(user) == bb
2418                                 && !is_Phi(user)
2419                                 && b != user
2420                                 && value_dominates(b, user))
2421                         return 1;
2422         }
2423
2424         return 0;
2425 }
2426
2427 /**
2428  * Walk all irg blocks and collect interfering values inside of phi classes
2429  */
2430 static void
2431 luke_interferencewalker(ir_node * bb, void * data)
2432 {
2433         spill_ilp_t    *si = (spill_ilp_t*)data;
2434         irn_live_t     *li1,
2435                        *li2;
2436
2437         live_foreach(bb, li1) {
2438                 ir_node        *a = (ir_node *) li1->irn;
2439                 op_t           *a_op = get_irn_link(a);
2440
2441                 if(a_op->is_remat) continue;
2442
2443                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2444                 if (has_reg_class(si, a) && get_phi_class(a)) {
2445                         for(li2=li1->next; li2; li2 = li2->next) {
2446                                 ir_node        *b = (ir_node *) li2->irn;
2447                                 op_t           *b_op = get_irn_link(b);
2448
2449                                 if(b_op->is_remat) continue;
2450
2451                                 /* a and b are only interesting if they are in the same phi class */
2452                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2453                                         if(values_interfere_in_block(bb, a, b)) {
2454                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2455                                                 set_insert_interference(si, si->interferences, a, b, bb);
2456                                         }
2457                                 }
2458                         }
2459                 }
2460         }
2461 }
2462
2463 static unsigned int copy_path_id = 0;
2464
2465 static void
2466 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2467 {
2468         ilp_cst_t  cst;
2469         ilp_var_t  copy;
2470         char       buf[256];
2471         void      *ptr;
2472
2473         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2474         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2475
2476         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2477
2478         pset_foreach(copies, ptr) {
2479                 copy = PTR_TO_INT(ptr);
2480                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2481         }
2482 }
2483
2484 /**
2485  * @parameter copies   contains a path of copies which lead us to irn
2486  * @parameter visited  contains a set of nodes already visited on this path
2487  */
2488 static void
2489 find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2490 {
2491         ir_edge_t *edge;
2492         op_t      *op = get_irn_link(irn);
2493
2494         if(op->is_remat) return;
2495
2496         pset_insert_ptr(visited, irn);
2497
2498         if(is_Phi(irn)) {
2499                 int    n;
2500
2501                 /* visit all operands */
2502                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2503                         ir_node  *arg = get_irn_n(irn, n);
2504                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2505
2506                         if(!has_reg_class(si, arg)) continue;
2507
2508                         if(arg == target) {
2509                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2510                                 write_copy_path_cst(si, copies, any_interfere);
2511                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2512                         } else {
2513                                 if(!pset_find_ptr(visited, arg)) {
2514                                         pset_insert(copies, INT_TO_PTR(copy), copy);
2515                                         find_copy_path(si, arg, target, any_interfere, copies, visited);
2516                                         pset_remove(copies, INT_TO_PTR(copy), copy);
2517                                 }
2518                         }
2519                 }
2520         }
2521
2522         /* visit all uses which are phis */
2523         foreach_out_edge(irn, edge) {
2524                 ir_node  *user = edge->src;
2525                 int       pos  = edge->pos;
2526                 op_t     *op = get_irn_link(user);
2527                 ilp_var_t copy;
2528
2529                 if(!is_Phi(user)) continue;
2530                 if(!has_reg_class(si, user)) continue;
2531
2532                 copy = op->attr.live_range.args.copies[pos];
2533
2534                 if(user == target) {
2535                         pset_insert(copies, INT_TO_PTR(copy), copy);
2536                         write_copy_path_cst(si, copies, any_interfere);
2537                         pset_remove(copies, INT_TO_PTR(copy), copy);
2538                 } else {
2539                         if(!pset_find_ptr(visited, user)) {
2540                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2541                                 find_copy_path(si, user, target, any_interfere, copies, visited);
2542                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2543                         }
2544                 }
2545         }
2546
2547         pset_remove_ptr(visited, irn);
2548 }
2549
2550 static void
2551 gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
2552 {
2553         pset * copies = pset_new_ptr_default();
2554         pset * visited = pset_new_ptr_default();
2555
2556         find_copy_path(si, a, b, any_interfere, copies, visited);
2557
2558         del_pset(visited);
2559         del_pset(copies);
2560 }
2561
2562
2563 static void
2564 memcopyhandler(spill_ilp_t * si)
2565 {
2566         interference_t   *interference;
2567         char              buf[256];
2568         /* teste Speicherwerte auf Interferenz */
2569
2570         /* analyze phi classes */
2571         phi_class_compute(si->chordal_env->irg);
2572
2573         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
2574         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
2575
2576 //      phi_class_free(si->chordal_env->irg);
2577
2578         /* now lets emit the ILP unequations for the crap */
2579         set_foreach(si->interferences, interference) {
2580                 irnlist_t      *irnlist;
2581                 ilp_var_t       interfere,
2582                                                 any_interfere;
2583                 ilp_cst_t       any_interfere_cst,
2584                                                 cst;
2585                 const ir_node  *a  = interference->a;
2586                 const ir_node  *b  = interference->b;
2587
2588                 /* any_interf <= \sum interf */
2589                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
2590                 any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2591                 any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2592
2593                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
2594
2595                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
2596                         const ir_node  *bb = irnlist->irn;
2597                         spill_bb_t     *spill_bb = get_irn_link(bb);
2598                         spill_t        *spilla,
2599                                                    *spillb,
2600                                                    query;
2601                         char           buf[256];
2602
2603                         query.irn = a;
2604                         spilla = set_find_spill(spill_bb->ilp, a);
2605                         assert(spilla);
2606
2607                         query.irn = b;
2608                         spillb = set_find_spill(spill_bb->ilp, b);
2609                         assert(spillb);
2610
2611                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
2612                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
2613                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
2614                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
2615                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
2616                         interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2617
2618                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
2619                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2620
2621                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
2622                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
2623                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
2624                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
2625                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
2626
2627                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
2628                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2629
2630                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2631                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
2632                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
2633
2634                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
2635                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2636
2637                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2638                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
2639                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
2640
2641
2642                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
2643
2644                         /* any_interfere >= interf */
2645                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
2646                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2647
2648                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2649                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2650                 }
2651
2652                 /* now that we know whether the two values interfere in memory we can drop constraints to enforce copies */
2653                 gen_copy_constraints(si,a,b,any_interfere);
2654         }
2655 }
2656
2657
2658 static INLINE int
2659 is_zero(double x)
2660 {
2661         return fabs(x) < 0.00001;
2662 }
2663
2664 #ifdef KEEPALIVE
2665 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
2666 {
2667         spill_ilp_t *si = get_irg_link(current_ir_graph);
2668
2669         if(pset_find_ptr(si->all_possible_remats, n)) {
2670                 op_t   *op = (op_t*)get_irn_link(n);
2671                 assert(op && op->is_remat);
2672
2673                 if(!op->attr.remat.remat->inverse) {
2674                         if(op->attr.remat.pre) {
2675                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
2676                         } else {
2677                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
2678                         }
2679
2680                         return 1;
2681                 } else {
2682                         op_t   *op = (op_t*)get_irn_link(n);
2683                         assert(op && op->is_remat);
2684
2685                         if(op->attr.remat.pre) {
2686                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
2687                         } else {
2688                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
2689                         }
2690
2691                         return 1;
2692                 }
2693         }
2694
2695         return 0;
2696 }
2697
2698 static void
2699 dump_graph_with_remats(ir_graph * irg, const char * suffix)
2700 {
2701         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
2702         be_dump(irg, suffix, dump_ir_block_graph_sched);
2703         set_dump_node_vcgattr_hook(NULL);
2704 }
2705 #endif
2706
2707 /**
2708  * Edge hook to dump the schedule edges with annotated register pressure.
2709  */
2710 static int
2711 sched_pressure_edge_hook(FILE *F, ir_node *irn)
2712 {
2713         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
2714                 ir_node *prev = sched_prev(irn);
2715                 fprintf(F, "edge:{sourcename:\"");
2716                 PRINT_NODEID(irn);
2717                 fprintf(F, "\" targetname:\"");
2718                 PRINT_NODEID(prev);
2719                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
2720                 fprintf(F, "\" color:magenta}\n");
2721         }
2722         return 1;
2723 }
2724
2725 static int
2726 sched_block_attr_hook(FILE *F, ir_node *node, ir_node *local)
2727 {
2728     if(is_Block(node)) {
2729         fprintf(F, " info3:\"execfreq %g\"", execution_frequency(node));
2730     }
2731
2732     return 0;
2733 }
2734
2735 static void
2736 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
2737 {
2738         DUMP_NODE_EDGE_FUNC old_edge_hook = get_dump_node_edge_hook();
2739
2740         dump_consts_local(0);
2741     set_dump_node_vcgattr_hook(sched_block_attr_hook);
2742         set_dump_node_edge_hook(sched_pressure_edge_hook);
2743         dump_ir_block_graph(irg, suffix);
2744     set_dump_node_vcgattr_hook(NULL);
2745         set_dump_node_edge_hook(old_edge_hook);
2746 }
2747
2748 static void
2749 walker_pressure_annotator(ir_node * bb, void * data)
2750 {
2751         spill_ilp_t  *si = data;
2752         ir_node      *irn;
2753         irn_live_t   *li;
2754         int           n;
2755         pset         *live = pset_new_ptr_default();
2756         int           projs = 0;
2757
2758         live_foreach(bb, li) {
2759                 irn = (ir_node *) li->irn;
2760
2761                 if (live_is_end(li) && has_reg_class(si, irn)) {
2762                         pset_insert_ptr(live, irn);
2763                 }
2764         }
2765
2766         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
2767
2768         sched_foreach_reverse(bb, irn) {
2769                 if(is_Phi(irn)) {
2770                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
2771                         continue;
2772                 }
2773
2774                 if(has_reg_class(si, irn)) {
2775                         pset_remove_ptr(live, irn);
2776                         if(is_Proj(irn)) ++projs;
2777                 }
2778
2779                 if(!is_Proj(irn)) projs = 0;
2780
2781                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2782                         ir_node    *arg = get_irn_n(irn, n);
2783
2784                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
2785                 }
2786                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
2787         }
2788
2789         del_pset(live);
2790 }
2791
2792 static void
2793 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
2794 {
2795         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
2796 }
2797
2798 #ifdef KEEPALIVE
2799 static void
2800 connect_all_remats_with_keep(spill_ilp_t * si)
2801 {
2802         ir_node   *irn;
2803         ir_node  **ins,
2804                          **pos;
2805         int        n_remats;
2806
2807
2808         n_remats = pset_count(si->all_possible_remats);
2809         if(n_remats) {
2810                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
2811
2812                 pos = ins;
2813                 pset_foreach(si->all_possible_remats, irn) {
2814                         *pos = irn;
2815                         ++pos;
2816                 }
2817
2818                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
2819
2820                 obstack_free(si->obst, ins);
2821         }
2822 }
2823 #endif
2824
2825 static void
2826 connect_all_spills_with_keep(spill_ilp_t * si)
2827 {
2828         ir_node   *irn;
2829         ir_node  **ins,
2830                          **pos;
2831         int        n_spills;
2832         ir_node   *keep;
2833
2834
2835         n_spills = pset_count(si->spills);
2836         if(n_spills) {
2837                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
2838
2839                 pos = ins;
2840                 pset_foreach(si->spills, irn) {
2841                         *pos = irn;
2842                         ++pos;
2843                 }
2844
2845                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
2846
2847                 obstack_free(si->obst, ins);
2848         }
2849 }
2850
2851 /** insert a spill at an arbitrary position */
2852 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
2853 {
2854         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
2855         ir_graph *irg   = get_irn_irg(bl);
2856         ir_node *frame  = get_irg_frame(irg);
2857         ir_node *spill;
2858         ir_node *next;
2859
2860         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
2861         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
2862
2863         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
2864
2865         /*
2866          * search the right insertion point. a spill of a phi cannot be put
2867          * directly after the phi, if there are some phis behind the one which
2868          * is spilled. Also, a spill of a Proj must be after all Projs of the
2869          * same tuple node.
2870          *
2871          * Here's one special case:
2872          * If the spill is in the start block, the spill must be after the frame
2873          * pointer is set up. This is done by setting insert to the end of the block
2874          * which is its default initialization (see above).
2875          */
2876
2877         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
2878                 insert = frame;
2879
2880         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
2881                 insert = next;
2882
2883         sched_add_after(insert, spill);
2884         return spill;
2885 }
2886
2887 static void
2888 delete_remat(spill_ilp_t * si, ir_node * remat) {
2889         int       n;
2890         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2891
2892         sched_remove(remat);
2893
2894         /* kill links to operands */
2895         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
2896                 set_irn_n(remat, n, bad);
2897         }
2898 }
2899
2900 static void
2901 clean_remat_info(spill_ilp_t * si)
2902 {
2903         int            n;
2904         remat_t       *remat;
2905         remat_info_t  *remat_info;
2906         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
2907
2908         set_foreach(si->remat_info, remat_info) {
2909                 if(!remat_info->remats) continue;
2910
2911                 pset_foreach(remat_info->remats, remat)
2912                 {
2913                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
2914                                 set_irn_n(remat->proj, -1, bad);
2915                                 set_irn_n(remat->proj, 0, bad);
2916                         }
2917
2918                         if(get_irn_n_edges(remat->op) == 0) {
2919                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
2920                                         set_irn_n(remat->op, n, bad);
2921                                 }
2922                         }
2923                 }
2924
2925                 if(remat_info->remats) del_pset(remat_info->remats);
2926                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
2927         }
2928 }
2929
2930 static void
2931 delete_unnecessary_remats(spill_ilp_t * si)
2932 {
2933 #ifdef KEEPALIVE
2934         int       n;
2935         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2936
2937         if(si->keep) {
2938                 ir_node   *end = get_irg_end(si->chordal_env->irg);
2939                 ir_node  **keeps;
2940
2941                 for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
2942                         ir_node        *keep_arg = get_irn_n(si->keep, n);
2943                         op_t           *arg_op = get_irn_link(keep_arg);
2944                         lpp_name_t     *name;
2945
2946                         assert(arg_op->is_remat);
2947
2948                         name = si->lpp->vars[arg_op->attr.remat.ilp];
2949
2950                         if(is_zero(name->value)) {
2951                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
2952                                 /* TODO check whether reload is preferred over remat (could be bug) */
2953                                 delete_remat(si, keep_arg);
2954                         } else {
2955                                 if(!arg_op->attr.remat.remat->inverse) {
2956                                         if(arg_op->attr.remat.pre) {
2957                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
2958                                         } else {
2959                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
2960                                         }
2961                                 } else {
2962                                         if(arg_op->attr.remat.pre) {
2963                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
2964                                         } else {
2965                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
2966                                         }
2967                                 }
2968                         }
2969
2970                         set_irn_n(si->keep, n, bad);
2971                 }
2972 #if 0
2973                 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
2974                         ir_node        *end_arg = get_End_keepalive(end, i);
2975
2976                         if(end_arg != si->keep) {
2977                                 obstack_grow(si->obst, &end_arg, sizeof(end_arg));
2978                         }
2979                 }
2980                 keeps = obstack_finish(si->obst);
2981                 set_End_keepalives(end, n-1, keeps);
2982                 obstack_free(si->obst, keeps);
2983 #endif
2984         } else {
2985                 DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
2986         }
2987 #else
2988         ir_node  *remat;
2989
2990         pset_foreach(si->all_possible_remats, remat) {
2991                 op_t           *remat_op = get_irn_link(remat);
2992                 lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
2993
2994                 if(is_zero(name->value)) {
2995                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
2996                         /* TODO check whether reload is preferred over remat (could be bug) */
2997                         delete_remat(si, remat);
2998                 } else {
2999                         if(!remat_op->attr.remat.remat->inverse) {
3000                                 if(remat_op->attr.remat.pre) {
3001                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3002                                 } else {
3003                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3004                                 }
3005                         } else {
3006                                 if(remat_op->attr.remat.pre) {
3007                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3008                                 } else {
3009                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3010                                 }
3011                         }
3012                 }
3013         }
3014 #endif
3015 }
3016
3017 static pset *
3018 get_spills_for_value(spill_ilp_t * si, ir_node * value)
3019 {
3020         pset     *spills = pset_new_ptr_default();
3021
3022         ir_node  *next;
3023         defs_t   *defs;
3024
3025         defs = set_find_def(si->values, value);
3026
3027         if(defs && defs->spills) {
3028                 for(next = defs->spills; next; next = get_irn_link(next)) {
3029                         pset_insert_ptr(spills, next);
3030                 }
3031         }
3032
3033         return spills;
3034 }
3035
3036 static pset *
3037 get_remats_for_value(spill_ilp_t * si, ir_node * value)
3038 {
3039         pset     *remats = pset_new_ptr_default();
3040
3041         ir_node  *next;
3042         defs_t   *defs;
3043
3044         pset_insert_ptr(remats, value);
3045         defs = set_find_def(si->values, value);
3046
3047         if(defs && defs->remats) {
3048                 for(next = defs->remats; next; next = get_irn_link(next)) {
3049                         pset_insert_ptr(remats, next);
3050                 }
3051         }
3052
3053         return remats;
3054 }
3055
3056
3057 /**
3058  * @param before   The node after which the spill will be placed in the schedule
3059  */
3060 /* TODO set context properly */
3061 static ir_node *
3062 insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
3063 {
3064         defs_t   *defs;
3065         ir_node  *spill;
3066         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3067
3068         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3069
3070         spill = be_spill2(arch_env, irn, before, irn);
3071
3072         defs = set_insert_def(si->values, value);
3073         assert(defs);
3074
3075         /* enter into the linked list */
3076         set_irn_link(spill, defs->spills);
3077         defs->spills = spill;
3078
3079 #ifdef KEEPALIVE_SPILLS
3080         pset_insert_ptr(si->spills, spill);
3081 #endif
3082
3083         return spill;
3084 }
3085
3086 /**
3087  * @param before   The Phi node which has to be spilled
3088  */
3089 static ir_node *
3090 insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
3091 {
3092         ir_node   *mem_phi;
3093         ir_node  **ins;
3094         defs_t    *defs;
3095         int        n;
3096         op_t      *op = get_irn_link(phi);
3097
3098         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3099
3100         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3101                 ins[n] = si->m_unknown;
3102         }
3103
3104         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3105
3106         defs = set_insert_def(si->values, phi);
3107         assert(defs);
3108
3109         /* enter into the linked list */
3110         set_irn_link(mem_phi, defs->spills);
3111         defs->spills = mem_phi;
3112
3113         sched_add_after(phi, mem_phi);
3114
3115 #ifdef KEEPALIVE_SPILLS
3116         pset_insert_ptr(si->spills, mem_phi);
3117 #endif
3118
3119
3120         return mem_phi;
3121 }
3122
3123 /**
3124  * Add remat to list of defs, destroys link field!
3125  */
3126 static void
3127 insert_remat(spill_ilp_t * si, ir_node * remat)
3128 {
3129         defs_t   *defs;
3130         op_t     *remat_op = get_irn_link(remat);
3131
3132         assert(remat_op->is_remat);
3133
3134         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3135         assert(defs);
3136
3137         /* enter into the linked list */
3138         set_irn_link(remat, defs->remats);
3139         defs->remats = remat;
3140 }
3141
3142
3143 /**
3144  * Add reload before operation and add to list of defs
3145  */
3146 static ir_node *
3147 insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
3148 {
3149         defs_t   *defs;
3150         ir_node  *reload,
3151                          *spill;
3152         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3153
3154         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3155
3156         defs = set_find_def(si->values, value);
3157
3158         spill = defs->spills;
3159         assert(spill && "no spill placed before reload");
3160
3161         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3162
3163         /* enter into the linked list */
3164         set_irn_link(reload, defs->remats);
3165         defs->remats = reload;
3166
3167         return reload;
3168 }
3169
3170 static void
3171 walker_spill_placer(ir_node * bb, void * data) {
3172         spill_ilp_t   *si = (spill_ilp_t*)data;
3173         ir_node       *irn;
3174         spill_bb_t    *spill_bb = get_irn_link(bb);
3175         pset          *spills_to_do = pset_new_ptr_default();
3176         spill_t       *spill;
3177
3178         set_foreach(spill_bb->ilp, spill) {
3179                 lpp_name_t    *name;
3180
3181                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3182                         name = si->lpp->vars[spill->mem_in];
3183                         if(!is_zero(name->value)) {
3184                                 ir_node   *mem_phi;
3185
3186                                 mem_phi = insert_mem_phi(si, spill->irn);
3187
3188                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3189                         }
3190                 }
3191
3192                 name = si->lpp->vars[spill->spill];
3193                 if(!is_zero(name->value)) {
3194                         /* place spill directly after definition */
3195                         if(get_nodes_block(spill->irn) == bb) {
3196                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3197                                 continue;
3198                         }
3199
3200                         /* place spill at bb start */
3201                         if(spill->reg_in > 0) {
3202                                 name = si->lpp->vars[spill->reg_in];
3203                                 if(!is_zero(name->value)) {
3204                                         insert_spill(si, spill->irn, spill->irn, bb);
3205                                         continue;
3206                                 }
3207                         }
3208                         /* place spill after a remat */
3209                         pset_insert_ptr(spills_to_do, spill->irn);
3210                 }
3211         }
3212         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3213
3214
3215         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3216                 op_t     *op = get_irn_link(irn);
3217
3218                 if(be_is_Spill(irn)) continue;
3219
3220                 if(op->is_remat) {
3221                         /* TODO fix this if we want to support remats with more than two nodes */
3222                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3223                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3224
3225                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3226                         }
3227                 } else {
3228                         if(pset_find_ptr(spills_to_do, irn)) {
3229                                 pset_remove_ptr(spills_to_do, irn);
3230
3231                                 insert_spill(si, irn, irn, irn);
3232                         }
3233                 }
3234
3235         }
3236
3237         assert(pset_count(spills_to_do) == 0);
3238
3239         /* afterwards free data in block */
3240         del_pset(spills_to_do);
3241 }
3242
3243 static ir_node *
3244 insert_mem_copy(spill_ilp_t * si, const ir_node * bb, const ir_node * value)
3245 {
3246         ir_node          *insert_pos = bb;
3247         ir_node          *spill;
3248         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3249
3250         /* find last definition of arg value in block */
3251         ir_node  *next;
3252         defs_t   *defs;
3253         int       last = 0;
3254
3255         defs = set_find_def(si->values, value);
3256
3257         if(defs && defs->remats) {
3258                 for(next = defs->remats; next; next = get_irn_link(next)) {
3259                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3260                                 last = sched_get_time_step(next);
3261                                 insert_pos = next;
3262                         }
3263                 }
3264         }
3265
3266         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3267                 last = sched_get_time_step(value);
3268                 insert_pos = value;
3269         }
3270
3271         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3272
3273         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos, value);
3274
3275         return spill;
3276 }
3277
3278 static void
3279 phim_fixer(spill_ilp_t *si) {
3280         defs_t  *defs;
3281
3282         set_foreach(si->values, defs) {
3283                 const ir_node  *phi = defs->value;
3284                 op_t           *op = get_irn_link(phi);
3285                 ir_node        *phi_m = NULL;
3286                 ir_node        *next = defs->spills;
3287                 int             n;
3288
3289                 if(!is_Phi(phi)) continue;
3290
3291                 while(next) {
3292                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3293                                 phi_m = next;
3294                                 break;
3295                         } else {
3296                                 next = get_irn_link(next);
3297                         }
3298                 }
3299                 if(!phi_m) continue;
3300
3301                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3302                         const ir_node  *value = get_irn_n(phi, n);
3303                         defs_t         *val_defs = set_find_def(si->values, value);
3304                         ir_node        *arg = get_irn_n(phi_m, n);
3305
3306                         /* get a spill of this value */
3307                         ir_node      *spill = val_defs->spills;
3308
3309
3310 #ifndef NO_MEMCOPIES
3311                         ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3312                         lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3313
3314                         if(!is_zero(name->value)) {
3315                                 spill = insert_mem_copy(si, pred, value);
3316                         } else {
3317                                 assert(spill && "no spill placed before PhiM");
3318                         }
3319 #else
3320                         assert(spill && "no spill placed before PhiM");
3321 #endif
3322                         set_irn_n(phi_m, n, spill);
3323                 }
3324         }
3325 }
3326
3327 static void
3328 walker_reload_placer(ir_node * bb, void * data) {
3329         spill_ilp_t   *si = (spill_ilp_t*)data;
3330         ir_node       *irn;
3331         spill_bb_t    *spill_bb = get_irn_link(bb);
3332         int            i;
3333         irn_live_t    *li;
3334
3335         /* reloads at end of block */
3336         if(spill_bb->reloads) {
3337                 keyval_t    *keyval;
3338
3339                 set_foreach(spill_bb->reloads, keyval) {
3340                         ir_node        *irn = (ir_node*)keyval->key;
3341                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3342                         lpp_name_t     *name;
3343
3344                         name = si->lpp->vars[reload];
3345                         if(!is_zero(name->value)) {
3346                                 ir_node    *reload;
3347                                 ir_node    *insert_pos = bb;
3348                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3349                                 op_t       *prev_op = get_irn_link(prev);
3350
3351                                 while(be_is_Spill(prev)) {
3352                                         prev = sched_prev(prev);
3353                                 }
3354
3355                                 prev_op = get_irn_link(prev);
3356
3357                                 /* insert reload before pre-remats */
3358                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3359                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3360                                         insert_pos = prev;
3361
3362                                         do {
3363                                                 prev = sched_prev(prev);
3364                                         } while(be_is_Spill(prev));
3365
3366                                         prev_op = get_irn_link(prev);
3367
3368                                 }
3369
3370                                 reload = insert_reload(si, irn, insert_pos);
3371
3372 #ifdef KEEPALIVE_RELOADS
3373                                 pset_insert_ptr(si->spills, reload);
3374 #endif
3375                         }
3376                 }
3377         }
3378
3379         /* walk and insert more reloads and collect remats */
3380         sched_foreach_reverse(bb, irn) {
3381                 op_t     *op = get_irn_link(irn);
3382
3383                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3384                 if(is_Phi(irn)) break;
3385
3386                 if(op->is_remat) {
3387                         if(get_irn_mode(irn) != mode_T) {
3388                                 insert_remat(si, irn);
3389                         }
3390                 } else {
3391                         int    n;
3392
3393                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3394                                 ir_node    *arg = get_irn_n(irn, n);
3395
3396                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3397                                         lpp_name_t    *name;
3398
3399                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3400                                         if(!is_zero(name->value)) {
3401                                                 ir_node    *reload;
3402                                                 ir_node    *insert_pos = irn;
3403                                                 ir_node    *prev = sched_prev(insert_pos);
3404                                                 op_t       *prev_op;
3405
3406                                                 while(be_is_Spill(prev)) {
3407                                                         prev = sched_prev(prev);
3408                                                 }
3409
3410                                                 prev_op = get_irn_link(prev);
3411
3412                                                 /* insert reload before pre-remats */
3413                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3414                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3415                                                         insert_pos = prev;
3416
3417                                                         do {
3418                                                                 prev = sched_prev(prev);
3419                                                         } while(be_is_Spill(prev));
3420
3421                                                         prev_op = get_irn_link(prev);
3422
3423                                                 }
3424
3425                                                 reload = insert_reload(si, arg, insert_pos);
3426
3427                                                 set_irn_n(irn, n, reload);
3428
3429 #ifdef KEEPALIVE_RELOADS
3430                                                 pset_insert_ptr(si->spills, reload);
3431 #endif
3432                                         }
3433                                 }
3434                         }
3435                 }
3436         }
3437
3438         del_set(spill_bb->ilp);
3439         if(spill_bb->reloads) del_set(spill_bb->reloads);
3440 }
3441
3442 static void
3443 walker_collect_used(ir_node * irn, void * data)
3444 {
3445         lc_bitset_t   *used = data;
3446
3447         lc_bitset_set(used, get_irn_idx(irn));
3448 }
3449
3450 struct kill_helper {
3451         lc_bitset_t  *used;
3452         spill_ilp_t  *si;
3453 };
3454
3455 static void
3456 walker_kill_unused(ir_node * bb, void * data)
3457 {
3458         struct kill_helper *kh = data;
3459         const ir_node      *bad = get_irg_bad(get_irn_irg(bb));
3460         ir_node            *irn;
3461
3462
3463         for(irn=sched_first(bb); !sched_is_end(irn);) {
3464                 ir_node     *next = sched_next(irn);
3465                 int          n;
3466
3467                 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3468                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3469                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(bb)));
3470 #if 0
3471                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3472 #endif
3473                         }
3474
3475                         sched_remove(irn);
3476
3477                         set_nodes_block(irn, bad);
3478                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3479                                 set_irn_n(irn, n, bad);
3480                         }
3481                 }
3482                 irn = next;
3483         }
3484 }
3485
3486 static void
3487 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3488 {
3489         struct kill_helper kh;
3490
3491         kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3492         kh.si = si;
3493
3494         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3495         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3496
3497         lc_bitset_free(kh.used);
3498 }
3499
3500 static void
3501 print_irn_pset(pset * p)
3502 {
3503         ir_node   *irn;
3504
3505         pset_foreach(p, irn) {
3506                 ir_printf("%+F\n", irn);
3507         }
3508 }
3509
3510 static void
3511 rewire_uses(spill_ilp_t * si)
3512 {
3513         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3514         defs_t               *defs;
3515         pset                 *ignore = pset_new_ptr(1);
3516
3517         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3518
3519         /* then fix uses of spills */
3520         set_foreach(si->values, defs) {
3521                 pset     *reloads;
3522                 pset     *spills;
3523                 ir_node  *next = defs->remats;
3524                 int remats = 0;
3525
3526                 reloads = pset_new_ptr_default();
3527
3528                 while(next) {
3529                         if(be_is_Reload(next)) {
3530                                 pset_insert_ptr(reloads, next);
3531                         } else {
3532                                 ++remats;
3533                         }
3534                         next = get_irn_link(next);
3535                 }
3536
3537                 spills = get_spills_for_value(si, defs->value);
3538                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
3539                 if(pset_count(spills) > 1) {
3540                         //assert(pset_count(reloads) > 0);
3541                         //                              print_irn_pset(spills);
3542                         //                              print_irn_pset(reloads);
3543
3544                         be_ssa_constr_set_ignore(dfi, spills, ignore);
3545                 }
3546
3547                 del_pset(reloads);
3548                 del_pset(spills);
3549         }
3550
3551         /* first fix uses of remats and reloads */
3552         set_foreach(si->values, defs) {
3553                 pset     *nodes;
3554                 ir_node  *next = defs->remats;
3555
3556                 if(next) {
3557                         nodes = pset_new_ptr_default();
3558                         pset_insert_ptr(nodes, defs->value);
3559
3560                         while(next) {
3561                                 pset_insert_ptr(nodes, next);
3562                                 next = get_irn_link(next);
3563                         }
3564
3565                         if(pset_count(nodes) > 1) {
3566                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
3567                                 be_ssa_constr_set(dfi, nodes);
3568                         }
3569
3570                         del_pset(nodes);
3571                 }
3572         }
3573
3574 //      remove_unused_defs(si);
3575
3576         be_free_dominance_frontiers(dfi);
3577 }
3578
3579
3580 static void
3581 writeback_results(spill_ilp_t * si)
3582 {
3583         /* walk through the graph and collect all spills, reloads and remats for a value */
3584
3585         si->values = new_set(cmp_defs, 4096);
3586
3587         DBG((si->dbg, LEVEL_1, "Applying results\n"));
3588         delete_unnecessary_remats(si);
3589         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
3590         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
3591         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
3592         phim_fixer(si);
3593
3594         /* clean the remat info! there are still back-edges leading there! */
3595         clean_remat_info(si);
3596
3597         rewire_uses(si);
3598
3599         connect_all_spills_with_keep(si);
3600
3601         del_set(si->values);
3602 }
3603
3604 static int
3605 get_n_regs(spill_ilp_t * si)
3606 {
3607         int     arch_n_regs = arch_register_class_n_regs(si->cls);
3608         int     free = 0;
3609         int     i;
3610
3611         for(i=0; i<arch_n_regs; i++) {
3612                 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
3613                         free++;
3614                 }
3615         }
3616
3617         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
3618         return free;
3619 }
3620
3621 static void
3622 walker_reload_mover(ir_node * bb, void * data)
3623 {
3624         spill_ilp_t   *si = data;
3625         ir_node           *tmp;
3626
3627         sched_foreach(bb, tmp) {
3628                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
3629                         ir_node       *reload = tmp;
3630                         ir_node       *irn = tmp;
3631
3632                         /* move reload upwards */
3633
3634                         int pressure = (int)get_irn_link(reload);
3635                         if(pressure < si->n_regs) {
3636                                 irn = sched_prev(reload);
3637                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
3638                                 sched_remove(reload);
3639                                 pressure = (int)get_irn_link(irn);
3640
3641                                 while(pressure < si->n_regs) {
3642                                         if(sched_is_end(irn) || (be_is_Reload(irn) && has_reg_class(si, irn))) break;
3643
3644                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
3645                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
3646                                         irn = sched_prev(irn);
3647
3648                                         pressure = (int)get_irn_link(irn);
3649                                 }
3650
3651                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
3652                                 sched_put_after(irn, reload);
3653                         }
3654                 }
3655         }
3656 }
3657
3658 static void
3659 move_reloads_upward(spill_ilp_t * si)
3660 {
3661         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
3662 }
3663
3664
3665 /**
3666  * Walk all irg blocks and check for interfering spills inside of phi classes
3667  */
3668 static void
3669 luke_meminterferencechecker(ir_node * bb, void * data)
3670 {
3671         spill_ilp_t    *si = (spill_ilp_t*)data;
3672         irn_live_t     *li1,
3673                        *li2;
3674
3675         live_foreach(bb, li1) {
3676                 ir_node        *a = (ir_node *) li1->irn;
3677
3678                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_T)) continue;
3679
3680                 /* a is only interesting if it is inside a phi class */
3681                 if (get_phi_class(a)) {
3682                         for(li2=li1->next; li2; li2 = li2->next) {
3683                                 ir_node        *b = (ir_node *) li2->irn;
3684
3685                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_T)) continue;
3686
3687                                 /* a and b are only interesting if they are in the same phi class */
3688                                 if(get_phi_class(a) == get_phi_class(b)) {
3689                                         if(values_interfere_in_block(bb, a, b)) {
3690                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
3691                                         }
3692                                 }
3693                         }
3694                 }
3695         }
3696 }
3697
3698 static void
3699 verify_phiclasses(spill_ilp_t * si)
3700 {
3701         /* analyze phi classes */
3702         phi_class_compute(si->chordal_env->irg);
3703
3704         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
3705         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
3706 }
3707
3708 static void
3709 walker_spillslotassigner(ir_node * irn, void * data)
3710 {
3711         spill_ilp_t            *si = (spill_ilp_t*)data;
3712         void                   *cls;
3713
3714         if(!be_is_Spill(irn)) return;
3715
3716         /* set spill context to phi class if it has one ;) */
3717
3718         cls = get_phi_class(irn);
3719         if(cls)
3720                 be_set_Spill_context(irn, cls);
3721         else
3722                 be_set_Spill_context(irn, irn);
3723 }
3724
3725
3726 static void
3727 assign_spillslots(spill_ilp_t * si)
3728 {
3729         DBG((si->dbg, LEVEL_2, "\t calling spill slot assigner\n"));
3730         irg_walk_graph(si->chordal_env->irg, walker_spillslotassigner, NULL, si);
3731 }
3732
3733 void
3734 be_spill_remat(const be_chordal_env_t * chordal_env)
3735 {
3736         char            problem_name[256];
3737         char            dump_suffix[256];
3738         char            dump_suffix2[256];
3739         char            dump_suffix3[256];
3740         struct obstack  obst;
3741         spill_ilp_t     si;
3742
3743         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
3744         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
3745         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
3746
3747         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
3748         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
3749
3750         obstack_init(&obst);
3751         si.chordal_env = chordal_env;
3752         si.obst = &obst;
3753         si.cls = chordal_env->cls;
3754         si.lpp = new_lpp(problem_name, lpp_minimize);
3755         si.remat_info = new_set(cmp_remat_info, 4096);
3756         si.interferences = new_set(cmp_interference, 32);
3757         si.all_possible_remats = pset_new_ptr_default();
3758         si.spills = pset_new_ptr_default();
3759         si.inverse_ops = pset_new_ptr_default();
3760 #ifndef EXECFREQ_LOOPDEPH
3761         compute_execfreq(chordal_env->irg, LOOP_WEIGHT);
3762 #else
3763         si.execfreqs = NULL;
3764 #endif
3765 #ifdef KEEPALIVE
3766         si.keep = NULL;
3767 #endif
3768         si.n_regs = get_n_regs(&si);
3769
3770         set_irg_link(chordal_env->irg, &si);
3771         compute_doms(chordal_env->irg);
3772
3773         /* compute phi classes */
3774 //      phi_class_compute(chordal_env->irg);
3775
3776         be_analyze_regpressure(chordal_env, "-pre");
3777
3778 #ifdef COLLECT_REMATS
3779         /* collect remats */
3780         DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
3781         irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
3782 #endif
3783
3784         /* insert possible remats */
3785         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
3786         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
3787         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
3788
3789 #ifdef KEEPALIVE
3790         DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
3791         connect_all_remats_with_keep(&si);
3792         /* dump graph with inserted remats */
3793         dump_graph_with_remats(chordal_env->irg, dump_suffix);
3794 #endif
3795
3796         /* insert copies for phi arguments not in my regclass */
3797         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
3798
3799         /* recompute liveness */
3800         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
3801         be_liveness(chordal_env->irg);
3802
3803         /* build the ILP */
3804
3805         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
3806         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
3807         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
3808
3809         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
3810         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
3811
3812 #ifndef NO_MEMCOPIES
3813         DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
3814         memcopyhandler(&si);
3815 #endif
3816
3817 #ifdef DUMP_ILP
3818         {
3819                 FILE           *f;
3820                 char            buf[256];
3821
3822                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
3823                 if ((f = fopen(buf, "wt")) != NULL) {
3824                         lpp_dump_plain(si.lpp, f);
3825                         fclose(f);
3826                 }
3827         }
3828 #endif
3829
3830 #ifdef SOLVE
3831         DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
3832 #ifdef ILP_TIMEOUT
3833         lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
3834 #endif
3835
3836 #ifdef SOLVE_LOCAL
3837         lpp_solve_cplex(si.lpp);
3838 #else
3839         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
3840 #endif
3841         assert(lpp_is_sol_valid(si.lpp)
3842                && "solution of ILP must be valid");
3843
3844         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
3845
3846 #ifdef DUMP_SOLUTION
3847         {
3848                 FILE           *f;
3849                 char            buf[256];
3850
3851                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
3852                 if ((f = fopen(buf, "wt")) != NULL) {
3853                         int             i;
3854                         for (i = 0; i < si.lpp->var_next; ++i) {
3855                                 lpp_name_t     *name = si.lpp->vars[i];
3856                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
3857                         }
3858                         fclose(f);
3859                 }
3860         }
3861 #endif
3862
3863         writeback_results(&si);
3864
3865 #endif                          /* SOLVE */
3866
3867         kill_all_unused_values_in_schedule(&si);
3868
3869 #if defined(KEEPALIVE_SPILLS) || defined(KEEPALIVE_RELOADS)
3870         be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
3871 #endif
3872
3873         // move reloads upwards
3874         be_liveness(chordal_env->irg);
3875         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3876         move_reloads_upward(&si);
3877
3878 #ifndef NO_MEMCOPIES
3879         verify_phiclasses(&si);
3880         assign_spillslots(&si);
3881 #endif
3882
3883         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3884
3885         dump_pressure_graph(&si, dump_suffix2);
3886
3887         be_analyze_regpressure(chordal_env, "-post");
3888
3889         free_dom(chordal_env->irg);
3890         del_set(si.interferences);
3891         del_pset(si.inverse_ops);
3892         del_pset(si.all_possible_remats);
3893         del_pset(si.spills);
3894 #ifndef EXECFREQ_LOOPDEPH
3895         free_execfreq();
3896 #endif
3897         free_lpp(si.lpp);
3898         obstack_free(&obst, NULL);
3899         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
3900 }
3901
3902 #else                           /* WITH_ILP */
3903
3904 static void
3905 only_that_you_can_compile_without_WITH_ILP_defined(void)
3906 {
3907 }
3908
3909 #endif                          /* WITH_ILP */