1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
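/*
 * In short: spilling, reloading and rematerialization are encoded as a 0/1
 * ILP.  Binary decision variables (spill, reload, remat, live range and
 * reg_in/reg_out/mem_in/mem_out at block borders) enter the objective weighted
 * by the COST_* constants and the estimated execution frequency; register
 * pressure constraints keep the number of simultaneously live values within
 * n_regs.
 */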
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass_t.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35 #include "irvrfy.h"
36
37 #include <lpp/lpp.h>
38 #include <lpp/lpp_net.h>
39 #include <lpp/lpp_cplex.h>
40 //#include <lc_pset.h>
41 #include <libcore/lc_bitset.h>
42
43 #include "be_t.h"
44 #include "belive_t.h"
45 #include "besched_t.h"
46 #include "beirgmod.h"
47 #include "bearch.h"
48 #include "benode_t.h"
49 #include "beutil.h"
50 #include "bespillremat.h"
51 #include "bespill.h"
52 #include "bepressurestat.h"
53
54 #include "bechordal_t.h"
55
56 #define BIGM 100000.0
57
58 #define DUMP_SOLUTION
59 #define DUMP_ILP
60 //#define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
61 #define COLLECT_REMATS /* enable rematerialization */
62 #define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
63 #define REMAT_WHILE_LIVE /* only remat values that are live */
64 //#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
65 //#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
66 #define MAY_DIE_AT_REMAT /* allow values to die after a pre remat */
67 #define NO_SINGLE_USE_REMATS /* do not collect remats for values with only a single non-remat use */
68 //#define KEEPALIVE_SPILLS
69 //#define KEEPALIVE_RELOADS
70 #define GOODWIN_REDUCTION
71 //#define NO_MEMCOPIES
72 //#define VERIFY_DOMINANCE
73
74 #define  SOLVE
75 //#define  SOLVE_LOCAL
76 #define LPP_SERVER "i44pc52"
77 #define LPP_SOLVER "cplex"
78
79 #define COST_LOAD      8
80 #define COST_STORE     50
81 #define COST_REMAT     1
82
83 #define ILP_TIMEOUT    300
84 #define MAX_PATHS      16
85 #define ILP_UNDEF               -1
86
87 typedef struct _spill_ilp_t {
88         const arch_register_class_t  *cls;
89         int                           n_regs;
90         const be_chordal_env_t       *chordal_env;
91         be_lv_t                      *lv;
92         lpp_t                        *lpp;
93         struct obstack               *obst;
94         set                          *remat_info;
95         pset                         *all_possible_remats;
96         pset                         *inverse_ops;
97 #ifdef KEEPALIVE
98         ir_node                      *keep;
99 #endif
100         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
101         pset                         *spills;
102         set                          *interferences;
103         ir_node                      *m_unknown;
104         DEBUG_ONLY(firm_dbg_module_t * dbg);
105 } spill_ilp_t;
106
107 typedef int ilp_var_t;
108 typedef int ilp_cst_t;
109
110 typedef struct _spill_bb_t {
111         set      *ilp;
112         set      *reloads;
113 } spill_bb_t;
114
115 typedef struct _remat_t {
116         const ir_node        *op;      /**< for copy_irn */
117         const ir_node        *value;   /**< the value which is being recomputed by this remat */
118         ir_node              *proj;    /**< not NULL if the above op produces a tuple */
119         int                   cost;    /**< cost of this remat */
120         int                   inverse; /**< nonzero if this is an inverse remat */
121 } remat_t;
122
123 /**
124  * Data to be attached to each IR node. For remats this contains the ilp_var
125  * for this remat and for normal ops this contains the ilp_vars for
126  * reloading each operand
127  */
128 typedef struct _op_t {
129         int             is_remat;
130         union {
131                 struct {
132                         ilp_var_t       ilp;
133                         remat_t        *remat; /**< the remat this op belongs to */
134                         int             pre; /**< 1, if this is a pressure-increasing remat */
135                 } remat;
136                 struct {
137                         ilp_var_t       ilp;
138                         ir_node        *op; /**< the operation this live range belongs to */
139                         union {
140                                 ilp_var_t      *reloads;
141                                 ilp_var_t      *copies;
142                         } args;
143                 } live_range;
144         } attr;
145 } op_t;
146
147 typedef struct _defs_t {
148         ir_node   *value;
149         ir_node   *spills;  /**< points to the first spill for this value (linked by link field) */
150         ir_node   *remats;  /**< points to the first remat for this value (linked by link field) */
151 } defs_t;
152
153 typedef struct _remat_info_t {
154         const ir_node       *irn; /**< the irn to which these remats belong */
155         pset                *remats; /**< possible remats for this value */
156         pset                *remats_by_operand; /**< remats with this value as operand */
157 } remat_info_t;
158
159 typedef struct _keyval_t {
160         const void          *key;
161         const void          *val;
162 } keyval_t;
163
164 typedef struct _spill_t {
165         ir_node      *irn;
166         ilp_var_t     reg_in;
167         ilp_var_t     mem_in;
168         ilp_var_t     reg_out;
169         ilp_var_t     mem_out;
170         ilp_var_t     spill;
171 } spill_t;
172
173 static INLINE int
174 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
175 {
176         return chordal_has_class(si->chordal_env, irn);
177 }
178
179 #if 0
180 static int
181 cmp_remat(const void *a, const void *b)
182 {
183         const keyval_t *p = a;
184         const keyval_t *q = b;
185         const remat_t  *r = p->val;
186         const remat_t  *s = q->val;
187
188         assert(r && s);
189
190         return !(r == s || r->op == s->op);
191 }
192 #endif
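/* Note: like all set/pset compare callbacks in libfirm, the cmp_* functions
 * below return 0 iff the two entries are considered equal, hence the negated
 * comparisons. */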
193 static int
194 cmp_remat(const void *a, const void *b)
195 {
196         const remat_t  *r = a;
197         const remat_t  *s = b;
198
199         return !(r == s || r->op == s->op);
200 }
201
202 static int
203 cmp_spill(const void *a, const void *b, size_t size)
204 {
205         const spill_t *p = a;
206         const spill_t *q = b;
207
208 //      return !(p->irn == q->irn && p->bb == q->bb);
209         return !(p->irn == q->irn);
210 }
211
212 static keyval_t *
213 set_find_keyval(set * set, void * key)
214 {
215         keyval_t     query;
216
217         query.key = key;
218         return set_find(set, &query, sizeof(query), HASH_PTR(key));
219 }
220
221 static keyval_t *
222 set_insert_keyval(set * set, void * key, void * val)
223 {
224         keyval_t     query;
225
226         query.key = key;
227         query.val = val;
228         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
229 }
230
231 static defs_t *
232 set_find_def(set * set, ir_node * value)
233 {
234         defs_t     query;
235
236         query.value = value;
237         return set_find(set, &query, sizeof(query), HASH_PTR(value));
238 }
239
240 static defs_t *
241 set_insert_def(set * set, ir_node * value)
242 {
243         defs_t     query;
244
245         query.value = value;
246         query.spills = NULL;
247         query.remats = NULL;
248         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
249 }
250
251 static spill_t *
252 set_find_spill(set * set, ir_node * value)
253 {
254         spill_t     query;
255
256         query.irn = value;
257         return set_find(set, &query, sizeof(query), HASH_PTR(value));
258 }
259
260 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
261 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
262 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
263 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
264 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
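/* Convenience iterators; note that set/pset iteration uses the container's
 * internal iterator state, so these macros must not be nested over the same
 * container. */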
265
266 static int
267 cmp_remat_info(const void *a, const void *b, size_t size)
268 {
269         const remat_info_t *p = a;
270         const remat_info_t *q = b;
271
272         return !(p->irn == q->irn);
273 }
274
275 static int
276 cmp_defs(const void *a, const void *b, size_t size)
277 {
278         const defs_t *p = a;
279         const defs_t *q = b;
280
281         return !(p->value == q->value);
282 }
283
284 static int
285 cmp_keyval(const void *a, const void *b, size_t size)
286 {
287         const keyval_t *p = a;
288         const keyval_t *q = b;
289
290         return !(p->key == q->key);
291 }
292
293 static double
294 execution_frequency(const spill_ilp_t *si, const ir_node * irn)
295 {
296 #define FUDGE 0.001
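        /* Without EXECFREQ_LOOPDEPH the precomputed block execution frequency is
         * used; with it the frequency is estimated as 10^loop_depth.  FUDGE
         * presumably keeps the result strictly positive so that no ILP cost
         * coefficient becomes zero. */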
297 #ifndef EXECFREQ_LOOPDEPH
298         return get_block_execfreq(si->chordal_env->exec_freq, get_block(irn)) + FUDGE;
299 #else
300         if(is_Block(irn))
301                 return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
302         else
303                 return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
304 #endif
305 }
306
307 static double
308 get_cost(const spill_ilp_t * si, const ir_node * irn)
309 {
310         if(be_is_Spill(irn)) {
311                 return COST_STORE;
312         } else if(be_is_Reload(irn)){
313                 return COST_LOAD;
314         } else {
315                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
316         }
317 }
318
319 /**
320  * Checks, whether node and its operands have suitable reg classes
321  */
322 static INLINE int
323 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
324 {
325         int               n;
326         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
327         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
328
329 #if 0
330         if(!remat)
331                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
332 #endif
333
334         for (n = get_irn_arity(irn)-1; n>=0 && remat; --n) {
335                 ir_node        *op = get_irn_n(irn, n);
336                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
337
338 //              if(!remat)
339 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
340         }
341
342         return remat;
343 }
344
345 /**
346  * Try to create a remat from @p op with destination value @p dest_value
347  */
348 static INLINE remat_t *
349 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
350 {
351         remat_t  *remat = NULL;
352
353 //      if(!mode_is_datab(get_irn_mode(dest_value)))
354 //              return NULL;
355
356         if(dest_value == op) {
357                 const ir_node *proj = NULL;
358
359                 if(is_Proj(dest_value)) {
360                         op = get_irn_n(op, 0);
361                         proj = dest_value;
362                 }
363
364                 if(!is_rematerializable(si, op))
365                         return NULL;
366
367                 remat = obstack_alloc(si->obst, sizeof(*remat));
368                 remat->op = op;
369                 remat->cost = get_cost(si, op);
370                 remat->value = dest_value;
371                 remat->proj = proj;
372                 remat->inverse = 0;
373         } else {
374                 arch_inverse_t     inverse;
375                 int                n;
376
377                 /* get the index of the operand we want to retrieve by the inverse op */
378                 for (n = get_irn_arity(op)-1; n>=0; --n) {
379                         ir_node        *arg = get_irn_n(op, n);
380
381                         if(arg == dest_value) break;
382                 }
383                 if(n<0) return NULL;
384
385                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", n, op));
386
387                 /* else ask the backend to give an inverse op */
388                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, n, &inverse, si->obst)) {
389                         int   i;
390
391                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
392
393                         assert(inverse.n > 0 && "inverse op should have at least one node");
394
395                         for(i=inverse.n-1; i>=0; --i) {
396                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
397                         }
398
399                         if(inverse.n <= 2) {
400                                 remat = obstack_alloc(si->obst, sizeof(*remat));
401                                 remat->op = inverse.nodes[0];
402                                 remat->cost = inverse.costs;
403                                 remat->value = dest_value;
404                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
405                                 remat->inverse = 1;
406
407                                 assert(!remat->proj || is_Proj(remat->proj));
408                         } else {
409                                 assert(0 && "I can not handle remats with more than 2 nodes");
410                         }
411                 }
412         }
413
414         if(remat) {
415                 if(remat->proj) {
416                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
417                 } else {
418                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
419                 }
420         }
421         return remat;
422 }
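/*
 * Example (hypothetical): for an op  r = a + b  the backend may offer
 * a = r - b  as inverse operation for operand a; get_remat_from_op() then
 * records it as an "inverse" remat that recomputes the operand from the
 * result (plus a Proj if the inverse consists of two nodes).
 */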
423
424
425 static INLINE void
426 add_remat(const spill_ilp_t * si, const remat_t * remat)
427 {
428         remat_info_t    *remat_info,
429                      query;
430         int              n;
431
432         assert(remat->op);
433         assert(remat->value);
434
435         query.irn = remat->value;
436         query.remats = NULL;
437         query.remats_by_operand = NULL;
438         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
439
440         if(remat_info->remats == NULL) {
441                 remat_info->remats = new_pset(cmp_remat, 4096);
442         }
443         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
444
445         /* insert the remat into the remats_by_operand set of each argument of the remat op */
446         for (n = get_irn_arity(remat->op)-1; n>=0; --n) {
447                 ir_node        *arg = get_irn_n(remat->op, n);
448
449                 query.irn = arg;
450                 query.remats = NULL;
451                 query.remats_by_operand = NULL;
452                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
453
454                 if(remat_info->remats_by_operand == NULL) {
455                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
456                 }
457                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
458         }
459 }
460
461 static int
462 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
463 {
464         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
465         int                i = 0;
466
467         while(edge) {
468                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
469                         ++i;
470                 }
471                 edge = get_irn_out_edge_next(irn, edge);
472         }
473
474         return i;
475 }
476
477 static INLINE void
478 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
479 {
480         int      n;
481         remat_t *remat;
482
483 #ifdef NO_SINGLE_USE_REMATS
484         if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
485 #else
486         if(has_reg_class(si, op)) {
487 #endif
488                 remat = get_remat_from_op(si, op, op);
489                 if(remat) {
490                         add_remat(si, remat);
491                 }
492         }
493
494 #ifdef COLLECT_INVERSE_REMATS
495         /* also collect inverse remats: for each argument arg of op, try to obtain
496            a remat via get_remat_from_op(si, arg, op) that recomputes arg from op */
497         for (n = get_irn_arity(op)-1; n>=0; --n) {
498                 ir_node        *arg = get_irn_n(op, n);
499
500                 if(has_reg_class(si, arg)) {
501                         /* try to get an inverse remat */
502                         remat = get_remat_from_op(si, arg, op);
503                         if(remat) {
504                                 add_remat(si, remat);
505                         }
506                 }
507         }
508 #endif
509
510 }
511
512 static INLINE int
513 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
514 {
515         ir_node *block;
516         ir_node *def_block = get_nodes_block(val);
517         int      ret;
518
519         if(val == pos)
520                 return 0;
521
522         /* if pos is at end of a basic block */
523         if(is_Block(pos)) {
524                 ret = (pos == def_block || block_dominates(def_block, pos));
525 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
526                 return ret;
527         }
528
529         /* else if this is a normal operation */
530         block = get_nodes_block(pos);
531         if(block == def_block) {
532                 if(!sched_is_scheduled(val)) return 1;
533
534                 ret = sched_comes_after(val, pos);
535 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
536                 return ret;
537         }
538
539         ret = block_dominates(def_block, block);
540 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
541         return ret;
542 }
543
544 static INLINE ir_node *
545 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
546 {
547         return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
548 }
549
550 /**
551  * Returns first non-Phi node of block @p bb
552  */
553 static INLINE ir_node *
554 sched_block_first_nonphi(const ir_node * bb)
555 {
556         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
557 }
558
559 static int
560 sched_skip_proj_predicator(const ir_node * irn, void * data)
561 {
562         return (is_Proj(irn));
563 }
564
565 static INLINE ir_node *
566 sched_next_nonproj(const ir_node * irn, int forward)
567 {
568         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
569 }
570
571 /**
572  * Returns next operation node (non-Proj) after @p irn
573  * or the basic block of this node
574  */
575 static INLINE ir_node *
576 sched_next_op(const ir_node * irn)
577 {
578         ir_node *next = sched_next(irn);
579
580         if(is_Block(next))
581                 return next;
582
583         return sched_next_nonproj(next, 1);
584 }
585
586 /**
587  * Returns previous operation node (non-Proj) before @p irn
588  * or the basic block of this node
589  */
590 static INLINE ir_node *
591 sched_prev_op(const ir_node * irn)
592 {
593         ir_node *prev = sched_prev(irn);
594
595         if(is_Block(prev))
596                 return prev;
597
598         return sched_next_nonproj(prev, 0);
599 }
600
601 static void
602 sched_put_after(ir_node * insert, ir_node * irn)
603 {
604         if(is_Block(insert)) {
605                 insert = sched_block_first_nonphi(insert);
606         } else {
607                 insert = sched_next_op(insert);
608         }
609         sched_add_before(insert, irn);
610 }
611
612 static void
613 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
614 {
615         if(is_Block(insert)) {
616                 insert = sched_block_last_noncf(si, insert);
617         } else {
618                 insert = sched_next_nonproj(insert, 0);
619                 insert = sched_prev(insert);
620         }
621         sched_add_after(insert, irn);
622 }
623
624 /**
625  * Tells you whether a @p remat can be placed before the irn @p pos
626  */
627 static INLINE int
628 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
629 {
630         const ir_node   *op = remat->op;
631         const ir_node   *prev;
632         int        n,
633                            res = 1;
634
635         if(is_Block(pos)) {
636                 prev = sched_block_last_noncf(si, pos);
637                 prev = sched_next_nonproj(prev, 0);
638         } else {
639                 prev = sched_prev_op(pos);
640         }
641         /* do not remat if the rematted value is defined immediately before this op */
642         if(prev == remat->op) {
643                 return 0;
644         }
645
646 #if 0
647         /* this should be just fine, the following OP will be using this value, right? */
648
649         /* only remat AFTER the real definition of a value (?) */
650         if(!value_is_defined_before(si, pos, remat->value)) {
651 //              ir_fprintf(stderr, "error(not defined)");
652                 return 0;
653         }
654 #endif
655
656         for(n=get_irn_arity(op)-1; n>=0 && res; --n) {
657                 const ir_node   *arg = get_irn_n(op, n);
658
659 #ifdef NO_ENLARGE_L1V3N355
660                 if(has_reg_class(si, arg) && live) {
661                         res &= pset_find_ptr(live, arg)?1:0;
662                 } else {
663                         res &= value_is_defined_before(si, pos, arg);
664                 }
665 #else
666                 res &= value_is_defined_before(si, pos, arg);
667 #endif
668         }
669
670         return res;
671 }
672
673 /**
674  * Tells you whether a @p remat can be placed after the irn @p pos
675  */
676 static INLINE int
677 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
678 {
679         if(is_Block(pos)) {
680                 pos = sched_block_first_nonphi(pos);
681         } else {
682                 pos = sched_next_op(pos);
683         }
684
685         /* only remat AFTER the real definition of a value (?) */
686         if(!value_is_defined_before(si, pos, remat->value)) {
687                 return 0;
688         }
689
690         return can_remat_before(si, remat, pos, live);
691 }
692
693 /**
694  * Collect potentially rematerializable OPs
695  */
696 static void
697 walker_remat_collector(ir_node * irn, void * data)
698 {
699         spill_ilp_t    *si = data;
700
701         if(!is_Block(irn) && !is_Phi(irn)) {
702                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
703                 get_remats_from_op(si, irn);
704         }
705 }
706
707 /**
708  * Inserts a copy of @p irn before @p pos
709  */
710 static ir_node *
711 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
712 {
713         ir_node     *bb;
714         ir_node     *copy;
715
716         bb = is_Block(pos)?pos:get_nodes_block(pos);
717         copy = exact_copy(irn);
718
719         _set_phi_class(copy, NULL);
720         set_nodes_block(copy, bb);
721         sched_put_before(si, pos, copy);
722
723         return copy;
724 }
725
726 /**
727  * Inserts a copy of @p irn after @p pos
728  */
729 static ir_node *
730 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
731 {
732         ir_node     *bb;
733         ir_node     *copy;
734
735         bb = is_Block(pos)?pos:get_nodes_block(pos);
736         copy = exact_copy(irn);
737
738         _set_phi_class(copy, NULL);
739         set_nodes_block(copy, bb);
740         sched_put_after(pos, copy);
741
742         return copy;
743 }
744
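/**
 * Inserts a copy of @p remat->op (and of its Proj, if any) after @p pos if the
 * remat is legal there, and creates the corresponding binary ILP variable
 * whose cost is the remat cost weighted by the execution frequency of @p pos.
 */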
745 static ir_node *
746 insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
747 {
748         char     buf[256];
749
750         if(can_remat_after(si, remat, pos, live)) {
751                 ir_node         *copy,
752                                                 *proj_copy;
753                 op_t            *op;
754
755                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
756
757                 copy = insert_copy_after(si, remat->op, pos);
758
759                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
760                 op = obstack_alloc(si->obst, sizeof(*op));
761                 op->is_remat = 1;
762                 op->attr.remat.remat = remat;
763                 op->attr.remat.pre = 0;
764                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
765
766                 set_irn_link(copy, op);
767                 pset_insert_ptr(si->all_possible_remats, copy);
768                 if(remat->proj) {
769                         proj_copy = insert_copy_after(si, remat->proj, copy);
770                         set_irn_n(proj_copy, 0, copy);
771                         set_irn_link(proj_copy, op);
772                         pset_insert_ptr(si->all_possible_remats, proj_copy);
773                 } else {
774                         proj_copy = NULL;
775                 }
776
777                 return copy;
778         }
779
780         return NULL;
781 }
782
783 static ir_node *
784 insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
785 {
786         char     buf[256];
787
788         if(can_remat_before(si, remat, pos, live)) {
789                 ir_node         *copy,
790                                                 *proj_copy;
791                 op_t            *op;
792
793                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
794
795                 copy = insert_copy_before(si, remat->op, pos);
796
797                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
798                 op = obstack_alloc(si->obst, sizeof(*op));
799                 op->is_remat = 1;
800                 op->attr.remat.remat = remat;
801                 op->attr.remat.pre = 1;
802                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
803
804                 set_irn_link(copy, op);
805                 pset_insert_ptr(si->all_possible_remats, copy);
806                 if(remat->proj) {
807                         proj_copy = insert_copy_after(si, remat->proj, copy);
808                         set_irn_n(proj_copy, 0, copy);
809                         set_irn_link(proj_copy, op);
810                         pset_insert_ptr(si->all_possible_remats, proj_copy);
811                 } else {
812                         proj_copy = NULL;
813                 }
814
815                 return copy;
816         }
817
818         return NULL;
819 }
820
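/**
 * Returns the number of control flow successors of @p block, saturated at 2,
 * i.e. a return value of 2 means "two or more".
 */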
821 static int
822 get_block_n_succs(const ir_node *block) {
823         const ir_edge_t *edge;
824
825         assert(edges_activated(current_ir_graph));
826
827         edge = get_block_succ_first(block);
828         if (! edge)
829                 return 0;
830
831         edge = get_block_succ_next(block, edge);
832         return edge ? 2 : 1;
833 }
834
835 static int
836 is_merge_edge(const ir_node * bb)
837 {
838 #ifdef GOODWIN_REDUCTION
839         return get_block_n_succs(bb) == 1;
840 #else
841         return 1;
842 #endif
843 }
844
845 static int
846 is_diverge_edge(const ir_node * bb)
847 {
848 #ifdef GOODWIN_REDUCTION
849         return get_Block_n_cfgpreds(bb) == 1;
850 #else
851         return 1;
852 #endif
853 }
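/*
 * With GOODWIN_REDUCTION the candidate points are restricted: end-of-block
 * reloads and remats are only considered at blocks with exactly one successor
 * ("merge edges") and block-entry remats only at blocks with exactly one
 * predecessor ("diverge edges"); without it every block qualifies.  The name
 * presumably refers to the Goodwin/Wilken-style reduction of candidate
 * spill/reload points in ILP-based register allocation.
 */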
854
855 static void
856 walker_regclass_copy_insertor(ir_node * irn, void * data)
857 {
858         spill_ilp_t    *si = data;
859
860         if(is_Phi(irn) && has_reg_class(si, irn)) {
861                 int n;
862
863                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
864                         ir_node  *phi_arg = get_irn_n(irn, n);
865                         ir_node  *bb = get_Block_cfgpred_block(get_nodes_block(irn), n);
866
867                         if(!has_reg_class(si, phi_arg)) {
868                                 ir_node   *copy = be_new_Copy(si->cls, si->chordal_env->irg, bb, phi_arg);
869                                 ir_node   *pos = sched_block_last_noncf(si, bb);
870                                 op_t      *op = obstack_alloc(si->obst, sizeof(*op));
871
872                                 DBG((si->dbg, LEVEL_2, "\t copy to my regclass for arg %+F of %+F\n", phi_arg, irn));
873                                 sched_add_after(pos, copy);
874                                 set_irn_n(irn, n, copy);
875
876                                 op->is_remat = 0;
877                                 op->attr.live_range.args.reloads = NULL;
878                                 op->attr.live_range.ilp = ILP_UNDEF;
879                                 set_irn_link(copy, op);
880                         }
881                 }
882         }
883 }
884
885
886 /**
887  * Insert (so far unused) remats into the irg to
888  * recompute the potential liveness of all values
889  */
890 static void
891 walker_remat_insertor(ir_node * bb, void * data)
892 {
893         spill_ilp_t    *si = data;
894         spill_bb_t     *spill_bb;
895         ir_node        *irn;
896         int             n, i;
897         pset           *live = pset_new_ptr_default();
898
899         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
900
901         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
902                 ir_node        *value = be_lv_get_irn(si->lv, bb, i);
903
904                 /* add remats at end of block */
905                 if (has_reg_class(si, value)) {
906                         pset_insert_ptr(live, value);
907                 }
908         }
909
910         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
911         set_irn_link(bb, spill_bb);
912
913         irn = sched_last(bb);
914         while(!sched_is_end(irn)) {
915                 ir_node   *next;
916                 op_t      *op;
917                 pset      *args;
918                 ir_node   *arg;
919                 pset      *remat_args;
920
921                 next = sched_prev(irn);
922
923                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
924
925                 if(is_Phi(irn) || is_Proj(irn)) {
926                         op_t      *op;
927
928                         if(has_reg_class(si, irn)) {
929                                 pset_remove_ptr(live, irn);
930                         }
931
932                         op = obstack_alloc(si->obst, sizeof(*op));
933                         op->is_remat = 0;
934                         op->attr.live_range.args.reloads = NULL;
935                         op->attr.live_range.ilp = ILP_UNDEF;
936                         set_irn_link(irn, op);
937
938                         irn = next;
939                         continue;
940                 }
941
942                 op = obstack_alloc(si->obst, sizeof(*op));
943                 op->is_remat = 0;
944                 op->attr.live_range.ilp = ILP_UNDEF;
945                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
946                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
947                 set_irn_link(irn, op);
948
949                 args = pset_new_ptr_default();
950
951                 /* collect arguments of op */
952                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
953                         ir_node        *arg = get_irn_n(irn, n);
954
955                         pset_insert_ptr(args, arg);
956                 }
957
958                 /* set args of op already live in epilog */
959                 pset_foreach(args, arg) {
960                         if(has_reg_class(si, arg)) {
961                                 pset_insert_ptr(live, arg);
962                         }
963                 }
964                 /* delete defined value from live set */
965                 if(has_reg_class(si, irn)) {
966                         pset_remove_ptr(live, irn);
967                 }
968
969
970                 remat_args = pset_new_ptr_default();
971
972                 /* insert all possible remats before irn */
973                 pset_foreach(args, arg) {
974                         remat_info_t   *remat_info,
975                                                     query;
976                         remat_t        *remat;
977
978                         /* continue if the operand has the wrong reg class
979                          */
980                         if(!has_reg_class(si, arg))
981                                 continue;
982
983                         query.irn = arg;
984                         query.remats = NULL;
985                         query.remats_by_operand = NULL;
986                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
987
988                         if(!remat_info) {
989                                 continue;
990                         }
991
992                         if(remat_info->remats) {
993                                 pset_foreach(remat_info->remats, remat) {
994                                         ir_node  *remat_irn = NULL;
995
996                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
997 #ifdef REMAT_WHILE_LIVE
998                                         if(pset_find_ptr(live, remat->value)) {
999                                                 remat_irn = insert_remat_before(si, remat, irn, live);
1000                                         }
1001 #else
1002                                         remat_irn = insert_remat_before(si, remat, irn, live);
1003 #endif
1004                                         if(remat_irn) {
1005                                                 for(n=get_irn_arity(remat_irn)-1; n>=0; --n) {
1006                                                         ir_node  *remat_arg = get_irn_n(remat_irn, n);
1007
1008                                                         if(!has_reg_class(si, remat_arg)) continue;
1009
1010                                                         pset_insert_ptr(remat_args, remat_arg);
1011                                                 }
1012                                         }
1013                                 }
1014                         }
1015                 }
1016
1017                 /* now we add remat args to op's args because they could also die at this op */
1018                 pset_foreach(args,arg) {
1019                         if(pset_find_ptr(remat_args, arg)) {
1020                                 pset_remove_ptr(remat_args, arg);
1021                         }
1022                 }
1023                 pset_foreach(remat_args,arg) {
1024                         pset_insert_ptr(args, arg);
1025                 }
1026
1027                 /* insert all possible remats after irn */
1028                 pset_foreach(args, arg) {
1029                         remat_info_t   *remat_info,
1030                                                     query;
1031                         remat_t        *remat;
1032
1033                         /* continue if the operand has the wrong reg class */
1034                         if(!has_reg_class(si, arg))
1035                                 continue;
1036
1037                         query.irn = arg;
1038                         query.remats = NULL;
1039                         query.remats_by_operand = NULL;
1040                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
1041
1042                         if(!remat_info) {
1043                                 continue;
1044                         }
1045
1046                         /* do not place post remats after jumps */
1047                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
1048
1049                         if(remat_info->remats_by_operand) {
1050                                 pset_foreach(remat_info->remats_by_operand, remat) {
1051                                         /* do not insert remats producing the same value as one of the operands */
1052                                         if(!pset_find_ptr(args, remat->value)) {
1053                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
1054 #ifdef REMAT_WHILE_LIVE
1055                                                 if(pset_find_ptr(live, remat->value)) {
1056                                                         insert_remat_after(si, remat, irn, live);
1057                                                 }
1058 #else
1059                                                 insert_remat_after(si, remat, irn, live);
1060 #endif
1061                                         }
1062                                 }
1063                         }
1064                 }
1065
1066                 del_pset(remat_args);
1067                 del_pset(args);
1068                 irn = next;
1069         }
1070
1071         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_in, i) {
1072                 ir_node        *value = be_lv_get_irn(si->lv, bb, i);
1073
1074                 /* add remats at end if successor has multiple predecessors */
1075                 if(is_merge_edge(bb)) {
1076                         /* add remats at end of block */
1077                         if (be_is_live_end(si->lv, bb, value) && has_reg_class(si, value)) {
1078                                 remat_info_t   *remat_info,
1079                                                            query;
1080                                 remat_t        *remat;
1081
1082                                 query.irn = value;
1083                                 query.remats = NULL;
1084                                 query.remats_by_operand = NULL;
1085                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1086
1087                                 if(remat_info && remat_info->remats) {
1088                                         pset_foreach(remat_info->remats, remat) {
1089                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1090
1091                                                 insert_remat_before(si, remat, bb, NULL);
1092                                         }
1093                                 }
1094                         }
1095                 }
1096                 if(is_diverge_edge(bb)) {
1097                         /* add remat2s at beginning of block */
1098                         if ((be_is_live_in(si->lv, bb, value) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
1099                                 remat_info_t   *remat_info,
1100                                                            query;
1101                                 remat_t        *remat;
1102
1103                                 query.irn = value;
1104                                 query.remats = NULL;
1105                                 query.remats_by_operand = NULL;
1106                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1107
1108                                 if(remat_info && remat_info->remats) {
1109                                         pset_foreach(remat_info->remats, remat) {
1110                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at beginning of block %+F\n", remat->op, bb));
1111
1112                                                 /* put the remat here if all its args are available */
1113                                                 insert_remat_after(si, remat, bb, NULL);
1114
1115                                         }
1116                                 }
1117                         }
1118                 }
1119         }
1120 }
1121
1122 /**
1123  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1124  */
1125 static void
1126 luke_endwalker(ir_node * bb, void * data)
1127 {
1128         spill_ilp_t    *si = (spill_ilp_t*)data;
1129         pset           *live;
1130         pset           *use_end;
1131         char            buf[256];
1132         ilp_cst_t       cst;
1133         ir_node        *irn;
1134         spill_bb_t     *spill_bb = get_irn_link(bb);
1135         int             i;
1136
1137
1138         live = pset_new_ptr_default();
1139         use_end = pset_new_ptr_default();
1140
1141         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1142                 irn = be_lv_get_irn(si->lv, bb, i);
1143                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1144                         op_t      *op;
1145
1146                         pset_insert_ptr(live, irn);
1147                         op = get_irn_link(irn);
1148                         assert(!op->is_remat);
1149                 }
1150         }
1151
1152         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1153         /* their reg_out must always be set */
1154         sched_foreach_reverse(bb, irn) {
1155                 int   n;
1156
1157                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1158
1159                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1160                         ir_node        *irn_arg = get_irn_n(irn, n);
1161
1162                         if(has_reg_class(si, irn_arg)) {
1163                                 pset_insert_ptr(use_end, irn_arg);
1164                         }
1165                 }
1166         }
1167
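        /* Register pressure at the block end: the reg_out variables added below sum
         * to at most n_regs minus the number of use_end values, because each value
         * used by the control flow node is forced into a register by its own
         * req_cf_end constraint and thus reserves a register up front. */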
1168         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1169         //cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1170         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1171
1172         spill_bb->ilp = new_set(cmp_spill, pset_count(live)+pset_count(use_end));
1173
1174         /* if this is a merge edge we can reload at the end of this block */
1175         if(is_merge_edge(bb)) {
1176                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live)+pset_count(use_end));
1177         } else if(pset_count(use_end)){
1178                 spill_bb->reloads = new_set(cmp_keyval, pset_count(use_end));
1179         } else {
1180                 spill_bb->reloads = NULL;
1181         }
1182
1183         pset_foreach(live,irn) {
1184                 spill_t     query,
1185                                         *spill;
1186                 double      spill_cost;
1187
1188
1189                 /* handle values used by control flow nodes later separately */
1190                 if(pset_find_ptr(use_end, irn)) continue;
1191
1192                 query.irn = irn;
1193                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1194
1195                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1196
1197                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1198                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1199                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1200
1201                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1202                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1203
1204                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1205                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1206
1207                 if(is_merge_edge(bb)) {
1208                         ilp_var_t   reload;
1209                         ilp_cst_t   rel_cst;
1210
1211                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1212                         reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1213                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1214
1215                         /* reload <= mem_out */
1216                         rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1217                         lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1218                         lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1219                 }
1220
1221                 spill->reg_in = ILP_UNDEF;
1222                 spill->mem_in = ILP_UNDEF;
1223         }
1224
1225         pset_foreach(use_end,irn) {
1226                 spill_t     query,
1227                                         *spill;
1228                 double      spill_cost;
1229                 ilp_cst_t   end_use_req,
1230                                         rel_cst;
1231                 ilp_var_t   reload;
1232
1233                 query.irn = irn;
1234                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1235
1236                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1237
1238                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1239                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1240                 /* if irn is used at the end of the block, then it is live anyway */
1241                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1242
1243                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1244                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1245
1246                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1247                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1248
1249                 ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1250                 reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1251                 set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1252
1253                 /* reload <= mem_out */
1254                 rel_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1255                 lpp_set_factor_fast(si->lpp, rel_cst, reload, 1.0);
1256                 lpp_set_factor_fast(si->lpp, rel_cst, spill->mem_out, -1.0);
1257
1258                 spill->reg_in = ILP_UNDEF;
1259                 spill->mem_in = ILP_UNDEF;
1260
1261                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1262                 end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
1263                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1264         }
1265
1266         del_pset(live);
1267         del_pset(use_end);
1268 }
1269
1270 static ir_node *
1271 next_post_remat(const ir_node * irn)
1272 {
1273         op_t      *op;
1274
1275         if(is_Block(irn)) {
1276                 irn = sched_block_first_nonphi(irn);
1277         } else {
1278                 irn = sched_next_op(irn);
1279         }
1280
1281         if(sched_is_end(irn))
1282                 return NULL;
1283
1284         op = (op_t*)get_irn_link(irn);
1285         if(op->is_remat && !op->attr.remat.pre) {
1286                 return irn;
1287         }
1288
1289         return NULL;
1290 }
1291
1292
1293 static ir_node *
1294 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1295 {
1296         op_t      *op;
1297         ir_node   *ret;
1298
1299         if(is_Block(irn)) {
1300                 ret = sched_block_last_noncf(si, irn);
1301                 ret = sched_next(ret);
1302                 ret = sched_prev_op(ret);
1303         } else {
1304                 ret = sched_prev_op(irn);
1305         }
1306
1307         if(sched_is_end(ret) || is_Phi(ret))
1308                 return NULL;
1309
1310         op = (op_t*)get_irn_link(ret);
1311         if(op->is_remat && op->attr.remat.pre) {
1312                 return ret;
1313         }
1314
1315         return NULL;
1316 }
1317
1318 /**
1319  * Find a remat of value @p value in the epilog of @p pos
1320  */
1321 static ir_node *
1322 find_post_remat(const ir_node * value, const ir_node * pos)
1323 {
1324         while((pos = next_post_remat(pos)) != NULL) {
1325                 op_t   *op;
1326
1327                 op = get_irn_link(pos);
1328                 assert(op->is_remat && !op->attr.remat.pre);
1329
1330                 if(op->attr.remat.remat->value == value)
1331                         return (ir_node*)pos;
1332
1333 #if 0
1334         const ir_edge_t *edge;
1335                 foreach_out_edge(pos, edge) {
1336                         ir_node   *proj = get_edge_src_irn(edge);
1337                         assert(is_Proj(proj));
1338                 }
1339 #endif
1340
1341         }
1342
1343         return NULL;
1344 }
1345
1346 static spill_t *
1347 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1348 {
1349         spill_bb_t  *spill_bb = get_irn_link(bb);
1350         spill_t     *spill,
1351                                  query;
1352         char         buf[256];
1353
1354         query.irn = irn;
1355         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1356         if(!spill) {
1357                 double   spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1358
1359                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1360
1361                 spill->reg_out = ILP_UNDEF;
1362                 spill->reg_in  = ILP_UNDEF;
1363                 spill->mem_in  = ILP_UNDEF;
1364
1365                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1366                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1367
1368                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1369                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1370         }
1371
1372         return spill;
1373 }
1374
1375 static void
1376 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1377 {
1378         ir_node        *irn;
1379         int i;
1380
1381         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
1382                 irn = be_lv_get_irn(si->lv, bb, i);
1383
1384                 if (has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1385                         pset_insert_ptr(live, irn);
1386                 }
1387         }
1388
1389         irn = sched_last(bb);
1390
1391         /* all values eaten by control flow operations are also live until the end of the block */
1392         sched_foreach_reverse(bb, irn) {
1393                 int  i;
1394
1395                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1396
1397                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1398                         ir_node *arg = get_irn_n(irn,i);
1399
1400                         if(has_reg_class(si, arg)) {
1401                                 pset_insert_ptr(live, arg);
1402                         }
1403                 }
1404         }
1405 }
1406
1407 /**
1408  *  Inserts ILP-constraints and variables for memory copying before the given position
1409  */
1410 static void
1411 insert_mem_copy_position(spill_ilp_t * si, pset * live, const ir_node * block)
1412 {
1413         const ir_node    *succ;
1414         const ir_edge_t  *edge;
1415         spill_bb_t       *spill_bb = get_irn_link(block);
1416         ir_node          *phi;
1417         int               pos;
1418         ilp_cst_t         cst;
1419         ilp_var_t         copyreg;
1420         char              buf[256];
1421         ir_node          *tmp;
1422
1423
1424         assert(edges_activated(current_ir_graph));
1425
1426         edge = get_block_succ_first(block);
1427         if(!edge) return;
1428
1429         succ = edge->src;
1430         pos = edge->pos;
1431
1432         edge = get_block_succ_next(block, edge);
1433         /* next block can only contain phis, if this is a merge edge */
1434         if(edge) return;
1435
1436         ir_snprintf(buf, sizeof(buf), "copyreg_%N", block);
1437         copyreg = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1438
1439         ir_snprintf(buf, sizeof(buf), "check_copyreg_%N", block);
1440         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1441
1442         pset_foreach(live, tmp) {
1443                 spill_t  *spill;
1444 #if 0
1445                 op_t  *op = get_irn_link(irn);
1446                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
1447 #endif
1448                 spill = set_find_spill(spill_bb->ilp, tmp);
1449                 assert(spill);
1450
1451                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1452         }
1453         lpp_set_factor_fast(si->lpp, cst, copyreg, 1.0);
1454
1455         sched_foreach(succ, phi) {
1456                 const ir_node  *to_copy;
1457                 op_t           *to_copy_op;
1458                 spill_t        *to_copy_spill;
1459                 op_t           *phi_op = get_irn_link(phi);
1460                 ilp_var_t       reload = ILP_UNDEF;
1461
1462
1463                 if(!is_Phi(phi)) break;
1464                 if(!has_reg_class(si, phi)) continue;
1465
1466                 to_copy = get_irn_n(phi, pos);
1467
1468                 to_copy_op = get_irn_link(to_copy);
1469
1470                 to_copy_spill = set_find_spill(spill_bb->ilp, to_copy);
1471                 assert(to_copy_spill);
1472
1473                 if(spill_bb->reloads) {
1474                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, to_copy);
1475
1476                         if(keyval) {
1477                                 reload = PTR_TO_INT(keyval->val);
1478                         }
1479                 }
1480
1481                 ir_snprintf(buf, sizeof(buf), "req_copy_%N_%N_%N", block, phi, to_copy);
1482                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1483
1484                 /* copy - reg_out - reload - remat - live_range <= 0 */
1485                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1486                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1487                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1488                 lpp_set_factor_fast(si->lpp, cst, to_copy_op->attr.live_range.ilp, -1.0);
1489                 foreach_pre_remat(si, block, tmp) {
1490                         op_t     *remat_op = get_irn_link(tmp);
1491                         if(remat_op->attr.remat.remat->value == to_copy) {
1492                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1493                         }
1494                 }
1495
1496                 ir_snprintf(buf, sizeof(buf), "copyreg_%N_%N_%N", block, phi, to_copy);
1497                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1498
1499                 /* copy - reg_out - copyreg <= 0 */
1500                 lpp_set_factor_fast(si->lpp, cst, phi_op->attr.live_range.args.copies[pos], 1.0);
1501                 lpp_set_factor_fast(si->lpp, cst, to_copy_spill->reg_out, -1.0);
1502                 lpp_set_factor_fast(si->lpp, cst, copyreg, -1.0);
1503         }
1504 }
1505
1506
1507 /**
1508  * Walk all irg blocks and emit this ILP
1509  */
1510 static void
1511 luke_blockwalker(ir_node * bb, void * data)
1512 {
1513         spill_ilp_t    *si = (spill_ilp_t*)data;
1514         ir_node        *irn;
1515         pset           *live;
1516         char            buf[256];
1517         ilp_cst_t       cst;
1518         spill_bb_t     *spill_bb = get_irn_link(bb);
1519         ir_node        *tmp;
1520         spill_t        *spill;
1521         pset           *defs = pset_new_ptr_default();
1522
1523
1524         live = pset_new_ptr_default();
1525
1526         /****************************************
1527          *      B A S I C  B L O C K  E N D
1528          ***************************************/
1529
1530
1531         /* init live values at end of block */
1532         get_live_end(si, bb, live);
1533
1534         pset_foreach(live, irn) {
1535                 op_t           *op;
1536                 ilp_var_t       reload = ILP_UNDEF;
1537
1538                 spill = set_find_spill(spill_bb->ilp, irn);
1539                 assert(spill);
1540
1541                 if(spill_bb->reloads) {
1542                         keyval_t *keyval = set_find_keyval(spill_bb->reloads, irn);
1543
1544                         if(keyval) {
1545                                 reload = PTR_TO_INT(keyval->val);
1546                         }
1547                 }
1548
1549                 op = get_irn_link(irn);
1550                 assert(!op->is_remat);
1551
1552                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1553                 op->attr.live_range.ilp = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1554                 op->attr.live_range.op = bb;
1555
1556                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1557                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1558
1559                 /* reg_out - reload - remat - live_range <= 0 */
1560                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1561                 if(reload != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1562                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1563                 foreach_pre_remat(si, bb, tmp) {
1564                         op_t     *remat_op = get_irn_link(tmp);
1565                         if(remat_op->attr.remat.remat->value == irn) {
1566                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1567                         }
1568                 }
1569                 /* maybe we should also assure that reg_out >= live_range etc. */
1570         }
1571
1572 #ifndef NO_MEMCOPIES
1573         insert_mem_copy_position(si, live, bb);
1574 #endif
1575
1576         /*
1577          * start new live ranges for values used by remats at end of block
1578          * and assure the remat args are available
1579          */
1580         foreach_pre_remat(si, bb, tmp) {
1581                 op_t     *remat_op = get_irn_link(tmp);
1582                 int       n;
1583
1584                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1585                         ir_node        *remat_arg = get_irn_n(tmp, n);
1586                         op_t           *arg_op = get_irn_link(remat_arg);
1587                         ilp_var_t       prev_lr;
1588
1589                         if(!has_reg_class(si, remat_arg)) continue;
1590
1591                         /* if value is becoming live through use by remat */
1592                         if(!pset_find_ptr(live, remat_arg)) {
1593                                 ir_snprintf(buf, sizeof(buf), "lr_%N_end%N", remat_arg, bb);
1594                                 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1595
1596                                 arg_op->attr.live_range.ilp = prev_lr;
1597                                 arg_op->attr.live_range.op = bb;
1598
1599                                 DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat at end of block %+F\n", remat_arg, tmp));
1600
1601                                 pset_insert_ptr(live, remat_arg);
1602                                 add_to_spill_bb(si, bb, remat_arg);
1603                         }
1604
1605                         /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1606                         ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1607                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1608
1609                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1610                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1611
1612                         /* use reload placed for this argument */
1613                         if(spill_bb->reloads) {
1614                                 keyval_t *keyval = set_find_keyval(spill_bb->reloads, remat_arg);
1615
1616                                 if(keyval) {
1617                                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
1618
1619                                         lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1620                                 }
1621                         }
1622                 }
1623         }
1624         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1625
1626
1627
1628
1629         /**************************************
1630          *    B A S I C  B L O C K  B O D Y
1631          **************************************/
1632
1633         sched_foreach_reverse_from(sched_block_last_noncf(si, bb), irn) {
1634                 op_t       *op;
1635                 op_t       *tmp_op;
1636                 int         n,
1637                                         u = 0,
1638                                         d = 0;
1639                 ilp_cst_t       check_pre,
1640                                         check_post;
1641                 set        *args;
1642                 pset       *used;
1643                 pset       *remat_defs;
1644                 keyval_t   *keyval;
1645
1646                 /* iterate only until first phi */
1647                 if(is_Phi(irn))
1648                         break;
1649
1650                 op = get_irn_link(irn);
1651                 /* skip remats */
1652                 if(op->is_remat) continue;
1653                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1654
1655                 /* collect defined values */
1656                 if(has_reg_class(si, irn)) {
1657                         pset_insert_ptr(defs, irn);
1658                 }
1659
1660                 /* skip projs */
1661                 if(is_Proj(irn)) continue;
1662
1663                 /*
1664                  * init set of irn's arguments
1665                  * and all possibly used values around this op
1666                  * and values defined by post remats
1667                  */
1668                 args =       new_set(cmp_keyval, get_irn_arity(irn));
1669                 used =       pset_new_ptr(pset_count(live) + get_irn_arity(irn));
1670                 remat_defs = pset_new_ptr(pset_count(live));
1671
1672                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1673                         ir_node        *irn_arg = get_irn_n(irn, n);
1674                         if(has_reg_class(si, irn_arg)) {
1675                                 set_insert_keyval(args, irn_arg, (void*)n);
1676                                 pset_insert_ptr(used, irn_arg);
1677                         }
1678                 }
1679                 foreach_post_remat(irn, tmp) {
1680                         op_t    *remat_op = get_irn_link(tmp);
1681
1682                         pset_insert_ptr(remat_defs, remat_op->attr.remat.remat->value);
1683
1684                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1685                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1686                                 if(has_reg_class(si, remat_arg)) {
1687                                         pset_insert_ptr(used, remat_arg);
1688                                 }
1689                         }
1690                 }
1691                 foreach_pre_remat(si, irn, tmp) {
1692                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1693                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1694                                 if(has_reg_class(si, remat_arg)) {
1695                                         pset_insert_ptr(used, remat_arg);
1696                                 }
1697                         }
1698                 }
1699
1700                 /**********************************
1701                  *   I N  E P I L O G  O F  irn
1702                  **********************************/
1703
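                /*
                 * Notation used in the constraint comments below:
                 *   L  = values live behind irn (pset `live'),
                 *   U  = arguments of irn (set `args'),
                 *   U' = U plus the arguments of pre and post remats (pset `used'),
                 *   D  = values defined by irn and its Projs (pset `defs'),
                 *   k  = number of available registers (si->n_regs).
                 */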
1704                 /* ensure each dying value is used by only one post remat */
1705                 pset_foreach(used, tmp) {
1706                         ir_node     *value = tmp;
1707                         op_t        *value_op = get_irn_link(value);
1708                         ir_node     *remat;
1709                         int          n_remats = 0;
1710
1711                         cst = ILP_UNDEF;
1712                         foreach_post_remat(irn, remat) {
1713                                 op_t  *remat_op = get_irn_link(remat);
1714
1715                                 for(n=get_irn_arity(remat)-1; n>=0; --n) {
1716                                         ir_node   *remat_arg = get_irn_n(remat, n);
1717
1718                                         /* if value is used by this remat add it to constraint */
1719                                         if(remat_arg == value) {
1720                                                 if(n_remats == 0) {
1721                                                         /* sum remat2s <= 1 + n_remats*live_range */
1722                                                         ir_snprintf(buf, sizeof(buf), "dying_lr_%N_%N", value, irn);
1723                                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1.0);
1724                                                 }
1725
1726                                                 n_remats++;
1727                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1728                                                 break;
1729                                         }
1730                                 }
1731                         }
1732
1733                         /* only add the live range term if the value is still live here, i.e. value_op->attr.live_range.ilp is valid */
1734                         if(pset_find_ptr(live, value) && cst != ILP_UNDEF) {
1735                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, -n_remats);
1736                         }
1737                 }
1738
1739         /* ensure at least one value dies at post remat */
1740         foreach_post_remat(irn, tmp) {
1741             op_t     *remat_op = get_irn_link(tmp);
1742             pset     *remat_args = pset_new_ptr(get_irn_arity(tmp));
1743             ir_node  *remat_arg;
1744
1745             for(n=get_irn_arity(tmp)-1; n>=0; --n) {
1746                 remat_arg = get_irn_n(tmp, n);
1747
1748                 if(has_reg_class(si, remat_arg)) {
1749
1750                     /* does arg always die at this op? */
1751                     if(!pset_find_ptr(live, remat_arg))
1752                         goto skip_one_must_die;
1753
1754                     pset_insert_ptr(remat_args, remat_arg);
1755                 }
1756             }
1757
1758             /* remat + \sum live_range(remat_arg) <= |args| */
1759             ir_snprintf(buf, sizeof(buf), "one_must_die_%+F", tmp);
1760             cst = lpp_add_cst(si->lpp, buf, lpp_less, pset_count(remat_args));
1761             lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1762
1763             pset_foreach(remat_args, remat_arg) {
1764                 op_t  *arg_op = get_irn_link(remat_arg);
1765
1766                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1767             }
1768
1769 skip_one_must_die:
1770             del_pset(remat_args);
1771         }
1772
1773                 /* new live ranges for values from L\U defined by post remats */
1774                 pset_foreach(live, tmp) {
1775                         ir_node     *value = tmp;
1776                         op_t        *value_op = get_irn_link(value);
1777
1778                         if(!set_find_keyval(args, value) && !pset_find_ptr(defs, value)) {
1779                                 ilp_var_t    prev_lr = ILP_UNDEF;
1780                                 ir_node     *remat;
1781
1782                                 if(pset_find_ptr(remat_defs, value)) {
1783
1784                                         /* next_live_range <= prev_live_range + sum remat2s */
1785                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1786                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1787
1788                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1789                                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1790
1791                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1792                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1793
1794                                         foreach_post_remat(irn, remat) {
1795                                                 op_t        *remat_op = get_irn_link(remat);
1796
1797                                                 /* if value is being rematerialized by this remat */
1798                                                 if(value == remat_op->attr.remat.remat->value) {
1799                                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1800                                                 }
1801                                         }
1802
1803                                         value_op->attr.live_range.ilp = prev_lr;
1804                                         value_op->attr.live_range.op = irn;
1805                                 }
1806                         }
1807                 }
1808
1809                 /* requirements for post remats and start live ranges from L/U' for values dying here */
1810                 foreach_post_remat(irn, tmp) {
1811                         op_t        *remat_op = get_irn_link(tmp);
1812                         int          n;
1813
1814                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1815                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1816                                 op_t           *arg_op = get_irn_link(remat_arg);
1817
1818                                 if(!has_reg_class(si, remat_arg)) continue;
1819
1820                                 /* only for values in L\U (TODO and D?), the others are handled with post_use */
1821                                 if(!pset_find_ptr(used, remat_arg)) {
1822                                         /* remat <= live_range(remat_arg) */
1823                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
1824                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1825
1826                                         /* if value is becoming live through use by remat2 */
1827                                         if(!pset_find_ptr(live, remat_arg)) {
1828                                                 ilp_var_t     lr;
1829
1830                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1831                                                 lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1832
1833                                                 arg_op->attr.live_range.ilp = lr;
1834                                                 arg_op->attr.live_range.op = irn;
1835
1836                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
1837
1838                                                 pset_insert_ptr(live, remat_arg);
1839                                                 add_to_spill_bb(si, bb, remat_arg);
1840                                         }
1841
1842                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1843                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1844                                 }
1845                         }
1846                 }
1847
1848                 d = pset_count(defs);
1849                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
1850
1851                 /* count how many regs irn needs for arguments */
1852                 u = set_count(args);
1853
1854
1855                 /* check the register pressure in the epilog */
1856                 /* sum_{L\U'} lr + sum_{U'} post_use <= k - |D| */
1857                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
1858                 check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
1859
1860                 /* add L\U' to check_post */
1861                 pset_foreach(live, tmp) {
1862                         if(!pset_find_ptr(used, tmp) && !pset_find_ptr(defs, tmp)) {
1863                                 /* if a live value is not used by irn */
1864                                 tmp_op = get_irn_link(tmp);
1865                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
1866                         }
1867                 }
1868
1869                 /***********************************************************
1870                  *  I T E R A T I O N  O V E R  U S E S  F O R  E P I L O G
1871                  **********************************************************/
1872
1873
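                /*
                 * For every value in U' a fresh live-range variable (prev_lr)
                 * is created for the point above irn, and a post_use variable
                 * models whether the value still occupies a register behind
                 * irn (post_use >= next_lr, and post_use >= remat for every
                 * post remat using the value).
                 */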
1874                 pset_foreach(used, tmp) {
1875                         ilp_var_t       prev_lr;
1876                         ilp_var_t       post_use;
1877                         int             p = 0;
1878                         spill_t        *spill;
1879                         ir_node        *arg = tmp;
1880                         op_t           *arg_op = get_irn_link(arg);
1881                         ir_node        *remat;
1882
1883                         spill = add_to_spill_bb(si, bb, arg);
1884
1885                         /* new live range for each used value */
1886                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
1887                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1888
1889                         /* the epilog stuff - including post_use, check_post, check_post_remat */
1890                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
1891                         post_use = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1892
1893                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
1894
1895                         /* arg is live throughout epilog if the next live_range is in a register */
1896                         if(pset_find_ptr(live, arg)) {
1897                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
1898
1899                                 /* post_use >= next_lr + remat */
1900                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1901                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1902                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
1903                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1904
1905                         }
1906
1907                         /* if value is not an arg of op and not possibly defined by post remat
1908                          * then it may only die and not become live
1909                          */
1910                         if(!set_find_keyval(args, arg)) {
1911                                 /* post_use <= prev_lr */
1912                                 ir_snprintf(buf, sizeof(buf), "req_post_use_%N_%N", arg, irn);
1913                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1914                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1915                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1916
1917                                 if(!pset_find_ptr(remat_defs, arg) && pset_find_ptr(live, arg)) {
1918                                         /* next_lr <= prev_lr */
1919                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", arg, irn);
1920                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1921                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1922                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1923                                 }
1924                         }
1925
1926
1927
1928                         /* forall post remat which use arg add a similar cst */
1929                         foreach_post_remat(irn, remat) {
1930                                 int      n;
1931
1932                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
1933                                         ir_node    *remat_arg = get_irn_n(remat, n);
1934                                         op_t       *remat_op = get_irn_link(remat);
1935
1936                                         if(remat_arg == arg) {
1937                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
1938
1939                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1940                                                 cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
1941                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1942                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1943                                         }
1944                                 }
1945                         }
1946
1947                         /* new live range begins for each used value */
1948                         arg_op->attr.live_range.ilp = prev_lr;
1949                         arg_op->attr.live_range.op = irn;
1950
1951                         /*if(!pset_find_ptr(live, arg)) {
1952                                 pset_insert_ptr(live, arg);
1953                                 add_to_spill_bb(si, bb, arg);
1954                         }*/
1955                         pset_insert_ptr(live, arg);
1956
1957                 }
1958
1959                 /* just to be sure */
1960                 check_post = ILP_UNDEF;
1961
1962
1963
1964
1965                 /******************
1966                  *   P R O L O G
1967                  ******************/
1968
1969                 /* check the register pressure in the prolog */
1970                 /* sum_{L\U} lr <= k - |U| */
1971                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
1972                 check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - u);
1973
1974                 /* for the prolog remove defined values from the live set */
1975                 pset_foreach(defs, tmp) {
1976                         pset_remove_ptr(live, tmp);
1977                 }
1978
1979                 /***********************************************************
1980                  *  I T E R A T I O N  O V E R  A R G S  F O R  P R O L O G
1981                  **********************************************************/
1982
1983
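                /*
                 * Every argument gets a reload variable priced with
                 * COST_LOAD * execution_frequency(bb); the req_ constraint
                 * below demands that the argument is available in exactly one
                 * way: previous live range, reload or a pre remat recomputing
                 * it.
                 */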
1984                 set_foreach(args, keyval) {
1985                         spill_t        *spill;
1986                         ir_node        *arg = keyval->key;
1987                         int             i = PTR_TO_INT(keyval->val);
1988                         op_t           *arg_op = get_irn_link(arg);
1989
1990                         spill = set_find_spill(spill_bb->ilp, arg);
1991                         assert(spill);
1992
1993                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
1994                         op->attr.live_range.args.reloads[i] = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1995
1996                         /* reload <= mem_out */
1997                         ir_snprintf(buf, sizeof(buf), "req_reload_%N_%N", arg, irn);
1998                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1999                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
2000                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
2001
2002                         /* requirement: arg must be in register for use */
2003                         /* reload + remat + live_range == 1 */
2004                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
2005                         cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
2006
2007                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
2008                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
2009                         foreach_pre_remat(si, irn, tmp) {
2010                                 op_t     *remat_op = get_irn_link(tmp);
2011                                 if(remat_op->attr.remat.remat->value == arg) {
2012                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2013                                 }
2014                         }
2015                 }
2016
2017                 /* iterate over L\U */
2018                 pset_foreach(live, tmp) {
2019                         if(!set_find_keyval(args, tmp)) {
2020                                 /* if a live value is not used by irn */
2021                                 tmp_op = get_irn_link(tmp);
2022                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
2023                         }
2024                 }
2025
2026
2027                 /* requirements for remats */
2028                 /* start new live ranges for values used by remats */
2029                 foreach_pre_remat(si, irn, tmp) {
2030                         op_t        *remat_op = get_irn_link(tmp);
2031                         int          n;
2032
2033                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2034                                 ir_node        *remat_arg = get_irn_n(tmp, n);
2035                                 op_t           *arg_op = get_irn_link(remat_arg);
2036                                 ilp_var_t       prev_lr;
2037
2038                                 if(!has_reg_class(si, remat_arg)) continue;
2039
2040                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
2041                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
2042                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2043
2044                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2045                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
2046
2047                                 /* if remat arg is also used by current op then we can use reload placed for this argument */
2048                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
2049                                         int    index = PTR_TO_INT(keyval->val);
2050
2051                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
2052                                 }
2053                         }
2054                 }
2055
2056
2057
2058
2059                 /*************************
2060                  *  D O N E  W I T H  O P
2061                  *************************/
2062
2063                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
2064
2065                 pset_foreach(live, tmp) {
2066                         assert(has_reg_class(si, tmp));
2067                 }
2068
2069                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2070                         ir_node        *arg = get_irn_n(irn, n);
2071
2072                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
2073                 }
2074
2075                 del_pset(remat_defs);
2076                 del_pset(used);
2077                 del_set(args);
2078                 del_pset(defs);
2079                 defs = pset_new_ptr_default();
2080         }
2081
2082
2083
2084         /***************************************
2085          *   B E G I N N I N G  O F  B L O C K
2086          ***************************************/
2087
2088
2089         /* we are now at the beginning of the basic block, there are only \Phis in front of us */
2090         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
2091
2092         pset_foreach(live, irn) {
2093                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
2094         }
2095
2096         /* construct mem_outs for all values */
2097
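        /*
         * mem_out <= spill + mem_in: a value can only leave the block in
         * memory if it is spilled here or already arrives in memory.  For a
         * phi defined in this block, mem_in in turn requires the respective
         * argument to be in memory at the end of the predecessor or an
         * explicit copy, priced with COST_STORE times the summed execution
         * frequency of the predecessors passing that argument.
         */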
2098         set_foreach(spill_bb->ilp, spill) {
2099                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
2100                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2101
2102                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
2103                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
2104
2105                 if(pset_find_ptr(live, spill->irn)) {
2106                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
2107
2108                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
2109                         spill->mem_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2110                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2111
2112                         if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2113                                 int   n;
2114                                 op_t *op = get_irn_link(spill->irn);
2115
2116                                 /* do we have to copy a phi argument? */
2117                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2118                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
2119
2120                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
2121                                         const ir_node  *arg = get_irn_n(spill->irn, n);
2122                                         double          freq=0.0;
2123                                         int             m;
2124                                         ilp_var_t       var;
2125
2126
2127                                         /* argument already done? */
2128                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
2129
2130                                         /* get sum of execution frequencies of blocks with the same phi argument */
2131                                         for(m=n; m>=0; --m) {
2132                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2133
2134                                                 if(arg==arg2) {
2135                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
2136                                                 }
2137                                         }
2138
2139                                         /* copies are not for free */
2140                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
2141                                         var = lpp_add_var(si->lpp, buf, lpp_binary, COST_STORE * freq);
2142
2143                                         for(m=n; m>=0; --m) {
2144                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
2145
2146                                                 if(arg==arg2) {
2147                                                         op->attr.live_range.args.copies[m] = var;
2148                                                 }
2149                                         }
2150
2151                                         /* copy <= mem_in */
2152                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
2153                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2154                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
2155                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
2156                                 }
2157                         }
2158                 }
2159         }
2160
2161
2162         /* L\U is empty at bb start */
2163         /* arg is live throughout epilog if it is reg_in into this block */
2164
2165         /* check the register pressure at the beginning of the block
2166          * including remats
2167          */
2168         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
2169         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
2170
2171         pset_foreach(live, irn) {
2172         ilp_cst_t  nospill;
2173
2174                 spill = set_find_spill(spill_bb->ilp, irn);
2175                 assert(spill);
2176
2177                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
2178                 spill->reg_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2179
2180                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
2181
2182                 /* spill + mem_in <= 1 */
2183                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
2184                 nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2185
2186                 lpp_set_factor_fast(si->lpp, nospill, spill->mem_in, 1.0);
2187                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
2188
2189         }
2190         foreach_post_remat(bb, irn) {
2191                 op_t     *remat_op = get_irn_link(irn);
2192
2193                 DBG((si->dbg, LEVEL_4, "\t  next post remat: %+F\n", irn));
2194                 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
2195
2196                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2197         }
2198
2199         /* forall post remats add requirements */
2200         foreach_post_remat(bb, tmp) {
2201                 int         n;
2202
2203                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2204                         ir_node    *remat_arg = get_irn_n(tmp, n);
2205                         op_t       *remat_op = get_irn_link(tmp);
2206
2207                         if(!has_reg_class(si, remat_arg)) continue;
2208
2209                         spill = set_find_spill(spill_bb->ilp, remat_arg);
2210                         assert(spill);
2211
2212                         /* remat <= reg_in_argument */
2213                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
2214                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2215                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2216                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
2217                 }
2218         }
2219
2220         /* mem_in/reg_in for live_in values, especially phis and their arguments */
2221         pset_foreach(live, irn) {
2222                 int          p = 0,
2223                                          n;
2224
2225                 spill = set_find_spill(spill_bb->ilp, irn);
2226                 assert(spill && spill->irn == irn);
2227
2228                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
2229                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
2230                                 ilp_cst_t       mem_in,
2231                                                                 reg_in;
2232                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
2233                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2234                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2235                                 spill_t        *spill_p;
2236                                 op_t           *op = get_irn_link(irn);
2237
2238                                 /* although the phi is in the right register class, one or more of
2239                                  * its arguments may be in a different one or may have to be
2240                                  * ignored
2241                                  */
2242                                 if(has_reg_class(si, phi_arg)) {
2243                                         /* mem_in <= mem_out_arg + copy */
2244                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2245                                         mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2246
2247                                         /* reg_in <= reg_out_arg */
2248                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2249                                         reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2250
2251                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2252                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2253
2254                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
2255                                         assert(spill_p);
2256
2257                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2258                                         lpp_set_factor_fast(si->lpp, mem_in, op->attr.live_range.args.copies[n], -1.0);
2259                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2260                                 }
2261                         }
2262                 } else {
2263                         /* else assure the value arrives on all paths in the same resource */
2264
2265                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
2266                                 ilp_cst_t       mem_in,
2267                                                                 reg_in;
2268                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
2269                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
2270                                 spill_t        *spill_p;
2271
2272                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
2273                                 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2274                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
2275                                 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2276
2277                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
2278                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
2279
2280                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
2281                                 assert(spill_p);
2282
2283                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
2284                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2285                         }
2286                 }
2287         }
2288
2289         /* first live ranges from reg_ins */
2290         pset_foreach(live, irn) {
2291                 op_t      *op = get_irn_link(irn);
2292
2293                 spill = set_find_spill(spill_bb->ilp, irn);
2294                 assert(spill && spill->irn == irn);
2295
2296                 ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2297                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2298                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2299                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2300
2301                 foreach_post_remat(bb, tmp) {
2302                         op_t     *remat_op = get_irn_link(tmp);
2303
2304                         if(remat_op->attr.remat.remat->value == irn) {
2305                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2306                         }
2307                 }
2308         }
2309
2310         /* walk forward now and compute constraints for placing spills */
2311         /* this must only be done for values that are not defined in this block */
2312         /* TODO are these values at start of block? if yes, just check whether this is a diverge edge and skip the loop */
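        /*
         * The constraint emitted below is
         *    spill <= reg_in (on diverge edges only)
         *           + \sum remats of the value before its first real use,
         * i.e. spilling a live-in value here requires it to be rematerialized
         * into a register first, or to arrive in one over a diverge edge.
         */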
2313         pset_foreach(live, irn) {
2314                 /*
2315                  * if value is defined in this block we can always place the spill directly after the def
2316                  *    -> no constraint necessary
2317                  */
2318                 if(!is_Phi(irn) && get_nodes_block(irn) == bb) continue;
2319
2320
2321                 spill = set_find_spill(spill_bb->ilp, irn);
2322                 assert(spill);
2323
2324                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2325                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2326
2327                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2328                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2329
2330                 if(!is_Phi(irn)) {
2331                         sched_foreach_op(bb, tmp) {
2332                                 op_t   *op = get_irn_link(tmp);
2333
2334                                 if(is_Phi(tmp)) continue;
2335                                 assert(!is_Proj(tmp));
2336
2337                                 if(op->is_remat) {
2338                                         ir_node   *value = op->attr.remat.remat->value;
2339
2340                                         if(value == irn) {
2341                                                 /* only collect remats up to the first real use of a value */
2342                                                 lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2343                                         }
2344                                 } else {
2345                                         int   n;
2346
2347                                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2348                                                 ir_node    *arg = get_irn_n(tmp, n);
2349
2350                                                 if(arg == irn) {
2351                                                         /* if a value is used stop collecting remats */
2352                             goto next_live;
2353                                                 }
2354                                         }
2355                                 }
2356                         }
2357                 }
2358 next_live:         ;
2359         }
2360
2361         del_pset(live);
2362 }
2363
2364 typedef struct _irnlist_t {
2365         struct list_head   list;
2366         ir_node           *irn;
2367 } irnlist_t;
2368
2369 typedef struct _interference_t {
2370         struct list_head    blocklist;
2371         ir_node            *a;
2372         ir_node            *b;
2373 } interference_t;
2374
2375 static int
2376 cmp_interference(const void *a, const void *b, size_t size)
2377 {
2378         const interference_t *p = a;
2379         const interference_t *q = b;
2380
2381         return !(p->a == q->a && p->b == q->b);
2382 }
2383
2384 static interference_t *
2385 set_find_interference(set * set, ir_node * a, ir_node * b)
2386 {
2387         interference_t     query;
2388
2389         query.a = (a>b)?a:b;
2390         query.b = (a>b)?b:a;
2391
2392         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2393 }
2394
2395 static interference_t *
2396 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2397 {
2398         interference_t     query,
2399                                           *result;
2400         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2401
2402         list->irn = bb;
2403
2404         result = set_find_interference(set, a, b);
2405         if(result) {
2406
2407                 list_add(&list->list, &result->blocklist);
2408                 return result;
2409         }
2410
2411         query.a = (a>b)?a:b;
2412         query.b = (a>b)?b:a;
2413
2414         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2415
2416         INIT_LIST_HEAD(&result->blocklist);
2417         list_add(&list->list, &result->blocklist);
2418
2419         return result;
2420 }
2421
2422 static int
2423 values_interfere_in_block(const spill_ilp_t * si, ir_node * bb, ir_node * a, ir_node * b)
2424 {
2425         const ir_edge_t *edge;
2426
2427         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2428                 /* both values are live in, so they interfere */
2429                 return 1;
2430         }
2431
2432         /* ensure a dominates b */
2433         if(value_dominates(b,a)) {
2434                 const ir_node * t;
2435                 t = b;
2436                 b = a;
2437                 a = t;
2438         }
2439         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2440
2441
2442         /* the following code is stolen from bera.c */
2443         if(be_is_live_end(si->lv, bb, a))
2444                 return 1;
2445
2446         foreach_out_edge(a, edge) {
2447                 const ir_node *user = edge->src;
2448                 if(get_nodes_block(user) == bb
2449                                 && !is_Phi(user)
2450                                 && b != user
2451                                 && value_dominates(b, user))
2452                         return 1;
2453         }
2454
2455         return 0;
2456 }
2457
2458 /**
2459  * Walk all irg blocks and collect interfering values inside of phi classes
2460  */
2461 static void
2462 luke_interferencewalker(ir_node * bb, void * data)
2463 {
2464         spill_ilp_t    *si = (spill_ilp_t*)data;
2465         int             l1, l2;
2466
2467         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
2468                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
2469                 op_t           *a_op = get_irn_link(a);
2470
2471                 if(a_op->is_remat) continue;
2472
2473                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2474                 if (has_reg_class(si, a) && get_phi_class(a)) {
2475                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
2476                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
2477                                 op_t           *b_op = get_irn_link(b);
2478
2479                                 if(b_op->is_remat) continue;
2480
2481                                 /* a and b are only interesting if they are in the same phi class */
2482                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2483                                         if(values_interfere_in_block(si, bb, a, b)) {
2484                                                 DBG((si->dbg, LEVEL_4, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2485                                                 set_insert_interference(si, si->interferences, a, b, bb);
2486                                         }
2487                                 }
2488                         }
2489                 }
2490         }
2491 }
2492
2493 static unsigned int copy_path_id = 0;
2494
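/**
 * Emit the constraint  any_interfere <= \sum copies  for one path of copies:
 * if the two phi class members interfere in memory, at least one copy along
 * this path has to be taken.
 */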
2495 static void
2496 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2497 {
2498         ilp_cst_t  cst;
2499         ilp_var_t  copy;
2500         char       buf[256];
2501         void      *ptr;
2502
2503         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2504         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2505
2506         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2507
2508         pset_foreach(copies, ptr) {
2509                 copy = PTR_TO_INT(ptr);
2510                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2511         }
2512 }
2513
2514 /**
2515  * @param copies   contains a path of copies which leads us to irn
2516  * @param visited  contains a set of nodes already visited on this path
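 * @return  the number of copy paths found; used to bound the search via MAX_PATHS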
2517  */
2518 static int
2519 find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2520 {
2521         ir_edge_t *edge;
2522         op_t      *op = get_irn_link(irn);
2523     pset      *visited_users = pset_new_ptr_default();
2524         int        paths = 0;
2525
2526         if(op->is_remat) { del_pset(visited_users); return 0; }
2527
2528         pset_insert_ptr(visited, irn);
2529
2530         if(is_Phi(irn)) {
2531                 int    n;
2532         pset  *visited_operands = pset_new_ptr(get_irn_arity(irn));
2533
2534                 /* visit all operands */
2535                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2536                         ir_node  *arg = get_irn_n(irn, n);
2537                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2538
2539                         if(!has_reg_class(si, arg)) continue;
2540             if(pset_find_ptr(visited_operands, arg)) continue;
2541             pset_insert_ptr(visited_operands, arg);
2542
2543                         if(arg == target) {
2544                                 if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2545                                         del_pset(visited_operands);
2546                                         del_pset(visited_users);
2547                                         pset_remove_ptr(visited, irn);
2548                                         return paths;
2549                                 }
2550                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2551                                 write_copy_path_cst(si, copies, any_interfere);
2552                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2553                         } else if(!pset_find_ptr(visited, arg)) {
2554                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2555                                 paths += find_copy_path(si, arg, target, any_interfere, copies, visited);
2556                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2557
2558                 /*if(paths > MAX_PATHS) {
2559                     if(pset_count(copies) == 0) {
2560                         ilp_cst_t  cst;
2561                         char       buf[256];
2562
2563                         ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
2564                         cst = lpp_add_cst(si->lpp, buf, lpp_equal, 0);
2565                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2566                         lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
2567                         DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
2568
2569                         paths = 0;
2570                     } else {
2571                         del_pset(visited_operands);
2572                         del_pset(visited_users);
2573                         pset_remove_ptr(visited, irn);
2574                         return paths;
2575                     }
2576                 } else if(pset_count(copies) == 0) {
2577                                         paths = 0;
2578                                 }*/
2579                         }
2580                 }
2581
2582         del_pset(visited_operands);
2583         }
2584
2585         /* visit all uses which are phis */
2586         foreach_out_edge(irn, edge) {
2587                 ir_node  *user = edge->src;
2588                 int       pos  = edge->pos;
2589                 op_t     *op = get_irn_link(user);
2590                 ilp_var_t copy;
2591
2592                 if(!is_Phi(user)) continue;
2593                 if(!has_reg_class(si, user)) continue;
2594         if(pset_find_ptr(visited_users, user)) continue;
2595         pset_insert_ptr(visited_users, user);
2596
2597                 copy = op->attr.live_range.args.copies[pos];
2598
2599                 if(user == target) {
2600                         if(++paths > MAX_PATHS && pset_count(copies) != 0) {
2601                                 del_pset(visited_users);
2602                                 pset_remove_ptr(visited, irn);
2603                                 return paths;
2604                         }
2605                         pset_insert(copies, INT_TO_PTR(copy), copy);
2606                         write_copy_path_cst(si, copies, any_interfere);
2607                         pset_remove(copies, INT_TO_PTR(copy), copy);
2608                 } else if(!pset_find_ptr(visited, user)) {
2609                         pset_insert(copies, INT_TO_PTR(copy), copy);
2610                         paths += find_copy_path(si, user, target, any_interfere, copies, visited);
2611                         pset_remove(copies, INT_TO_PTR(copy), copy);
2612
2613             /*if(paths > MAX_PATHS) {
2614                 if(pset_count(copies) == 0) {
2615                     ilp_cst_t  cst;
2616                     char       buf[256];
2617
2618                     ir_snprintf(buf, sizeof(buf), "always_copy-%d-%d", any_interfere, copy);
2619                     cst = lpp_add_cst(si->lpp, buf, lpp_equal, 0);
2620                     lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2621                     lpp_set_factor_fast(si->lpp, cst, copy, 1.0);
2622                     DBG((si->dbg, LEVEL_1, "ALWAYS COPYING %d FOR INTERFERENCE %d\n", copy, any_interfere));
2623
2624                     paths = 0;
2625                 } else {
2626                     del_pset(visited_users);
2627                     pset_remove_ptr(visited, irn);
2628                     return paths;
2629                 }
2630             } else if(pset_count(copies) == 0) {
2631                                 paths = 0;
2632                         }*/
2633                 }
2634         }
2635
2636     del_pset(visited_users);
2637         pset_remove_ptr(visited, irn);
2638         return paths;
2639 }
2640
2641 static void
2642 gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
2643 {
2644         pset * copies = pset_new_ptr_default();
2645         pset * visited = pset_new_ptr_default();
2646
2647         find_copy_path(si, a, b, any_interfere, copies, visited);
2648
2649         del_pset(visited);
2650         del_pset(copies);
2651 }
2652
2653
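/**
 * Model interference of phi class members in memory: for every pair of
 * interfering values a binary any_interfere variable is created and coupled
 * with per-block interfere variables (any_interfere <= \sum interfere, see
 * the constraints below).
 */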
2654 static void
2655 memcopyhandler(spill_ilp_t * si)
2656 {
2657         interference_t   *interference;
2658         char              buf[256];
2659         /* test memory values for interference */
2660
2661         /* analyze phi classes */
2662         phi_class_compute(si->chordal_env->irg);
2663
2664         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
2665         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
2666
2667         /* now let's emit the ILP inequalities for these interferences */
2668         set_foreach(si->interferences, interference) {
2669                 irnlist_t      *irnlist;
2670                 ilp_var_t       interfere,
2671                                                 any_interfere;
2672                 ilp_cst_t       any_interfere_cst,
2673                                                 cst;
2674                 const ir_node  *a  = interference->a;
2675                 const ir_node  *b  = interference->b;
2676
2677                 /* any_interf <= \sum interf */
2678                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
2679                 any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2680                 any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2681
2682                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
2683
2684                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
2685                         const ir_node  *bb = irnlist->irn;
2686                         spill_bb_t     *spill_bb = get_irn_link(bb);
2687                         spill_t        *spilla,
2688                                                    *spillb;
2690                         char           buf[256];
2691
2693                         spilla = set_find_spill(spill_bb->ilp, a);
2694                         assert(spilla);
2695
2697                         spillb = set_find_spill(spill_bb->ilp, b);
2698                         assert(spillb);
2699
2700                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
2701                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
2702                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
2703                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
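                             /* (1) is the lower bound interfere >= mem_in_a + mem_in_b + spill_a + spill_b - 1, */
                             /* (2) and (3) are the upper bounds interfere <= mem_in_a + spill_a resp.           */
                             /* interfere <= mem_in_b + spill_b                                                  */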
2704                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
2705                         interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2706
2707                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
2708                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2709
2710                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
2711                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
2712                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
2713                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
2714                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
2715
2716                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
2717                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2718
2719                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2720                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
2721                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
2722
2723                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
2724                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2725
2726                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2727                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
2728                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
2729
2730
2731                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
2732
2733                         /* any_interfere >= interf */
2734                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
2735                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2736
2737                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2738                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2739                 }
2740
2741                 /* now that we know whether the two values interfere in memory we can add the constraints that enforce copies */
2742                 gen_copy_constraints(si,a,b,any_interfere);
2743         }
2744 }
2745
2746
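/** Check whether an ILP solution value is (numerically) zero. */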
2747 static INLINE int
2748 is_zero(double x)
2749 {
2750         return fabs(x) < 0.00001;
2751 }
2752
2753 #ifdef KEEPALIVE
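/**
 * Node attribute hook for the vcg dumper: colour all inserted remat nodes
 * (red/orange for ordinary remats, cyan/lightcyan for inverse remats) and
 * annotate them with the value they rematerialize.
 */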
2754 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
2755 {
2756         spill_ilp_t *si = get_irg_link(current_ir_graph);
2757
2758         if(pset_find_ptr(si->all_possible_remats, n)) {
2759                 op_t   *op = (op_t*)get_irn_link(n);
2760                 assert(op && op->is_remat);
2761
2762                 if(!op->attr.remat.remat->inverse) {
2763                         if(op->attr.remat.pre) {
2764                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
2765                         } else {
2766                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
2767                         }
2768
2769                         return 1;
2770                 } else {
2773
2774                         if(op->attr.remat.pre) {
2775                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
2776                         } else {
2777                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
2778                         }
2779
2780                         return 1;
2781                 }
2782         }
2783
2784         return 0;
2785 }
2786
2787 static void
2788 dump_graph_with_remats(ir_graph * irg, const char * suffix)
2789 {
2790         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
2791         be_dump(irg, suffix, dump_ir_block_graph_sched);
2792         set_dump_node_vcgattr_hook(NULL);
2793 }
2794 #endif
2795
2796 /**
2797  * Edge hook to dump the schedule edges with annotated register pressure.
2798  */
2799 static int
2800 sched_pressure_edge_hook(FILE *F, ir_node *irn)
2801 {
2802         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
2803                 ir_node *prev = sched_prev(irn);
2804                 fprintf(F, "edge:{sourcename:\"");
2805                 PRINT_NODEID(irn);
2806                 fprintf(F, "\" targetname:\"");
2807                 PRINT_NODEID(prev);
2808                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
2809                 fprintf(F, "\" color:magenta}\n");
2810         }
2811         return 1;
2812 }
2813
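/**
 * Dump the block graph with schedule edges labelled with the register
 * pressure stored in the nodes' link fields (see walker_pressure_annotator).
 */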
2814 static void
2815 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
2816 {
2817         DUMP_NODE_EDGE_FUNC old_edge_hook = get_dump_node_edge_hook();
2818
2819         dump_consts_local(0);
2820         set_dump_node_edge_hook(sched_pressure_edge_hook);
2821         dump_ir_block_graph(irg, suffix);
2822         set_dump_node_edge_hook(old_edge_hook);
2823 }
2824
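/**
 * Walk the schedule of a block in reverse and store the register pressure of
 * the current class (number of live values) in the link field of each node.
 */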
2825 static void
2826 walker_pressure_annotator(ir_node * bb, void * data)
2827 {
2828         spill_ilp_t  *si = data;
2829         ir_node      *irn;
2830         int           n, i;
2831         pset         *live = pset_new_ptr_default();
2832         int           projs = 0;
2833
2834         be_lv_foreach(si->lv, bb, be_lv_state_end, i) {
2835                 irn = be_lv_get_irn(si->lv, bb, i);
2836
2837                 if (has_reg_class(si, irn)) {
2838                         pset_insert_ptr(live, irn);
2839                 }
2840         }
2841
2842         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
2843
2844         sched_foreach_reverse(bb, irn) {
2845                 if(is_Phi(irn)) {
2846                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
2847                         continue;
2848                 }
2849
2850                 if(has_reg_class(si, irn)) {
2851                         pset_remove_ptr(live, irn);
2852                         if(is_Proj(irn)) ++projs;
2853                 }
2854
2855                 if(!is_Proj(irn)) projs = 0;
2856
2857                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2858                         ir_node    *arg = get_irn_n(irn, n);
2859
2860                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
2861                 }
2862                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
2863         }
2864
2865         del_pset(live);
2866 }
2867
2868 static void
2869 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
2870 {
2871         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
2872 }
2873
2874 #ifdef KEEPALIVE
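/**
 * Attach all inserted remats to a be_Keep in the end block so that they
 * survive until the ILP solution has been applied.
 */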
2875 static void
2876 connect_all_remats_with_keep(spill_ilp_t * si)
2877 {
2878         ir_node   *irn;
2879         ir_node  **ins,
2880                          **pos;
2881         int        n_remats;
2882
2883
2884         n_remats = pset_count(si->all_possible_remats);
2885         if(n_remats) {
2886                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
2887
2888                 pos = ins;
2889                 pset_foreach(si->all_possible_remats, irn) {
2890                         *pos = irn;
2891                         ++pos;
2892                 }
2893
2894                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
2895
2896                 obstack_free(si->obst, ins);
2897         }
2898 }
2899 #endif
2900
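/**
 * Attach all spills collected in si->spills to a be_Keep in the end block
 * (si->spills is only populated when KEEPALIVE_SPILLS/KEEPALIVE_RELOADS are
 * enabled).
 */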
2901 static void
2902 connect_all_spills_with_keep(spill_ilp_t * si)
2903 {
2904         ir_node   *irn;
2905         ir_node  **ins,
2906                          **pos;
2907         int        n_spills;
2908         ir_node   *keep;
2909
2910
2911         n_spills = pset_count(si->spills);
2912         if(n_spills) {
2913                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
2914
2915                 pos = ins;
2916                 pset_foreach(si->spills, irn) {
2917                         *pos = irn;
2918                         ++pos;
2919                 }
2920
2921                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
2922
2923                 obstack_free(si->obst, ins);
2924         }
2925 }
2926
2927 /** insert a spill at an arbitrary position */
2928 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
2929 {
2930         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
2931         ir_graph *irg   = get_irn_irg(bl);
2932         ir_node *frame  = get_irg_frame(irg);
2933         ir_node *spill;
2934         ir_node *next;
2935
2936         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
2937         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
2938
2939         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
2940
2941         /*
2942          * Search for the right insertion point: a spill of a Phi cannot be
2943          * placed directly after the Phi if there are further Phis behind the
2944          * one being spilled, and a spill of a Proj must come after all Projs
2945          * of the same tuple node.
2946          *
2947          * One special case:
2948          * If the spill is in the start block, it must come after the frame
2949          * pointer has been set up; in that case the insertion point is moved
2950          * to the frame pointer node below.
2951          */
2952
2953         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
2954                 insert = frame;
2955
2956         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
2957                 insert = next;
2958
2959         sched_add_after(insert, spill);
2960         return spill;
2961 }
2962
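/**
 * Remove a remat from the schedule and disconnect it from the graph by
 * rewiring all of its ins (including the block edge at index -1) to Bad.
 */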
2963 static void
2964 delete_remat(spill_ilp_t * si, ir_node * remat) {
2965         int       n;
2966         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2967
2968         sched_remove(remat);
2969
2970         /* kill links to operands */
2971         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
2972                 set_irn_n(remat, n, bad);
2973         }
2974 }
2975
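/**
 * Disconnect all remat candidates (and their Projs) that ended up unused from
 * the graph and free the remat sets of every remat_info entry.
 */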
2976 static void
2977 clean_remat_info(spill_ilp_t * si)
2978 {
2979         int            n;
2980         remat_t       *remat;
2981         remat_info_t  *remat_info;
2982         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
2983
2984         set_foreach(si->remat_info, remat_info) {
2985                 if(!remat_info->remats) continue;
2986
2987                 pset_foreach(remat_info->remats, remat)
2988                 {
2989                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
2990                                 set_irn_n(remat->proj, -1, bad);
2991                                 set_irn_n(remat->proj, 0, bad);
2992                         }
2993
2994                         if(get_irn_n_edges(remat->op) == 0) {
2995                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
2996                                         set_irn_n(remat->op, n, bad);
2997                                 }
2998                         }
2999                 }
3000
3001                 if(remat_info->remats) del_pset(remat_info->remats);
3002                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
3003         }
3004 }
3005
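/**
 * Evaluate the ILP solution for every inserted remat and delete those whose
 * ILP variable is zero; remats with a nonzero variable are kept.
 */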
3006 static void
3007 delete_unnecessary_remats(spill_ilp_t * si)
3008 {
3009 #ifdef KEEPALIVE
3010         int       n;
3011         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
3012
3013         if(si->keep) {
3014                 ir_node   *end = get_irg_end(si->chordal_env->irg);
3015                 ir_node  **keeps;
3016
3017                 for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
3018                         ir_node        *keep_arg = get_irn_n(si->keep, n);
3019                         op_t           *arg_op = get_irn_link(keep_arg);
3020                         lpp_name_t     *name;
3021
3022                         assert(arg_op->is_remat);
3023
3024                         name = si->lpp->vars[arg_op->attr.remat.ilp];
3025
3026                         if(is_zero(name->value)) {
3027                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
3028                                 /* TODO check whether reload is preferred over remat (could be bug) */
3029                                 delete_remat(si, keep_arg);
3030                         } else {
3031                                 if(!arg_op->attr.remat.remat->inverse) {
3032                                         if(arg_op->attr.remat.pre) {
3033                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
3034                                         } else {
3035                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
3036                                         }
3037                                 } else {
3038                                         if(arg_op->attr.remat.pre) {
3039                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
3040                                         } else {
3041                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
3042                                         }
3043                                 }
3044                         }
3045
3046                         set_irn_n(si->keep, n, bad);
3047                 }
3048 #if 0
3049                 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
3050                         ir_node        *end_arg = get_End_keepalive(end, i);
3051
3052                         if(end_arg != si->keep) {
3053                                 obstack_grow(si->obst, &end_arg, sizeof(end_arg));
3054                         }
3055                 }
3056                 keeps = obstack_finish(si->obst);
3057                 set_End_keepalives(end, n-1, keeps);
3058                 obstack_free(si->obst, keeps);
3059 #endif
3060         } else {
3061                 DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
3062         }
3063 #else
3064         ir_node  *remat;
3065
3066         pset_foreach(si->all_possible_remats, remat) {
3067                 op_t           *remat_op = get_irn_link(remat);
3068                 lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
3069
3070                 if(is_zero(name->value)) {
3071                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
3072                         /* TODO check whether reload is preferred over remat (could be bug) */
3073                         delete_remat(si, remat);
3074                 } else {
3075                         if(!remat_op->attr.remat.remat->inverse) {
3076                                 if(remat_op->attr.remat.pre) {
3077                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
3078                                 } else {
3079                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
3080                                 }
3081                         } else {
3082                                 if(remat_op->attr.remat.pre) {
3083                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
3084                                 } else {
3085                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
3086                                 }
3087                         }
3088                 }
3089         }
3090 #endif
3091 }
3092
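/**
 * Collect all spills that were created for the given value into a new pset
 * (to be freed by the caller).
 */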
3093 static pset *
3094 get_spills_for_value(spill_ilp_t * si, ir_node * value)
3095 {
3096         pset     *spills = pset_new_ptr_default();
3097
3098         ir_node  *next;
3099         defs_t   *defs;
3100
3101         defs = set_find_def(si->values, value);
3102
3103         if(defs && defs->spills) {
3104                 for(next = defs->spills; next; next = get_irn_link(next)) {
3105                         pset_insert_ptr(spills, next);
3106                 }
3107         }
3108
3109         return spills;
3110 }
3111
3112 static pset *
3113 get_remats_for_value(spill_ilp_t * si, ir_node * value)
3114 {
3115         pset     *remats = pset_new_ptr_default();
3116
3117         ir_node  *next;
3118         defs_t   *defs;
3119
3120         pset_insert_ptr(remats, value);
3121         defs = set_find_def(si->values, value);
3122
3123         if(defs && defs->remats) {
3124                 for(next = defs->remats; next; next = get_irn_link(next)) {
3125                         pset_insert_ptr(remats, next);
3126                 }
3127         }
3128
3129         return remats;
3130 }
3131
3132
3133 /**
3134  * @param before   The node after which the spill will be placed in the schedule
3135  */
3136 /* TODO set context properly */
3137 static ir_node *
3138 insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
3139 {
3140         defs_t   *defs;
3141         ir_node  *spill;
3142         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3143
3144         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
3145
3146         spill = be_spill2(arch_env, irn, before, irn);
3147
3148         defs = set_insert_def(si->values, value);
3149         assert(defs);
3150
3151         /* enter into the linked list */
3152         set_irn_link(spill, defs->spills);
3153         defs->spills = spill;
3154
3155 #ifdef KEEPALIVE_SPILLS
3156         pset_insert_ptr(si->spills, spill);
3157 #endif
3158
3159         return spill;
3160 }
3161
3162 /**
3163  * @param phi      The Phi node which has to be spilled
3164  */
3165 static ir_node *
3166 insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
3167 {
3168         ir_node   *mem_phi;
3169         ir_node  **ins;
3170         defs_t    *defs;
3171         int        n;
3172         op_t      *op = get_irn_link(phi);
3173
3174         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
3175
3176         for(n=get_irn_arity(phi)-1; n>=0; --n) {
3177                 ins[n] = si->m_unknown;
3178         }
3179
3180         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
3181
3182         defs = set_insert_def(si->values, phi);
3183         assert(defs);
3184
3185         /* enter into the linked list */
3186         set_irn_link(mem_phi, defs->spills);
3187         defs->spills = mem_phi;
3188
3189         sched_add_after(phi, mem_phi);
3190
3191 #ifdef KEEPALIVE_SPILLS
3192         pset_insert_ptr(si->spills, mem_phi);
3193 #endif
3194
3195
3196         return mem_phi;
3197 }
3198
3199 /**
3200  * Add remat to list of defs, destroys link field!
3201  */
3202 static void
3203 insert_remat(spill_ilp_t * si, ir_node * remat)
3204 {
3205         defs_t   *defs;
3206         op_t     *remat_op = get_irn_link(remat);
3207
3208         assert(remat_op->is_remat);
3209
3210         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
3211         assert(defs);
3212
3213         /* enter into the linked list */
3214         set_irn_link(remat, defs->remats);
3215         defs->remats = remat;
3216 }
3217
3218
3219 /**
3220  * Add reload before operation and add to list of defs
3221  */
3222 static ir_node *
3223 insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
3224 {
3225         defs_t   *defs;
3226         ir_node  *reload,
3227                          *spill;
3228         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3229
3230         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
3231
3232         defs = set_find_def(si->values, value);
3233
3234         spill = defs->spills;
3235         assert(spill && "no spill placed before reload");
3236
3237         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
3238
3239         /* enter into the linked list */
3240         set_irn_link(reload, defs->remats);
3241         defs->remats = reload;
3242
3243         return reload;
3244 }
3245
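/**
 * Place the spills selected by the ILP in the given block: Phis whose mem_in
 * variable is set get a memory Phi, values defined in the block are spilled
 * right after their definition, values live-in in a register are spilled at
 * the block start, and values recreated by a remat are spilled right after
 * that remat.
 */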
3246 static void
3247 walker_spill_placer(ir_node * bb, void * data) {
3248         spill_ilp_t   *si = (spill_ilp_t*)data;
3249         ir_node       *irn;
3250         spill_bb_t    *spill_bb = get_irn_link(bb);
3251         pset          *spills_to_do = pset_new_ptr_default();
3252         spill_t       *spill;
3253
3254         set_foreach(spill_bb->ilp, spill) {
3255                 lpp_name_t    *name;
3256
3257                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
3258                         name = si->lpp->vars[spill->mem_in];
3259                         if(!is_zero(name->value)) {
3260                                 ir_node   *mem_phi;
3261
3262                                 mem_phi = insert_mem_phi(si, spill->irn);
3263
3264                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
3265                         }
3266                 }
3267
3268                 name = si->lpp->vars[spill->spill];
3269                 if(!is_zero(name->value)) {
3270                         /* place spill directly after definition */
3271                         if(get_nodes_block(spill->irn) == bb) {
3272                                 insert_spill(si, spill->irn, spill->irn, spill->irn);
3273                                 continue;
3274                         }
3275
3276                         /* place spill at bb start */
3277                         if(spill->reg_in > 0) {
3278                                 name = si->lpp->vars[spill->reg_in];
3279                                 if(!is_zero(name->value)) {
3280                                         insert_spill(si, spill->irn, spill->irn, bb);
3281                                         continue;
3282                                 }
3283                         }
3284                         /* place spill after a remat */
3285                         pset_insert_ptr(spills_to_do, spill->irn);
3286                 }
3287         }
3288         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
3289
3290
3291         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
3292                 op_t     *op = get_irn_link(irn);
3293
3294                 if(be_is_Spill(irn)) continue;
3295
3296                 if(op->is_remat) {
3297                         /* TODO fix this if we want to support remats with more than two nodes */
3298                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3299                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3300
3301                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3302                         }
3303                 } else {
3304                         if(pset_find_ptr(spills_to_do, irn)) {
3305                                 pset_remove_ptr(spills_to_do, irn);
3306
3307                                 insert_spill(si, irn, irn, irn);
3308                         }
3309                 }
3310
3311         }
3312
3313         assert(pset_count(spills_to_do) == 0);
3314
3315         /* afterwards free data in block */
3316         del_pset(spills_to_do);
3317 }
3318
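/**
 * Insert a memory copy (spill) of value in block bb, after the last
 * definition or remat of the value in that block; used for the arguments of
 * memory Phis.
 */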
3319 static ir_node *
3320 insert_mem_copy(spill_ilp_t * si, const ir_node * bb, const ir_node * value)
3321 {
3322         ir_node          *insert_pos = bb;
3323         ir_node          *spill;
3324         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
3325
3326         /* find last definition of arg value in block */
3327         ir_node  *next;
3328         defs_t   *defs;
3329         int       last = 0;
3330
3331         defs = set_find_def(si->values, value);
3332
3333         if(defs && defs->remats) {
3334                 for(next = defs->remats; next; next = get_irn_link(next)) {
3335                         if(get_nodes_block(next) == bb && sched_get_time_step(next) > last) {
3336                                 last = sched_get_time_step(next);
3337                                 insert_pos = next;
3338                         }
3339                 }
3340         }
3341
3342         if(get_nodes_block(value) == bb && sched_get_time_step(value) > last) {
3343                 last = sched_get_time_step(value);
3344                 insert_pos = value;
3345         }
3346
3347         DBG((si->dbg, LEVEL_2, "\t  inserting mem copy for value %+F after %+F\n", value, insert_pos));
3348
3349         spill = be_spill2(arch_env, is_Block(insert_pos)?value:insert_pos, insert_pos, value);
3350
3351         return spill;
3352 }
3353
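/**
 * Set the arguments of every memory Phi: each operand becomes either the
 * spill of the corresponding Phi argument or, if the ILP selected a copy on
 * that edge, a newly inserted memory copy in the predecessor block.
 */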
3354 static void
3355 phim_fixer(spill_ilp_t *si) {
3356         defs_t  *defs;
3357
3358         set_foreach(si->values, defs) {
3359                 const ir_node  *phi = defs->value;
3360                 op_t           *op = get_irn_link(phi);
3361                 ir_node        *phi_m = NULL;
3362                 ir_node        *next = defs->spills;
3363                 int             n;
3364
3365                 if(!is_Phi(phi)) continue;
3366
3367                 while(next) {
3368                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3369                                 phi_m = next;
3370                                 break;
3371                         } else {
3372                                 next = get_irn_link(next);
3373                         }
3374                 }
3375                 if(!phi_m) continue;
3376
3377                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3378                         const ir_node  *value = get_irn_n(phi, n);
3379                         defs_t         *val_defs = set_find_def(si->values, value);
3380                         ir_node        *arg = get_irn_n(phi_m, n);
3381
3382                         /* a spill of this value */
3383                         ir_node      *spill;
3384
3385
3386 #ifndef NO_MEMCOPIES
3387                         ir_node    *pred = get_Block_cfgpred_block(get_nodes_block(phi), n);
3388                         lpp_name_t *name = si->lpp->vars[op->attr.live_range.args.copies[n]];
3389
3390                         if(!is_zero(name->value)) {
3391                                 spill = insert_mem_copy(si, pred, value);
3392                         } else {
3393                                 spill = val_defs->spills;
3394                         }
3395 #else
3396                         spill = val_defs->spills;
3397 #endif
3398                         assert(spill && "no spill placed before PhiM");
3399                         set_irn_n(phi_m, n, spill);
3400                 }
3401         }
3402 }
3403
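/**
 * Insert the reloads selected by the ILP for the given block: reloads needed
 * at the block end and reloads directly before their uses (placed before any
 * directly preceding pre-remats), and register all surviving remats. The
 * per-block ILP data is freed afterwards.
 */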
3404 static void
3405 walker_reload_placer(ir_node * bb, void * data) {
3406         spill_ilp_t   *si = (spill_ilp_t*)data;
3407         ir_node       *irn;
3408         spill_bb_t    *spill_bb = get_irn_link(bb);
3409         int            i;
3410
3411         /* reloads at end of block */
3412         if(spill_bb->reloads) {
3413                 keyval_t    *keyval;
3414
3415                 set_foreach(spill_bb->reloads, keyval) {
3416                         ir_node        *irn = (ir_node*)keyval->key;
3417                         ilp_var_t       reload = PTR_TO_INT(keyval->val);
3418                         lpp_name_t     *name;
3419
3420                         name = si->lpp->vars[reload];
3421                         if(!is_zero(name->value)) {
3422                                 ir_node    *reload;
3423                                 ir_node    *insert_pos = bb;
3424                                 ir_node    *prev = sched_block_last_noncf(si, bb);
3425                                 op_t       *prev_op = get_irn_link(prev);
3426
3427                                 while(be_is_Spill(prev)) {
3428                                         prev = sched_prev(prev);
3429                                 }
3430
3431                                 prev_op = get_irn_link(prev);
3432
3433                                 /* insert reload before pre-remats */
3434                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3435                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3436                                         insert_pos = prev;
3437
3438                                         do {
3439                                                 prev = sched_prev(prev);
3440                                         } while(be_is_Spill(prev));
3441
3442                                         prev_op = get_irn_link(prev);
3443
3444                                 }
3445
3446                                 reload = insert_reload(si, irn, insert_pos);
3447
3448 #ifdef KEEPALIVE_RELOADS
3449                                 pset_insert_ptr(si->spills, reload);
3450 #endif
3451                         }
3452                 }
3453         }
3454
3455         /* walk and insert more reloads and collect remats */
3456         sched_foreach_reverse(bb, irn) {
3457                 op_t     *op = get_irn_link(irn);
3458
3459                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3460                 if(is_Phi(irn)) break;
3461
3462                 if(op->is_remat) {
3463                         if(get_irn_mode(irn) != mode_T) {
3464                                 insert_remat(si, irn);
3465                         }
3466                 } else {
3467                         int    n;
3468
3469                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3470                                 ir_node    *arg = get_irn_n(irn, n);
3471
3472                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3473                                         lpp_name_t    *name;
3474
3475                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3476                                         if(!is_zero(name->value)) {
3477                                                 ir_node    *reload;
3478                                                 ir_node    *insert_pos = irn;
3479                                                 ir_node    *prev = sched_prev(insert_pos);
3480                                                 op_t       *prev_op;
3481
3482                                                 while(be_is_Spill(prev)) {
3483                                                         prev = sched_prev(prev);
3484                                                 }
3485
3486                                                 prev_op = get_irn_link(prev);
3487
3488                                                 /* insert reload before pre-remats */
3489                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3490                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3491                                                         insert_pos = prev;
3492
3493                                                         do {
3494                                                                 prev = sched_prev(prev);
3495                                                         } while(be_is_Spill(prev));
3496
3497                                                         prev_op = get_irn_link(prev);
3498
3499                                                 }
3500
3501                                                 reload = insert_reload(si, arg, insert_pos);
3502
3503                                                 set_irn_n(irn, n, reload);
3504
3505 #ifdef KEEPALIVE_RELOADS
3506                                                 pset_insert_ptr(si->spills, reload);
3507 #endif
3508                                         }
3509                                 }
3510                         }
3511                 }
3512         }
3513
3514         del_set(spill_bb->ilp);
3515         if(spill_bb->reloads) del_set(spill_bb->reloads);
3516 }
3517
3518 static void
3519 walker_collect_used(ir_node * irn, void * data)
3520 {
3521         lc_bitset_t   *used = data;
3522
3523         lc_bitset_set(used, get_irn_idx(irn));
3524 }
3525
3526 struct kill_helper {
3527         lc_bitset_t  *used;
3528         spill_ilp_t  *si;
3529 };
3530
3531 static void
3532 walker_kill_unused(ir_node * bb, void * data)
3533 {
3534         struct kill_helper *kh = data;
3535         const ir_node      *bad = get_irg_bad(get_irn_irg(bb));
3536         ir_node            *irn;
3537
3538
3539         for(irn=sched_first(bb); !sched_is_end(irn);) {
3540                 ir_node     *next = sched_next(irn);
3541                 int          n;
3542
3543                 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3544                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3545                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3546 #if 0
3547                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3548 #endif
3549                         }
3550
3551                         sched_remove(irn);
3552
3553                         set_nodes_block(irn, bad);
3554                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3555                                 set_irn_n(irn, n, bad);
3556                         }
3557                 }
3558                 irn = next;
3559         }
3560 }
3561
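/**
 * Remove every scheduled node that is no longer reachable by a graph walk
 * (typically spills/reloads whose users have been deleted) and disconnect it
 * from the graph.
 */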
3562 static void
3563 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3564 {
3565         struct kill_helper kh;
3566
3567         kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3568         kh.si = si;
3569
3570         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3571         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3572
3573         lc_bitset_free(kh.used);
3574 }
3575
3576 static void
3577 print_irn_pset(pset * p)
3578 {
3579         ir_node   *irn;
3580
3581         pset_foreach(p, irn) {
3582                 ir_printf("%+F\n", irn);
3583         }
3584 }
3585
3586 static void
3587 dump_phi_class(spill_ilp_t * si, pset * phiclass, const char * file)
3588 {
3589     FILE           *f = fopen(file, "w");
3590     ir_node        *irn;
3591     interference_t *interference;
3592
3593     pset_break(phiclass);
3594     set_break(si->interferences);
3595
3596     ir_fprintf(f, "digraph phiclass {\n");
3597
3598     pset_foreach(phiclass, irn) {
3599         if(is_Phi(irn))
3600             ir_fprintf(f, "  %F%N [shape=box]\n",irn,irn);
3601     }
3602
3603     pset_foreach(phiclass, irn) {
3604         int n;
3605
3606         if(!is_Phi(irn)) continue;
3607
3608         for(n=get_irn_arity(irn)-1; n>=0; --n) {
3609             ir_node  *arg = get_irn_n(irn, n);
3610
3611             ir_fprintf(f, "  %F%N -> %F%N\n",irn,irn,arg,arg);
3612         }
3613     }
3614
3615     set_foreach(si->interferences, interference) {
3616         const ir_node  *a  = interference->a;
3617         const ir_node  *b  = interference->b;
3618         if(get_phi_class(a) == phiclass) {
3619             ir_fprintf(f, "  %F%N -> %F%N [color=red,dir=none,style=bold]\n",a,a,b,b);
3620         }
3621     }
3622
3623     ir_fprintf(f, "}");
3624     fclose(f);
3625 }
3626
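/**
 * Restore SSA form after spill/reload placement: values with several spills
 * get their memory values merged via SSA construction, then each original
 * value is reconstructed together with its remats and reloads.
 */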
3627 static void
3628 rewire_uses(spill_ilp_t * si)
3629 {
3630         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3631         defs_t               *defs;
3632         pset                 *ignore = pset_new_ptr(1);
3633
3634         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3635
3636         /* first fix uses of spills */
3637         set_foreach(si->values, defs) {
3638                 pset     *reloads;
3639                 pset     *spills;
3640                 ir_node  *next = defs->remats;
3641                 int remats = 0;
3642
3643                 reloads = pset_new_ptr_default();
3644
3645                 while(next) {
3646                         if(be_is_Reload(next)) {
3647                                 pset_insert_ptr(reloads, next);
3648                         } else {
3649                                 ++remats;
3650                         }
3651                         next = get_irn_link(next);
3652                 }
3653
3654                 spills = get_spills_for_value(si, defs->value);
3655                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
3656                 if(pset_count(spills) > 1) {
3657                         //assert(pset_count(reloads) > 0);
3658                         //                              print_irn_pset(spills);
3659                         //                              print_irn_pset(reloads);
3660
3661                         be_ssa_constr_set_ignore(dfi, si->lv, spills, ignore);
3662                 }
3663
3664                 del_pset(reloads);
3665                 del_pset(spills);
3666         }
3667
3668         /* then fix uses of remats and reloads */
3669         set_foreach(si->values, defs) {
3670                 pset     *nodes;
3671                 ir_node  *next = defs->remats;
3672
3673                 if(next) {
3674                         nodes = pset_new_ptr_default();
3675                         pset_insert_ptr(nodes, defs->value);
3676
3677                         while(next) {
3678                                 pset_insert_ptr(nodes, next);
3679                                 next = get_irn_link(next);
3680                         }
3681
3682                         if(pset_count(nodes) > 1) {
3683                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
3684                                 be_ssa_constr_set(dfi, si->lv, nodes);
3685                         }
3686
3687                         del_pset(nodes);
3688                 }
3689         }
3690
3691 //      remove_unused_defs(si);
3692
3693         be_free_dominance_frontiers(dfi);
3694 }
3695
3696
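/**
 * Apply the ILP solution to the graph: delete unnecessary remats, place
 * spills and reloads, fix the arguments of memory Phis, clean up the remat
 * info and restore SSA form.
 */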
3697 static void
3698 writeback_results(spill_ilp_t * si)
3699 {
3700         /* walk through the graph and collect all spills, reloads and remats for a value */
3701
3702         si->values = new_set(cmp_defs, 4096);
3703
3704         DBG((si->dbg, LEVEL_1, "Applying results\n"));
3705         delete_unnecessary_remats(si);
3706         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
3707         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
3708         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
3709         phim_fixer(si);
3710
3711         /* clean the remat info! there are still back-edges leading there! */
3712         clean_remat_info(si);
3713
3714         rewire_uses(si);
3715
3716         connect_all_spills_with_keep(si);
3717
3718         del_set(si->values);
3719 }
3720
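/** Return the number of usable (non-ignore) registers in the current class. */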
3721 static int
3722 get_n_regs(spill_ilp_t * si)
3723 {
3724         int     arch_n_regs = arch_register_class_n_regs(si->cls);
3725         int     free = 0;
3726         int     i;
3727
3728         for(i=0; i<arch_n_regs; i++) {
3729                 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
3730                         free++;
3731                 }
3732         }
3733
3734         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
3735         return free;
3736 }
3737
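/**
 * Move each reload upwards in the schedule as long as the annotated register
 * pressure stays below the number of available registers; stop at the block
 * start, at another reload of the same class or at the reload's own spill.
 */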
3738 static void
3739 walker_reload_mover(ir_node * bb, void * data)
3740 {
3741         spill_ilp_t   *si = data;
3742         ir_node           *tmp;
3743
3744         sched_foreach(bb, tmp) {
3745                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
3746                         ir_node       *reload = tmp;
3747                         ir_node       *irn = tmp;
3748
3749                         /* move reload upwards */
3750
3751                         int pressure = (int)get_irn_link(reload);
3752                         if(pressure < si->n_regs) {
3753                                 irn = sched_prev(reload);
3754                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
3755                                 sched_remove(reload);
3756                                 pressure = (int)get_irn_link(irn);
3757
3758                                 while(pressure < si->n_regs) {
3759                                         if( sched_is_end(irn) ||
3760                                            (be_is_Reload(irn) && has_reg_class(si, irn)) ||
3761                                            /* do not move reload before its spill */
3762                                            (irn == be_get_Reload_mem(reload)) ) break;
3763
3764                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
3765                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
3766                                         irn = sched_prev(irn);
3767
3768                                         pressure = (int)get_irn_link(irn);
3769                                 }
3770
3771                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
3772                                 sched_put_after(irn, reload);
3773                         }
3774                 }
3775         }
3776 }
3777
3778 static void
3779 move_reloads_upward(spill_ilp_t * si)
3780 {
3781         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
3782 }
3783
3784
3785 /**
3786  * Walk all irg blocks and check for interfering spills inside of phi classes
3787  */
3788 static void
3789 luke_meminterferencechecker(ir_node * bb, void * data)
3790 {
3791         spill_ilp_t    *si = (spill_ilp_t*)data;
3792         int             l1, l2;
3793
3794         be_lv_foreach(si->lv, bb, be_lv_state_end | be_lv_state_out | be_lv_state_in, l1) {
3795                 ir_node        *a = be_lv_get_irn(si->lv, bb, l1);
3796
3797                 if(!be_is_Spill(a) && (!is_Phi(a) || get_irn_mode(a) != mode_T)) continue;
3798
3799                 /* a is only interesting if it is in my register class and if it is inside a phi class */
3800                 if (has_reg_class(si, a) && get_phi_class(a)) {
3801                         for(l2=_be_lv_next_irn(si->lv, bb, 0xff, l1+1); l2>=0; l2=_be_lv_next_irn(si->lv, bb, 0xff, l2+1)) {
3802                                 ir_node        *b = be_lv_get_irn(si->lv, bb, l2);
3803
3804                                 if(!be_is_Spill(b) && (!is_Phi(b) || get_irn_mode(b) != mode_T)) continue;
3805
3806                                 /* a and b are only interesting if they are in the same phi class */
3807                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
3808                                         if(values_interfere_in_block(si, bb, a, b)) {
3809                                                 ir_fprintf(stderr, "$$ Spills interfere in %+F: %+F, %+F \t$$\n", bb, a, b);
3810                                         }
3811                                 }
3812                         }
3813                 }
3814         }
3815 }
3816
3817 static void
3818 verify_phiclasses(spill_ilp_t * si)
3819 {
3820         /* analyze phi classes */
3821         phi_class_compute(si->chordal_env->irg);
3822
3823         DBG((si->dbg, LEVEL_2, "\t calling memory interference checker\n"));
3824         irg_block_walk_graph(si->chordal_env->irg, luke_meminterferencechecker, NULL, si);
3825 }
3826
3827 static void
3828 walker_spillslotassigner(ir_node * irn, void * data)
3829 {
3830         spill_ilp_t            *si = (spill_ilp_t*)data;
3831         void                   *cls;
3832
3833         if(!be_is_Spill(irn)) return;
3834
3835         /* set spill context to phi class if it has one ;) */
3836
3837         cls = get_phi_class(irn);
3838         if(cls)
3839                 be_set_Spill_context(irn, cls);
3840         else
3841                 be_set_Spill_context(irn, irn);
3842 }
3843
3844
3845 static void
3846 assign_spillslots(spill_ilp_t * si)
3847 {
3848         DBG((si->dbg, LEVEL_2, "\t calling spill slot assigner\n"));
3849         irg_walk_graph(si->chordal_env->irg, walker_spillslotassigner, NULL, si);
3850 }
3851
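/**
 * Main entry point of the ILP spiller: collect and insert all possible
 * remats, build the spill/reload/remat ILP for the given register class,
 * solve it (locally or on the LP server) and write the solution back into the
 * graph; finally reloads are moved upwards and spill contexts are assigned.
 */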
3852 void
3853 be_spill_remat(const be_chordal_env_t * chordal_env)
3854 {
3855         char            problem_name[256];
3856         char            dump_suffix[256];
3857         char            dump_suffix2[256];
3858         struct obstack  obst;
3859         spill_ilp_t     si;
3860
3861         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
3862         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
3863         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
3864
3865         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
3866         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
3867
3868 #ifdef VERIFY_DOMINANCE
3869         be_check_dominance(chordal_env->irg);
3870 #endif
3871
3872         obstack_init(&obst);
3873         si.chordal_env = chordal_env;
3874         si.obst = &obst;
3875         si.cls = chordal_env->cls;
3876         si.lpp = new_lpp(problem_name, lpp_minimize);
3877         si.remat_info = new_set(cmp_remat_info, 4096);
3878         si.interferences = new_set(cmp_interference, 32);
3879         si.all_possible_remats = pset_new_ptr_default();
3880         si.spills = pset_new_ptr_default();
3881         si.inverse_ops = pset_new_ptr_default();
3882         si.lv = chordal_env->lv;
3883 #ifdef KEEPALIVE
3884         si.keep = NULL;
3885 #endif
3886         si.n_regs = get_n_regs(&si);
3887
3888         set_irg_link(chordal_env->irg, &si);
3889         compute_doms(chordal_env->irg);
3890
3891         /* compute phi classes */
3892 //      phi_class_compute(chordal_env->irg);
3893
3894         be_analyze_regpressure(chordal_env, "-pre");
3895
3896 #ifdef COLLECT_REMATS
3897         /* collect remats */
3898         DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
3899         irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
3900 #endif
3901
3902         /* insert possible remats */
3903         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
3904         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
3905         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
3906
3907 #ifdef KEEPALIVE
3908         DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
3909         connect_all_remats_with_keep(&si);
3910         /* dump graph with inserted remats */
3911         dump_graph_with_remats(chordal_env->irg, dump_suffix);
3912 #endif
3913
3914         /* insert copies for phi arguments not in my regclass */
3915         irg_walk_graph(chordal_env->irg, walker_regclass_copy_insertor, NULL, &si);
3916
3917         /* recompute liveness */
3918         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
3919         be_liveness_recompute(si.lv);
3920
3921         /* build the ILP */
3922
3923         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
3924         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
3925         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
3926
3927         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
3928         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
3929
3930 #ifndef NO_MEMCOPIES
3931         DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
3932         memcopyhandler(&si);
3933 #endif
3934
3935 #ifdef DUMP_ILP
3936         {
3937                 FILE           *f;
3938                 char            buf[256];
3939
3940                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
3941                 if ((f = fopen(buf, "wt")) != NULL) {
3942                         lpp_dump_plain(si.lpp, f);
3943                         fclose(f);
3944                 }
3945         }
3946 #endif
3947
3948 #ifdef SOLVE
3949         DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
3950 #ifdef ILP_TIMEOUT
3951         lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
3952 #endif
3953
3954 #ifdef SOLVE_LOCAL
3955         lpp_solve_cplex(si.lpp);
3956 #else
3957         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
3958 #endif
3959         assert(lpp_is_sol_valid(si.lpp)
3960                && "solution of ILP must be valid");
3961
3962         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
3963
3964 #ifdef DUMP_SOLUTION
3965         {
3966                 FILE           *f;
3967                 char            buf[256];
3968
3969                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
3970                 if ((f = fopen(buf, "wt")) != NULL) {
3971                         int             i;
3972                         for (i = 0; i < si.lpp->var_next; ++i) {
3973                                 lpp_name_t     *name = si.lpp->vars[i];
3974                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
3975                         }
3976                         fclose(f);
3977                 }
3978         }
3979 #endif
3980
3981         writeback_results(&si);
3982
3983 #endif                          /* SOLVE */
3984
3985         kill_all_unused_values_in_schedule(&si);
3986
3987 #if defined(KEEPALIVE_SPILLS) || defined(KEEPALIVE_RELOADS)
3988         be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
3989 #endif
3990
3991         // move reloads upwards
3992         be_liveness_recompute(si.lv);
3993         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3994         move_reloads_upward(&si);
3995
3996 #ifndef NO_MEMCOPIES
3997         verify_phiclasses(&si);
3998         assign_spillslots(&si);
3999 #endif
4000
4001         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
4002
4003         dump_pressure_graph(&si, dump_suffix2);
4004
4005         be_analyze_regpressure(chordal_env, "-post");
4006
4007 #ifdef VERIFY_DOMINANCE
4008         be_check_dominance(chordal_env->irg);
4009 #endif
4010
4011         free_dom(chordal_env->irg);
4012         del_set(si.interferences);
4013         del_pset(si.inverse_ops);
4014         del_pset(si.all_possible_remats);
4015         del_pset(si.spills);
4016         free_lpp(si.lpp);
4017         obstack_free(&obst, NULL);
4018         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
4019 }
4020
4021 #else                           /* WITH_ILP */
4022
4023 static void
4024 only_that_you_can_compile_without_WITH_ILP_defined(void)
4025 {
4026 }
4027
4028 #endif                          /* WITH_ILP */