1 /** vim: set sw=4 ts=4:
2  * @file   bespillremat.c
3  * @date   2006-04-06
4  * @author Adam M. Szalkowski & Sebastian Hack
5  *
6  * ILP based spilling & rematerialization
7  *
8  * Copyright (C) 2006 Universitaet Karlsruhe
9  * Released under the GPL
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #include "hashptr.h"
20 #include "debug.h"
21 #include "obst.h"
22 #include "set.h"
23 #include "list.h"
24 #include "pmap.h"
25
26 #include "irprintf.h"
27 #include "irgwalk.h"
28 #include "irdump_t.h"
29 #include "irnode_t.h"
30 #include "ircons_t.h"
31 #include "irloop_t.h"
32 #include "phiclass.h"
33 #include "iredges.h"
34 #include "execfreq.h"
35
36 #include <lpp/lpp.h>
37 #include <lpp/lpp_net.h>
38 #include <lpp/lpp_cplex.h>
39 //#include <lc_pset.h>
40 #include <libcore/lc_bitset.h>
41
42 #include "be_t.h"
43 #include "belive_t.h"
44 #include "besched_t.h"
45 #include "beirgmod.h"
46 #include "bearch.h"
47 #include "benode_t.h"
48 #include "beutil.h"
49 #include "bespillremat.h"
50 #include "bespill.h"
51 #include "bepressurestat.h"
52
53 #include "bechordal_t.h"
54
55 #define BIGM 100000.0
56
57 #define DUMP_SOLUTION
58 #define DUMP_ILP
59 //#define KEEPALIVE /* keep alive all inserted remats and dump graph with remats */
60 #define COLLECT_REMATS /* enable rematerialization */
61 #define COLLECT_INVERSE_REMATS /* enable placement of inverse remats */
62 #define REMAT_WHILE_LIVE /* only remat values that are live */
63 //#define NO_ENLARGE_L1V3N355 /* do not remat after the death of some operand */
64 //#define EXECFREQ_LOOPDEPH /* compute execution frequency from loop depth only */
65 //#define MAY_DIE_AT_PRE_REMAT /* allow values to die after a pre remat */
66 #define CHECK_POST_REMAT /* check register pressure after post remats (conservative, but otherwise the pressure may temporarily exceed the number of available registers) */
67 #define NO_SINGLE_USE_REMATS /* do not create remats for values with only a single (non-remat) use */
68 //#define KEEPALIVE_SPILLS
69 //#define KEEPALIVE_RELOADS
70 #define GOODWIN_REDUCTION
71 //#define NO_MEMCOPIES
72
73 #define  SOLVE
74 //#define  SOLVE_LOCAL
75 #define LPP_SERVER "i44pc52"
76 #define LPP_SOLVER "cplex"
77
78 #define COST_LOAD      10
79 #define COST_STORE     50
80 #define COST_REMAT     1
81
82 #define ILP_TIMEOUT    120
83
84 #define ILP_UNDEF               -1
85
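/**
 * Environment of the ILP-based spiller: the register class being
 * spilled, the LP problem and the sets collected while walking the irg
 * (possible remats, inverse ops, spills, interferences, ...).
 */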
86 typedef struct _spill_ilp_t {
87         const arch_register_class_t  *cls;
88         int                           n_regs;
89         const be_chordal_env_t       *chordal_env;
90         lpp_t                        *lpp;
91         struct obstack               *obst;
92         set                          *remat_info;
93         pset                         *all_possible_remats;
94         pset                         *inverse_ops;
95 #ifdef KEEPALIVE
96         ir_node                      *keep;
97 #endif
98         set                          *values; /**< for collecting all definitions of values before running ssa-construction */
99         set                          *execfreqs;
100         pset                         *spills;
101         set                          *interferences;
102         ir_node                      *m_unknown;
103         DEBUG_ONLY(firm_dbg_module_t * dbg);
104 } spill_ilp_t;
105
106 typedef int ilp_var_t;
107 typedef int ilp_cst_t;
108
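/**
 * Per-block data: the spill_t entries (ILP variables) of the values
 * handled in this block and, for merge blocks, the reload variables
 * created at the block end.
 */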
109 typedef struct _spill_bb_t {
110         set      *ilp;
111         set      *reloads;
112 } spill_bb_t;
113
114 typedef struct _remat_t {
115         const ir_node        *op;      /**< for copy_irn */
116         const ir_node        *value;   /**< the value which is being recomputed by this remat */
117         ir_node              *proj;    /**< not NULL if the above op produces a tuple */
118         int                   cost;    /**< cost of this remat */
119         int                   inverse; /**< nonzero if this is an inverse remat */
120 } remat_t;
121
122 /**
123  * Data to be attached to each IR node. For remats this contains the ilp_var
124  * for this remat and for normal ops this contains the ilp_vars for
125  * reloading each operand
126  */
127 typedef struct _op_t {
128         int             is_remat;
129         union {
130                 struct {
131                         ilp_var_t       ilp;
132                         remat_t        *remat; /**< the remat this op belongs to */
133                         int             pre; /**< 1, if this is a pressure-increasing remat */
134                 } remat;
135                 struct {
136                         ilp_var_t       ilp;
137                         ir_node        *op; /**< the operation this live range belongs to */
138                         union {
139                                 ilp_var_t      *reloads;
140                                 ilp_var_t      *copies;
141                         } args;
142                 } live_range;
143         } attr;
144 } op_t;
145
146 typedef struct _defs_t {
147         ir_node   *value;
148         ir_node   *spills;  /**< points to the first spill for this value (linked by link field) */
149         ir_node   *remats;  /**< points to the first definition for this value (linked by link field) */
150 } defs_t;
151
152 typedef struct _remat_info_t {
153         const ir_node       *irn; /**< the irn to which these remats belong */
154         pset                *remats; /**< possible remats for this value */
155         pset                *remats_by_operand; /**< remats with this value as operand */
156 } remat_info_t;
157
158 typedef struct _keyval_t {
159         const void          *key;
160         const void          *val;
161 } keyval_t;
162
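/**
 * ILP variables of one value in one block: whether the value is in a
 * register/in memory at block entry (reg_in/mem_in) and at block exit
 * (reg_out/mem_out), and whether a spill of it is placed in the block.
 */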
163 typedef struct _spill_t {
164         ir_node      *irn;
165         ilp_var_t     reg_in;
166         ilp_var_t     mem_in;
167         ilp_var_t     reg_out;
168         ilp_var_t     mem_out;
169         ilp_var_t     spill;
170 } spill_t;
171
172 static INLINE int
173 has_reg_class(const spill_ilp_t * si, const ir_node * irn)
174 {
175         return chordal_has_class(si->chordal_env, irn);
176 }
177
178 #if 0
179 static int
180 cmp_remat(const void *a, const void *b)
181 {
182         const keyval_t *p = a;
183         const keyval_t *q = b;
184         const remat_t  *r = p->val;
185         const remat_t  *s = q->val;
186
187         assert(r && s);
188
189         return !(r == s || r->op == s->op);
190 }
191 #endif
192 static int
193 cmp_remat(const void *a, const void *b)
194 {
195         const remat_t  *r = a;
196         const remat_t  *s = b;
197
198         return !(r == s || r->op == s->op);
199 }
200
201 static int
202 cmp_spill(const void *a, const void *b, size_t size)
203 {
204         const spill_t *p = a;
205         const spill_t *q = b;
206
207 //      return !(p->irn == q->irn && p->bb == q->bb);
208         return !(p->irn == q->irn);
209 }
210
211 static keyval_t *
212 set_find_keyval(set * set, void * key)
213 {
214         keyval_t     query;
215
216         query.key = key;
217         return set_find(set, &query, sizeof(query), HASH_PTR(key));
218 }
219
220 static keyval_t *
221 set_insert_keyval(set * set, void * key, void * val)
222 {
223         keyval_t     query;
224
225         query.key = key;
226         query.val = val;
227         return set_insert(set, &query, sizeof(query), HASH_PTR(key));
228 }
229
230 static defs_t *
231 set_find_def(set * set, ir_node * value)
232 {
233         defs_t     query;
234
235         query.value = value;
236         return set_find(set, &query, sizeof(query), HASH_PTR(value));
237 }
238
239 static defs_t *
240 set_insert_def(set * set, ir_node * value)
241 {
242         defs_t     query;
243
244         query.value = value;
245         query.spills = NULL;
246         query.remats = NULL;
247         return set_insert(set, &query, sizeof(query), HASH_PTR(value));
248 }
249
250 static spill_t *
251 set_find_spill(set * set, ir_node * value)
252 {
253         spill_t     query;
254
255         query.irn = value;
256         return set_find(set, &query, sizeof(query), HASH_PTR(value));
257 }
258
259 #define pset_foreach(s,i) for((i)=pset_first((s)); (i); (i)=pset_next((s)))
260 #define set_foreach(s,i) for((i)=set_first((s)); (i); (i)=set_next((s)))
261 #define foreach_post_remat(s,i) for((i)=next_post_remat((s)); (i); (i)=next_post_remat((i)))
262 #define foreach_pre_remat(si,s,i) for((i)=next_pre_remat((si),(s)); (i); (i)=next_pre_remat((si),(i)))
263 #define sched_foreach_op(s,i) for((i)=sched_next_op((s));!sched_is_end((i));(i)=sched_next_op((i)))
264
265 static int
266 cmp_remat_info(const void *a, const void *b, size_t size)
267 {
268         const remat_info_t *p = a;
269         const remat_info_t *q = b;
270
271         return !(p->irn == q->irn);
272 }
273
274 static int
275 cmp_defs(const void *a, const void *b, size_t size)
276 {
277         const defs_t *p = a;
278         const defs_t *q = b;
279
280         return !(p->value == q->value);
281 }
282
283 static int
284 cmp_keyval(const void *a, const void *b, size_t size)
285 {
286         const keyval_t *p = a;
287         const keyval_t *q = b;
288
289         return !(p->key == q->key);
290 }
291
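/**
 * Returns the execution frequency of the block of @p irn (or of @p irn
 * itself if it is a Block), either taken from the precomputed execfreq
 * data or estimated as 10^loop_depth, plus a small fudge factor.
 */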
292 static double
293 execution_frequency(const spill_ilp_t * si, const ir_node * irn)
294 {
295 #define FUDGE 0.001
296         if(si->execfreqs) {
297                 if(is_Block(irn)) {
298                         return get_block_execfreq(si->execfreqs, irn) + FUDGE;
299                 } else {
300                         return get_block_execfreq(si->execfreqs, get_nodes_block(irn)) + FUDGE;
301                 }
302         } else {
303                 if(is_Block(irn))
304                         return exp(get_loop_depth(get_irn_loop(irn)) * log(10)) + FUDGE;
305                 else
306                         return exp(get_loop_depth(get_irn_loop(get_nodes_block(irn))) * log(10)) + FUDGE;
307         }
308 }
309
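/**
 * Returns the cost of @p irn: fixed costs for Spill and Reload nodes,
 * otherwise the backend's estimated cost of the operation.
 */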
310 static double
311 get_cost(const spill_ilp_t * si, const ir_node * irn)
312 {
313         if(be_is_Spill(irn)) {
314                 return COST_STORE;
315         } else if(be_is_Reload(irn)){
316                 return COST_LOAD;
317         } else {
318                 return arch_get_op_estimated_cost(si->chordal_env->birg->main_env->arch_env, irn);
319         }
320
321 }
322
323 /**
324  * Checks, whether node and its operands have suitable reg classes
325  */
326 static INLINE int
327 is_rematerializable(const spill_ilp_t * si, const ir_node * irn)
328 {
329         int             i,
330                         n;
331         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
332         int               remat = (arch_irn_get_flags(arch_env, irn) & arch_irn_flags_rematerializable) != 0;
333
334 #if 0
335         if(!remat)
336                 ir_fprintf(stderr, "  Node %+F is not rematerializable\n", irn);
337 #endif
338
339         for (i = 0, n = get_irn_arity(irn); i < n && remat; ++i) {
340                 ir_node        *op = get_irn_n(irn, i);
341                 remat &= has_reg_class(si, op) || arch_irn_get_flags(arch_env, op) & arch_irn_flags_ignore || (get_irn_op(op) == op_NoMem);
342
343 //              if(!remat)
344 //                      ir_fprintf(stderr, "  Argument %d (%+F) of Node %+F has wrong regclass\n", i, op, irn);
345         }
346
347         return remat;
348 }
349
350 /**
351  * Try to create a remat from @p op with destination value @p dest_value
352  */
353 static INLINE remat_t *
354 get_remat_from_op(spill_ilp_t * si, const ir_node * dest_value, const ir_node * op)
355 {
356         remat_t  *remat = NULL;
357
358 //      if(!mode_is_datab(get_irn_mode(dest_value)))
359 //              return NULL;
360
361         if(dest_value == op) {
362                 const ir_node *proj = NULL;
363
364                 if(is_Proj(dest_value)) {
365                         op = get_irn_n(op, 0);
366                         proj = dest_value;
367                 }
368
369                 if(!is_rematerializable(si, op))
370                         return NULL;
371
372                 remat = obstack_alloc(si->obst, sizeof(*remat));
373                 remat->op = op;
374                 remat->cost = get_cost(si, op);
375                 remat->value = dest_value;
376                 remat->proj = proj;
377                 remat->inverse = 0;
378         } else {
379                 arch_inverse_t     inverse;
380                 int                i,
381                                                    n;
382
383                 /* get the index of the operand that the inverse op shall recompute */
384                 for (i = 0, n = get_irn_arity(op); i < n; ++i) {
385                         ir_node        *arg = get_irn_n(op, i);
386
387                         if(arg == dest_value) break;
388                 }
389                 if(i == n) return NULL;
390
391                 DBG((si->dbg, LEVEL_5, "\t  requesting inverse op for argument %d of op %+F\n", i, op));
392
393                 /* else ask the backend to give an inverse op */
394                 if(arch_get_inverse(si->chordal_env->birg->main_env->arch_env, op, i, &inverse, si->obst)) {
395                         int   i;
396
397                         DBG((si->dbg, LEVEL_4, "\t  backend gave us an inverse op with %d nodes and cost %d\n", inverse.n, inverse.costs));
398
399                         assert(inverse.n > 0 && "inverse op should have at least one node");
400
401                         for(i=0; i<inverse.n; ++i) {
402                                 pset_insert_ptr(si->inverse_ops, inverse.nodes[i]);
403                         }
404
405                         if(inverse.n <= 2) {
406                                 remat = obstack_alloc(si->obst, sizeof(*remat));
407                                 remat->op = inverse.nodes[0];
408                                 remat->cost = inverse.costs;
409                                 remat->value = dest_value;
410                                 remat->proj = (inverse.n==2)?inverse.nodes[1]:NULL;
411                                 remat->inverse = 1;
412
413                                 assert(is_Proj(remat->proj));
414                         } else {
415                                 assert(0 && "I can not handle remats with more than 2 nodes");
416                         }
417                 }
418         }
419
420         if(remat) {
421                 if(remat->proj) {
422                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F with %+F\n", remat->op, dest_value, op, remat->proj));
423                 } else {
424                         DBG((si->dbg, LEVEL_3, "\t >Found remat %+F for %+F from %+F\n", remat->op, dest_value, op));
425                 }
426         }
427         return remat;
428 }
429
430
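/**
 * Registers @p remat in the remat_info table: under its value and,
 * additionally, under each argument of the remat op (remats_by_operand).
 */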
431 static INLINE void
432 add_remat(const spill_ilp_t * si, const remat_t * remat)
433 {
434         remat_info_t    *remat_info,
435                      query;
436         int              i,
437                                          n;
438
439         assert(remat->op);
440         assert(remat->value);
441
442         query.irn = remat->value;
443         query.remats = NULL;
444         query.remats_by_operand = NULL;
445         remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(remat->value));
446
447         if(remat_info->remats == NULL) {
448                 remat_info->remats = new_pset(cmp_remat, 4096);
449         }
450         pset_insert(remat_info->remats, remat, HASH_PTR(remat->op));
451
452         /* insert the remat into the remats_by_operand set of each argument of the remat op */
453         for (i = 0, n = get_irn_arity(remat->op); i < n; ++i) {
454                 ir_node        *arg = get_irn_n(remat->op, i);
455
456                 query.irn = arg;
457                 query.remats = NULL;
458                 query.remats_by_operand = NULL;
459                 remat_info = set_insert(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
460
461                 if(remat_info->remats_by_operand == NULL) {
462                         remat_info->remats_by_operand = new_pset(cmp_remat, 4096);
463                 }
464                 pset_insert(remat_info->remats_by_operand, remat, HASH_PTR(remat->op));
465         }
466 }
467
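/**
 * Counts the users of @p irn, ignoring users that belong to inverse
 * remat operations.
 */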
468 static int
469 get_irn_n_nonremat_edges(const spill_ilp_t * si, const ir_node * irn)
470 {
471         const ir_edge_t   *edge = get_irn_out_edge_first(irn);
472         int                i = 0;
473
474         while(edge) {
475                 if(!pset_find_ptr(si->inverse_ops, edge->src)) {
476                         ++i;
477                 }
478                 edge = get_irn_out_edge_next(irn, edge);
479         }
480
481         return i;
482 }
483
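/**
 * Collects the remat that recomputes @p op itself and, if enabled, the
 * inverse remats that recover one of its operands from its result.
 */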
484 static INLINE void
485 get_remats_from_op(spill_ilp_t * si, const ir_node * op)
486 {
487         int       i,
488                       n;
489         remat_t *remat;
490
491 #ifdef NO_SINGLE_USE_REMATS
492         if(has_reg_class(si, op) && (get_irn_n_nonremat_edges(si, op) > 1)) {
493 #else
494         if(has_reg_class(si, op)) {
495 #endif
496                 remat = get_remat_from_op(si, op, op);
497                 if(remat) {
498                         add_remat(si, remat);
499                 }
500         }
501
502 #ifdef COLLECT_INVERSE_REMATS
503         /* additionally, for each argument of op try to obtain an inverse remat
504            via get_remat_from_op(si, arg, op) */
505         for (i = 0, n = get_irn_arity(op); i < n; ++i) {
506                 ir_node        *arg = get_irn_n(op, i);
507
508                 if(has_reg_class(si, arg)) {
509                         /* try to get an inverse remat */
510                         remat = get_remat_from_op(si, arg, op);
511                         if(remat) {
512                                 add_remat(si, remat);
513                         }
514                 }
515         }
516 #endif
517
518 }
519
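/**
 * Returns nonzero if @p val is defined before the schedule position
 * @p pos (scheduled earlier in the same block, or defined in a
 * dominating block).
 */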
520 static INLINE int
521 value_is_defined_before(const spill_ilp_t * si, const ir_node * pos, const ir_node * val)
522 {
523         ir_node *block;
524         ir_node *def_block = get_nodes_block(val);
525         int      ret;
526
527         if(val == pos)
528                 return 0;
529
530         /* if pos is at end of a basic block */
531         if(is_Block(pos)) {
532                 ret = (pos == def_block || block_dominates(def_block, pos));
533 //              ir_fprintf(stderr, "(def(bb)=%d) ", ret);
534                 return ret;
535         }
536
537         /* else if this is a normal operation */
538         block = get_nodes_block(pos);
539         if(block == def_block) {
540                 if(!sched_is_scheduled(val)) return 1;
541
542                 ret = sched_comes_after(val, pos);
543 //              ir_fprintf(stderr, "(def(same block)=%d) ",ret);
544                 return ret;
545         }
546
547         ret = block_dominates(def_block, block);
548 //      ir_fprintf(stderr, "(def(other block)=%d) ", ret);
549         return ret;
550 }
551
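/**
 * Returns the last node of block @p bb that is not a control flow node.
 */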
552 static INLINE ir_node *
553 sched_block_last_noncf(const spill_ilp_t * si, const ir_node * bb)
554 {
555     return sched_skip((ir_node*)bb, 0, sched_skip_cf_predicator, (void *) si->chordal_env->birg->main_env->arch_env);
556 }
557
558 /**
559  * Returns first non-Phi node of block @p bb
560  */
561 static INLINE ir_node *
562 sched_block_first_nonphi(const ir_node * bb)
563 {
564         return sched_skip((ir_node*)bb, 1, sched_skip_phi_predicator, NULL);
565 }
566
567 static int
568 sched_skip_proj_predicator(const ir_node * irn, void * data)
569 {
570         return (is_Proj(irn));
571 }
572
573 static INLINE ir_node *
574 sched_next_nonproj(const ir_node * irn, int forward)
575 {
576         return sched_skip((ir_node*)irn, forward, sched_skip_proj_predicator, NULL);
577 }
578
579 /**
580  * Returns next operation node (non-Proj) after @p irn
581  * or the basic block of this node
582  */
583 static INLINE ir_node *
584 sched_next_op(const ir_node * irn)
585 {
586         ir_node *next = sched_next(irn);
587
588         if(is_Block(next))
589                 return next;
590
591         return sched_next_nonproj(next, 1);
592 }
593
594 /**
595  * Returns previous operation node (non-Proj) before @p irn
596  * or the basic block of this node
597  */
598 static INLINE ir_node *
599 sched_prev_op(const ir_node * irn)
600 {
601         ir_node *prev = sched_prev(irn);
602
603         if(is_Block(prev))
604                 return prev;
605
606         return sched_next_nonproj(prev, 0);
607 }
608
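/**
 * Schedules @p irn right behind @p insert (or at the beginning of the
 * block, after the Phis, if @p insert is a Block).
 */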
609 static void
610 sched_put_after(ir_node * insert, ir_node * irn)
611 {
612         if(is_Block(insert)) {
613                 insert = sched_block_first_nonphi(insert);
614         } else {
615                 insert = sched_next_op(insert);
616         }
617         sched_add_before(insert, irn);
618 }
619
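/**
 * Schedules @p irn right in front of @p insert (or at the end of the
 * block, before the control flow nodes, if @p insert is a Block).
 */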
620 static void
621 sched_put_before(const spill_ilp_t * si, ir_node * insert, ir_node * irn)
622 {
623   if(is_Block(insert)) {
624           insert = sched_block_last_noncf(si, insert);
625   } else {
626           insert = sched_next_nonproj(insert, 0);
627           insert = sched_prev(insert);
628   }
629   sched_add_after(insert, irn);
630 }
631
632 /**
633  * Tells you whether a @p remat can be placed before the irn @p pos
634  */
635 static INLINE int
636 can_remat_before(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
637 {
638         const ir_node   *op = remat->op;
639         const ir_node   *prev;
640         int        i,
641                            n,
642                            res = 1;
643
644         if(is_Block(pos)) {
645                 prev = sched_block_last_noncf(si, pos);
646                 prev = sched_next_nonproj(prev, 0);
647         } else {
648                 prev = sched_prev_op(pos);
649         }
650         /* do not remat if the rematted value is defined immediately before this op */
651         if(prev == remat->op) {
652                 return 0;
653         }
654
655 #if 0
656         /* this should be just fine, the following OP will be using this value, right? */
657
658         /* only remat AFTER the real definition of a value (?) */
659         if(!value_is_defined_before(si, pos, remat->value)) {
660 //              ir_fprintf(stderr, "error(not defined)");
661                 return 0;
662         }
663 #endif
664
665         for(i=0, n=get_irn_arity(op); i<n && res; ++i) {
666                 const ir_node   *arg = get_irn_n(op, i);
667
668 #ifdef NO_ENLARGE_L1V3N355
669                 if(has_reg_class(si, arg) && live) {
670                         res &= pset_find_ptr(live, arg)?1:0;
671                 } else {
672                         res &= value_is_defined_before(si, pos, arg);
673                 }
674 #else
675                 res &= value_is_defined_before(si, pos, arg);
676 #endif
677         }
678
679         return res;
680 }
681
682 /**
683  * Tells you whether a @p remat can be placed after the irn @p pos
684  */
685 static INLINE int
686 can_remat_after(const spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
687 {
688         if(is_Block(pos)) {
689                 pos = sched_block_first_nonphi(pos);
690         } else {
691                 pos = sched_next_op(pos);
692         }
693
694         /* only remat AFTER the real definition of a value (?) */
695         if(!value_is_defined_before(si, pos, remat->value)) {
696                 return 0;
697         }
698
699         return can_remat_before(si, remat, pos, live);
700 }
701
702 /**
703  * Collect potentially rematerializable OPs
704  */
705 static void
706 walker_remat_collector(ir_node * irn, void * data)
707 {
708         spill_ilp_t    *si = data;
709
710         if(!is_Block(irn) && !is_Phi(irn)) {
711                 DBG((si->dbg, LEVEL_4, "\t  Processing %+F\n", irn));
712                 get_remats_from_op(si, irn);
713         }
714 }
715
716 /**
717  * Inserts a copy of @p irn before @p pos
718  */
719 static ir_node *
720 insert_copy_before(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
721 {
722         ir_node     *bb;
723         ir_node     *copy;
724
725         bb = is_Block(pos)?pos:get_nodes_block(pos);
726         copy = exact_copy(irn);
727         set_nodes_block(copy, bb);
728         sched_put_before(si, pos, copy);
729
730         return copy;
731 }
732
733 /**
734  * Inserts a copy of @p irn after @p pos
735  */
736 static ir_node *
737 insert_copy_after(const spill_ilp_t * si, const ir_node * irn, ir_node * pos)
738 {
739         ir_node     *bb;
740         ir_node     *copy;
741
742         bb = is_Block(pos)?pos:get_nodes_block(pos);
743         copy = exact_copy(irn);
744         set_nodes_block(copy, bb);
745         sched_put_after(pos, copy);
746
747         return copy;
748 }
749
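/**
 * If legal, inserts a copy of the remat op right after @p pos and adds
 * a binary ILP variable for it (a post remat, pre = 0).
 */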
750 static void
751 insert_remat_after(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
752 {
753         char     buf[256];
754
755         if(can_remat_after(si, remat, pos, live)) {
756                 ir_node         *copy,
757                                                 *proj_copy;
758                 op_t            *op;
759
760                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
761
762                 copy = insert_copy_after(si, remat->op, pos);
763
764 //              ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", remat->value, pos);
765                 ir_snprintf(buf, sizeof(buf), "remat2_%N_%N", copy, pos);
766                 op = obstack_alloc(si->obst, sizeof(*op));
767                 op->is_remat = 1;
768                 op->attr.remat.remat = remat;
769                 op->attr.remat.pre = 0;
770                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
771
772                 set_irn_link(copy, op);
773                 pset_insert_ptr(si->all_possible_remats, copy);
774                 if(remat->proj) {
775                         proj_copy = insert_copy_after(si, remat->proj, copy);
776                         set_irn_n(proj_copy, 0, copy);
777                         set_irn_link(proj_copy, op);
778                         pset_insert_ptr(si->all_possible_remats, proj_copy);
779                 } else {
780                         proj_copy = NULL;
781                 }
782         }
783 }
784
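/**
 * If legal, inserts a copy of the remat op right before @p pos and adds
 * a binary ILP variable for it (a pre remat, pre = 1).
 */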
785 static void
786 insert_remat_before(spill_ilp_t * si, const remat_t * remat, const ir_node * pos, const pset * live)
787 {
788         char     buf[256];
789
790         if(can_remat_before(si, remat, pos, live)) {
791                 ir_node         *copy,
792                                                 *proj_copy;
793                 op_t            *op;
794
795                 DBG((si->dbg, LEVEL_3, "\t  >inserting remat %+F\n", remat->op));
796
797                 copy = insert_copy_before(si, remat->op, pos);
798
799 //              ir_snprintf(buf, sizeof(buf), "remat_%N_%N", remat->value, pos);
800                 ir_snprintf(buf, sizeof(buf), "remat_%N_%N", copy, pos);
801                 op = obstack_alloc(si->obst, sizeof(*op));
802                 op->is_remat = 1;
803                 op->attr.remat.remat = remat;
804                 op->attr.remat.pre = 1;
805                 op->attr.remat.ilp = lpp_add_var(si->lpp, buf, lpp_binary, remat->cost*execution_frequency(si, pos));
806
807                 set_irn_link(copy, op);
808                 pset_insert_ptr(si->all_possible_remats, copy);
809                 if(remat->proj) {
810                         proj_copy = insert_copy_after(si, remat->proj, copy);
811                         set_irn_n(proj_copy, 0, copy);
812                         set_irn_link(proj_copy, op);
813                         pset_insert_ptr(si->all_possible_remats, proj_copy);
814                 } else {
815                         proj_copy = NULL;
816                 }
817         }
818 }
819
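/**
 * Returns the number of successors of @p block, capped at 2.
 */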
820 static int
821 get_block_n_succs(const ir_node *block) {
822         const ir_edge_t *edge;
823
824         assert(edges_activated(current_ir_graph));
825
826         edge = get_block_succ_first(block);
827         if (! edge)
828                 return 0;
829
830         edge = get_block_succ_next(block, edge);
831         return edge ? 2 : 1;
832 }
833
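/**
 * With GOODWIN_REDUCTION enabled, end-of-block remats and reloads are
 * only considered in blocks with exactly one successor, and
 * start-of-block remat2s in blocks with exactly one predecessor
 * (see is_diverge_edge).
 */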
834 static int
835 is_merge_edge(const ir_node * bb)
836 {
837 #ifdef GOODWIN_REDUCTION
838         return get_block_n_succs(bb) == 1;
839 #else
840         return 1;
841 #endif
842 }
843
844 static int
845 is_diverge_edge(const ir_node * bb)
846 {
847 #ifdef GOODWIN_REDUCTION
848         return get_Block_n_cfgpreds(bb) == 1;
849 #else
850         return 1;
851 #endif
852 }
853
854 /**
855  * Insert (so far unused) remats into the irg to
856  * recompute the potential liveness of all values
857  */
858 static void
859 walker_remat_insertor(ir_node * bb, void * data)
860 {
861         spill_ilp_t    *si = data;
862         spill_bb_t     *spill_bb;
863         ir_node        *irn;
864         int             n;
865         irn_live_t     *li;
866         pset           *live = pset_new_ptr_default();
867
868         DBG((si->dbg, LEVEL_3, "\t Entering %+F\n\n", bb));
869
870         live_foreach(bb, li) {
871                 ir_node        *value = (ir_node *) li->irn;
872
873                 /* add remats at end of block */
874                 if (live_is_end(li) && has_reg_class(si, value)) {
875                         pset_insert_ptr(live, value);
876                 }
877         }
878
879         spill_bb = obstack_alloc(si->obst, sizeof(*spill_bb));
880         set_irn_link(bb, spill_bb);
881
882         irn = sched_last(bb);
883         while(!sched_is_end(irn)) {
884                 ir_node   *next;
885                 op_t      *op;
886                 pset      *args;
887                 ir_node   *arg;
888
889                 next = sched_prev(irn);
890
891                 DBG((si->dbg, LEVEL_5, "\t at %+F (next: %+F)\n", irn, next));
892
893                 if(is_Phi(irn) || is_Proj(irn)) {
894                         op_t      *op;
895
896                         if(has_reg_class(si, irn)) {
897                                 pset_remove_ptr(live, irn);
898                         }
899
900                         op = obstack_alloc(si->obst, sizeof(*op));
901                         op->is_remat = 0;
902                         op->attr.live_range.args.reloads = NULL;
903                         op->attr.live_range.ilp = ILP_UNDEF;
904                         set_irn_link(irn, op);
905
906                         irn = next;
907                         continue;
908                 }
909
910                 op = obstack_alloc(si->obst, sizeof(*op));
911                 op->is_remat = 0;
912                 op->attr.live_range.ilp = ILP_UNDEF;
913                 op->attr.live_range.args.reloads = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
914                 memset(op->attr.live_range.args.reloads, 0xFF, sizeof(*op->attr.live_range.args.reloads) * get_irn_arity(irn));
915                 set_irn_link(irn, op);
916
917                 args = pset_new_ptr_default();
918
919                 /* collect arguments of op */
920                 for (n = get_irn_arity(irn)-1; n>=0; --n) {
921                         ir_node        *arg = get_irn_n(irn, n);
922
923                         pset_insert_ptr(args, arg);
924                 }
925
926                 /* set args of op live in epilog */
927                 pset_foreach(args, arg) {
928                         if(has_reg_class(si, arg)) {
929                                 pset_insert_ptr(live, arg);
930                         }
931                 }
932
933                 /* insert all possible remats after irn */
934                 pset_foreach(args, arg) {
935                         remat_info_t   *remat_info,
936                                                     query;
937                         remat_t        *remat;
938
939                         /* continue if the operand has the wrong reg class
940                          */
941                         if(!has_reg_class(si, arg))
942                                 continue;
943
944                         query.irn = arg;
945                         query.remats = NULL;
946                         query.remats_by_operand = NULL;
947                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
948
949                         if(!remat_info) {
950                                 continue;
951                         }
952
953                         /* do not place post remats after jumps */
954                         if(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) continue;
955
956                         if(remat_info->remats_by_operand) {
957                                 pset_foreach(remat_info->remats_by_operand, remat) {
958                                         /* do not insert remats producing the same value as one of the operands */
959                                         if(!pset_find_ptr(args, remat->value)) {
960                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F with arg %+F\n", remat->op, arg));
961 #ifdef REMAT_WHILE_LIVE
962                                                 if(pset_find_ptr(live, remat->value)) {
963                                                         insert_remat_after(si, remat, irn, live);
964                                                 }
965 #else
966                                                 insert_remat_after(si, remat, irn, live);
967 #endif
968                                         }
969                                 }
970                         }
971                 }
972
973                 /* delete defined value from live set */
974                 if(has_reg_class(si, irn)) {
975                         pset_remove_ptr(live, irn);
976                 }
977
978                 /* insert all possible remats before irn */
979                 pset_foreach(args, arg) {
980                         remat_info_t   *remat_info,
981                                                     query;
982                         remat_t        *remat;
983
984                         /* continue if the operand has the wrong reg class
985                          */
986                         if(!has_reg_class(si, arg))
987                                 continue;
988
989                         query.irn = arg;
990                         query.remats = NULL;
991                         query.remats_by_operand = NULL;
992                         remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(arg));
993
994                         if(!remat_info) {
995                                 continue;
996                         }
997
998                         if(remat_info->remats) {
999                                 pset_foreach(remat_info->remats, remat) {
1000                                         DBG((si->dbg, LEVEL_4, "\t  considering remat %+F for arg %+F\n", remat->op, arg));
1001 #ifdef REMAT_WHILE_LIVE
1002                                         if(pset_find_ptr(live, remat->value)) {
1003                                                 insert_remat_before(si, remat, irn, live);
1004                                         }
1005 #else
1006                                         insert_remat_before(si, remat, irn, live);
1007 #endif
1008                                 }
1009                         }
1010                 }
1011
1012                 del_pset(args);
1013                 irn = next;
1014         }
1015
1016         live_foreach(bb, li) {
1017                 ir_node        *value = (ir_node *) li->irn;
1018
1019                 /* add remats at end if successor has multiple predecessors */
1020                 if(is_merge_edge(bb)) {
1021                         /* add remats at end of block */
1022                         if (live_is_end(li) && has_reg_class(si, value)) {
1023                                 remat_info_t   *remat_info,
1024                                                            query;
1025                                 remat_t        *remat;
1026
1027                                 query.irn = value;
1028                                 query.remats = NULL;
1029                                 query.remats_by_operand = NULL;
1030                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1031
1032                                 if(remat_info && remat_info->remats) {
1033                                         pset_foreach(remat_info->remats, remat) {
1034                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at end of block %+F\n", remat->op, bb));
1035
1036                                                 insert_remat_before(si, remat, bb, NULL);
1037                                         }
1038                                 }
1039                         }
1040                 }
1041                 if(is_diverge_edge(bb)) {
1042                         /* add remat2s at beginning of block */
1043                         if ((live_is_in(li) || (is_Phi(value) && get_nodes_block(value)==bb)) && has_reg_class(si, value)) {
1044                                 remat_info_t   *remat_info,
1045                                                            query;
1046                                 remat_t        *remat;
1047
1048                                 query.irn = value;
1049                                 query.remats = NULL;
1050                                 query.remats_by_operand = NULL;
1051                                 remat_info = set_find(si->remat_info, &query, sizeof(query), HASH_PTR(value));
1052
1053                                 if(remat_info && remat_info->remats) {
1054                                         pset_foreach(remat_info->remats, remat) {
1055                                                 DBG((si->dbg, LEVEL_4, "\t  considering remat %+F at beginning of block %+F\n", remat->op, bb));
1056
1057                                                 /* put the remat here if all its args are available */
1058                                                 insert_remat_after(si, remat, bb, NULL);
1059
1060                                         }
1061                                 }
1062                         }
1063                 }
1064         }
1065 }
1066
1067 /**
1068  * Preparation of blocks' ends for Luke Blockwalker(tm)(R)
1069  */
1070 static void
1071 luke_endwalker(ir_node * bb, void * data)
1072 {
1073         spill_ilp_t    *si = (spill_ilp_t*)data;
1074         irn_live_t     *li;
1075         pset           *live;
1076         pset           *use_end;
1077         char            buf[256];
1078         ilp_cst_t       cst;
1079         ir_node        *irn;
1080         spill_bb_t     *spill_bb = get_irn_link(bb);
1081
1082
1083         live = pset_new_ptr_default();
1084         use_end = pset_new_ptr_default();
1085
1086         live_foreach(bb, li) {
1087                 irn = (ir_node *) li->irn;
1088                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1089                         op_t      *op;
1090
1091                         pset_insert_ptr(live, irn);
1092                         op = get_irn_link(irn);
1093                         assert(!op->is_remat);
1094                 }
1095         }
1096
1097         /* collect values used by cond jumps etc. at bb end (use_end) -> always live */
1098         /* their reg_out must always be set */
1099         sched_foreach_reverse(bb, irn) {
1100                 int   i,
1101                           n;
1102
1103                 if(!sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) break;
1104
1105                 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
1106                         ir_node        *irn_arg = get_irn_n(irn, i);
1107                         if(has_reg_class(si, irn_arg)) {
1108                                 pset_insert_ptr(use_end, irn_arg);
1109                         }
1110                 }
1111         }
1112
1113         ir_snprintf(buf, sizeof(buf), "check_end_%N", bb);
1114         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - pset_count(use_end));
1115
1116         spill_bb->ilp = new_set(cmp_spill, 16);
1117
1118         pset_foreach(live,irn) {
1119                 spill_t     query,
1120                                         *spill;
1121                 double      spill_cost;
1122
1123
1124                 /* handle values used by control flow nodes later separately */
1125                 if(pset_find_ptr(use_end, irn)) continue;
1126
1127                 query.irn = irn;
1128                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1129
1130                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1131
1132                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1133                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1134                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1135
1136                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1137                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1138
1139                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1140                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1141
1142                 spill->reg_in = ILP_UNDEF;
1143                 spill->mem_in = ILP_UNDEF;
1144         }
1145
1146         pset_foreach(use_end,irn) {
1147                 spill_t     query,
1148                                         *spill;
1149                 double      spill_cost;
1150                 ilp_cst_t   end_use_req;
1151
1152                 query.irn = irn;
1153                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1154
1155                 spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1156
1157                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", irn, bb);
1158                 spill->reg_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1159                 /* if irn is used at the end of the block, then it is live anyway */
1160                 //lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1161
1162                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1163                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1164
1165                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1166                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1167
1168                 spill->reg_in = ILP_UNDEF;
1169                 spill->mem_in = ILP_UNDEF;
1170
1171                 ir_snprintf(buf, sizeof(buf), "req_cf_end_%N_%N", irn, bb);
1172                 end_use_req = lpp_add_cst(si->lpp, buf, lpp_equal, 1);
1173                 lpp_set_factor_fast(si->lpp, end_use_req, spill->reg_out, 1.0);
1174         }
1175
1176         del_pset(live);
1177         del_pset(use_end);
1178 }
1179
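/**
 * Returns the op following @p irn (or the first non-Phi op of the block
 * if @p irn is a Block) iff it is a post remat, NULL otherwise.
 */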
1180 static ir_node *
1181 next_post_remat(const ir_node * irn)
1182 {
1183         op_t      *op;
1184
1185         if(is_Block(irn)) {
1186                 irn = sched_block_first_nonphi(irn);
1187         } else {
1188                 irn = sched_next_op(irn);
1189         }
1190
1191         if(sched_is_end(irn))
1192                 return NULL;
1193
1194         op = (op_t*)get_irn_link(irn);
1195         if(op->is_remat && !op->attr.remat.pre) {
1196                 return irn;
1197         }
1198
1199         return NULL;
1200 }
1201
1202
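/**
 * Returns the op preceding @p irn (or the last op before the control
 * flow nodes if @p irn is a Block) iff it is a pre remat, NULL otherwise.
 */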
1203 static ir_node *
1204 next_pre_remat(const spill_ilp_t * si, const ir_node * irn)
1205 {
1206         op_t      *op;
1207         ir_node   *ret;
1208
1209         if(is_Block(irn)) {
1210                 ret = sched_block_last_noncf(si, irn);
1211                 ret = sched_next(ret);
1212                 ret = sched_prev_op(ret);
1213         } else {
1214                 ret = sched_prev_op(irn);
1215         }
1216
1217         if(sched_is_end(ret) || is_Phi(ret))
1218                 return NULL;
1219
1220         op = (op_t*)get_irn_link(ret);
1221         if(op->is_remat && op->attr.remat.pre) {
1222                 return ret;
1223         }
1224
1225         return NULL;
1226 }
1227
1228 /**
1229  * Find a remat of value @p value in the epilog of @p pos
1230  */
1231 static ir_node *
1232 find_post_remat(const ir_node * value, const ir_node * pos)
1233 {
1234         while((pos = next_post_remat(pos)) != NULL) {
1235                 op_t   *op;
1236
1237                 op = get_irn_link(pos);
1238                 assert(op->is_remat && !op->attr.remat.pre);
1239
1240                 if(op->attr.remat.remat->value == value)
1241                         return (ir_node*)pos;
1242
1243 #if 0
1244         const ir_edge_t *edge;
1245                 foreach_out_edge(pos, edge) {
1246                         ir_node   *proj = get_edge_src_irn(edge);
1247                         assert(is_Proj(proj));
1248                 }
1249 #endif
1250
1251         }
1252
1253         return NULL;
1254 }
1255
1256 /**
1257  * Find a remat of value @p value in the prolog of @p pos
1258  */
1259 static ir_node *
1260 find_pre_remat(const spill_ilp_t * si, const ir_node * value, const ir_node * pos)
1261 {
1262         while((pos = next_pre_remat(si,pos)) != NULL) {
1263                 op_t   *op;
1264
1265                 op = get_irn_link(pos);
1266                 assert(op->is_remat && op->attr.remat.pre);
1267
1268                 if(op->attr.remat.remat->value == value)
1269                         return (ir_node*)pos;
1270         }
1271
1272         return NULL;
1273 }
1274
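/**
 * Returns the spill_t of @p irn in block @p bb, creating it (with
 * mem_out and spill ILP variables) if it does not exist yet.
 */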
1275 static spill_t *
1276 add_to_spill_bb(spill_ilp_t * si, ir_node * bb, ir_node * irn)
1277 {
1278         spill_bb_t  *spill_bb = get_irn_link(bb);
1279         spill_t     *spill,
1280                                  query;
1281         char         buf[256];
1282
1283         query.irn = irn;
1284         spill = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1285         if(!spill) {
1286                 double   spill_cost = is_Unknown(irn)?0.0001:COST_STORE*execution_frequency(si, bb);
1287
1288                 spill = set_insert(spill_bb->ilp, &query, sizeof(query), HASH_PTR(irn));
1289
1290                 spill->reg_out = ILP_UNDEF;
1291                 spill->reg_in  = ILP_UNDEF;
1292                 spill->mem_in  = ILP_UNDEF;
1293
1294                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", irn, bb);
1295                 spill->mem_out = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1296
1297                 ir_snprintf(buf, sizeof(buf), "spill_%N_%N", irn, bb);
1298                 spill->spill = lpp_add_var(si->lpp, buf, lpp_binary, spill_cost);
1299         }
1300
1301         return spill;
1302 }
1303
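/**
 * Collects into @p live all values of the spilled register class
 * (excluding the inserted remat copies) that are live at the end of
 * @p bb, plus the arguments of the control flow nodes at the block end.
 */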
1304 static void
1305 get_live_end(spill_ilp_t * si, ir_node * bb, pset * live)
1306 {
1307         irn_live_t     *li;
1308         ir_node        *irn;
1309
1310         live_foreach(bb, li) {
1311                 irn = (ir_node *) li->irn;
1312
1313                 if (live_is_end(li) && has_reg_class(si, irn) && !pset_find_ptr(si->all_possible_remats, irn)) {
1314                         pset_insert_ptr(live, irn);
1315                 }
1316         }
1317
1318         irn = sched_last(bb);
1319
1320         /* all values eaten by control flow operations are also live until the end of the block */
1321         while(sched_skip_cf_predicator(irn, si->chordal_env->birg->main_env->arch_env)) {
1322                 int  i;
1323                 for(i=get_irn_arity(irn)-1; i>=0; --i) {
1324                         ir_node *arg = get_irn_n(irn,i);
1325
1326                         if(has_reg_class(si, arg)) {
1327                                 pset_insert_ptr(live, arg);
1328                         }
1329                 }
1330
1331                 irn = sched_prev(irn);
1332         }
1333 }
1334
1335 /**
1336  * Walk all irg blocks and emit this ILP
1337  */
1338 static void
1339 luke_blockwalker(ir_node * bb, void * data)
1340 {
1341         spill_ilp_t    *si = (spill_ilp_t*)data;
1342         ir_node        *irn;
1343         pset           *live;
1344         char            buf[256];
1345         ilp_cst_t       cst;
1346         spill_bb_t     *spill_bb = get_irn_link(bb);
1347         int             i;
1348         ir_node        *tmp;
1349         spill_t        *spill;
1350         irn_live_t      *li;
1351
1352
1353         live = pset_new_ptr_default();
1354
1355         /* handle the end of the block: live values, reloads and reg_out constraints */
1356
1357         /* init live values at end of block */
1358         get_live_end(si, bb, live);
1359
1360         if(is_merge_edge(bb)) {
1361                 spill_bb->reloads = new_set(cmp_keyval, pset_count(live));
1362         } else {
1363                 spill_bb->reloads = NULL;
1364         }
1365
1366         pset_foreach(live, irn) {
1367                 op_t           *op;
1368                 ilp_var_t       reload;
1369
1370                 spill = set_find_spill(spill_bb->ilp, irn);
1371                 assert(spill);
1372
1373                 if(spill_bb->reloads) {
1374                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", bb, irn);
1375                         reload = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1376
1377                         set_insert_keyval(spill_bb->reloads, irn, INT_TO_PTR(reload));
1378
1379                         /* reload <= mem_out */
1380                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1381                         lpp_set_factor_fast(si->lpp, cst, reload, 1.0);
1382                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
1383                 }
1384
1385                 op = get_irn_link(irn);
1386                 assert(!op->is_remat);
1387
1388                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", irn, bb);
1389                 op->attr.live_range.ilp = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1390                 op->attr.live_range.op = bb;
1391
1392                 ir_snprintf(buf, sizeof(buf), "reg_out_%N_%N", bb, irn);
1393                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1394
1395                 /* reg_out - reload - remat - live_range <= 0 */
1396                 lpp_set_factor_fast(si->lpp, cst, spill->reg_out, 1.0);
1397                 if(spill_bb->reloads) lpp_set_factor_fast(si->lpp, cst, reload, -1.0);
1398                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, -1.0);
1399                 foreach_pre_remat(si, bb, tmp) {
1400                         op_t     *remat_op = get_irn_link(tmp);
1401                         if(remat_op->attr.remat.remat->value == irn) {
1402                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1403                         }
1404                 }
1405         }
1406         DBG((si->dbg, LEVEL_4, "\t   %d values live at end of block %+F\n", pset_count(live), bb));
1407
1408         sched_foreach_reverse(bb, irn) {
1409                 op_t       *op;
1410                 op_t       *tmp_op;
1411                 int         n,
1412                                         k,
1413                                         d = 0;
1414                 ilp_cst_t       check_pre,
1415                                         check_post;
1416 #ifdef CHECK_POST_REMAT
1417                 ilp_cst_t       check_post_remat;
1418 #endif
1419                 set        *args = new_set(cmp_keyval, get_irn_arity(irn));
1420                 keyval_t   *keyval;
1421
1422                 if(is_Phi(irn))
1423                         break;
1424
1425                 op = get_irn_link(irn);
1426                 /* skip remats */
1427                 if(op->is_remat) continue;
1428                 DBG((si->dbg, LEVEL_4, "\t  at node %+F\n", irn));
1429
1430                 if(has_reg_class(si, irn)) {
1431                         assert(pset_find_ptr(live, irn));
1432                         pset_remove_ptr(live, irn);
1433                 }
1434
1435                 if(is_Proj(irn)) continue;
1436
1437                 /* init set of irn's arguments */
1438                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1439                         ir_node        *irn_arg = get_irn_n(irn, n);
1440                         if(has_reg_class(si, irn_arg)) {
1441                                 set_insert_keyval(args, irn_arg, (void*)n);
1442                         }
1443                 }
1444
1445 #ifdef CHECK_POST_REMAT
1446                 /* check the register pressure after the epilog */
1447                 ir_snprintf(buf, sizeof(buf), "check_post_remat_%N", irn);
1448                 check_post_remat = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
1449
1450                 /* iterate over L\U */
1451                 pset_foreach(live, tmp) {
1452                         if(!set_find_keyval(args, tmp)) {
1453                                 /* if a live value is not used by irn */
1454                                 tmp_op = get_irn_link(tmp);
1455 //                              assert(tmp_op->attr.live_range.op != irn);
1456                                 lpp_set_factor_fast(si->lpp, check_post_remat, tmp_op->attr.live_range.ilp, 1.0);
1457                         }
1458                 }
1459                 /* iterate over following remats and remove possibly defined values again from check_post_remat */
1460                 foreach_post_remat(irn, tmp) {
1461                         op_t           *remat_op = get_irn_link(tmp);
1462                         const ir_node  *value = remat_op->attr.remat.remat->value;
1463                         op_t           *val_op = get_irn_link(value);
1464
1465                         assert(remat_op->is_remat && !remat_op->attr.remat.pre);
1466
1467                         /* values that are defined by remat2s are not counted */
1468 #ifdef REMAT_WHILE_LIVE
1469                         assert(val_op->attr.live_range.ilp);
1470                         lpp_set_factor_fast(si->lpp, check_post_remat, val_op->attr.live_range.ilp, 0.0);
1471 #else
1472                         if(val_op->attr.live_range.ilp != ILP_UNDEF) {
1473                                 lpp_set_factor_fast(si->lpp, check_post_remat, val_op->attr.live_range.ilp, 0.0);
1474                         }
1475 #endif /* REMAT_WHILE_LIVE */
1476                 }
1477 #endif /* CHECK_POST_REMAT */
1478
1479
1480                 /* new live ranges for values from L\U defined by remat2s or used by remats */
1481                 pset_foreach(live, tmp) {
1482                         ir_node     *value = tmp;//remat_op->attr.remat.remat->value;
1483                         op_t        *value_op = get_irn_link(value);
1484
1485                         if(!set_find_keyval(args, value)) {
1486                                 ilp_var_t    prev_lr = ILP_UNDEF;
1487                                 ir_node     *remat;
1488                                 cst = ILP_UNDEF;
1489
1490                                 foreach_post_remat(irn, remat) {
1491                                         op_t        *remat_op = get_irn_link(remat);
1492
1493                                         /* if value is being rematerialized by this remat */
1494                                         if(value == remat_op->attr.remat.remat->value) {
1495                                                 if(cst == ILP_UNDEF) {
1496                                                         /* next_live_range <= prev_live_range + sum remat2s */
1497                                                         ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1498                                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1499                                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1500                                                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1501                                                         lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1502                                                         lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1503                                                 }
1504
1505                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1506                                         }
1507                                 }
1508
1509 #ifdef MAY_DIE_AT_PRE_REMAT
1510                                 if(cst == ILP_UNDEF) {
1511                                         foreach_pre_remat(si, irn, remat) {
1512                                                 int       n;
1513
1514                                                 for (n=get_irn_arity(remat)-1; n>=0; --n) {
1515                                                         ir_node        *remat_arg = get_irn_n(remat, n);
1516
1517                                                         /* if value is being used by this remat */
1518                                                         if(value == remat_arg) {
1519                                                                 /* next_live_range <= prev_live_range */
1520                                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", value, irn);
1521                                                                 prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1522
1523                                                                 ir_snprintf(buf, sizeof(buf), "next_lr_%N_%N", value, irn);
1524                                                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1525                                                                 lpp_set_factor_fast(si->lpp, cst, value_op->attr.live_range.ilp, 1.0);
1526                                                                 lpp_set_factor_fast(si->lpp, cst, prev_lr, -1.0);
1527                                                                 goto fertig;
1528                                                         }
1529                                                         /* TODO check afterwards whether lr dies after a pre-remat (should not happen) */
1530                                                 }
1531                                         }
1532                                 }
1533 fertig:
1534 #endif
1535
1536                                 if(prev_lr != ILP_UNDEF) {
1537                                         value_op->attr.live_range.ilp = prev_lr;
1538                                         value_op->attr.live_range.op = irn;
1539                                 }
1540                         }
1541                 }
1542
1543                 /* get count of values in my register class defined by irn */
1544                 /* also add defined values to check_post_remat; do this before iterating over args */
1545                 if(get_irn_mode(irn) == mode_T) {
1546                         ir_node  *proj = sched_next(irn);
1547                         op_t     *proj_op = get_irn_link(proj);
1548
1549                         while(is_Proj(proj)) {
1550                                 if(has_reg_class(si, proj)) {
1551                                         ++d;
1552 #ifdef CHECK_POST_REMAT
1553                                         lpp_set_factor_fast(si->lpp, check_post_remat, proj_op->attr.live_range.ilp, 1.0);
1554 #endif
1555                                 }
1556                                 proj = sched_next(proj);
1557                                 proj_op = get_irn_link(proj);
1558                         }
1559                 } else {
1560                         if(has_reg_class(si, irn)) {
1561                                  d = 1;
1562 #ifdef CHECK_POST_REMAT
1563                                  lpp_set_factor_fast(si->lpp, check_post_remat, op->attr.live_range.ilp, 1.0);
1564 #endif
1565                         }
1566                 }
1567                 DBG((si->dbg, LEVEL_4, "\t   %+F produces %d values in my register class\n", irn, d));
1568
1569                 /* count how many regs irn needs for arguments */
1570                 k = set_count(args);
1571
1572                 /* check the register pressure in the prolog */
1573                 /* sum_{L\U} lr <= n - |U| */
1574                 ir_snprintf(buf, sizeof(buf), "check_pre_%N", irn);
1575                 check_pre = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - k);
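                /* the live ranges of the values in L\U are added to check_pre (and check_post)
                 * further below, after the arguments of irn have been processed */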
1576
1577                 /* check the register pressure in the epilog */
1578                 ir_snprintf(buf, sizeof(buf), "check_post_%N", irn);
1579                 check_post = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs - d);
1580
1581                 set_foreach(args, keyval) {
1582                         ilp_var_t       next_lr;
1583                         op_t           *arg_op;
1584                         ilp_var_t       post_use;
1585                         int             p = 0;
1586                         spill_t        *spill;
1587                         ir_node        *arg = keyval->key;
1588
1589                         spill = add_to_spill_bb(si, bb, arg);
1590
1591                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", arg, irn);
1592                         next_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1593
1594                         i = (int)keyval->val;
1595
1596                         ir_snprintf(buf, sizeof(buf), "reload_%N_%N", arg, irn);
1597                         op->attr.live_range.args.reloads[i] = lpp_add_var(si->lpp, buf, lpp_binary, COST_LOAD*execution_frequency(si, bb));
1598
1599                         /* reload <= mem_out */
1600                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1601                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1602                         lpp_set_factor_fast(si->lpp, cst, spill->mem_out, -1.0);
1603
1604                         arg_op = get_irn_link(arg);
1605
1606                         /* requirement: arg must be in register for use */
1607                         /* reload + remat + live_range == 1 */
1608                         ir_snprintf(buf, sizeof(buf), "req_%N_%N", irn, arg);
1609                         cst = lpp_add_cst(si->lpp, buf, lpp_equal, 1.0);
1610
1611                         lpp_set_factor_fast(si->lpp, cst, next_lr, 1.0);
1612                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[i], 1.0);
1613                         foreach_pre_remat(si, irn, tmp) {
1614                                 op_t     *remat_op = get_irn_link(tmp);
1615                                 if(remat_op->attr.remat.remat->value == arg) {
1616                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1617                                 }
1618                         }
1619
1620                         /* the epilog stuff - including post_use, post, post_remat */
1621                         ir_snprintf(buf, sizeof(buf), "post_use_%N_%N", arg, irn);
1622                         post_use = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
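                        /* post_use must be 1 if arg stays in a register after irn, i.e. if its
                         * live range continues or a following remat uses it (see the constraints
                         * below); it is counted in check_post */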
1623
1624                         lpp_set_factor_fast(si->lpp, check_post, post_use, 1.0);
1625
1626                         /* arg is live throughout epilog if the next live_range is in a register */
1627                         if(pset_find_ptr(live, arg)) {
1628                                 DBG((si->dbg, LEVEL_3, "\t  arg %+F is possibly live in epilog of %+F\n", arg, irn));
1629
1630                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1631                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1632                                 lpp_set_factor_fast(si->lpp, cst, post_use, -1.0);
1633                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, 1.0);
1634
1635 #ifdef CHECK_POST_REMAT
1636                                 lpp_set_factor_fast(si->lpp, check_post_remat, arg_op->attr.live_range.ilp, 1.0);
1637 #endif
1638                         }
1639
1640                         /* for all remat2s that use arg, add a similar constraint */
1641                         foreach_post_remat(irn, tmp) {
1642                                 int      n;
1643
1644                                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1645                                         ir_node    *remat_arg = get_irn_n(tmp, n);
1646                                         op_t       *remat_op = get_irn_link(tmp);
1647
1648                                         if(remat_arg == arg) {
1649                                                 DBG((si->dbg, LEVEL_3, "\t  found remat with arg %+F in epilog of %+F\n", arg, irn));
1650
1651                                                 ir_snprintf(buf, sizeof(buf), "post_use_%N_%N-%d", arg, irn, p++);
1652                                                 cst = lpp_add_cst(si->lpp, buf, lpp_greater, 0.0);
1653                                                 lpp_set_factor_fast(si->lpp, cst, post_use, 1.0);
1654                                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
1655                                         }
1656                                 }
1657                         }
1658
1659                         /* new live range begins for each argument */
1660                         arg_op->attr.live_range.ilp = next_lr;
1661                         arg_op->attr.live_range.op = irn;
1662
1663                         pset_insert_ptr(live, arg);
1664                 }
1665
1666                 /* start new live ranges for values used by remats */
1667                 foreach_pre_remat(si, irn, tmp) {
1668                         int       n;
1669
1670                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1671                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1672                                 op_t           *arg_op = get_irn_link(remat_arg);
1673                                 ilp_var_t       prev_lr;
1674
1675                                 if(!has_reg_class(si, remat_arg)) continue;
1676
1677                                 /* if value is becoming live through use by remat */
1678                                 if(!pset_find_ptr(live, remat_arg)) {
1679                                         ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1680                                         prev_lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1681
1682                                         arg_op->attr.live_range.ilp = prev_lr;
1683                                         arg_op->attr.live_range.op = irn;
1684
1685                                         DBG((si->dbg, LEVEL_4, "  value %+F becoming live through use by remat %+F\n", remat_arg, tmp));
1686
1687                         /* TODO: is this the right place for this? */
1688                                         pset_insert_ptr(live, remat_arg);
1689                                         add_to_spill_bb(si, bb, remat_arg);
1690                                 }
1691                                 /* TODO check afterwards whether lr dies after a pre-remat (should not happen) */
1692                         }
1693                 }
1694
1695                 /* iterate over L\U */
1696                 pset_foreach(live, tmp) {
1697                         if(!set_find_keyval(args, tmp)) {
1698                                 /* if a live value is not used by irn */
1699                                 tmp_op = get_irn_link(tmp);
1700 //                              assert(tmp_op->attr.live_range.op != irn);
1701                                 lpp_set_factor_fast(si->lpp, check_pre, tmp_op->attr.live_range.ilp, 1.0);
1702                                 lpp_set_factor_fast(si->lpp, check_post, tmp_op->attr.live_range.ilp, 1.0);
1703                         }
1704                 }
1705
1706                 /* requirements for remats */
1707                 foreach_pre_remat(si, irn, tmp) {
1708                         op_t        *remat_op = get_irn_link(tmp);
1709                         int          n;
1710
1711                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1712                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1713                                 op_t           *arg_op = get_irn_link(remat_arg);
1714
1715                                 if(!has_reg_class(si, remat_arg)) continue;
1716
1717                                 /* remat <= live_range(remat_arg) [ + reload(remat_arg) ] */
1718                                 ir_snprintf(buf, sizeof(buf), "req_remat_%N_arg_%N", tmp, remat_arg);
1719                                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1720
1721                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1722                                 lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1723
1724                                 /* if the remat arg is also used by the current op, then we can use the reload placed for this argument */
1725                                 if((keyval = set_find_keyval(args, remat_arg)) != NULL) {
1726                                         int    index = (int)keyval->val;
1727
1728                                         lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.args.reloads[index], -1.0);
1729                                 }
1730                         }
1731                 }
1732
1733                 /* requirements for remat2s
1734                  *
1735                  *  TODO: unsure whether this does the right thing.
1736                  *  Values should only be inserted into the set if they become live
1737                  *  neither through the remat nor through the op.
1738                  */
1739                 foreach_post_remat(irn, tmp) {
1740                         op_t        *remat_op = get_irn_link(tmp);
1741                         int          n;
1742
1743                         for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1744                                 ir_node        *remat_arg = get_irn_n(tmp, n);
1745                                 op_t           *arg_op = get_irn_link(remat_arg);
1746
1747                                 if(!has_reg_class(si, remat_arg)) continue;
1748
1749                                 /* only for values in L\U, the others are handled with post_use */
1750                                 if(!set_find_keyval(args, remat_arg)) {
1751                                         /* remat <= live_range(remat_arg) */
1752                                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_arg_%N", tmp, remat_arg);
1753                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1754
1755                                         /* if value is becoming live through use by remat2 */
1756                                         if(!pset_find_ptr(live, remat_arg)) {
1757                                                 ilp_var_t     lr;
1758
1759                                                 ir_snprintf(buf, sizeof(buf), "lr_%N_%N", remat_arg, irn);
1760                                                 lr = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1761
1762                                                 arg_op->attr.live_range.ilp = lr;
1763                                                 arg_op->attr.live_range.op = irn;
1764
1765                                                 DBG((si->dbg, LEVEL_3, "  value %+F becoming live through use by remat2 %+F\n", remat_arg, tmp));
1766
1767                                                 pset_insert_ptr(live, remat_arg);
1768                                                 add_to_spill_bb(si, bb, remat_arg);
1769                                         }
1770
1771                                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1772                                         lpp_set_factor_fast(si->lpp, cst, arg_op->attr.live_range.ilp, -1.0);
1773                                 }
1774                         }
1775                 }
1776
1777 #ifdef CHECK_POST_REMAT
1778                 /* iterate over following remats and add them to check_post_remat */
1779                 foreach_post_remat(irn, tmp) {
1780                         op_t           *remat_op = get_irn_link(tmp);
1781
1782                         assert(remat_op->is_remat && !remat_op->attr.remat.pre);
1783
1784                         lpp_set_factor_fast(si->lpp, check_post_remat, remat_op->attr.remat.ilp, 1.0);
1785                 }
1786 #endif
1787
1788
1789
1790                 DBG((si->dbg, LEVEL_4, "\t   %d values live at %+F\n", pset_count(live), irn));
1791
1792                 pset_foreach(live, tmp) {
1793                         assert(has_reg_class(si, tmp));
1794                 }
1795
1796                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
1797                         ir_node        *arg = get_irn_n(irn, n);
1798
1799                         assert(!find_post_remat(arg, irn) && "there should be no post remat for an argument of an op");
1800                 }
1801
1802                 del_set(args);
1803         }
1804
1805
1806
1807         /* now handle the beginning of the block */
1808
1809         /* we are now at the beginning of the basic block; only Phis are in front of us */
1810         DBG((si->dbg, LEVEL_3, "\t   %d values live at beginning of block %+F\n", pset_count(live), bb));
1811
1812         pset_foreach(live, irn) {
1813                 assert(is_Phi(irn) || get_nodes_block(irn) != bb);
1814         }
1815
1816         /* construct mem_outs for all values */
1817
1818         set_foreach(spill_bb->ilp, spill) {
1819                 ir_snprintf(buf, sizeof(buf), "mem_out_%N_%N", spill->irn, bb);
1820                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1821
1822                 lpp_set_factor_fast(si->lpp, cst, spill->mem_out, 1.0);
1823                 lpp_set_factor_fast(si->lpp, cst, spill->spill, -1.0);
1824
1825                 if(pset_find_ptr(live, spill->irn)) {
1826                         DBG((si->dbg, LEVEL_5, "\t     %+F live at beginning of block %+F\n", spill->irn, bb));
1827
1828                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N", spill->irn, bb);
1829                         spill->mem_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1830                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
1831
1832                         if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
1833                                 int   n;
1834                                 op_t *op = get_irn_link(spill->irn);
1835
1836                                 /* do we have to copy a phi argument? */
1837                                 op->attr.live_range.args.copies = obstack_alloc(si->obst, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
1838                                 memset(op->attr.live_range.args.copies, 0xFF, sizeof(*op->attr.live_range.args.copies) * get_irn_arity(spill->irn));
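                                /* the all-ones byte pattern marks each copy variable as not yet
                                 * assigned; the loop below compares against ILP_UNDEF */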
1839
1840                                 for(n=get_irn_arity(spill->irn)-1; n>=0; --n) {
1841                                         const ir_node  *arg = get_irn_n(spill->irn, n);
1842                                         double          freq=0.0;
1843                                         int             m;
1844                                         ilp_var_t       var;
1845
1846
1847                                         /* argument already done? */
1848                                         if(op->attr.live_range.args.copies[n] != ILP_UNDEF) continue;
1849
1850                                         /* get sum of execution frequencies of blocks with the same phi argument */
1851                                         for(m=n; m>=0; --m) {
1852                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
1853
1854                                                 if(arg==arg2) {
1855                                                         freq += execution_frequency(si, get_Block_cfgpred_block(bb, m));
1856                                                 }
1857                                         }
1858
1859                                         /* copies are not for free */
1860                                         ir_snprintf(buf, sizeof(buf), "copy_%N_%N", arg, spill->irn);
1861                                         var = lpp_add_var(si->lpp, buf, lpp_binary, COST_STORE * freq);
1862
1863                                         for(m=n; m>=0; --m) {
1864                                                 const ir_node  *arg2 = get_irn_n(spill->irn, m);
1865
1866                                                 if(arg==arg2) {
1867                                                         op->attr.live_range.args.copies[m] = var;
1868                                                 }
1869                                         }
1870
1871                                         /* copy <= mem_in */
1872                                         ir_snprintf(buf, sizeof(buf), "nocopy_%N_%N", arg, spill->irn);
1873                                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1874                                         lpp_set_factor_fast(si->lpp, cst, var, 1.0);
1875                                         lpp_set_factor_fast(si->lpp, cst, spill->mem_in, -1.0);
1876                                 }
1877                         }
1878                 }
1879         }
1880
1881
1882         /* L\U is empty at bb start */
1883         /* arg is live throughout epilog if it is reg_in into this block */
1884
1885         /* check the register pressure at the beginning of the block
1886          * including remats
1887          */
1888         ir_snprintf(buf, sizeof(buf), "check_start_%N", bb);
1889         cst = lpp_add_cst(si->lpp, buf, lpp_less, si->n_regs);
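        /* every live-in value entering the block in a register (reg_in) and every
         * post remat scheduled at the block start counts towards this pressure check */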
1890
1891         pset_foreach(live, irn) {
1892                 ilp_cst_t  nospill;
1893
1894                 spill = set_find_spill(spill_bb->ilp, irn);
1895                 assert(spill);
1896
1897                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N", irn, bb);
1898                 spill->reg_in = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
1899
1900                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, 1.0);
1901
1902                 /* spill + reg_in <= 1 */
1903                 ir_snprintf(buf, sizeof(buf), "nospill_%N_%N", irn, bb);
1904                 nospill = lpp_add_cst(si->lpp, buf, lpp_less, 1);
1905
1906                 lpp_set_factor_fast(si->lpp, nospill, spill->reg_in, 1.0);
1907                 lpp_set_factor_fast(si->lpp, nospill, spill->spill, 1.0);
1908
1909         }
1910         foreach_post_remat(bb, irn) {
1911                 op_t     *remat_op = get_irn_link(irn);
1912
1913                 DBG((si->dbg, LEVEL_4, "\t  next post remat: %+F\n", irn));
1914                 assert(remat_op->is_remat && !remat_op->attr.remat.pre);
1915
1916                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1917         }
1918
1919         /* for all remat2s add their requirements */
1920         foreach_post_remat(bb, tmp) {
1921                 int         n;
1922
1923                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
1924                         ir_node    *remat_arg = get_irn_n(tmp, n);
1925                         op_t       *remat_op = get_irn_link(tmp);
1926
1927                         if(!has_reg_class(si, remat_arg)) continue;
1928
1929                         spill = set_find_spill(spill_bb->ilp, remat_arg);
1930                         assert(spill);
1931
1932                         /* TODO verify this is placed correctly */
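                        /* a remat2 at the block start can only be active if its argument
                         * enters the block in a register: remat2 <= reg_in(remat_arg) */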
1933                         ir_snprintf(buf, sizeof(buf), "req_remat2_%N_%N_arg_%N", tmp, bb, remat_arg);
1934                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1935                         lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
1936                         lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, 1.0);
1937                 }
1938         }
1939
1940         /* mem_in/reg_in for live_in values, especially phis and their arguments */
1941         pset_foreach(live, irn) {
1942                 int          p = 0,
1943                                          n;
1944
1945                 spill = set_find_spill(spill_bb->ilp, irn);
1946                 assert(spill && spill->irn == irn);
1947
1948                 if(is_Phi(irn) && get_nodes_block(irn) == bb) {
1949                         for (n=get_Phi_n_preds(irn)-1; n>=0; --n) {
1950                                 ilp_cst_t       mem_in,
1951                                                                 reg_in;
1952                                 ir_node        *phi_arg = get_Phi_pred(irn, n);
1953                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
1954                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
1955                                 spill_t        *spill_p;
1956
1957                                 /* although the phi is in the right register class, one or more
1958                                  * of its arguments may be in a different one or may have to be
1959                                  * ignored
1960                                  */
1961                                 if(has_reg_class(si, phi_arg)) {
1962                                         ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
1963                                         mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1964                                         ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
1965                                         reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1966
1967                                         lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
1968                                         lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
1969
1970                                         spill_p = set_find_spill(spill_bb_p->ilp, phi_arg);
1971                                         assert(spill_p);
1972
1973                                         lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
1974                                         lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
1975                                 }
1976                         }
1977                 } else {
1978                         /* otherwise ensure the value arrives in the same resource on all paths */
1979
1980                         for (n=get_Block_n_cfgpreds(bb)-1; n>=0; --n) {
1981                                 ilp_cst_t       mem_in,
1982                                                                 reg_in;
1983                                 ir_node        *bb_p = get_Block_cfgpred_block(bb, n);
1984                                 spill_bb_t     *spill_bb_p = get_irn_link(bb_p);
1985                                 spill_t        *spill_p;
1986
1987                                 ir_snprintf(buf, sizeof(buf), "mem_in_%N_%N-%d", irn, bb, p);
1988                                 mem_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1989                                 ir_snprintf(buf, sizeof(buf), "reg_in_%N_%N-%d", irn, bb, p++);
1990                                 reg_in = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
1991
1992                                 lpp_set_factor_fast(si->lpp, mem_in, spill->mem_in, 1.0);
1993                                 lpp_set_factor_fast(si->lpp, reg_in, spill->reg_in, 1.0);
1994
1995                                 spill_p = set_find_spill(spill_bb_p->ilp, irn);
1996                                 assert(spill_p);
1997
1998                                 lpp_set_factor_fast(si->lpp, mem_in, spill_p->mem_out, -1.0);
1999                                 lpp_set_factor_fast(si->lpp, reg_in, spill_p->reg_out, -1.0);
2000                         }
2001                 }
2002         }
2003
2004         /* first live ranges from reg_ins */
2005         pset_foreach(live, irn) {
2006                 op_t      *op = get_irn_link(irn);
2007
2008                 spill = set_find_spill(spill_bb->ilp, irn);
2009                 assert(spill && spill->irn == irn);
2010
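        /* first_lr: the first live range of a live-in value can only start in a register
         * if the value enters the block in a register (reg_in) or is recreated by a
         * post remat placed at the block start */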
2011                 ir_snprintf(buf, sizeof(buf), "first_lr_%N_%N", irn, bb);
2012                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2013                 lpp_set_factor_fast(si->lpp, cst, op->attr.live_range.ilp, 1.0);
2014                 lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2015
2016                 foreach_post_remat(bb, tmp) {
2017                         op_t     *remat_op = get_irn_link(tmp);
2018
2019                         if(remat_op->attr.remat.remat->value == irn) {
2020                                 lpp_set_factor_fast(si->lpp, cst, remat_op->attr.remat.ilp, -1.0);
2021                         }
2022                 }
2023         }
2024
2025         /* walk forward now and compute constraints for placing spills */
2026         /* this must only be done for values that are not defined in this block */
2027         /* TODO: are these the values live at the start of the block? If so, just check whether this is a diverge edge and skip the loop */
2028         pset_foreach(live, irn) {
2029                 spill = set_find_spill(spill_bb->ilp, irn);
2030                 assert(spill);
2031
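        /* req_spill: a spill of irn in this block is only possible if the value is in a
         * register at that point, i.e. it enters the block in a register (only relevant
         * on diverge edges) or a remat before its first use recreates it */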
2032                 ir_snprintf(buf, sizeof(buf), "req_spill_%N_%N", irn, bb);
2033                 cst = lpp_add_cst(si->lpp, buf, lpp_less, 0.0);
2034
2035                 lpp_set_factor_fast(si->lpp, cst, spill->spill, 1.0);
2036                 if(is_diverge_edge(bb)) lpp_set_factor_fast(si->lpp, cst, spill->reg_in, -1.0);
2037
2038                 sched_foreach_op(bb, tmp) {
2039                         op_t   *op = get_irn_link(tmp);
2040
2041                         if(is_Phi(tmp)) continue;
2042                         assert(!is_Proj(tmp));
2043
2044                         if(op->is_remat) {
2045                                 ir_node   *value = op->attr.remat.remat->value;
2046
2047                                 if(value == irn) {
2048                                         /* only collect remats up to the first use of a value */
2049                                         lpp_set_factor_fast(si->lpp, cst, op->attr.remat.ilp, -1.0);
2050                                 }
2051                         } else {
2052                                 int   n;
2053
2054                                 for (n=get_irn_arity(tmp)-1; n>=0; --n) {
2055                                         ir_node    *arg = get_irn_n(tmp, n);
2056
2057                                         if(arg == irn) {
2058                                                 /* if the value is used, stop collecting remats */
2059                                                 cst = ILP_UNDEF;
2060                                                 break;
2061                                         }
2062                                 }
2063                         }
2064                         if(cst == ILP_UNDEF) break;
2065                 }
2066         }
2067
2068
2069         /* if a value is used by a mem-phi, then mem_in of this value is 0 (has to be spilled again into a different slot)
2070            mem_in(phi) -> not mem_in(orig_value) TODO: how does this depend on a certain predecessor?
2071          */
2072
2073         /* mem_in of mem-phi has associated costs (but first one is free) */
2074         /* define n_mem_copies as a positive integer in each predecessor block:
2075            (number of mem_ins into this block from the predecessor block - 1), weighted with SPILL_COST*execfreq(predecessor)
2076            TODO
2077          */
2078
2079
2080         del_pset(live);
2081 }
2082
2083 typedef struct _irnlist_t {
2084         struct list_head   list;
2085         ir_node           *irn;
2086 } irnlist_t;
2087
2088 typedef struct _interference_t {
2089         struct list_head    blocklist;
2090         ir_node            *a;
2091         ir_node            *b;
2092 } interference_t;
2093
2094 static int
2095 cmp_interference(const void *a, const void *b, size_t size)
2096 {
2097         const interference_t *p = a;
2098         const interference_t *q = b;
2099
2100         return !(p->a == q->a && p->b == q->b);
2101 }
2102
2103 static interference_t *
2104 set_find_interference(set * set, ir_node * a, ir_node * b)
2105 {
2106         interference_t     query;
2107
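        /* order the pair by pointer value so that (a,b) and (b,a) map to the same entry */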
2108         query.a = (a>b)?a:b;
2109         query.b = (a>b)?b:a;
2110
2111         return set_find(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2112 }
2113
2114 static interference_t *
2115 set_insert_interference(spill_ilp_t * si, set * set, ir_node * a, ir_node * b, ir_node * bb)
2116 {
2117         interference_t     query,
2118                                           *result;
2119         irnlist_t         *list = obstack_alloc(si->obst, sizeof(*list));
2120
2121         list->irn = bb;
2122
2123         result = set_find_interference(set, a, b);
2124         if(result) {
2125
2126                 list_add(&list->list, &result->blocklist);
2127                 return result;
2128         }
2129
2130         query.a = (a>b)?a:b;
2131         query.b = (a>b)?b:a;
2132
2133         result = set_insert(set, &query, sizeof(query), HASH_PTR(PTR_TO_INT(a) ^ PTR_TO_INT(b)));
2134
2135         INIT_LIST_HEAD(&result->blocklist);
2136         list_add(&list->list, &result->blocklist);
2137
2138         return result;
2139 }
2140
2141 static int
2142 values_interfere_in_block(ir_node * bb, ir_node * a, ir_node * b)
2143 {
2144         const ir_edge_t *edge;
2145
2146         if(get_nodes_block(a) != bb && get_nodes_block(b) != bb) {
2147                 /* both values are live in, so they interfere */
2148                 return 1;
2149         }
2150
2151         /* ensure a dominates b */
2152         if(value_dominates(b,a)) {
2153                 const ir_node * t;
2154                 t = b;
2155                 b = a;
2156                 a = t;
2157         }
2158         assert(get_nodes_block(b) == bb && "at least b should be defined here in this block");
2159
2160
2161         /* the following code is stolen from bera.c */
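        /* a (which dominates b) interferes with b if it is still live at the end of bb
         * or if it has a user in bb (other than b, and not a Phi) that is dominated by b */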
2162         if(is_live_end(bb, a))
2163                 return 1;
2164
2165         foreach_out_edge(a, edge) {
2166                 const ir_node *user = edge->src;
2167                 if(get_nodes_block(user) == bb
2168                                 && !is_Phi(user)
2169                                 && b != user
2170                                 && value_dominates(b, user))
2171                         return 1;
2172         }
2173
2174         return 0;
2175 }
2176
2177 /**
2178  * Walk all irg blocks and collect interfering values inside of phi classes
2179  */
2180 static void
2181 luke_interferencewalker(ir_node * bb, void * data)
2182 {
2183         spill_ilp_t    *si = (spill_ilp_t*)data;
2184         irn_live_t     *li1,
2185                        *li2;
2186
2187         live_foreach(bb, li1) {
2188                 ir_node        *a = (ir_node *) li1->irn;
2189                 op_t           *a_op = get_irn_link(a);
2190
2191                 if(a_op->is_remat) continue;
2192
2193                 /* a is only interesting if it is in my register class and if it is inside a phi class */
2194                 if (has_reg_class(si, a) && get_phi_class(a)) {
2195                         for(li2=li1->next; li2; li2 = li2->next) {
2196                                 ir_node        *b = (ir_node *) li2->irn;
2197                                 op_t           *b_op = get_irn_link(b);
2198
2199                                 if(b_op->is_remat) continue;
2200
2201                                 /* a and b are only interesting if they are in the same phi class */
2202                                 if(has_reg_class(si, b) && get_phi_class(a) == get_phi_class(b)) {
2203                                         if(values_interfere_in_block(bb, a, b)) {
2204                                                 DBG((si->dbg, LEVEL_1, "\tvalues interfere in %+F: %+F, %+F\n", bb, a, b));
2206                                                 set_insert_interference(si, si->interferences, a, b, bb);
2207                                         }
2208                                 }
2209                         }
2210                 }
2211         }
2212 }
2213
2214 static unsigned int copy_path_id = 0;
2215
2216 static void
2217 write_copy_path_cst(spill_ilp_t *si, pset * copies, ilp_var_t any_interfere)
2218 {
2219         ilp_cst_t  cst;
2220         ilp_var_t  copy;
2221         char       buf[256];
2222         void      *ptr;
2223
2224         ir_snprintf(buf, sizeof(buf), "copy_path-%d", copy_path_id++);
2225         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
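        /* any_interfere <= sum of the copies on this path: if the two values interfere
         * in memory, at least one copy along every connecting phi path must be paid for */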
2226
2227         lpp_set_factor_fast(si->lpp, cst, any_interfere, 1.0);
2228
2229         pset_foreach(copies, ptr) {
2230                 copy = PTR_TO_INT(ptr);
2231                 lpp_set_factor_fast(si->lpp, cst, copy, -1.0);
2232         }
2233 }
2234
2235 /**
2236  * @param copies   contains a path of copies which leads us to irn
2237  * @param visited  contains a set of nodes already visited on this path
2238  */
2239 static void
2240 find_copy_path(spill_ilp_t * si, ir_node * irn, ir_node * target, ilp_var_t any_interfere, pset * copies, pset * visited)
2241 {
2242         ir_edge_t *edge;
2243         op_t      *op = get_irn_link(irn);
2244
2245         if(op->is_remat) return;
2246
2247         pset_insert_ptr(visited, irn);
2248
2249         if(is_Phi(irn)) {
2250                 int    n;
2251
2252                 /* visit all operands */
2253                 for(n=get_irn_arity(irn)-1; n>=0; --n) {
2254                         ir_node  *arg = get_irn_n(irn, n);
2255                         ilp_var_t  copy = op->attr.live_range.args.copies[n];
2256
2257                         if(!has_reg_class(si, arg)) continue;
2258
2259                         if(arg == target) {
2260                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2261                                 write_copy_path_cst(si, copies, any_interfere);
2262                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2263                         } else {
2264                                 if(!pset_find_ptr(visited, arg)) {
2265                                         pset_insert(copies, INT_TO_PTR(copy), copy);
2266                                         find_copy_path(si, arg, target, any_interfere, copies, visited);
2267                                         pset_remove(copies, INT_TO_PTR(copy), copy);
2268                                 }
2269                         }
2270                 }
2271         }
2272
2273         /* visit all uses which are phis */
2274         foreach_out_edge(irn, edge) {
2275                 ir_node  *user = edge->src;
2276                 int       pos  = edge->pos;
2277                 op_t     *op = get_irn_link(user);
2278                 ilp_var_t copy;
2279
2280                 if(!is_Phi(user)) continue;
2281                 if(!has_reg_class(si, user)) continue;
2282
2283                 copy = op->attr.live_range.args.copies[pos];
2284
2285                 if(user == target) {
2286                         pset_insert(copies, INT_TO_PTR(copy), copy);
2287                         write_copy_path_cst(si, copies, any_interfere);
2288                         pset_remove(copies, INT_TO_PTR(copy), copy);
2289                 } else {
2290                         if(!pset_find_ptr(visited, user)) {
2291                                 pset_insert(copies, INT_TO_PTR(copy), copy);
2292                                 find_copy_path(si, user, target, any_interfere, copies, visited);
2293                                 pset_remove(copies, INT_TO_PTR(copy), copy);
2294                         }
2295                 }
2296         }
2297
2298         pset_remove_ptr(visited, irn);
2299 }
2300
2301 static void
2302 gen_copy_constraints(spill_ilp_t * si, ir_node * a, ir_node * b, ilp_var_t any_interfere)
2303 {
2304         pset * copies = pset_new_ptr_default();
2305         pset * visited = pset_new_ptr_default();
2306
2307         find_copy_path(si, a, b, any_interfere, copies, visited);
2308
2309         del_pset(visited);
2310         del_pset(copies);
2311 }
2312
2313
2314 static void
2315 memcopyhandler(spill_ilp_t * si)
2316 {
2317         interference_t   *interference;
2318         char              buf[256];
2319         /* test memory values for interference */
2320
2321         /* analyze phi classes */
2322         phi_class_compute(si->chordal_env->irg);
2323
2324         DBG((si->dbg, LEVEL_2, "\t calling interferencewalker\n"));
2325         irg_block_walk_graph(si->chordal_env->irg, luke_interferencewalker, NULL, si);
2326
2327 //      phi_class_free(si->chordal_env->irg);
2328
2329         /* now let's emit the ILP inequalities for these interferences */
2330         set_foreach(si->interferences, interference) {
2331                 irnlist_t      *irnlist;
2332                 ilp_var_t       interfere,
2333                                                 any_interfere;
2334                 ilp_cst_t       any_interfere_cst,
2335                                                 cst;
2336                 const ir_node  *a  = interference->a;
2337                 const ir_node  *b  = interference->b;
2338                 struct list_head *pos;
2339
2340                 /* any_interf <= \sum interf */
2341                 ir_snprintf(buf, sizeof(buf), "interfere_%N_%N", a, b);
2342                 any_interfere_cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2343                 any_interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2344
2345                 lpp_set_factor_fast(si->lpp, any_interfere_cst, any_interfere, 1.0);
2346
2347                 list_for_each_entry(irnlist_t, irnlist, &interference->blocklist, list) {
2348                         const ir_node  *bb = irnlist->irn;
2349                         spill_bb_t     *spill_bb = get_irn_link(bb);
2350                         spill_t        *spilla,
2351                                                    *spillb,
2352                                                    query;
2353                         char           buf[256];
2354
2355                         query.irn = a;
2356                         spilla = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(a));
2357                         assert(spilla);
2358
2359                         query.irn = b;
2360                         spillb = set_find(spill_bb->ilp, &query, sizeof(query), HASH_PTR(b));
2361                         assert(spillb);
2362
2363                         /* interfere <-> (mem_in_a or spill_a) and (mem_in_b or spill_b): */
2364                         /* 1:   mem_in_a + mem_in_b + spill_a + spill_b - interfere <= 1 */
2365                         /* 2: - mem_in_a - spill_a + interfere <= 0 */
2366                         /* 3: - mem_in_b - spill_b + interfere <= 0 */
2367                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N", bb, a, b);
2368                         interfere = lpp_add_var(si->lpp, buf, lpp_binary, 0.0);
2369
2370                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-1", bb, a, b);
2371                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 1);
2372
2373                         lpp_set_factor_fast(si->lpp, cst, interfere, -1.0);
2374                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, 1.0);
2375                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, 1.0);
2376                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, 1.0);
2377                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, 1.0);
2378
2379                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-2", bb, a, b);
2380                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2381
2382                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2383                         if(spilla->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spilla->mem_in, -1.0);
2384                         lpp_set_factor_fast(si->lpp, cst, spilla->spill, -1.0);
2385
2386                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N_%N-3", bb, a, b);
2387                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2388
2389                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2390                         if(spillb->mem_in != ILP_UNDEF) lpp_set_factor_fast(si->lpp, cst, spillb->mem_in, -1.0);
2391                         lpp_set_factor_fast(si->lpp, cst, spillb->spill, -1.0);
2392
2393
2394                         lpp_set_factor_fast(si->lpp, any_interfere_cst, interfere, -1.0);
2395
2396                         /* any_interfere >= interf */
2397                         ir_snprintf(buf, sizeof(buf), "interfere_%N_%N-%N", a, b, bb);
2398                         cst = lpp_add_cst(si->lpp, buf, lpp_less, 0);
2399
2400                         lpp_set_factor_fast(si->lpp, cst, interfere, 1.0);
2401                         lpp_set_factor_fast(si->lpp, cst, any_interfere, -1.0);
2402                 }
2403
2404                 /* now that we know whether the two values interfere in memory, we can emit the constraints that enforce copies */
2405                 gen_copy_constraints(si,a,b,any_interfere);
2406         }
2407 }
2408
2409
2410
2411 static void
2412 memcopyinsertor(spill_ilp_t * si)
2413 {
2414         /* assign spill contexts. For Phis make sure that identical
2415          * contexts are merged (operands and result share the same
2416          * context)
2417          */
2418
2419
2420
2421
2422
2423 }
2424
2425
2426
2427
2428 static INLINE int
2429 is_zero(double x)
2430 {
2431         return fabs(x) < 0.00001;
2432 }
2433
2434 #ifdef KEEPALIVE
2435 static int mark_remat_nodes_hook(FILE *F, ir_node *n, ir_node *l)
2436 {
2437         spill_ilp_t *si = get_irg_link(current_ir_graph);
2438
2439         if(pset_find_ptr(si->all_possible_remats, n)) {
2440                 op_t   *op = (op_t*)get_irn_link(n);
2441                 assert(op && op->is_remat);
2442
2443                 if(!op->attr.remat.remat->inverse) {
2444                         if(op->attr.remat.pre) {
2445                                 ir_fprintf(F, "color:red info3:\"remat value: %+F\"", op->attr.remat.remat->value);
2446                         } else {
2447                                 ir_fprintf(F, "color:orange info3:\"remat2 value: %+F\"", op->attr.remat.remat->value);
2448                         }
2449
2450                         return 1;
2451                 } else {
2452                         op_t   *op = (op_t*)get_irn_link(n);
2453                         assert(op && op->is_remat);
2454
2455                         if(op->attr.remat.pre) {
2456                                 ir_fprintf(F, "color:cyan info3:\"remat inverse value: %+F\"", op->attr.remat.remat->value);
2457                         } else {
2458                                 ir_fprintf(F, "color:lightcyan info3:\"remat2 inverse value: %+F\"", op->attr.remat.remat->value);
2459                         }
2460
2461                         return 1;
2462                 }
2463         }
2464
2465         return 0;
2466 }
2467
2468 static void
2469 dump_graph_with_remats(ir_graph * irg, const char * suffix)
2470 {
2471         set_dump_node_vcgattr_hook(mark_remat_nodes_hook);
2472         be_dump(irg, suffix, dump_ir_block_graph_sched);
2473         set_dump_node_vcgattr_hook(NULL);
2474 }
2475 #endif
2476
2477 /**
2478  * Edge hook to dump the schedule edges with annotated register pressure.
2479  */
2480 static int
2481 sched_pressure_edge_hook(FILE *F, ir_node *irn)
2482 {
2483         if(sched_is_scheduled(irn) && sched_has_prev(irn)) {
2484                 ir_node *prev = sched_prev(irn);
2485                 fprintf(F, "edge:{sourcename:\"");
2486                 PRINT_NODEID(irn);
2487                 fprintf(F, "\" targetname:\"");
2488                 PRINT_NODEID(prev);
2489                 fprintf(F, "\" label:\"%d", (int)get_irn_link(irn));
2490                 fprintf(F, "\" color:magenta}\n");
2491         }
2492         return 1;
2493 }
2494
2495 static void
2496 dump_ir_block_graph_sched_pressure(ir_graph *irg, const char *suffix)
2497 {
2498         DUMP_NODE_EDGE_FUNC old = get_dump_node_edge_hook();
2499
2500         dump_consts_local(0);
2501         set_dump_node_edge_hook(sched_pressure_edge_hook);
2502         dump_ir_block_graph(irg, suffix);
2503         set_dump_node_edge_hook(old);
2504 }
2505
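/** walk a block bottom-up and store the current register pressure in each node's
 *  link field; sched_pressure_edge_hook reads it back when dumping the graph */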
2506 static void
2507 walker_pressure_annotator(ir_node * bb, void * data)
2508 {
2509         spill_ilp_t  *si = data;
2510         ir_node      *irn;
2511         irn_live_t   *li;
2512         int           n;
2513         pset         *live = pset_new_ptr_default();
2514         int           projs = 0;
2515
2516         live_foreach(bb, li) {
2517                 irn = (ir_node *) li->irn;
2518
2519                 if (live_is_end(li) && has_reg_class(si, irn)) {
2520                         pset_insert_ptr(live, irn);
2521                 }
2522         }
2523
2524         set_irn_link(bb, INT_TO_PTR(pset_count(live)));
2525
2526         sched_foreach_reverse(bb, irn) {
2527                 if(is_Phi(irn)) {
2528                         set_irn_link(irn, INT_TO_PTR(pset_count(live)));
2529                         continue;
2530                 }
2531
2532                 if(has_reg_class(si, irn)) {
2533                         pset_remove_ptr(live, irn);
2534                         if(is_Proj(irn)) ++projs;
2535                 }
2536
2537                 if(!is_Proj(irn)) projs = 0;
2538
2539                 for (n=get_irn_arity(irn)-1; n>=0; --n) {
2540                         ir_node    *arg = get_irn_n(irn, n);
2541
2542                         if(has_reg_class(si, arg)) pset_insert_ptr(live, arg);
2543                 }
2544                 set_irn_link(irn, INT_TO_PTR(pset_count(live)+projs));
2545         }
2546
2547         del_pset(live);
2548 }
2549
2550 static void
2551 dump_pressure_graph(spill_ilp_t * si, const char *suffix)
2552 {
2553         be_dump(si->chordal_env->irg, suffix, dump_ir_block_graph_sched_pressure);
2554 }
2555
2556 #ifdef KEEPALIVE
2557 static void
2558 connect_all_remats_with_keep(spill_ilp_t * si)
2559 {
2560         ir_node   *irn;
2561         ir_node  **ins,
2562                          **pos;
2563         int        n_remats;
2564
2565
2566         n_remats = pset_count(si->all_possible_remats);
2567         if(n_remats) {
2568                 ins = obstack_alloc(si->obst, n_remats * sizeof(*ins));
2569
2570                 pos = ins;
2571                 pset_foreach(si->all_possible_remats, irn) {
2572                         *pos = irn;
2573                         ++pos;
2574                 }
2575
2576                 si->keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_remats, ins);
2577
2578                 obstack_free(si->obst, ins);
2579         }
2580 }
2581 #endif
2582
2583 static void
2584 connect_all_spills_with_keep(spill_ilp_t * si)
2585 {
2586         ir_node   *irn;
2587         ir_node  **ins,
2588                          **pos;
2589         int        n_spills;
2590         ir_node   *keep;
2591
2592
2593         n_spills = pset_count(si->spills);
2594         if(n_spills) {
2595                 ins = obstack_alloc(si->obst, n_spills * sizeof(*ins));
2596
2597                 pos = ins;
2598                 pset_foreach(si->spills, irn) {
2599                         *pos = irn;
2600                         ++pos;
2601                 }
2602
2603                 keep = be_new_Keep(si->chordal_env->cls, si->chordal_env->irg, get_irg_end_block(si->chordal_env->irg), n_spills, ins);
2604
2605                 obstack_free(si->obst, ins);
2606         }
2607 }
2608
2609 /** insert a spill at an arbitrary position */
2610 ir_node *be_spill2(const arch_env_t *arch_env, ir_node *irn, ir_node *insert, ir_node *ctx)
2611 {
2612         ir_node *bl     = is_Block(insert)?insert:get_nodes_block(insert);
2613         ir_graph *irg   = get_irn_irg(bl);
2614         ir_node *frame  = get_irg_frame(irg);
2615         ir_node *spill;
2616         ir_node *next;
2617
2618         const arch_register_class_t *cls       = arch_get_irn_reg_class(arch_env, irn, -1);
2619         const arch_register_class_t *cls_frame = arch_get_irn_reg_class(arch_env, frame, -1);
2620
2621         spill = be_new_Spill(cls, cls_frame, irg, bl, frame, irn, ctx);
2622
2623         /*
2624          * search the right insertion point. a spill of a phi cannot be put
2625          * directly after the phi, if there are some phis behind the one which
2626          * is spilled. Also, a spill of a Proj must be after all Projs of the
2627          * same tuple node.
2628          *
2629          * Here's one special case:
2630          * If the spill is in the start block, the spill must be after the frame
2631          * pointer is set up. This is done by setting insert to the end of the block
2632          * which is its default initialization (see above).
2633          */
2634
2635         if(bl == get_irg_start_block(irg) && sched_get_time_step(frame) >= sched_get_time_step(insert))
2636                 insert = frame;
2637
2638         for (next = sched_next(insert); is_Phi(next) || is_Proj(next); next = sched_next(insert))
2639                 insert = next;
2640
2641         sched_add_after(insert, spill);
2642         return spill;
2643 }
2644
2645 static void
2646 delete_remat(spill_ilp_t * si, ir_node * remat) {
2647         int       n;
2648         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2649
2650         sched_remove(remat);
2651
2652         /* kill links to the block and all operands */
2653         for (n=get_irn_arity(remat)-1; n>=-1; --n) {
2654                 set_irn_n(remat, n, bad);
2655         }
2656 }
2657
2658 static void
2659 clean_remat_info(spill_ilp_t * si)
2660 {
2661         int            n;
2662         remat_t       *remat;
2663         remat_info_t  *remat_info;
2664         ir_node       *bad = get_irg_bad(si->chordal_env->irg);
2665
2666         set_foreach(si->remat_info, remat_info) {
2667                 if(!remat_info->remats) continue;
2668
2669                 pset_foreach(remat_info->remats, remat)
2670                 {
2671                         if(remat->proj && get_irn_n_edges(remat->proj) == 0) {
2672                                 set_irn_n(remat->proj, -1, bad);
2673                                 set_irn_n(remat->proj, 0, bad);
2674                         }
2675
2676                         if(get_irn_n_edges(remat->op) == 0) {
2677                                 for (n=get_irn_arity(remat->op)-1; n>=-1; --n) {
2678                                         set_irn_n(remat->op, n, bad);
2679                                 }
2680                         }
2681                 }
2682
2683                 if(remat_info->remats) del_pset(remat_info->remats);
2684                 if(remat_info->remats_by_operand) del_pset(remat_info->remats_by_operand);
2685         }
2686 }
2687
2688 static void
2689 delete_unnecessary_remats(spill_ilp_t * si)
2690 {
2691 #ifdef KEEPALIVE
2692         int       n;
2693         ir_node  *bad = get_irg_bad(si->chordal_env->irg);
2694
2695         if(si->keep) {
2696                 ir_node   *end = get_irg_end(si->chordal_env->irg);
2697                 ir_node  **keeps;
2698
2699                 for (n=get_irn_arity(si->keep)-1; n>=0; --n) {
2700                         ir_node        *keep_arg = get_irn_n(si->keep, n);
2701                         op_t           *arg_op = get_irn_link(keep_arg);
2702                         lpp_name_t     *name;
2703
2704                         assert(arg_op->is_remat);
2705
2706                         name = si->lpp->vars[arg_op->attr.remat.ilp];
2707
2708                         if(is_zero(name->value)) {
2709                                 DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", keep_arg));
2710                                 /* TODO check whether reload is preferred over remat (could be bug) */
2711                                 delete_remat(si, keep_arg);
2712                         } else {
2713                                 if(!arg_op->attr.remat.remat->inverse) {
2714                                         if(arg_op->attr.remat.pre) {
2715                                                 DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", keep_arg));
2716                                         } else {
2717                                                 DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", keep_arg));
2718                                         }
2719                                 } else {
2720                                         if(arg_op->attr.remat.pre) {
2721                                                 DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", keep_arg));
2722                                         } else {
2723                                                 DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", keep_arg));
2724                                         }
2725                                 }
2726                         }
2727
2728                         set_irn_n(si->keep, n, bad);
2729                 }
2730 #if 0
2731                 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
2732                         ir_node        *end_arg = get_End_keepalive(end, i);
2733
2734                         if(end_arg != si->keep) {
2735                                 obstack_grow(si->obst, &end_arg, sizeof(end_arg));
2736                         }
2737                 }
2738                 keeps = obstack_finish(si->obst);
2739                 set_End_keepalives(end, n-1, keeps);
2740                 obstack_free(si->obst, keeps);
2741 #endif
2742         } else {
2743                 DBG((si->dbg, LEVEL_2, "\t  no remats to delete (none have been inserted)\n"));
2744         }
2745 #else
2746         ir_node  *remat;
2747
2748         pset_foreach(si->all_possible_remats, remat) {
2749                 op_t           *remat_op = get_irn_link(remat);
2750                 lpp_name_t     *name = si->lpp->vars[remat_op->attr.remat.ilp];
2751
2752                 if(is_zero(name->value)) {
2753                         DBG((si->dbg, LEVEL_3, "\t  deleting remat %+F\n", remat));
2754                         /* TODO check whether reload is preferred over remat (could be bug) */
2755                         delete_remat(si, remat);
2756                 } else {
2757                         if(!remat_op->attr.remat.remat->inverse) {
2758                                 if(remat_op->attr.remat.pre) {
2759                                         DBG((si->dbg, LEVEL_2, "\t**remat kept: %+F\n", remat));
2760                                 } else {
2761                                         DBG((si->dbg, LEVEL_2, "\t%%%%remat2 kept: %+F\n", remat));
2762                                 }
2763                         } else {
2764                                 if(remat_op->attr.remat.pre) {
2765                                         DBG((si->dbg, LEVEL_2, "\t**INVERSE remat kept: %+F\n", remat));
2766                                 } else {
2767                                         DBG((si->dbg, LEVEL_2, "\t%%%%INVERSE remat2 kept: %+F\n", remat));
2768                                 }
2769                         }
2770                 }
2771         }
2772 #endif
2773 }
2774
2775 /**
2776  * @param before   The node after which the spill will be placed in the schedule
2777  */
2778 /* TODO set context properly */
2779 static ir_node *
2780 insert_spill(spill_ilp_t * si, ir_node * irn, ir_node * value, ir_node * before)
2781 {
2782         defs_t   *defs;
2783         ir_node  *spill;
2784         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
2785
2786         DBG((si->dbg, LEVEL_3, "\t  inserting spill for value %+F after %+F\n", irn, before));
2787
2788         spill = be_spill2(arch_env, irn, before, irn);
2789
2790         defs = set_insert_def(si->values, value);
2791         assert(defs);
2792
2793         /* enter into the linked list */
2794         set_irn_link(spill, defs->spills);
2795         defs->spills = spill;
2796
2797 #ifdef KEEPALIVE_SPILLS
2798         pset_insert_ptr(si->spills, spill);
2799 #endif
2800
2801         return spill;
2802 }
2803
2804 /**
2805  * @param phi   The Phi node which has to be spilled
2806  */
2807 static ir_node *
2808 insert_mem_phi(spill_ilp_t * si, const ir_node * phi)
2809 {
2810         ir_node   *mem_phi;
2811         ir_node  **ins;
2812         defs_t    *defs;
2813         int        n;
2814
2815         NEW_ARR_A(ir_node*, ins, get_irn_arity(phi));
2816
2817         for(n=get_irn_arity(phi)-1; n>=0; --n) {
2818                 ins[n] = si->m_unknown;
2819         }
2820
2821         mem_phi =  new_r_Phi(si->chordal_env->irg, get_nodes_block(phi), get_irn_arity(phi), ins, mode_M);
2822
2823         defs = set_insert_def(si->values, phi);
2824         assert(defs);
2825
2826         /* enter into the linked list */
2827         set_irn_link(mem_phi, defs->spills);
2828         defs->spills = mem_phi;
2829
2830         sched_add_after(phi, mem_phi);
2831
2832 #ifdef KEEPALIVE_SPILLS
2833         pset_insert_ptr(si->spills, mem_phi);
2834 #endif
2835
2836         return mem_phi;
2837 }
2838
2839 /**
2840  * Add remat to list of defs, destroys link field!
2841  */
2842 static void
2843 insert_remat(spill_ilp_t * si, ir_node * remat)
2844 {
2845         defs_t   *defs;
2846         op_t     *remat_op = get_irn_link(remat);
2847
2848         assert(remat_op->is_remat);
2849
2850         defs = set_insert_def(si->values, remat_op->attr.remat.remat->value);
2851         assert(defs);
2852
2853         /* enter into the linked list */
2854         set_irn_link(remat, defs->remats);
2855         defs->remats = remat;
2856 }
2857
2858 #if 0
2859 static void
2860 collect_spills(spill_ilp_t * si, ir_node * value, pset * spills, pset * visited)
2861 {
2862         ir_node  *next;
2863         defs_t   *defs;
2864
2865         defs = set_find_def(si->values, value);
2866
2867         if(defs && defs->spills) {
2868                 for(next = defs->spills; next; next = get_irn_link(next)) {
2869                         pset_insert_ptr(spills, next);
2870                 }
2871         } else if (is_Phi(value)) {
2872                 /* recursion */
2873                 if(!pset_find_ptr(visited, value)) {
2874                         int    i,
2875                                    n;
2876
2877                         pset_insert_ptr(visited, value);
2878                         for(i=0, n=get_irn_arity(value); i<n; ++i) {
2879                                 ir_node    *arg = get_irn_n(value, i);
2880
2881                                 collect_spills(si, arg, spills, visited);
2882                         }
2883                 }
2884         } else {
2885 //              assert(0 && "Phi operand not spilled");
2886         }
2887 }
2888 #endif
2889
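/**
 * Collect all spills that were placed for the given value.
 */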
2890 static pset *
2891 get_spills_for_value(spill_ilp_t * si, ir_node * value)
2892 {
2893         pset     *spills = pset_new_ptr_default();
2894 //      pset     *visited = pset_new_ptr_default();
2895
2896 //      collect_spills(si, value, spills, visited);
2897 //      del_pset(visited);
2898         ir_node  *next;
2899         defs_t   *defs;
2900
2901         defs = set_find_def(si->values, value);
2902
2903         if(defs && defs->spills) {
2904                 for(next = defs->spills; next; next = get_irn_link(next)) {
2905                         pset_insert_ptr(spills, next);
2906                 }
2907         }
2908
2909         return spills;
2910 }
2911
2912 /**
2913  * Add reload before operation and add to list of defs
2914  */
2915 static ir_node *
2916 insert_reload(spill_ilp_t * si, const ir_node * value, const ir_node * after)
2917 {
2918         defs_t   *defs;
2919         ir_node  *reload,
2920                          *spill;
2921         const arch_env_t *arch_env = si->chordal_env->birg->main_env->arch_env;
2922
2923         DBG((si->dbg, LEVEL_3, "\t  inserting reload for value %+F before %+F\n", value, after));
2924
2925         defs = set_find_def(si->values, value);
2926         /* get a spill of this value */
2927 #if 0
2928         if((!defs || !defs->spills) && is_Phi(value)) {
2929                 pset  *spills;
2930
2931                 spills = get_spills_for_value(si, value);
2932
2933                 spill = pset_first(spills);
2934                 del_pset(spills);
2935
2936                 if(!defs) {
2937                         defs = set_insert_def(si->values, value);
2938                 }
2939                 defs->spills = spill;
2940                 set_irn_link(spill, NULL);
2941         } else {
2942                 spill = defs->spills;
2943         }
2944 #endif
2945         spill = defs->spills;
2946         assert(spill && "no spill placed before reload");
2947
2948         reload = be_reload(arch_env, si->cls, after, get_irn_mode(value), spill);
2949
2950         /* enter into the linked list */
2951         set_irn_link(reload, defs->remats);
2952         defs->remats = reload;
2953
2954         return reload;
2955 }
2956
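/**
 * Block walker: place spills (and memory Phis for spilled Phis) in a block
 * according to the ILP solution.
 */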
2957 static void
2958 walker_spill_placer(ir_node * bb, void * data) {
2959         spill_ilp_t   *si = (spill_ilp_t*)data;
2960         ir_node       *irn;
2961         spill_bb_t    *spill_bb = get_irn_link(bb);
2962         pset          *spills_to_do = pset_new_ptr_default();
2963         spill_t       *spill;
2964
2965         set_foreach(spill_bb->ilp, spill) {
2966                 lpp_name_t    *name;
2967
2968                 if(is_Phi(spill->irn) && get_nodes_block(spill->irn) == bb) {
2969                         name = si->lpp->vars[spill->mem_in];
2970                         if(!is_zero(name->value)) {
2971                                 ir_node   *mem_phi;
2972
2973                                 mem_phi = insert_mem_phi(si, spill->irn);
2974
2975                                 DBG((si->dbg, LEVEL_2, "\t >>spilled Phi %+F -> %+F\n", spill->irn, mem_phi));
2976                         }
2977                 }
2978
2979                 name = si->lpp->vars[spill->spill];
2980                 if(!is_zero(name->value)) {
2981                         if(spill->reg_in > 0) {
2982                                 name = si->lpp->vars[spill->reg_in];
2983                                 if(!is_zero(name->value)) {
2984                                         insert_spill(si, spill->irn, spill->irn, bb);
2985                                         continue;
2986                                 }
2987                         }
2988                         pset_insert_ptr(spills_to_do, spill->irn);
2989                 }
2990         }
2991         DBG((si->dbg, LEVEL_3, "\t  %d spills to do in block %+F\n", pset_count(spills_to_do), bb));
2992
2993
2994         for(irn = sched_block_first_nonphi(bb); !sched_is_end(irn); irn = sched_next(irn)) {
2995                 op_t     *op = get_irn_link(irn);
2996
2997                 if(be_is_Spill(irn)) continue;
2998
2999                 if(op->is_remat) {
3000                         /* TODO fix this if we want to support remats with more than two nodes */
3001                         if(get_irn_mode(irn) != mode_T && pset_find_ptr(spills_to_do, op->attr.remat.remat->value)) {
3002                                 pset_remove_ptr(spills_to_do, op->attr.remat.remat->value);
3003
3004                                 insert_spill(si, irn, op->attr.remat.remat->value, irn);
3005                         }
3006                 } else {
3007                         if(pset_find_ptr(spills_to_do, irn)) {
3008                                 pset_remove_ptr(spills_to_do, irn);
3009
3010                                 insert_spill(si, irn, irn, irn);
3011                         }
3012                 }
3013
3014         }
3015
3016         assert(pset_count(spills_to_do) == 0);
3017
3018         /* free the local worklist */
3019         del_pset(spills_to_do);
3020 }
3021
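/**
 * Set the operands of every inserted memory Phi to the spills of the
 * corresponding Phi arguments (replacing the Unknown placeholders).
 */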
3022 static void
3023 phim_fixer(spill_ilp_t *si) {
3024         defs_t  *defs;
3025
3026         set_foreach(si->values, defs) {
3027                 const ir_node  *phi = defs->value;
3028                 ir_node  *phi_m = NULL;
3029                 ir_node  *next = defs->spills;
3030                 int       n;
3031
3032                 if(!is_Phi(phi)) continue;
3033
3034                 while(next) {
3035                         if(is_Phi(next) && get_irn_mode(next) == mode_M) {
3036                                 phi_m = next;
3037                                 break;
3038                         } else {
3039                                 next = get_irn_link(next);
3040                         }
3041                 }
3042                 if(!phi_m) continue;
3043
3044                 for(n=get_irn_arity(phi)-1; n>=0; --n) {
3045                         const ir_node  *value = get_irn_n(phi, n);
3046                         defs_t         *val_defs = set_find_def(si->values, value);
3047
3048                         /* get a spill of this value */
3049                         ir_node      *spill = val_defs->spills;
3050
3051                         assert(spill && "no spill placed before PhiM");
3052
3053                         set_irn_n(phi_m, n, spill);
3054                 }
3055         }
3056 }
3057
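/**
 * Block walker: place reloads before their uses and at the block end
 * according to the ILP solution, and register kept remats as new defs.
 */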
3058 static void
3059 walker_reload_placer(ir_node * bb, void * data) {
3060         spill_ilp_t   *si = (spill_ilp_t*)data;
3061         ir_node       *irn;
3062         spill_bb_t    *spill_bb = get_irn_link(bb);
3065
3066         sched_foreach_reverse(bb, irn) {
3067                 op_t     *op = get_irn_link(irn);
3068
3069                 if(be_is_Reload(irn) || be_is_Spill(irn)) continue;
3070                 if(is_Phi(irn)) break;
3071
3072                 if(op->is_remat) {
3073                         if(get_irn_mode(irn) != mode_T) {
3074                                 insert_remat(si, irn);
3075                         }
3076                 } else {
3077                         int    n;
3078
3079                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3080                                 ir_node    *arg = get_irn_n(irn, n);
3081
3082                                 if(op->attr.live_range.args.reloads && op->attr.live_range.args.reloads[n] != ILP_UNDEF) {
3083                                         lpp_name_t    *name;
3084
3085                                         name = si->lpp->vars[op->attr.live_range.args.reloads[n]];
3086                                         if(!is_zero(name->value)) {
3087                                                 ir_node    *reload;
3088                                                 ir_node    *insert_pos = irn;
3089                                                 ir_node    *prev = insert_pos;
3090                                                 op_t       *prev_op;
3091
3092                                                 do {
3093                                                         prev = sched_prev(prev);
3094                                                 } while(be_is_Spill(prev));
3095
3096                                                 prev_op = get_irn_link(prev);
3097
3098                                                 /* insert reload before pre-remats */
3099                                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !is_Phi(prev)
3100                                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3101                                                         insert_pos = prev;
3102
3103                                                         do {
3104                                                                 prev = sched_prev(prev);
3105                                                         } while(be_is_Spill(prev));
3106
3107                                                         prev_op = get_irn_link(prev);
3108
3109                                                 }
3110
3111                                                 reload = insert_reload(si, arg, insert_pos);
3112
3113                                                 set_irn_n(irn, n, reload);
3114
3115 #ifdef KEEPALIVE_RELOADS
3116                                                 pset_insert_ptr(si->spills, reload);
3117 #endif
3118                                         }
3119                                 }
3120                         }
3121                 }
3122         }
3123
3124         /* reloads at end of block */
3125         if(spill_bb->reloads) {
3126                 keyval_t    *keyval;
3127
3128                 set_foreach(spill_bb->reloads, keyval) {
3129                         ir_node        *irn = (ir_node*)keyval->key;
3130                         ilp_var_t       reload_var = PTR_TO_INT(keyval->val);
3131                         lpp_name_t     *name;
3132
3133                         name = si->lpp->vars[reload_var];
3134                         if(!is_zero(name->value)) {
3135                                 ir_node    *reload;
3136                                 ir_node    *insert_pos = bb;
3137                                 ir_node    *prev = sched_prev(insert_pos);
3138                                 op_t       *prev_op = get_irn_link(prev);
3139
3140                                 /* insert reload before pre-remats */
3141                                 while(!sched_is_end(prev) && !be_is_Reload(prev) && !be_is_Spill(prev)
3142                                                 && prev_op->is_remat && prev_op->attr.remat.pre) {
3143                                         insert_pos = prev;
3144
3145                                         prev = sched_prev(insert_pos);
3146                                         prev_op = get_irn_link(prev);
3147                                 }
3148
3149                                 reload = insert_reload(si, irn, insert_pos);
3150
3151 #ifdef KEEPALIVE_RELOADS
3152                                 pset_insert_ptr(si->spills, reload);
3153 #endif
3154                         }
3155                 }
3156         }
3157
3158         del_set(spill_bb->ilp);
3159         if(spill_bb->reloads) del_set(spill_bb->reloads);
3160 }
3161
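/**
 * Graph walker: mark the index of every visited node in the used bitset.
 */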
3162 static void
3163 walker_collect_used(ir_node * irn, void * data)
3164 {
3165         lc_bitset_t   *used = data;
3166
3167         lc_bitset_set(used, get_irn_idx(irn));
3168 }
3169
3170 struct kill_helper {
3171         lc_bitset_t  *used;
3172         spill_ilp_t  *si;
3173 };
3174
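/**
 * Block walker: remove all scheduled nodes that are not marked as used
 * and detach them from the graph.
 */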
3175 static void
3176 walker_kill_unused(ir_node * bb, void * data)
3177 {
3178         struct kill_helper *kh = data;
3179         const ir_node      *bad = get_irg_bad(get_irn_irg(bb));
3180         ir_node            *irn;
3181
3182
3183         for(irn=sched_first(bb); !sched_is_end(irn);) {
3184                 ir_node     *next = sched_next(irn);
3185                 int          n;
3186
3187                 if(!lc_bitset_is_set(kh->used, get_irn_idx(irn))) {
3188                         if(be_is_Spill(irn) || be_is_Reload(irn)) {
3189                                 DBG((kh->si->dbg, LEVEL_1, "\t SUBOPTIMAL! %+F IS UNUSED (cost: %g)\n", irn, get_cost(kh->si, irn)*execution_frequency(kh->si, bb)));
3190                                 assert(lpp_get_sol_state(kh->si->lpp) != lpp_optimal && "optimal solution is suboptimal?");
3191                         }
3192
3193                         sched_remove(irn);
3194
3195                         set_nodes_block(irn, bad);
3196                         for (n=get_irn_arity(irn)-1; n>=0; --n) {
3197                                 set_irn_n(irn, n, bad);
3198                         }
3199                 }
3200                 irn = next;
3201         }
3202 }
3203
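/**
 * Remove every scheduled node that is no longer reachable in the graph,
 * e.g. spills and reloads that became dead after applying the solution.
 */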
3204 static void
3205 kill_all_unused_values_in_schedule(spill_ilp_t * si)
3206 {
3207         struct kill_helper kh;
3208
3209         kh.used = lc_bitset_malloc(get_irg_last_idx(si->chordal_env->irg));
3210         kh.si = si;
3211
3212         irg_walk_graph(si->chordal_env->irg, walker_collect_used, NULL, kh.used);
3213         irg_block_walk_graph(si->chordal_env->irg, walker_kill_unused, NULL, &kh);
3214
3215         lc_bitset_free(kh.used);
3216 }
3217
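/**
 * Print all nodes of a pset (debugging aid).
 */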
3218 static void
3219 print_irn_pset(pset * p)
3220 {
3221         ir_node   *irn;
3222
3223         pset_foreach(p, irn) {
3224                 ir_printf("%+F\n", irn);
3225         }
3226 }
3227
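/**
 * Reconstruct SSA form for all values that received additional definitions
 * through spills, remats and reloads.
 */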
3228 static void
3229 rewire_uses(spill_ilp_t * si)
3230 {
3231         dom_front_info_t     *dfi = be_compute_dominance_frontiers(si->chordal_env->irg);
3232         defs_t               *defs;
3233         pset                 *ignore = pset_new_ptr(1);
3234
3235         pset_insert_ptr(ignore, get_irg_end(si->chordal_env->irg));
3236
3237         /* fix uses of spills */
3238         set_foreach(si->values, defs) {
3239                 pset     *reloads;
3240                 pset     *spills;
3241                 ir_node  *next = defs->remats;
3242                 int remats = 0;
3243
3244                 reloads = pset_new_ptr_default();
3245
3246                 while(next) {
3247                         if(be_is_Reload(next)) {
3248                                 pset_insert_ptr(reloads, next);
3249                         } else {
3250                                 ++remats;
3251                         }
3252                         next = get_irn_link(next);
3253                 }
3254
3255                 spills = get_spills_for_value(si, defs->value);
3256                 DBG((si->dbg, LEVEL_2, "\t  %d remats, %d reloads, and %d spills for value %+F\n", remats, pset_count(reloads), pset_count(spills), defs->value));
3257                 if(pset_count(spills) > 1) {
3258                         //assert(pset_count(reloads) > 0);
3259                         //                              print_irn_pset(spills);
3260                         //                              print_irn_pset(reloads);
3261
3262                         be_ssa_constr_set_ignore(dfi, spills, ignore);
3263                 }
3264
3265                 del_pset(reloads);
3266                 del_pset(spills);
3267         }
3268
3269         /* fix uses of remats and reloads */
3270         set_foreach(si->values, defs) {
3271                 pset     *nodes;
3272                 ir_node  *next = defs->remats;
3273
3274                 if(next) {
3275                         nodes = pset_new_ptr_default();
3276                         pset_insert_ptr(nodes, defs->value);
3277
3278                         while(next) {
3279                                 pset_insert_ptr(nodes, next);
3280                                 next = get_irn_link(next);
3281                         }
3282
3283                         if(pset_count(nodes) > 1) {
3284                                 DBG((si->dbg, LEVEL_4, "\t    %d new definitions for value %+F\n", pset_count(nodes)-1, defs->value));
3285                                 be_ssa_constr_set(dfi, nodes);
3286                         }
3287
3288                         del_pset(nodes);
3289                 }
3290         }
3291
3292 //      remove_unused_defs(si);
3293
3294         be_free_dominance_frontiers(dfi);
3295 }
3296
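/**
 * Apply the ILP solution to the graph: delete unnecessary remats, place
 * spills and reloads, and rewire all uses.
 */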
3297 static void
3298 writeback_results(spill_ilp_t * si)
3299 {
3300         /* walk through the graph and collect all spills, reloads and remats for a value */
3301
3302         si->values = new_set(cmp_defs, 4096);
3303
3304         DBG((si->dbg, LEVEL_1, "Applying results\n"));
3305         delete_unnecessary_remats(si);
3306         si->m_unknown = new_r_Unknown(si->chordal_env->irg, mode_M);
3307         irg_block_walk_graph(si->chordal_env->irg, walker_spill_placer, NULL, si);
3308         phim_fixer(si);
3309         irg_block_walk_graph(si->chordal_env->irg, walker_reload_placer, NULL, si);
3310
3311         /* clean the remat info! there are still back-edges leading there! */
3312         clean_remat_info(si);
3313
3314         rewire_uses(si);
3315
3316         connect_all_spills_with_keep(si);
3317
3318         del_set(si->values);
3319 }
3320
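/**
 * Return the number of non-ignore registers in the current register class.
 */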
3321 static int
3322 get_n_regs(spill_ilp_t * si)
3323 {
3324         int     arch_n_regs = arch_register_class_n_regs(si->cls);
3325         int     free = 0;
3326         int     i;
3327
3328         for(i=0; i<arch_n_regs; i++) {
3329                 if(!arch_register_type_is(&si->cls->regs[i], ignore)) {
3330                         free++;
3331                 }
3332         }
3333
3334         DBG((si->dbg, LEVEL_1, "\tArchitecture has %d free registers in class %s\n", free, si->cls->name));
3335         return free;
3336 }
3337
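/**
 * Block walker: move each reload upwards in the schedule as long as the
 * annotated register pressure stays below the number of available registers.
 */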
3338 static void
3339 walker_reload_mover(ir_node * bb, void * data)
3340 {
3341         spill_ilp_t   *si = data;
3342         ir_node           *tmp;
3343
3344         sched_foreach(bb, tmp) {
3345                 if(be_is_Reload(tmp) && has_reg_class(si, tmp)) {
3346                         ir_node       *reload = tmp;
3347                         ir_node       *irn = tmp;
3348
3349                         /* move reload upwards */
3350
3351                         int pressure = PTR_TO_INT(get_irn_link(reload));
3352                         if(pressure < si->n_regs) {
3353                                 irn = sched_prev(reload);
3354                                 DBG((si->dbg, LEVEL_5, "regpressure before %+F: %d\n", reload, pressure));
3355                                 sched_remove(reload);
3356                                 pressure = PTR_TO_INT(get_irn_link(irn));
3357
3358                                 while(pressure < si->n_regs) {
3359                                         if(sched_is_end(irn) || (be_is_Reload(irn) && has_reg_class(si, irn))) break;
3360
3361                                         set_irn_link(irn, INT_TO_PTR(pressure+1));
3362                                         DBG((si->dbg, LEVEL_5, "new regpressure before %+F: %d\n", irn, pressure+1));
3363                                         irn = sched_prev(irn);
3364
3365                                         pressure = PTR_TO_INT(get_irn_link(irn));
3366                                 }
3367
3368                                 DBG((si->dbg, LEVEL_3, "putting reload %+F after %+F\n", reload, irn));
3369                                 sched_put_after(irn, reload);
3370                         }
3371                 }
3372         }
3373 }
3374
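/**
 * Move reloads upwards in all blocks (see walker_reload_mover).
 */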
3375 static void
3376 move_reloads_upward(spill_ilp_t * si)
3377 {
3378         irg_block_walk_graph(si->chordal_env->irg, walker_reload_mover, NULL, si);
3379 }
3380
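/**
 * Main entry point: build and solve the spill/remat ILP for the given
 * chordal environment and apply the solution to the irg.
 */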
3381 void
3382 be_spill_remat(const be_chordal_env_t * chordal_env)
3383 {
3384         char            problem_name[256];
3385         char            dump_suffix[256];
3386         char            dump_suffix2[256];
3388         struct obstack  obst;
3389         spill_ilp_t     si;
3390
3391         ir_snprintf(problem_name, sizeof(problem_name), "%F_%s", chordal_env->irg, chordal_env->cls->name);
3392         ir_snprintf(dump_suffix, sizeof(dump_suffix), "-%s-remats", chordal_env->cls->name);
3393         ir_snprintf(dump_suffix2, sizeof(dump_suffix2), "-%s-pressure", chordal_env->cls->name);
3394
3395         FIRM_DBG_REGISTER(si.dbg, "firm.be.ra.spillremat");
3396         DBG((si.dbg, LEVEL_1, "\n\n\t\t===== Processing %s =====\n\n", problem_name));
3397
3398         obstack_init(&obst);
3399         si.chordal_env = chordal_env;
3400         si.obst = &obst;
3401         si.cls = chordal_env->cls;
3402         si.lpp = new_lpp(problem_name, lpp_minimize);
3403         si.remat_info = new_set(cmp_remat_info, 4096);
3404         si.interferences = new_set(cmp_interference, 4096);
3405         si.all_possible_remats = pset_new_ptr_default();
3406         si.spills = pset_new_ptr_default();
3407         si.inverse_ops = pset_new_ptr_default();
3408 #ifndef EXECFREQ_LOOPDEPH
3409         si.execfreqs = compute_execfreq(chordal_env->irg);
3410 #else
3411         si.execfreqs = NULL;
3412 #endif
3413 #ifdef KEEPALIVE
3414         si.keep = NULL;
3415 #endif
3416         si.n_regs = get_n_regs(&si);
3417
3418         set_irg_link(chordal_env->irg, &si);
3419         compute_doms(chordal_env->irg);
3420
3421         /* compute phi classes */
3422 //      phi_class_compute(chordal_env->irg);
3423
3424         be_analyze_regpressure(chordal_env, "-pre");
3425
3426 #ifdef COLLECT_REMATS
3427         /* collect remats */
3428         DBG((si.dbg, LEVEL_1, "Collecting remats\n"));
3429         irg_walk_graph(chordal_env->irg, walker_remat_collector, NULL, &si);
3430 #endif
3431
3432         /* insert possible remats */
3433         DBG((si.dbg, LEVEL_1, "Inserting possible remats\n"));
3434         irg_block_walk_graph(chordal_env->irg, walker_remat_insertor, NULL, &si);
3435         DBG((si.dbg, LEVEL_2, " -> inserted %d possible remats\n", pset_count(si.all_possible_remats)));
3436
3437 #ifdef KEEPALIVE
3438         DBG((si.dbg, LEVEL_1, "Connecting remats with keep and dumping\n"));
3439         connect_all_remats_with_keep(&si);
3440         /* dump graph with inserted remats */
3441         dump_graph_with_remats(chordal_env->irg, dump_suffix);
3442 #endif
3443
3444
3445         /* recompute liveness */
3446         DBG((si.dbg, LEVEL_1, "Recomputing liveness\n"));
3447         be_liveness(chordal_env->irg);
3448
3449         /* build the ILP */
3450
3451         DBG((si.dbg, LEVEL_1, "\tBuilding ILP\n"));
3452         DBG((si.dbg, LEVEL_2, "\t endwalker\n"));
3453         irg_block_walk_graph(chordal_env->irg, luke_endwalker, NULL, &si);
3454
3455         DBG((si.dbg, LEVEL_2, "\t blockwalker\n"));
3456         irg_block_walk_graph(chordal_env->irg, luke_blockwalker, NULL, &si);
3457
3458 #ifndef NO_MEMCOPIES
3459         DBG((si.dbg, LEVEL_2, "\t memcopyhandler\n"));
3460         memcopyhandler(&si);
3461 #endif
3462
3463 #ifdef DUMP_ILP
3464         {
3465                 FILE           *f;
3466                 char            buf[256];
3467
3468                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.ilp", problem_name);
3469                 if ((f = fopen(buf, "wt")) != NULL) {
3470                         lpp_dump_plain(si.lpp, f);
3471                         fclose(f);
3472                 }
3473         }
3474 #endif
3475
3476 #ifdef SOLVE
3477         DBG((si.dbg, LEVEL_1, "\tSolving %F\n", chordal_env->irg));
3478 #ifdef ILP_TIMEOUT
3479         lpp_set_time_limit(si.lpp, ILP_TIMEOUT);
3480 #endif
3481
3482 #ifdef SOLVE_LOCAL
3483         lpp_solve_cplex(si.lpp);
3484 #else
3485         lpp_solve_net(si.lpp, LPP_SERVER, LPP_SOLVER);
3486 #endif
3487         assert(lpp_is_sol_valid(si.lpp)
3488                && "solution of ILP must be valid");
3489
3490         DBG((si.dbg, LEVEL_1, "\t%s: iterations: %d, solution time: %g, objective function: %g\n", problem_name, si.lpp->iterations, si.lpp->sol_time, is_zero(si.lpp->objval)?0.0:si.lpp->objval));
3491
3492 #ifdef DUMP_SOLUTION
3493         {
3494                 FILE           *f;
3495                 char            buf[256];
3496
3497                 ir_snprintf(buf, sizeof(buf), "%s-spillremat.sol", problem_name);
3498                 if ((f = fopen(buf, "wt")) != NULL) {
3499                         int             i;
3500                         for (i = 0; i < si.lpp->var_next; ++i) {
3501                                 lpp_name_t     *name = si.lpp->vars[i];
3502                                 fprintf(f, "%20s %4d %10f\n", name->name, name->nr, name->value);
3503                         }
3504                         fclose(f);
3505                 }
3506         }
3507 #endif
3508
3509         writeback_results(&si);
3510
3511 #endif                          /* SOLVE */
3512
3513         kill_all_unused_values_in_schedule(&si);
3514
3515 #if defined(KEEPALIVE_SPILLS) || defined(KEEPALIVE_RELOADS)
3516         be_dump(chordal_env->irg, "-spills-placed", dump_ir_block_graph);
3517 #endif
3518
3519         // move reloads upwards
3520         be_liveness(chordal_env->irg);
3521         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3522         move_reloads_upward(&si);
3523
3524         irg_block_walk_graph(chordal_env->irg, walker_pressure_annotator, NULL, &si);
3525
3526         dump_pressure_graph(&si, dump_suffix2);
3527
3528         // TODO fix temporarily exceeded regpressure due to remat2s
3529
3530         // TODO insert copies to fix interferences in memory
3531
3532         be_analyze_regpressure(chordal_env, "-post");
3533
3534         free_dom(chordal_env->irg);
3535         del_set(si.interferences);
3536         del_pset(si.inverse_ops);
3537         del_pset(si.all_possible_remats);
3538         del_pset(si.spills);
3539 #ifndef EXECFREQ_LOOPDEPH
3540         free_execfreq(si.execfreqs);
3541 #endif
3542         free_lpp(si.lpp);
3543         obstack_free(&obst, NULL);
3544         DBG((si.dbg, LEVEL_1, "\tdone.\n"));
3545 }
3546
3547 #else                           /* WITH_ILP */
3548
3549 static void
3550 only_that_you_can_compile_without_WITH_ILP_defined(void)
3551 {
3552 }
3553
3554 #endif                          /* WITH_ILP */