some fixups for latency
[libfirm] / ir / be / beilpsched.c
1 /**
2  * Scheduling algorithms.
3  * An ILP scheduler based on
4  * "ILP-based Instruction Scheduling for IA-64"
5  * by Daniel Kaestner and Sebastian Winkel
6  *
7  * @date   22.10.2005
8  * @author Christian Wuerdig
9  * @cvs-id $Id$
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #ifndef _WIN32
20 #include <strings.h>
21 #endif /* _WIN32 */
22
23 #include "irnode_t.h"
24 #include "irgwalk.h"
25 #include "irbitset.h"
26 #include "irphase_t.h"
27 #include "height.h"
28 #include "iredges.h"
29 #include "pdeq.h"
30 #include "debug.h"
31 #include "irtools.h"
32 #include "irdump.h"
33 #include "plist.h"
34
35 #include <lpp/lpp.h>
36 #include <lpp/lpp_net.h>
37
38 #ifdef WITH_LIBCORE
39 #include <libcore/lc_opts.h>
40 #include <libcore/lc_opts_enum.h>
41 #include <libcore/lc_timing.h>
42 #endif /* WITH_LIBCORE */
43
44 #include "be.h"
45 #include "benode_t.h"
46 #include "besched_t.h"
47 #include "beilpsched.h"
48
49 typedef struct _ilpsched_options_t {
50         unsigned time_limit;     /**< ILP time limit per block (in seconds) */
51         char     log_file[1024]; /**< name of the LPP log file ("stderr" and "stdout" are supported) */
52 } ilpsched_options_t;
53
54 typedef struct _unit_type_info_t {
55         int                            n_units;
56         const be_execution_unit_type_t *tp;
57 } unit_type_info_t;
58
59 /**
60  * holding the ILP variables of the different types
61  */
62 typedef struct _ilp_var_types_t {
63         int *x;   /**< x_{nt}^k variables: node n is scheduled at time step t on unit type k */
64         int *d;   /**< d_{nt}^k variables: node n dies at time step t on unit type k */
65         int *y;   /**< y_{nt}^k variables: node n is scheduled at time step t on unit type k although all units are occupied */
66 } ilp_var_types_t;
67
68 /* attributes for a node */
69 typedef struct _ilpsched_node_attr_t {
70         unsigned asap;                     /**< The ASAP scheduling control step */
71         unsigned alap;                     /**< The ALAP scheduling control step */
72         unsigned sched_point;              /**< the step in which the node is finally scheduled */
73         unsigned visit_idx;                /**< Index of the node having visited this node last */
74         unsigned consumer_idx;             /**< Index of the node having counted this node as consumer last */
75         unsigned n_consumer;               /**< Number of consumers */
76         ir_node  **block_consumer;         /**< List of consumer being in the same block */
77         unsigned block_idx     : 30;       /**< A unique per block index */
78         unsigned alap_changed  : 1;        /**< the current ALAP has changed, revisit preds */
79         unsigned is_dummy_node : 1;        /**< this node is assigned to DUMMY unit */
80         bitset_t *transitive_block_nodes;  /**< Set of transitive block nodes (predecessors
81                                                 for ASAP, successors for ALAP) */
82         unsigned n_unit_types;             /**< number of allowed execution unit types */
83         unit_type_info_t *type_info;       /**< list of allowed execution unit types */
84         ilp_var_types_t  ilp_vars;         /**< the different ILP variables */
85 } ilpsched_node_attr_t;
86
87 /* attributes for a block */
88 typedef struct _ilpsched_block_attr_t {
89         unsigned block_last_idx;        /**< The highest node index in block so far */
90         unsigned n_interesting_nodes;   /**< The number of nodes interesting for scheduling */
91         unsigned max_steps;             /**< Upper bound for block execution */
92         plist_t  *root_nodes;           /**< A list of nodes having no user in current block */
93         ir_node  *head_ilp_nodes;       /**< A linked list of nodes which will contribute to ILP */
94 } ilpsched_block_attr_t;
95
96 typedef union _ilpsched_attr_ {
97         ilpsched_node_attr_t  node_attr;
98         ilpsched_block_attr_t block_attr;
99 } ilpsched_attr_t;
100
101 /* An irn for the phase and its attributes (either node or block) */
102 typedef struct {
103         ir_node         *irn;
104         ilpsched_attr_t attr;
105 } be_ilpsched_irn_t;
106
107 /* The ILP scheduling environment */
108 typedef struct {
109         phase_t              ph;            /**< The phase */
110         ir_graph             *irg;          /**< The current irg */
111         heights_t            *height;       /**< The heights object of the irg */
112         void                 *irg_env;      /**< An environment for the irg scheduling, provided by the backend */
113         void                 *block_env;    /**< An environment for scheduling a block, provided by the backend */
114         const arch_env_t     *arch_env;     /**< The architecture environment */
115         const arch_isa_t     *isa;          /**< The ISA */
116         const be_main_env_t  *main_env;     /**< The backend main environment */
117         const be_machine_t   *cpu;          /**< the current abstract machine */
118         ilpsched_options_t   *opts;         /**< the ilp options for current irg */
119         const ilp_sched_selector_t *sel;    /**< The ILP sched selector provided by the backend */
120         DEBUG_ONLY(firm_dbg_module_t *dbg);
121 } be_ilpsched_env_t;
122
123 /* convenience macros to handle phase irn data */
124 #define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
125 #define is_ilpsched_block(node)             (is_Block((node)->irn))
126 #define get_ilpsched_block_attr(block)      (&(block)->attr.block_attr)
127 #define get_ilpsched_node_attr(node)        (&(node)->attr.node_attr)
128
129 /* iterate over a list of ir_nodes linked by link field */
130 #define foreach_linked_irns(head, iter) for ((iter) = (head); (iter); (iter) = get_irn_link((iter)))
131
132 /* check if node is considered for ILP scheduling */
133 #define consider_for_sched(isa, irn) \
134         (! (is_Block(irn)            ||  \
135                 is_normal_Proj(isa, irn) ||  \
136                 is_Phi(irn)              ||  \
137                 is_NoMem(irn)            ||  \
138                 is_Jmp(irn)              ||  \
139                 is_End(irn)                  \
140                 ))
141
142 /* gives the valid scheduling time step interval for a node */
143 #define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)
144
145 /* gives the valid interval where a node can die */
146 #define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)
147
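/* Note: in the two index macros below, control_step is the zero-based time step,
   i.e. it ranges from asap-1 to alap-1 (resp. max_steps-1 for the kill interval),
   matching the variable creation loops in create_variables(). */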
148 /* gives the corresponding ILP variable for given node, unit and time step */
149 #define ILPVAR_IDX(na, unit, control_step) \
150         ((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)
151
152 /* gives the corresponding dead nodes ILP variable for given node, unit and time step */
153 #define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
154         ((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)
155
156 /* check if a double value is within an epsilon neighborhood of 0 */
157 #define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)
158
159 #ifdef WITH_LIBCORE
160         #define ilp_timer_push(t)         lc_timer_push((t))
161         #define ilp_timer_pop()           lc_timer_pop()
162         #define ilp_timer_elapsed_usec(t) lc_timer_elapsed_usec((t))
163 #else /* WITH_LIBCORE */
164         #define ilp_timer_push(t)
165         #define ilp_timer_pop()
166         #define ilp_timer_elapsed_usec(t) 0.0
167 #endif /* WITH_LIBCORE */
168
169 /* option variable */
170 static ilpsched_options_t ilp_opts = {
171         120,   /* 120 sec per block time limit */
172         ""     /* no log file */
173 };
174
175 #ifdef WITH_LIBCORE
176 /* ILP options */
177 static const lc_opt_table_entry_t ilpsched_option_table[] = {
178         LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
179         LC_OPT_ENT_STR("lpp_log",    "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
180         { NULL }
181 };
182 #endif /* WITH_LIBCORE */
183
184 /**
185  * Check if irn is a Proj which has no execution units assigned.
186  * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
187  */
188 static INLINE int is_normal_Proj(const arch_isa_t *isa, const ir_node *irn) {
189         return is_Proj(irn) && (arch_isa_get_allowed_execution_units(isa, irn) == NULL);
190 }
191
192 /**
193  * Skips normal Projs.
194  * @return predecessor if irn is a normal Proj, otherwise irn.
195  */
196 static INLINE ir_node *skip_normal_Proj(const arch_isa_t *isa, ir_node *irn) {
197         if (is_normal_Proj(isa, irn))
198                 return get_Proj_pred(irn);
199         return irn;
200 }
201
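/**
 * Returns the latency of irn as reported by the backend selector,
 * raised to at least 1 for all nodes except Projs and Keeps.
 */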
202 static INLINE unsigned fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
203         unsigned lat = be_ilp_sched_latency(sel, irn, env);
204         if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
205                 lat = 1;
206         return lat;
207 }
208
209
210 /**
211  * Compare scheduling time steps of two be_ilpsched_irn's.
212  */
213 static int cmp_ilpsched_irn(const void *a, const void *b) {
214         be_ilpsched_irn_t    *n1   = *(be_ilpsched_irn_t **)a;
215         be_ilpsched_irn_t    *n2   = *(be_ilpsched_irn_t **)b;
216         ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
217         ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);
218
219         return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
220 }
221
222 /**
223  * In case there is no phase information for irn, initialize it.
224  */
225 static void *init_ilpsched_irn(phase_t *ph, ir_node *irn, void *old) {
226         be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
227
228         if (res == old) {
229                 /* if we already have some data: check for reinitialization */
230
231                 if (! is_Block(irn)) {
232                         ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
233
234                         if (! na->transitive_block_nodes) {
235                                 ir_node               *block      = get_nodes_block(irn);
236                                 be_ilpsched_irn_t     *block_node = phase_get_or_set_irn_data(ph, block);
237                                 ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
238
239                                 /* we are called after the block indices have been built: create the bitset */
240                                 na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);
241                         }
242                         else {
243                                 /* we are called from reinit block data: clear the bitset */
244                                 bitset_clear_all(na->transitive_block_nodes);
245                                 na->visit_idx    = 0;
246                                 na->alap_changed = 1;
247                         }
248                 }
249                 return old;
250         }
251
252         res->irn = irn;
253
254         /* set ilpsched irn attributes (either block or irn) */
255         if (is_Block(irn)) {
256                 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);
257
258                 ba->n_interesting_nodes = 0;
259                 ba->block_last_idx      = 0;
260                 ba->root_nodes          = plist_new();
261                 ba->head_ilp_nodes      = NULL;
262                 ba->max_steps           = 0;
263         }
264         else {
265                 ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
266                 memset(na, 0, sizeof(*na));
267         }
268
269         return res;
270 }
271
272 /**
273  * Assign a per block unique number to each node.
274  */
275 static void build_block_idx(ir_node *irn, void *walk_env) {
276         be_ilpsched_env_t     *env = walk_env;
277         be_ilpsched_irn_t     *node, *block_node;
278         ilpsched_node_attr_t  *na;
279         ilpsched_block_attr_t *ba;
280
281         if (! consider_for_sched(env->arch_env->isa, irn))
282                 return;
283
284         node       = get_ilpsched_irn(env, irn);
285         na         = get_ilpsched_node_attr(node);
286         block_node = get_ilpsched_irn(env, get_nodes_block(irn));
287         ba         = get_ilpsched_block_attr(block_node);
288
289         na->block_idx = ba->block_last_idx++;
290 }
291
292 /********************************************************
293  *                              __        _
294  *                             / /       | |
295  *   __ _ ___  __ _ _ __      / /    __ _| | __ _ _ __
296  *  / _` / __|/ _` | '_ \    / /    / _` | |/ _` | '_ \
297  * | (_| \__ \ (_| | |_) |  / /    | (_| | | (_| | |_) |
298  *  \__,_|___/\__,_| .__/  /_/      \__,_|_|\__,_| .__/
299  *                 | |                           | |
300  *                 |_|                           |_|
301  ********************************************************/
302
303 /**
304  * Add all nodes having no user in the current block to the block's list of root nodes.
305  */
306 static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
307         ir_node               *block;
308         const ir_edge_t       *edge;
309         be_ilpsched_irn_t     *block_node, *node;
310         ilpsched_block_attr_t *ba;
311         ilpsched_node_attr_t  *na;
312         int                   i, j;
313         be_ilpsched_env_t     *env           = walk_env;
314         int                   has_block_user = 0;
315         unsigned              n_consumer     = 0;
316         ir_edge_kind_t        ekind[2]       = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
317         ir_node               **consumer;
318         int                   idx;
319
320         if (! consider_for_sched(env->arch_env->isa, irn))
321                 return;
322
323         block    = get_nodes_block(irn);
324         idx      = get_irn_idx(irn);
325         consumer = NEW_ARR_F(ir_node *, 0);
326
327         DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));
328
329         /* check data and dependency out edges */
330         for (i = 0; i < 2 && ! has_block_user; ++i) {
331                 foreach_out_edge_kind(irn, edge, ekind[i]) {
332                         ir_node *user = get_edge_src_irn(edge);
333
334                         if (is_normal_Proj(env->arch_env->isa, user)) {
335                                 const ir_edge_t *user_edge;
336
337                                 if (get_irn_mode(user) == mode_X)
338                                         continue;
339
340                                 /* The ABI ensures that there will be no ProjT nodes in the graph. */
341                                 for (j = 0; j < 2; ++j) {
342                                         foreach_out_edge_kind(user, user_edge, ekind[j]) {
343                                                 ir_node *real_user = get_edge_src_irn(user_edge);
344
345                                                 if (! is_Phi(real_user) && ! is_Block(real_user)) {
346                                                         be_ilpsched_irn_t    *node = get_ilpsched_irn(env, real_user);
347                                                         ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);
348
349                                                         /* skip already visited nodes */
350                                                         if (ua->consumer_idx == idx)
351                                                                 continue;
352
353                                                         /* check if node has user in this block and collect the user if it's a data user */
354                                                         if (get_nodes_block(real_user) == block) {
355                                                                 if (i == 0 && j == 0)
356                                                                         ARR_APP1(ir_node *, consumer, real_user);
357                                                                 has_block_user = 1;
358                                                         }
359
360                                                         /* only count data consumer */
361                                                         if (i == 0)
362                                                                 n_consumer++;
363
364                                                         /* mark user as visited by this node */
365                                                         ua->consumer_idx = idx;
366                                                 }
367                                         }
368                                 }
369                         }
370                         else if (is_Block(user)) {
371                                 continue;
372                         }
373                         else if (! is_Phi(user)) {
374                                 be_ilpsched_irn_t    *node = get_ilpsched_irn(env, user);
375                                 ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);
376
377                                 /* skip already visited nodes */
378                                 if (ua->consumer_idx == idx)
379                                         continue;
380
381                                 /* check if node has user in this block and collect the user if it's a data user */
382                                 if (get_nodes_block(user) == block) {
383                                         if (i == 0)
384                                                 ARR_APP1(ir_node *, consumer, user);
385                                         has_block_user = 1;
386                                 }
387
388                                 /* only count data consumer */
389                                 if (i == 0)
390                                         n_consumer++;
391
392                                 /* mark user visited by this node */
393                                 ua->consumer_idx = idx;
394                         }
395                 }
396         }
397
398         block_node = get_ilpsched_irn(env, block);
399         ba         = get_ilpsched_block_attr(block_node);
400
401         ba->n_interesting_nodes++;
402
403         /* current irn has no user inside this block, add to queue */
404         if (! has_block_user) {
405                 DB((env->dbg, LEVEL_3, "root node\n"));
406                 plist_insert_back(ba->root_nodes, irn);
407         }
408         else {
409                 DB((env->dbg, LEVEL_3, "normal node\n"));
410         }
411
412         /* record the number of all consumers and the consumers within the same block */
413         node = get_ilpsched_irn(env, irn);
414         na   = get_ilpsched_node_attr(node);
415         na->n_consumer     = n_consumer;
416         na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
417         memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
418         DEL_ARR_F(consumer);
419 }
420
421 /**
422  * Calculate the ASAP scheduling step for current irn.
423  */
424 static void calculate_irn_asap(ir_node *irn, void *walk_env) {
425         be_ilpsched_env_t     *env = walk_env;
426         int                   i;
427         ir_node               *block;
428         be_ilpsched_irn_t     *node, *block_node;
429         ilpsched_node_attr_t  *na;
430         ilpsched_block_attr_t *ba;
431         unsigned              lat;
432
433         /* These nodes are handled separately */
434         if (! consider_for_sched(env->arch_env->isa, irn))
435                 return;
436
437         DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));
438
439         block    = get_nodes_block(irn);
440         node     = get_ilpsched_irn(env, irn);
441         na       = get_ilpsched_node_attr(node);
442         na->asap = 1;
443
444         for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
445                 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
446
447                 /* check for greatest distance to top */
448                 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
449                         be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
450                         ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
451                         unsigned             lat;
452
453                         lat      = fixed_latency(env->sel, pred, env->block_env);
454                         na->asap = MAX(na->asap, pna->asap + lat);
455                 }
456         }
457
458         /* add node to ILP node list and update max_steps */
459         block_node = get_ilpsched_irn(env, block);
460         ba         = get_ilpsched_block_attr(block_node);
461
462         set_irn_link(irn, ba->head_ilp_nodes);
463         ba->head_ilp_nodes = irn;
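        /* scheduling all nodes strictly one after another is always possible,
           so the sum of all latencies is an upper bound for the block */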
464         ba->max_steps     += fixed_latency(env->sel, irn, env->block_env);
465
466         DB((env->dbg, LEVEL_2, "%u\n", na->asap));
467 }
468
469 /**
470  * Calculate the ALAP scheduling step of all irns in current block.
471  * Depends on max_steps being calculated.
472  */
473 static void calculate_block_alap(ir_node *block, void *walk_env) {
474         be_ilpsched_env_t     *env        = walk_env;
475         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
476         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
477         waitq                 *cur_queue  = new_waitq();
478         plist_element_t       *el;
479
480         assert(is_Block(block));
481
482         DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
483                 block, ba->n_interesting_nodes, ba->max_steps));
484
485         /* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
486         //phase_reinit_block_irn_data(&env->ph, block);
487
488         /* init start queue */
489         foreach_plist(ba->root_nodes, el) {
490                 waitq_put(cur_queue, plist_element_get_value(el));
491         }
492
493         /* repeat until all nodes are processed */
494         while (! waitq_empty(cur_queue)) {
495                 waitq *next_queue = new_waitq();
496
497                 /* process all nodes in current step */
498                 while (! waitq_empty(cur_queue)) {
499                         ir_node              *cur_irn = waitq_get(cur_queue);
500                         be_ilpsched_irn_t    *node    = get_ilpsched_irn(env, cur_irn);
501                         ilpsched_node_attr_t *na      = get_ilpsched_node_attr(node);
502                         int                  i;
503
504                         /* cur_irn has no ALAP set -> it's a root node, set it to the block's max_steps */
505                         if (na->alap == 0) {
506                                 na->alap = ba->max_steps;
507                                 DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
508                                         cur_irn, na->alap));
509                         }
510                         else {
511                                 DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",
512                                         cur_irn, na->alap));
513                         }
514
515                         /* set the alap's of all predecessors */
516                         for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
517                                 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(cur_irn, i));
518
519                                 /* check for greatest distance to bottom */
520                                 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
521                                         be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
522                                         ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
523                                         unsigned             lat;
524
525                                         /* skip predecessors already visited by this node, unless our ALAP has changed */
526                                         if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
527                                                 continue;
528                                         pna->visit_idx = get_irn_idx(cur_irn);
529
530                                         lat = fixed_latency(env->sel, pred, env->block_env);
531
532                                         /* set ALAP of current pred */
533                                         if (pna->alap == 0) {
534                                                 /* current ALAP is 0: node has not yet been visited */
535                                                 pna->alap_changed = 1;
536                                                 pna->alap         = na->alap - lat;
537                                         }
538                                         else if (pna->alap > na->alap - lat) {
539                                                 /* we found a longer path to root node: change ALAP */
540                                                 pna->alap         = na->alap - lat;
541                                                 pna->alap_changed = 1;
542                                         }
543                                         else {
544                                                 /* current ALAP is best found so far: keep it */
545                                                 pna->alap_changed = 0;
546                                         }
547
548                                         DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));
549
550                                         /* enqueue node for next iteration */
551                                         if (get_irn_ins_or_deps(pred) > 0)
552                                                 waitq_put(next_queue, pred);
553                                 }
554                         }
555                 }
556
557                 /* prepare for next iteration */
558                 del_waitq(cur_queue);
559                 cur_queue = next_queue;
560         }
561 }
562
563 /**
564  * We can free the list of root nodes here.
565  */
566 static void clear_unwanted_data(ir_node *block, void *walk_env) {
567         be_ilpsched_env_t     *env        = walk_env;
568         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
569         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
570
571         plist_free(ba->root_nodes);
572         ba->root_nodes = NULL;
573 }
574
575 /**
576  * Refine the {ASAP(n), ALAP(n)} interval for the nodes.
577  * Set the ASAP/ALAP times of Projs and Keeps to those of their ancestors.
578  */
579 static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
580         be_ilpsched_env_t    *env = walk_env;
581         be_ilpsched_irn_t    *node, *pred_node;
582         ilpsched_node_attr_t *na, *pna;
583         ir_node              *pred;
584
585         if (! consider_for_sched(env->arch_env->isa, irn))
586                 return;
587
588         if (! is_Proj(irn) && ! be_is_Keep(irn))
589                 return;
590
591         /* go to the ancestor */
592         if (be_is_Keep(irn))
593                 irn = get_irn_n(irn, 0);
594         pred = skip_Proj(irn);
595
596         node      = get_ilpsched_irn(env, irn);
597         pred_node = get_ilpsched_irn(env, pred);
598         na        = get_ilpsched_node_attr(node);
599         pna       = get_ilpsched_node_attr(pred_node);
600
601         na->asap = pna->asap;
602         na->alap = pna->alap;
603
604         DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
605 }
606
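/* The following is an alternative, bitset-based ASAP/ALAP computation.
   It is currently disabled. */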
607 #if 0
608 /**
609  * Calculate the ASAP scheduling step for current irn.
610  */
611 static void calculate_irn_asap(ir_node *irn, void *walk_env) {
612         be_ilpsched_irn_t *node;
613         be_ilpsched_env_t *env = walk_env;
614         int      i;
615         ir_node  *block;
616         ilpsched_node_attr_t *na;
617
618         /* These nodes are handled separately */
619         if (! consider_for_sched(env->arch_env->isa, irn))
620                 return;
621
622         DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F\n", irn));
623
624         node  = get_ilpsched_irn(env, irn);
625         block = get_nodes_block(irn);
626         na    = get_ilpsched_node_attr(node);
627
628         /* accumulate all transitive predecessors of current node */
629         for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
630                 ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
631                 be_ilpsched_irn_t    *pred_node;
632                 ilpsched_node_attr_t *pna;
633                 unsigned             idx;
634
635                 if (be_is_Keep(pred))
636                         pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));
637
638                 if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))
639                         continue;
640
641                 pred_node = get_ilpsched_irn(env, pred);
642                 pna       = get_ilpsched_node_attr(pred_node);
643                 idx       = get_irn_idx(irn);
644
645                 assert(pna->asap && "missing ASAP of predecessor");
646
647                 /*
648                         We have not visited this predecessor yet
649                         -> accumulate its predecessors
650                 */
651                 if (pna->visit_idx != idx) {
652                         pna->visit_idx = idx;
653                         na->transitive_block_nodes = bitset_or(na->transitive_block_nodes, pna->transitive_block_nodes);
654                         DBG((env->dbg, LEVEL_3, "\taccumulating preds of %+F\n", pred));
655                 }
656         }
657
658         /* every node is its own transitive predecessor in block */
659         bitset_set(na->transitive_block_nodes, na->block_idx);
660
661         /* asap = number of transitive predecessors in this block */
662         na->asap = bitset_popcnt(na->transitive_block_nodes);
663
664         DBG((env->dbg, LEVEL_2, "\tcalculated ASAP is %u\n", na->asap));
665 }
666
667 /**
668  * Calculate the ALAP scheduling step for current irn.
669  * @note: requires ASAP being calculated.
670  */
671 static void calculate_irn_alap(ir_node *irn, void *walk_env) {
672         be_ilpsched_env_t     *env = walk_env;
673         int                   i, is_head;
674         ir_node               *block;
675         be_ilpsched_irn_t     *node;
676         ilpsched_node_attr_t  *na;
677
678         /* These nodes are handled separately */
679         if (! consider_for_sched(env->arch_env->isa, irn))
680                 return;
681
682         DBG((env->dbg, LEVEL_2, "Calculating ALAP of node %+F ... ", irn));
683
684         block      = get_nodes_block(irn);
685         node       = get_ilpsched_irn(env, irn);
686         na         = get_ilpsched_node_attr(node);
687         is_head    = 1;
688
689         for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
690                 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
691
692                 /* check if we have a head node */
693                 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
694                         be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
695                         ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
696                         unsigned             lat;
697
698                         lat      = fixed_latency(env->sel, pred, env->block_env);
699                         na->alap = MAX(na->alap, pna->alap + lat);
700                         is_head  = 0;
701                 }
702         }
703
704         /* handle head nodes (no predecessor in same block) */
705         if (is_head) {
706                 be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
707                 ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
708                 plist_element_t       *el;
709
710                 DB((env->dbg, LEVEL_2, "head node ... "));
711
712                 /*
713                 We have a head node here:
714                 ALAP(n) is derived from ASAP(m) + latency(m),
715                 where m is a root node and there is no path from m to n
716                 */
717                 foreach_plist(ba->root_nodes, el) {
718                         ir_node *root = plist_element_get_value(el);
719
720                         /* check if current root is independent from irn */
721                         if (! heights_reachable_in_block(env->height, root, irn)) {
722                                 be_ilpsched_irn_t     *root_node = get_ilpsched_irn(env, root);
723                                 ilpsched_node_attr_t  *rna       = get_ilpsched_node_attr(root_node);
724
725                                 na->alap = rna->asap + fixed_latency(env->sel, root, env->block_env);
726                         }
727                 }
728         }
729
730         DB((env->dbg, LEVEL_2, "%u\n", na->alap));
731 }
732
733 /**
734  * Accumulate the successors of all nodes from irn on upwards.
735  */
736 static void accumulate_succs(be_ilpsched_env_t *env, ir_node *irn) {
737         unsigned             i, n;
738         be_ilpsched_irn_t    *node  = get_ilpsched_irn(env, irn);
739         ilpsched_node_attr_t *na    = get_ilpsched_node_attr(node);
740         ir_node              *block = get_nodes_block(irn);
741         waitq                *wq    = new_waitq();
742
743         DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F\n", irn));
744
745         /* enqueue node for final alap calculation */
746         if (! na->enqueued) {
747                 be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
748                 ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
749
750                 na->enqueued = 1;
751                 na->alap     = ba->max_steps;
752                 waitq_put(env->alap_queue, node);
753
754                 set_irn_link(irn, ba->head_ilp_nodes);
755                 ba->head_ilp_nodes = irn;
756                 DBG((env->dbg, LEVEL_5, "\t\tlinked %+F to ilp nodes of %+F, attr %p\n", irn, block, ba));
757                 DBG((env->dbg, LEVEL_4, "\t\tenqueueing %+F for final ALAP calculation\n", irn));
758         }
759
760         for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
761                 ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
762                 unsigned             idx;
763                 be_ilpsched_irn_t    *pred_node;
764                 ilpsched_node_attr_t *pna;
765
766                 if (be_is_Keep(pred))
767                         pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));
768
769                 if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))
770                         continue;
771
772                 pred_node = get_ilpsched_irn(env, pred);
773                 pna       = get_ilpsched_node_attr(pred_node);
774                 idx       = get_irn_idx(irn);
775
776                 /* accumulate the successors */
777                 if (pna->visit_idx != idx) {
778                         pna->visit_idx = idx;
779                         pna->transitive_block_nodes = bitset_or(pna->transitive_block_nodes, na->transitive_block_nodes);
780
781                         /* set current node as successor */
782                         bitset_set(pna->transitive_block_nodes, na->block_idx);
783                         waitq_put(wq, pred);
784
785                         DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F to %+F\n", irn, pred));
786                 }
787         }
788
789         /* process all predecessors */
790         while (! waitq_empty(wq)) {
791                 accumulate_succs(env, waitq_get(wq));
792         }
793
794         del_waitq(wq);
795 }
796
797 /**
798  * Calculate the ALAP scheduling step of all irns in current block.
799  * Depends on ASAP being calculated.
800  */
801 static void calculate_block_alap(ir_node *block, void *walk_env) {
802         be_ilpsched_env_t     *env        = walk_env;
803         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
804         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
805
806         assert(is_Block(block));
807
808         DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes)\n", block, ba->n_interesting_nodes));
809
810         /* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
811         phase_reinit_block_irn_data(&env->ph, block);
812
813         /* calculate the alap of all nodes, starting at collected roots upwards */
814         while (! waitq_empty(ba->root_nodes)) {
815                 accumulate_succs(env, waitq_get(ba->root_nodes));
816         }
817
818         /* we don't need it anymore */
819         del_waitq(ba->root_nodes);
820         ba->root_nodes = NULL;
821
822         /* all interesting nodes should have their successors accumulated now */
823         while (! waitq_empty(env->alap_queue)) {
824                 be_ilpsched_irn_t    *node = waitq_get(env->alap_queue);
825                 ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
826
827                 /* control flow ops must always be scheduled last */
828                 if (is_cfop(node->irn) && ! is_Start(node->irn) && get_irn_opcode(node->irn) != iro_End)
829                         na->asap = na->alap;
830                 else
831                         na->alap -= bitset_popcnt(na->transitive_block_nodes);
832                 DBG((env->dbg, LEVEL_2, "\tALAP of %+F is %u (%u succs, %u consumer)\n",
833                         node->irn, na->alap, bitset_popcnt(na->transitive_block_nodes), na->n_consumer));
834
835                 /* maximum block steps is maximum alap of all nodes */
836                 ba->max_steps = MAX(ba->max_steps, na->alap);
837         }
838 }
839 #endif /* if 0 */
840
841 /*******************************************
842  *           _              _       _
843  *          | |            | |     | |
844  *  ___  ___| |__   ___  __| |_   _| | ___
845  * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
846  * \__ \ (__| | | |  __/ (_| | |_| | |  __/
847  * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
848  *
849  *******************************************/
850
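/**
 * Collects all Keep users of irn into the keeps queue.
 */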
851 static INLINE void check_for_keeps(waitq *keeps, ir_node *block, ir_node *irn) {
852         const ir_edge_t *edge;
853
854         foreach_out_edge(irn, edge) {
855                 ir_node *user = get_edge_src_irn(edge);
856
857                 if (be_is_Keep(user)) {
858                         assert(get_nodes_block(user) == block && "Keep must not be in different block.");
859                         waitq_put(keeps, user);
860                 }
861         }
862 }
863
864 /**
865  * Inserts @p irn before @p before into schedule and notifies backend.
866  */
867 static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
868         ir_node *before, ir_node *irn, unsigned cycle)
869 {
870         be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
871         sched_add_before(before, irn);
872 }
873
874 /**
875  * Adds a node, its Projs (in case of mode_T nodes) and
876  * its Keeps to the schedule.
877  */
878 static void add_to_sched(be_ilpsched_env_t *env, ir_node *block, ir_node *irn, unsigned cycle) {
879         const ir_edge_t *edge;
880         waitq           *keeps;
881
882         /* mode_M nodes are not scheduled: bail out before allocating the keep queue */
883         if (get_irn_mode(irn) == mode_M)
884                 return;
885         keeps = new_waitq();
886         if (! sched_is_scheduled(irn))
887                 notified_sched_add_before(env, block, irn, cycle);
888
889         /* add Projs */
890         if (get_irn_mode(irn) == mode_T) {
891                 foreach_out_edge(irn, edge) {
892                         ir_node *user = get_edge_src_irn(edge);
893
894                         if (to_appear_in_schedule(user) || get_irn_mode(user) == mode_b)
895                                 notified_sched_add_before(env, block, user, cycle);
896
897                         check_for_keeps(keeps, block, user);
898                 }
899         }
900         else {
901                 check_for_keeps(keeps, block, irn);
902         }
903
904         /* add Keeps */
905         while (! waitq_empty(keeps)) {
906                 ir_node *keep = waitq_get(keeps);
907                 if (! sched_is_scheduled(keep))
908                         notified_sched_add_before(env, block, keep, cycle);
909         }
910
911         del_waitq(keeps);
912 }
913
914 /**
915  * Schedule all nodes in the given block, according to the ILP solution.
916  */
917 static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
918         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
919         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
920         sched_info_t          *info       = get_irn_sched_info(block);
921         be_ilpsched_irn_t     **sched_nodes;
922         unsigned              i, l;
923         ir_node               *cfop, *irn;
924         const ir_edge_t       *edge;
925
926         /* init block schedule list */
927         INIT_LIST_HEAD(&info->list);
928         info->scheduled = 1;
929
930         /* collect nodes and their scheduling time step */
931         sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
932         if (ba->n_interesting_nodes == 0) {
933                 /* ignore */
934         }
935         else if (ba->n_interesting_nodes == 1) {
936                 be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);
937
938                 /* add the single node */
939                 ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
940         }
941         else {
942                 /* check all nodes for their positive solution */
943                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
944                         be_ilpsched_irn_t    *node;
945                         ilpsched_node_attr_t *na;
946                         int                  tp_idx, found;
947                         unsigned             cur_var, t;
948
949                         node    = get_ilpsched_irn(env, irn);
950                         na      = get_ilpsched_node_attr(node);
951                         cur_var = 0;
952                         found   = 0;
953
954                         /* go over all variables of a node until the non-zero one is found */
955                         for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
956                                 for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
957                                         double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);
958
959                                         /* check if the variable is set to one (it's not zero then :) */
960                                         if (! LPP_VALUE_IS_0(val)) {
961                                                 na->sched_point = t;
962                                                 ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
963                                                 DBG((env->dbg, LEVEL_1, "Schedpoint of %+F is %u at unit type %s\n",
964                                                         irn, t, na->type_info[tp_idx].tp->name));
965                                                 found = 1;
966                                         }
967                                 }
968                         }
969                 }
970
971                 /* sort nodes ascending by scheduling time step */
972                 qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);
973         }
974
975         /* make all Phis ready and remember the single cf op */
976         cfop = NULL;
977         foreach_out_edge(block, edge) {
978                 irn = get_edge_src_irn(edge);
979
980                 switch (get_irn_opcode(irn)) {
981                         case iro_Phi:
982                                 add_to_sched(env, block, irn, 0);
983                                 break;
984                         case iro_Start:
985                         case iro_End:
986                         case iro_Proj:
987                         case iro_Bad:
988                                 break;
989                         default:
990                                 if (is_cfop(irn)) {
991                                         assert(cfop == NULL && "Highlander - there can be only one");
992                                         cfop = irn;
993                                 }
994                         break;
995                 }
996         }
997
998         /* add all nodes from list */
999         for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
1000                 ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
1001                 add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);
1002         }
1003
1004         /* schedule control flow node if not already done */
1005         if (cfop && ! sched_is_scheduled(cfop))
1006                 add_to_sched(env, block, cfop, 0);
1007
1008         DEL_ARR_F(sched_nodes);
1009 }
1010
1011 /***************************************************************
1012  *   _____ _      _____     _____           _   _
1013  *  |_   _| |    |  __ \   / ____|         | | (_)
1014  *    | | | |    | |__) | | (___   ___  ___| |_ _  ___  _ __
1015  *    | | | |    |  ___/   \___ \ / _ \/ __| __| |/ _ \| '_ \
1016  *   _| |_| |____| |       ____) |  __/ (__| |_| | (_) | | | |
1017  *  |_____|______|_|      |_____/ \___|\___|\__|_|\___/|_| |_|
1018  *
1019  ***************************************************************/
1020
1021 /**
1022  * Check if the node can be executed on the given unit type; returns the index into its type_info array or -1.
1023  */
1024 static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
1025         int                  i;
1026         ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1027
1028         for (i = na->n_unit_types - 1; i >= 0; --i) {
1029                 if (na->type_info[i].tp == tp)
1030                         return i;
1031         }
1032
1033         return -1;
1034 }
1035
1036 /************************************************
1037  *                   _       _     _
1038  *                  (_)     | |   | |
1039  *  __   ____ _ _ __ _  __ _| |__ | | ___  ___
1040  *  \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
1041  *   \ V / (_| | |  | | (_| | |_) | |  __/\__ \
1042  *    \_/ \__,_|_|  |_|\__,_|_.__/|_|\___||___/
1043  *
1044  ************************************************/
1045
1046 /**
1047  * Create the following variables:
1048  * - x_{nt}^k    binary     weighted with: t
1049  *      node n is scheduled at time step t to unit type k
1050  * ==>> These variables represent the schedule
1051  * TODO:
1052  *
1053  * - d_{nt}^k    binary     weighted with: t
1054  *      node n dies at time step t on unit type k
1055  *
1056  * - y_{nt}^k    binary     weighted with: num_nodes^2
1057  *      node n is scheduled at time step t to unit type k
1058  *      although all units of this type are occupied
1059  * ==>> These variables represent the register pressure
1060  */
1061 static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
1062         char                  buf[1024];
1063         ir_node               *irn;
1064         unsigned              num_block_var, num_nodes;
1065         ilpsched_block_attr_t *ba      = get_ilpsched_block_attr(block_node);
1066         unsigned              weight_y = ba->n_interesting_nodes * ba->n_interesting_nodes;
1067 #ifdef WITH_LIBCORE
1068         lc_timer_t            *t_var   = lc_timer_register("beilpsched_var", "create ilp variables");
1069 #endif /* WITH_LIBCORE */
1070
1071         ilp_timer_push(t_var);
1072         num_block_var = num_nodes = 0;
1073         foreach_linked_irns(ba->head_ilp_nodes, irn) {
1074                 const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
1075                 be_ilpsched_irn_t         *node;
1076                 ilpsched_node_attr_t      *na;
1077                 unsigned                  n_unit_types, tp_idx, unit_idx, n_var, cur_unit;
1078                 unsigned                  cur_var_d, cur_var_x, cur_var_y, num_die;
1079
1080                 /* count number of available unit types for this node */
1081                 for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)
1082                         /* just count */ ;
1083
1084                 node = get_ilpsched_irn(env, irn);
1085                 na   = get_ilpsched_node_attr(node);
1086
1087                 na->n_unit_types = n_unit_types;
1088                 na->type_info    = NEW_ARR_D(unit_type_info_t, var_obst, n_unit_types);
1089
1090                 /* fill the type info array */
1091                 for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
1092                         for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
1093                                 /* beware: we also count number of available units here */
1094                                 if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
1095                                         na->is_dummy_node = 1;
1096                         }
1097
1098                         na->type_info[tp_idx].tp      = execunits[tp_idx][0]->tp;
1099                         na->type_info[tp_idx].n_units = unit_idx;
1100                 }
1101
1102                 /* allocate space for ilp variables */
1103                 na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
1104                 memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));
1105
1106                 /* we need these variables only for "real" nodes */
1107                 if (! na->is_dummy_node) {
1108                         na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
1109                         memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));
1110
1111                         num_die        = ba->max_steps - na->asap + 1;
1112                         na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_die);
1113                         memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));
1114                 }
1115
1116                 DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
1117                         irn, na->asap, na->alap, na->n_unit_types));
1118
1119                 cur_var_x = cur_var_d = cur_var_y = cur_unit = n_var = 0;
1120                 /* create variables */
1121                 for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
1122                         unsigned t;
1123
1124                         for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1125                                 /* x_{nt}^k variables */
1126                                 snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
1127                                         get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1128                                 na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1129                                 DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1130                                 /* variable counter */
1131                                 n_var++;
1132                                 num_block_var++;
1133
1134                                 if (! na->is_dummy_node) {
1135                                         /* y_{nt}^k variables */
1136                                         snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
1137                                                 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1138                                         na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_binary, (double)(weight_y));
1139                                         DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1140
1141                                         /* variable counter */
1142                                         n_var++;
1143                                         num_block_var++;
1144                                 }
1145                         }
1146
1147                         /* a node can die at any step t: asap(n) <= t <= U */
1148                         if (! na->is_dummy_node) {
1149                                 for (t = na->asap - 1; t <= ba->max_steps - 1; ++t) {
1150                                         /* d_{nt}^k variables */
1151                                         snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
1152                                                 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1153                                         na->ilp_vars.d[cur_var_d++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1154                                         DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1155
1156                                         /* variable counter */
1157                                         n_var++;
1158                                         num_block_var++;
1159                                 }
1160                         }
1161                 }
1162
1163                 DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));
1164                 num_nodes++;
1165         }
1166         ilp_timer_pop();
1167         DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
1168                 num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
1169 }
1170
1171 /*******************************************************
1172  *                      _             _       _
1173  *                     | |           (_)     | |
1174  *   ___ ___  _ __  ___| |_ _ __ __ _ _ _ __ | |_ ___
1175  *  / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
1176  * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
1177  *  \___\___/|_| |_|___/\__|_|  \__,_|_|_| |_|\__|___/
1178  *
1179  *******************************************************/
1180
1181 /**
1182  * Create following ILP constraints:
1183  * - the assignment constraints:
1184  *     ensure that each node is executed once by exactly one (allowed) execution unit
1185  * - the dead node assignment constraints:
1186  *     ensure that a node dies at most once
1187  * - the precedence constraints:
1188  *     ensure that no data dependencies are violated
1189  */
1190 static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1191         unsigned              num_cst_assign, num_cst_prec, num_cst_dead;
1192         char                  buf[1024];
1193         ir_node               *irn;
1194         ilpsched_block_attr_t *ba            = get_ilpsched_block_attr(block_node);
1195         bitset_t              *bs_block_irns = bitset_alloca(ba->block_last_idx);
1196 #ifdef WITH_LIBCORE
1197         lc_timer_t            *t_cst_assign  = lc_timer_register("beilpsched_cst_assign",      "create assignment constraints");
1198         lc_timer_t            *t_cst_dead    = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
1199         lc_timer_t            *t_cst_prec    = lc_timer_register("beilpsched_cst_prec",        "create precedence constraints");
1200 #endif /* WITH_LIBCORE */
1201
1202         num_cst_assign = num_cst_prec = num_cst_dead = 0;
1203         foreach_linked_irns(ba->head_ilp_nodes, irn) {
1204                 int                  cst, tp_idx, i;
1205                 unsigned             cur_var;
1206                 be_ilpsched_irn_t    *node;
1207                 ilpsched_node_attr_t *na;
1208
1209                 node    = get_ilpsched_irn(env, irn);
1210                 na      = get_ilpsched_node_attr(node);
1211                 cur_var = 0;
1212
1213                 /* the assignment constraint */
1214                 ilp_timer_push(t_cst_assign);
1215                 snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
1216                 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
1217                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1218                 num_cst_assign++;
1219
1220                 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
1221                 ilp_timer_pop();
1222
1223                 /* the dead node assignment constraint */
1224                 if (! na->is_dummy_node) {
1225                         ilp_timer_push(t_cst_dead);
1226                         snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
1227                         cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1228                         DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
                             num_cst_dead++;
1229
1230                         lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);
1231                         ilp_timer_pop();
1232                 }
1233
1234                 /* the precedence constraints */
1235                 ilp_timer_push(t_cst_prec);
1236                 bs_block_irns = bitset_clear_all(bs_block_irns);
1237                 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
1238                         ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
1239                         unsigned             t_low, t_high, t;
1240                         be_ilpsched_irn_t    *pred_node;
1241                         ilpsched_node_attr_t *pna;
1242                         unsigned             delay;
1243
1244                         if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
1245                                 continue;
1246
1247                         pred_node = get_ilpsched_irn(env, pred);
1248                         pna       = get_ilpsched_node_attr(pred_node);
1249
1250                         assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
1251
1252                         if (! bitset_is_set(bs_block_irns, pna->block_idx))
1253                                 bitset_set(bs_block_irns, pna->block_idx);
1254                         else
1255                                 continue;
1256
1257                         /* irn = n, pred = m */
1258                         delay  = fixed_latency(env->sel, pred, env->block_env);
1259                         t_low  = MAX(na->asap, pna->asap + delay - 1);
1260                         t_high = MIN(na->alap, pna->alap + delay - 1);
1261                         for (t = t_low - 1; t <= t_high - 1; ++t) {
1262                                 unsigned tn, tm;
1263                                 int      *tmp_var_idx = NEW_ARR_F(int, 0);
1264
1265                                 snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
1266                                 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1267                                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1268                                 num_cst_prec++;
1269
1270                                 /* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
1271                                 if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
1272                                         /* node variables have smaller index than pred variables */
1273                                         for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1274                                                 for (tn = na->asap - 1; tn <= t; ++tn) {
1275                                                         unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1276                                                         ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1277                                                 }
1278                                         }
1279
1280                                         for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1281                                                 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1282                                                         unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1283                                                         ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1284                                                 }
1285                                         }
1286                                 }
1287                                 else {
1288                                         /* pred variables have smaller index than node variables */
1289                                         for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1290                                                 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1291                                                         unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1292                                                         ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1293                                                 }
1294                                         }
1295
1296                                         for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1297                                                 for (tn = na->asap - 1; tn <= t; ++tn) {
1298                                                         unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1299                                                         ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1300                                                 }
1301                                         }
1302                                 }
1303
1304                                 if (ARR_LEN(tmp_var_idx) > 0)
1305                                         lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1306
1307                                 DEL_ARR_F(tmp_var_idx);
1308                         }
1309                 }
1310                 ilp_timer_pop();
1311         }
1312         DBG((env->dbg, LEVEL_1, "\t%u assignment constraints (%g sec)\n",
1313                 num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
             DBG((env->dbg, LEVEL_1, "\t%u dead node assignment constraints (%g sec)\n",
                     num_cst_dead, ilp_timer_elapsed_usec(t_cst_dead) / 1000000.0));
1314         DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
1315                 num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
1316 }
1317
1318 /**
1319  * Create ILP resource constraints:
1320  * - assure that for each time step no more instructions are scheduled
1321  *   to a unit type than there are units of that type available
1322  */
1323 static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
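             /*
              * Editor's sketch (not in the original source): for every unit type k
              * (the DUMMY type is excluded) and every time step t the loop below emits
              *
              *     sum_{n : k allowed for n, asap(n)-1 <= t <= alap(n)-1}  x_{nt}^k  <=  n_units(k)
              *
              * i.e. no more nodes may start on unit type k at step t than there are
              * units of that type.
              */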
1324         int                   glob_type_idx;
1325         char                  buf[1024];
1326         unsigned              num_cst_resrc = 0;
1327         ilpsched_block_attr_t *ba           = get_ilpsched_block_attr(block_node);
1328 #ifdef WITH_LIBCORE
1329         lc_timer_t            *t_cst_rsrc   = lc_timer_register("beilpsched_cst_rsrc",   "create resource constraints");
1330 #endif /* WITH_LIBCORE */
1331
1332         ilp_timer_push(t_cst_rsrc);
1333         for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1334                 unsigned                 t;
1335                 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1336
1337                 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1338
1339                 /* check each time step */
1340                 for (t = 0; t < ba->max_steps; ++t) {
1341                         ir_node *irn;
1342                         int     cst;
1343                         int     *tmp_var_idx = NEW_ARR_F(int, 0);
1344
1345                         snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
1346                         cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
1347                         DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1348                         num_cst_resrc++;
1349
1350                         foreach_linked_irns(ba->head_ilp_nodes, irn) {
1351                                 be_ilpsched_irn_t    *node = get_ilpsched_irn(env, irn);
1352                                 ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
1353                                 int                  tp_idx;
1354
1355                                 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1356
1357                                 if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
1358                                         int cur_var = ILPVAR_IDX(na, tp_idx, t);
1359                                         ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);
1360                                 }
1361                         }
1362
1363                         /* set constraints if we have some */
1364                         if (ARR_LEN(tmp_var_idx) > 0)
1365                                 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1366
1367                         DEL_ARR_F(tmp_var_idx);
1368                 }
1369         }
1370         ilp_timer_pop();
1371         DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
1372                 num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
1373 }
1374
1375 /**
1376  * Create ILP bundle constraints:
1377  * - assure that at most bundle_size * bundles_per_cycle instructions
1378  *   can be started in any one time step.
1379  */
1380 static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
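             /*
              * Editor's sketch (not in the original source): with
              * n_instr_max = bundle_size * bundles_per_cycle, the loop below emits
              * for every time step t
              *
              *     sum_n sum_k  x_{nt}^k  <=  n_instr_max
              *
              * where the sum runs over all nodes except Projs, Keeps and nodes
              * assigned to the DUMMY unit.
              */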
1381         char                  buf[1024];
1382         unsigned              t;
1383         unsigned              num_cst_bundle = 0;
1384         unsigned              n_instr_max    = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
1385         ilpsched_block_attr_t *ba            = get_ilpsched_block_attr(block_node);
1386 #ifdef WITH_LIBCORE
1387         lc_timer_t            *t_cst_bundle  = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
1388 #endif /* WITH_LIBCORE */
1389
1390         ilp_timer_push(t_cst_bundle);
1391         for (t = 0; t < ba->max_steps; ++t) {
1392                 ir_node *irn;
1393                 int     cst;
1394                 int     *tmp_var_idx = NEW_ARR_F(int, 0);
1395
1396                 snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
1397                 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
1398                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1399                 num_cst_bundle++;
1400
1401                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1402                         be_ilpsched_irn_t    *node;
1403                         ilpsched_node_attr_t *na;
1404                         int                  tp_idx;
1405
1406                         /* Projs and Keeps do not contribute to bundle size */
1407                         if (is_Proj(irn) || be_is_Keep(irn))
1408                                 continue;
1409
1410                         node = get_ilpsched_irn(env, irn);
1411                         na   = get_ilpsched_node_attr(node);
1412
1413                         /* nodes assigned to DUMMY unit do not contribute to bundle size */
1414                         if (na->is_dummy_node)
1415                                 continue;
1416
1417                         if (t >= na->asap - 1 && t <= na->alap - 1) {
1418                                 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1419                                         int idx = ILPVAR_IDX(na, tp_idx, t);
1420                                         ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1421                                 }
1422                         }
1423                 }
1424
1425                 if (ARR_LEN(tmp_var_idx) > 0)
1426                         lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1427
1428                 DEL_ARR_F(tmp_var_idx);
1429         }
1430         ilp_timer_pop();
1431         DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
1432                 num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
1433 }
1434
1435 /**
1436  * Create ILP dying nodes constraints:
1437  * - set variable d_{nt}^k to 1 if node n dies at step t on unit k
1438  */
1439 static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
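             /*
              * Editor's sketch (not in the original source): for a node n with
              * consumers inside the block, each allowed unit type k of n and each
              * step t >= asap(n)-1 the loops below emit roughly
              *
              *     sum_{c in consumers(n)} sum_{k'} sum_{t' <= t}  x_{ct'}^{k'}
              *       - sum_{t' < t} d_{nt'}^k  -  n_consumer * d_{nt}^k   <=   n_consumer - 1
              *
              * so d_{nt}^k is forced to 1 as soon as all consumers of n are scheduled
              * at or before t and no earlier kill point has been chosen.
              */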
1440         char                  buf[1024];
1441         unsigned              t;
1442         unsigned              num_cst = 0;
1443         ilpsched_block_attr_t *ba     = get_ilpsched_block_attr(block_node);
1444 #ifdef WITH_LIBCORE
1445         lc_timer_t            *t_cst  = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
1446 #endif /* WITH_LIBCORE */
1447
1448         ilp_timer_push(t_cst);
1449         /* check all time_steps */
1450         for (t = 0; t < ba->max_steps; ++t) {
1451                 ir_node *irn;
1452
1453                 /* for all nodes */
1454                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1455                         be_ilpsched_irn_t    *node = get_ilpsched_irn(env, irn);
1456                         ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
1457
1458                         /* if node has no consumer within current block, it cannot die here */
1459                         /* we also ignore nodes assigned to dummy unit */
1460                         if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)
1461                                 continue;
1462
1463                         /* node can only die here if t is at least asap(n) */
1464                         if (t >= na->asap - 1) {
1465                                 int node_tp_idx;
1466
1467                                 /* for all unit types */
1468                                 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1469                                         int tp_idx, i, cst;
1470                                         int *tmp_var_idx = NEW_ARR_F(int, 0);
1471
1472                                         snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
1473                                         cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
1474                                         DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1475                                         num_cst++;
1476
1477                                         /* number of consumers scheduled till t */
1478                                         for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1479                                                 be_ilpsched_irn_t    *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1480                                                 ilpsched_node_attr_t *ca   = get_ilpsched_node_attr(cons);
1481
1482                                                 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1483                                                         unsigned tm;
1484
1485                                                         for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
1486                                                                 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1487                                                                 ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);
1488                                                         }
1489                                                 }
1490                                         }
1491
1492                                         /* could be that no consumer can be scheduled at this point */
1493                                         if (ARR_LEN(tmp_var_idx)) {
1494                                                 int      idx;
1495                                                 unsigned tn;
1496
1497                                                 /* subtract possible prior kill points */
1498                                                 for (tn = na->asap - 1; tn < t; ++tn) {
1499                                                         idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
1500                                                         lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);
1501                                                 }
1502
1503                                                 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1504                                                 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
1505                                                 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1506                                         }
1507
1508                                         DEL_ARR_F(tmp_var_idx);
1509                                 }
1510                         }
1511
1512                 }
1513         }
1514         ilp_timer_pop();
1515         DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
1516                 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1517 }
1518
1519 /**
1520  * Create ILP pressure constraints:
1521  * - add additional costs to the objective function if a node is scheduled
1522  *   on a unit although all units of this type are currently occupied
1523  */
1524 static void create_pressure_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
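             /*
              * Editor's sketch (not in the original source): for a node n, one of its
              * allowed unit types k and every step t in its scheduling window the
              * loops below emit roughly
              *
              *     sum_{m : k allowed for m} sum_{t' <= t} ( x_{mt'}^k - d_{mt'}^k )
              *       -  t * y_{nt}^k   <=   n_units(k) - 1
              *
              * so y_{nt}^k is forced above zero (and, per the comment above, adds cost
              * to the objective) as soon as the values assigned to unit type k that are
              * still alive at step t occupy all units of that type.
              */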
1525         char                  buf[1024];
1526         ir_node               *cur_irn;
1527         unsigned              num_cst = 0;
1528         ilpsched_block_attr_t *ba     = get_ilpsched_block_attr(block_node);
1529 #ifdef WITH_LIBCORE
1530         lc_timer_t            *t_cst  = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1531 #endif /* WITH_LIBCORE */
1532
1533         ilp_timer_push(t_cst);
1534         /* y_{nt}^k is set for each node and timestep and unit type */
1535         foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1536                 unsigned             cur_idx   = get_irn_idx(cur_irn);
1537                 be_ilpsched_irn_t    *cur_node = get_ilpsched_irn(env, cur_irn);
1538                 ilpsched_node_attr_t *cur_na   = get_ilpsched_node_attr(cur_node);
1539                 int                  glob_type_idx;
1540
1541                 /* we ignore nodes assigned to DUMMY unit here */
1542                 if (cur_na->is_dummy_node)
1543                         continue;
1544
1545                 /* for all types */
1546                 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1547                         be_execution_unit_type_t *cur_tp   = &env->cpu->unit_types[glob_type_idx];
1548                         int                      cur_tp_idx;
1549                         unsigned                 t;
1550
1551                         /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1552
1553                         /* check if node can be executed on this unit type */
1554                         cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1555                         if (cur_tp_idx < 0)
1556                                 continue;
1557
1558                         /* check all time_steps */
1559                         for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1560                                 int     cst, y_idx;
1561                                 ir_node *irn;
1562                                 int     *tmp_idx_1  = NEW_ARR_F(int, 0);
1563                                 int     *tmp_idx_m1 = NEW_ARR_F(int, 0);
1564
1565                                 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1566                                 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1567                                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1568                                 num_cst++;
1569
1570                                 /*
1571                                         - accumulate all nodes scheduled on unit type k till t
1572                                         - subtract all nodes that died on unit type k till t
1573                                 */
1574                                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1575                                         be_ilpsched_irn_t    *node = get_ilpsched_irn(env, irn);
1576                                         ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
1577                                         unsigned             tn, tmax;
1578                                         int                  tp_idx;
1579
1580                                         tmax   = MIN(t, na->alap - 1);
1581                                         tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1582
1583                                         /* current unit type is not suitable for current node */
1584                                         if (tp_idx < 0)
1585                                                 continue;
1586
1587                                         for (tn = na->asap - 1; tn <= tmax; ++tn) {
1588                                                 int idx;
1589
1590                                                 /* node scheduled */
1591                                                 idx = ILPVAR_IDX(na, tp_idx, tn);
1592                                                 ARR_APP1(int, tmp_idx_1, na->ilp_vars.x[idx]);
1593
1594                                                 /* node dead */
1595                                                 idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, tn);
1596                                                 ARR_APP1(int, tmp_idx_m1, na->ilp_vars.d[idx]);
1597                                         }
1598                                 }
1599
1600                                 if (ARR_LEN(tmp_idx_1) > 0)
1601                                         lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_1, ARR_LEN(tmp_idx_1), 1.0);
1602
1603                                 if (ARR_LEN(tmp_idx_m1) > 0)
1604                                         lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_m1, ARR_LEN(tmp_idx_m1), -1.0);
1605
1606                                 /* BEWARE: t is unsigned, so (double)(-t) won't work */
1607                                 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1608                                 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], 0.0 - (double)(t));
1609
1610                                 DEL_ARR_F(tmp_idx_1);
1611                                 DEL_ARR_F(tmp_idx_m1);
1612                         }
1613                 }
1614         }
1615         ilp_timer_pop();
1616         DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1617                 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1618 }
1619
1620 /***************************************************
1621  *  _____ _      _____                    _
1622  * |_   _| |    |  __ \                  (_)
1623  *   | | | |    | |__) |  _ __ ___   __ _ _ _ __
1624  *   | | | |    |  ___/  | '_ ` _ \ / _` | | '_ \
1625  *  _| |_| |____| |      | | | | | | (_| | | | | |
1626  * |_____|______|_|      |_| |_| |_|\__,_|_|_| |_|
1627  *
1628  ***************************************************/
1629
1630 /**
1631  * Create the ILP (add variables, build constraints, solve, build schedule from solution).
1632  */
1633 static void create_ilp(ir_node *block, void *walk_env) {
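             /*
              * Editor's note (not in the original source): the LPP is set up as a
              * minimization problem; the objective coefficients are attached when the
              * variables are created, e.g. each d_{nt}^k is added with weight (t + 1)
              * in create_variables(), so later kill points are more expensive.
              */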
1634         be_ilpsched_env_t     *env           = walk_env;
1635         be_ilpsched_irn_t     *block_node    = get_ilpsched_irn(env, block);
1636         ilpsched_block_attr_t *ba            = get_ilpsched_block_attr(block_node);
1637         FILE                  *logfile       = NULL;
1638         lpp_t                 *lpp           = NULL;
1639         struct obstack        var_obst;
1640
1641         DBG((env->dbg, 255, "\n\n\n=========================================\n"));
1642         DBG((env->dbg, 255, "  ILP Scheduling for %+F\n", block));
1643         DBG((env->dbg, 255, "=========================================\n\n"));
1644
1645         DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
1646                 block, ba->n_interesting_nodes, ba->max_steps));
1647
1648         /* notify backend and get block environment */
1649         env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);
1650
1651         /* if we have fewer than two interesting nodes, there is no need to create the ILP */
1652         if (ba->n_interesting_nodes > 1) {
1653                 double fact_var        = ba->n_interesting_nodes > 25 ? 1.1 : 1.2;
1654                 double fact_cst        = ba->n_interesting_nodes > 25 ? 0.7 : 1.5;
1655                 int    base_num        = ba->n_interesting_nodes * ba->n_interesting_nodes;
1656                 int    estimated_n_var = (int)((double)base_num * fact_var);
1657                 int    estimated_n_cst = (int)((double)base_num * fact_cst);
1658
1659                 DBG((env->dbg, LEVEL_1, "Creating LPP with estimated numbers: %d vars, %d cst\n",
1660                         estimated_n_var, estimated_n_cst));
1661
1662                 /* set up the LPP object */
1663                 lpp = new_lpp_userdef(
1664                         "be ilp scheduling",
1665                         lpp_minimize,
1666                         estimated_n_var + 1,  /* num vars */
1667                         estimated_n_cst + 20, /* num cst */
1668                         1.2);                 /* grow factor */
1669                 obstack_init(&var_obst);
1670
1671                 /* create ILP variables */
1672                 create_variables(env, lpp, block_node, &var_obst);
1673
1674                 /* create ILP constraints */
1675                 DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
1676                 create_assignment_and_precedence_constraints(env, lpp, block_node);
1677                 create_ressource_constraints(env, lpp, block_node);
1678                 create_bundle_constraints(env, lpp, block_node);
1679                 create_dying_nodes_constraint(env, lpp, block_node);
1680                 create_pressure_constraint(env, lpp, block_node);
1681
1682                 DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp->var_next, lpp->cst_next));
1683
1684                 /* debug stuff, dump lpp when debugging is on  */
1685                 DEBUG_ONLY(
1686                         if (firm_dbg_get_mask(env->dbg) > 0) {
1687                                 char buf[1024];
1688                                 FILE *f;
1689
1690                                 snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
1691                                 f = fopen(buf, "w");
1692                                 lpp_dump_plain(lpp, f);
1693                                 fclose(f);
1694                                 snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));
1695                                 lpp_dump(lpp, buf);
1696                         }
1697                 );
1698
1699                 /* set solve time limit */
1700                 lpp_set_time_limit(lpp, env->opts->time_limit);
1701
1702                 /* set logfile if requested */
1703                 if (strlen(env->opts->log_file) > 0) {
1704                         if (strcasecmp(env->opts->log_file, "stdout") == 0)
1705                                 lpp_set_log(lpp, stdout);
1706                         else if (strcasecmp(env->opts->log_file, "stderr") == 0)
1707                                 lpp_set_log(lpp, stderr);
1708                         else {
1709                                 logfile = fopen(env->opts->log_file, "w");
1710                                 if (! logfile)
1711                                         fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
1712                                 else
1713                                         lpp_set_log(lpp, logfile);
1714                         }
1715                 }
1716
1717                 /* solve the ILP */
1718                 lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);
1719
1720                 if (logfile)
1721                         fclose(logfile);
1722
1723                 /* check for valid solution */
1724                 if (! lpp_is_sol_valid(lpp)) {
1725                         char buf[1024];
1726                         FILE *f;
1727
1728                         snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.txt", get_irn_node_nr(block));
1729                         f = fopen(buf, "w");
1730                         lpp_dump_plain(lpp, f);
1731                         fclose(f);
1732                         snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.mps", get_irn_node_nr(block));
1733                         lpp_dump(lpp, buf);
1734                         dump_ir_block_graph(env->irg, "-assert");
1735
1736                         assert(0 && "ILP solution is not feasible!");
1737                 }
1738
1739                 DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
1740                 DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
1741                 DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
1742                 DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp->iterations));
1743                 DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp->sol_time));
1744                 DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
1745                 DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));
1746
1747                 DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));
1748         }
1749
1750         /* apply solution */
1751         apply_solution(env, lpp, block);
1752
1753         if (lpp)
1754                 free_lpp(lpp);
1755
1756         /* notify backend */
1757         be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
1758 }
1759
1760 /**
1761  * Perform ILP scheduling on the given irg.
1762  */
1763 void be_ilp_sched(const be_irg_t *birg) {
1764         be_ilpsched_env_t          env;
1765         const char                 *name = "be ilp scheduling";
1766         arch_isa_t                 *isa  = birg->main_env->arch_env->isa;
1767         const ilp_sched_selector_t *sel  = isa->impl->get_ilp_sched_selector(isa);
1768
1769         FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");
1770
1771         //firm_dbg_set_mask(env.dbg, 31);
1772
1773         env.irg_env    = be_ilp_sched_init_irg_ilp_schedule(sel, birg->irg);
1774         env.sel        = sel;
1775         env.irg        = birg->irg;
1776         env.height     = heights_new(birg->irg);
1777         env.main_env   = birg->main_env;
1778         env.arch_env   = birg->main_env->arch_env;
1779         env.cpu        = arch_isa_get_machine(birg->main_env->arch_env->isa);
1780         env.opts       = &ilp_opts;
1781         phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn);
1782
1783         /* assign a unique per block number to all interesting nodes */
1784         irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
1785
1786         /*
1787                 The block indices are completely built after the walk;
1788                 now we can allocate the bitsets (size depends on block indices)
1789                 for all nodes.
1790         */
1791         phase_reinit_irn_data(&env.ph);
1792
1793         /* Collect all root nodes (having no user in their block) and calculate ASAP. */
1794         irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);
1795
1796         /* Calculate ALAP of all irns */
1797         irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);
1798
1799         /* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
1800         irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);
1801
1802         /* we don't need this information any longer */
1803         heights_free(env.height);
1804
1805         /* perform ILP scheduling */
1806         irg_block_walk_graph(env.irg, clear_unwanted_data, create_ilp, &env);
1807
1808         DEBUG_ONLY(
1809                 if (firm_dbg_get_mask(env.dbg)) {
1810                         phase_stat_t stat;
1811                         phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
1812
1813                         fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);
1814                 }
1815         );
1816
1817         /* free all allocated objects */
1818         phase_free(&env.ph);
1819
1820         /* notify backend */
1821         be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
1822 }
1823
1824 #ifdef WITH_LIBCORE
1825 /**
1826  * Register ILP scheduler options.
1827  */
1828 void ilpsched_register_options(lc_opt_entry_t *grp) {
1829         static int     run_once = 0;
1830         lc_opt_entry_t *sched_grp;
1831
1832         if (! run_once) {
1833                 run_once  = 1;
1834                 sched_grp = lc_opt_get_grp(grp, "ilpsched");
1835
1836                 lc_opt_add_table(sched_grp, ilpsched_option_table);
1837         }
1838 }
1839 #endif /* WITH_LIBCORE */
1840
1841 #else /* WITH_ILP */
1842
1843 static int some_picky_compiler_do_not_allow_empty_files;
1844
1845 #endif /* WITH_ILP */