removed warning for ILP scheduler, as the scheduler can now schedule on it's own
[libfirm] / ir / be / beilpsched.c
1 /**
2  * Scheduling algorithms.
3  * An ILP scheduler based on
4  * "ILP-based Instruction Scheduling for IA-64"
5  * by Daniel Kaestner and Sebastian Winkel
6  *
7  * @date   22.10.2005
8  * @author Christian Wuerdig
9  * @cvs-id $Id$
10  */
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #ifdef WITH_ILP
16
17 #include <math.h>
18
19 #ifndef _WIN32
20 #include <strings.h>
21 #endif /* _WIN32 */
22
23 #include "irnode_t.h"
24 #include "irgwalk.h"
25 #include "irbitset.h"
26 #include "irphase_t.h"
27 #include "height.h"
28 #include "iredges.h"
29 #include "pdeq.h"
30 #include "debug.h"
31 #include "irtools.h"
32 #include "irdump.h"
33 #include "plist.h"
34
35 #include <lpp/lpp.h>
36 #include <lpp/lpp_net.h>
37
38 #ifdef WITH_LIBCORE
39 #include <libcore/lc_opts.h>
40 #include <libcore/lc_opts_enum.h>
41 #include <libcore/lc_timing.h>
42 #endif /* WITH_LIBCORE */
43
44 #include "be.h"
45 #include "benode_t.h"
46 #include "besched_t.h"
47 #include "beilpsched.h"
48
49 typedef struct _ilpsched_options_t {
50         unsigned time_limit;
51         char     log_file[1024];
52 } ilpsched_options_t;
53
54 typedef struct _unit_type_info_t {
55         int                            n_units;
56         const be_execution_unit_type_t *tp;
57 } unit_type_info_t;
58
59 /**
60  * holding the ILP variables of the different types
61  */
62 typedef struct _ilp_var_types_t {
63         int *x;   /* x_{nt}^k variables */
64         int *d;   /* d_{nt}^k variables */
65         int *y;   /* y_{nt}^k variables */
66 } ilp_var_types_t;
67
68 /* attributes for a node */
69 typedef struct _ilpsched_node_attr_t {
70         unsigned asap;                     /**< The ASAP scheduling control step */
71         unsigned alap;                     /**< The ALAP scheduling control step */
72         unsigned sched_point;              /**< the step in which the node is finally scheduled */
73         unsigned visit_idx;                /**< Index of the node having visited this node last */
74         unsigned consumer_idx;             /**< Index of the node having counted this node as consumer last */
75         unsigned n_consumer;               /**< Number of consumers */
76         ir_node  **block_consumer;         /**< List of consumer being in the same block */
77         unsigned block_idx     : 30;       /**< A unique per block index */
78         unsigned alap_changed  : 1;        /**< the current ALAP has changed, revisit preds */
79         unsigned is_dummy_node : 1;        /**< this node is assigned to DUMMY unit */
80         bitset_t *transitive_block_nodes;  /**< Set of transitive block nodes (predecessors
81                                                                                         for ASAP, successors for ALAP */
82         unsigned n_unit_types;             /**< number of allowed execution unit types */
83         unit_type_info_t *type_info;       /**< list of allowed execution unit types */
84         ilp_var_types_t  ilp_vars;         /**< the different ILP variables */
85 } ilpsched_node_attr_t;
86
87 /* attributes for a block */
88 typedef struct _ilpsched_block_attr_t {
89         unsigned block_last_idx;        /**< The highest node index in block so far */
90         unsigned n_interesting_nodes;   /**< The number of nodes interesting for scheduling */
91         unsigned max_steps;             /**< Upper bound for block execution */
92         plist_t  *root_nodes;           /**< A list of nodes having no user in current block */
93         ir_node  *head_ilp_nodes;       /**< A linked list of nodes which will contribute to ILP */
94 } ilpsched_block_attr_t;
95
96 typedef union _ilpsched_attr_ {
97         ilpsched_node_attr_t  node_attr;
98         ilpsched_block_attr_t block_attr;
99 } ilpsched_attr_t;
100
101 /* A irn for the phase and it's attributes (either node or block) */
102 typedef struct {
103         ir_node         *irn;
104         ilpsched_attr_t attr;
105 } be_ilpsched_irn_t;
106
107 /* The ILP scheduling environment */
108 typedef struct {
109         phase_t              ph;            /**< The phase */
110         ir_graph             *irg;          /**< The current irg */
111         heights_t            *height;       /**< The heights object of the irg */
112         void                 *irg_env;      /**< An environment for the irg scheduling, provided by the backend */
113         void                 *block_env;    /**< An environment for scheduling a block, provided by the backend */
114         const arch_env_t     *arch_env;
115         const arch_isa_t     *isa;          /**< The ISA */
116         const be_main_env_t  *main_env;
117         const be_machine_t   *cpu;          /**< the current abstract machine */
118         ilpsched_options_t   *opts;         /**< the ilp options for current irg */
119         const ilp_sched_selector_t *sel;    /**< The ILP sched selector provided by the backend */
120         DEBUG_ONLY(firm_dbg_module_t *dbg);
121 } be_ilpsched_env_t;
122
123 /* convenience macros to handle phase irn data */
124 #define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
125 #define is_ilpsched_block(node)             (is_Block((node)->irn))
126 #define get_ilpsched_block_attr(block)      (&(block)->attr.block_attr)
127 #define get_ilpsched_node_attr(node)        (&(node)->attr.node_attr)
128
129 /* iterate over a list of ir_nodes linked by link field */
130 #define foreach_linked_irns(head, iter) for ((iter) = (head); (iter); (iter) = get_irn_link((iter)))
131
132 /* check if node is considered for ILP scheduling */
133 #define consider_for_sched(isa, irn) \
134         (! (is_Block(irn)            ||  \
135                 is_normal_Proj(isa, irn) ||  \
136                 is_Phi(irn)              ||  \
137                 is_NoMem(irn)            ||  \
138                 is_End(irn)                  \
139                 ))
140
141 /* gives the valid scheduling time step interval for a node */
142 #define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)
143
144 /* gives the valid interval where a node can die */
145 #define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)
146
147 /* gives the corresponding ILP variable for given node, unit and time step */
148 #define ILPVAR_IDX(na, unit, control_step) \
149         ((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)
150
151 /* gives the corresponding dead nodes ILP variable for given node, unit and time step */
152 #define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
153         ((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)
154
155 /* check if a double value is within an epsilon environment of 0 */
156 #define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)
157
158 #ifdef WITH_LIBCORE
159         #define ilp_timer_push(t)         lc_timer_push((t))
160         #define ilp_timer_pop()           lc_timer_pop()
161         #define ilp_timer_elapsed_usec(t) lc_timer_elapsed_usec((t))
162 #else /* WITH_LIBCORE */
163         #define ilp_timer_push(t)
164         #define ilp_timer_pop()
165         #define ilp_timer_elapsed_usec(t) 0.0
166 #endif /* WITH_LIBCORE */
167
168 /* option variable */
169 static ilpsched_options_t ilp_opts = {
170         120,   /* 120 sec per block time limit */
171         ""     /* no log file */
172 };
173
174 #ifdef WITH_LIBCORE
175 /* ILP options */
176 static const lc_opt_table_entry_t ilpsched_option_table[] = {
177         LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
178         LC_OPT_ENT_STR("lpp_log",    "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
179         { NULL }
180 };
181 #endif /* WITH_LIBCORE */
182
183 /**
184  * Check if irn is a Proj, which has no execution units assigned.
185  * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
186  */
187 static INLINE int is_normal_Proj(const arch_isa_t *isa, const ir_node *irn) {
188         return is_Proj(irn) && (arch_isa_get_allowed_execution_units(isa, irn) == NULL);
189 }
190
191 /**
192  * Skips normal Projs.
193  * @return predecessor if irn is a normal Proj, otherwise irn.
194  */
195 static INLINE ir_node *skip_normal_Proj(const arch_isa_t *isa, ir_node *irn) {
196         if (is_normal_Proj(isa, irn))
197                 return get_Proj_pred(irn);
198         return irn;
199 }
200
201 /**
202  * Compare scheduling time steps of two be_ilpsched_irn's.
203  */
204 static int cmp_ilpsched_irn(const void *a, const void *b) {
205         be_ilpsched_irn_t    *n1   = *(be_ilpsched_irn_t **)a;
206         be_ilpsched_irn_t    *n2   = *(be_ilpsched_irn_t **)b;
207         ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
208         ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);
209
210         return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
211 }
212
213 /**
214  * In case there is no phase information for irn, initialize it.
215  */
216 static void *init_ilpsched_irn(phase_t *ph, ir_node *irn, void *old) {
217         be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
218
219         if (res == old) {
220                 /* if we have already some data: check for reinitialization */
221
222                 if (! is_Block(irn)) {
223                         ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
224
225                         if (! na->transitive_block_nodes) {
226                                 ir_node               *block      = get_nodes_block(irn);
227                                 be_ilpsched_irn_t     *block_node = phase_get_or_set_irn_data(ph, block);
228                                 ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
229
230                                 /* we are called after the block indices have been build: create bitset */
231                                 na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);
232                         }
233                         else {
234                                 /* we are called from reinit block data: clear the bitset */
235                                 bitset_clear_all(na->transitive_block_nodes);
236                                 na->visit_idx    = 0;
237                                 na->alap_changed = 1;
238                         }
239                 }
240                 return old;
241         }
242
243         res->irn = irn;
244
245         /* set ilpsched irn attributes (either block or irn) */
246         if (is_Block(irn)) {
247                 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);
248
249                 ba->n_interesting_nodes = 0;
250                 ba->block_last_idx      = 0;
251                 ba->root_nodes          = plist_new();
252                 ba->head_ilp_nodes      = NULL;
253         }
254         else {
255                 ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
256                 memset(na, 0, sizeof(*na));
257         }
258
259         return res;
260 }
261
262 /**
263  * Assign a per block unique number to each node.
264  */
265 static void build_block_idx(ir_node *irn, void *walk_env) {
266         be_ilpsched_env_t     *env = walk_env;
267         be_ilpsched_irn_t     *node, *block_node;
268         ilpsched_node_attr_t  *na;
269         ilpsched_block_attr_t *ba;
270
271         if (! consider_for_sched(env->arch_env->isa, irn))
272                 return;
273
274         node       = get_ilpsched_irn(env, irn);
275         na         = get_ilpsched_node_attr(node);
276         block_node = get_ilpsched_irn(env, get_nodes_block(irn));
277         ba         = get_ilpsched_block_attr(block_node);
278
279         na->block_idx = ba->block_last_idx++;
280 }
281
282 /********************************************************
283  *                              __        _
284  *                             / /       | |
285  *   __ _ ___  __ _ _ __      / /    __ _| | __ _ _ __
286  *  / _` / __|/ _` | '_ \    / /    / _` | |/ _` | '_ \
287  * | (_| \__ \ (_| | |_) |  / /    | (_| | | (_| | |_) |
288  *  \__,_|___/\__,_| .__/  /_/      \__,_|_|\__,_| .__/
289  *                 | |                           | |
290  *                 |_|                           |_|
291  ********************************************************/
292
293 /**
294  * Add all nodes having no user in current block to last_nodes list.
295  */
296 static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
297         ir_node               *block;
298         const ir_edge_t       *edge;
299         be_ilpsched_irn_t     *block_node, *node;
300         ilpsched_block_attr_t *ba;
301         ilpsched_node_attr_t  *na;
302         int                   i, j;
303         be_ilpsched_env_t     *env           = walk_env;
304         int                   has_block_user = 0;
305         unsigned              n_consumer     = 0;
306         ir_edge_kind_t        ekind[2]       = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
307         ir_node               **consumer;
308         int                   idx;
309
310         if (! consider_for_sched(env->arch_env->isa, irn))
311                 return;
312
313         block    = get_nodes_block(irn);
314     idx      = get_irn_idx(irn);
315         consumer = NEW_ARR_F(ir_node *, 0);
316
317         DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));
318
319         /* check data and dependency out edges */
320         for (i = 0; i < 2 && ! has_block_user; ++i) {
321                 foreach_out_edge_kind(irn, edge, ekind[i]) {
322                         ir_node *user = get_edge_src_irn(edge);
323
324                         if (is_normal_Proj(env->arch_env->isa, user)) {
325                                 const ir_edge_t *user_edge;
326
327                                 if (get_irn_mode(user) == mode_X)
328                                         continue;
329
330                                 /* The ABI ensures, that there will be no ProjT nodes in the graph. */
331                                 for (j = 0; j < 2; ++j) {
332                                         foreach_out_edge_kind(user, user_edge, ekind[j]) {
333                                                 ir_node *real_user = get_edge_src_irn(user_edge);
334
335                                                 if (! is_Phi(real_user) && ! is_Block(real_user)) {
336                                                         be_ilpsched_irn_t    *node = get_ilpsched_irn(env, real_user);
337                                                         ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);
338
339                                                         /* skip already visited nodes */
340                                                         if (ua->consumer_idx == idx)
341                                                                 continue;
342
343                                                         /* check if node has user in this block and collect the user if it's a data user */
344                                                         if (get_nodes_block(real_user) == block) {
345                                                                 if (i == 0 && j == 0)
346                                                                         ARR_APP1(ir_node *, consumer, real_user);
347                                                                 has_block_user = 1;
348                                                         }
349
350                                                         /* only count data consumer */
351                                                         if (i == 0)
352                                                                 n_consumer++;
353
354                                                         /* mark user as visited by this node */
355                                                         ua->consumer_idx = idx;
356                                                 }
357                                         }
358                                 }
359                         }
360                         else if (is_Block(user)) {
361                                 continue;
362                         }
363                         else if (! is_Phi(user)) {
364                                 be_ilpsched_irn_t    *node = get_ilpsched_irn(env, user);
365                                 ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);
366
367                                 /* skip already visited nodes */
368                                 if (ua->consumer_idx == idx)
369                                         continue;
370
371                                 /* check if node has user in this block and collect the user if it's a data user */
372                                 if (get_nodes_block(user) == block) {
373                                         if (i == 0)
374                                                 ARR_APP1(ir_node *, consumer, user);
375                                         has_block_user = 1;
376                                 }
377
378                                 /* only count data consumer */
379                                 if (i == 0)
380                                         n_consumer++;
381
382                                 /* mark user visited by this node */
383                                 ua->consumer_idx = idx;
384                         }
385                 }
386         }
387
388         block_node = get_ilpsched_irn(env, block);
389         ba         = get_ilpsched_block_attr(block_node);
390
391         ba->n_interesting_nodes++;
392
393         /* current irn has no user inside this block, add to queue */
394         if (! has_block_user) {
395                 DB((env->dbg, LEVEL_3, "root node\n"));
396                 plist_insert_back(ba->root_nodes, irn);
397         }
398         else {
399                 DB((env->dbg, LEVEL_3, "normal node\n"));
400         }
401
402         /* record number of all consumer and the consumer within the same block */
403         node = get_ilpsched_irn(env, irn);
404         na   = get_ilpsched_node_attr(node);
405         na->n_consumer     = n_consumer;
406         na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
407         memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
408         DEL_ARR_F(consumer);
409 }
410
411 /**
412  * Calculate the ASAP scheduling step for current irn.
413  */
414 static void calculate_irn_asap(ir_node *irn, void *walk_env) {
415         be_ilpsched_env_t     *env = walk_env;
416         int                   i;
417         ir_node               *block;
418         be_ilpsched_irn_t     *node, *block_node;
419         ilpsched_node_attr_t  *na;
420         ilpsched_block_attr_t *ba;
421         unsigned              lat;
422
423         /* These nodes are handled separate */
424         if (! consider_for_sched(env->arch_env->isa, irn))
425                 return;
426
427         DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));
428
429         block    = get_nodes_block(irn);
430         node     = get_ilpsched_irn(env, irn);
431         na       = get_ilpsched_node_attr(node);
432         na->asap = 1;
433
434         for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
435                 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
436
437                 /* check for greatest distance to top */
438                 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
439                         be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
440                         ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
441                         unsigned             lat;
442
443                         lat      = be_ilp_sched_latency(env->sel, pred, env->block_env);
444                         na->asap = MAX(na->asap, pna->asap + lat);
445                 }
446         }
447
448         /* add node to ILP node list and update max_steps */
449         block_node = get_ilpsched_irn(env, block);
450         ba         = get_ilpsched_block_attr(block_node);
451
452         set_irn_link(irn, ba->head_ilp_nodes);
453         ba->head_ilp_nodes = irn;
454         lat                = be_ilp_sched_latency(env->sel, irn, env->block_env);
455
456         if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
457                 lat = 1;
458
459         ba->max_steps += lat;
460
461         DB((env->dbg, LEVEL_2, "%u\n", na->asap));
462 }
463
464 /**
465  * Calculate the ALAP scheduling step of all irns in current block.
466  * Depends on max_steps being calculated.
467  */
468 static void calculate_block_alap(ir_node *block, void *walk_env) {
469         be_ilpsched_env_t     *env        = walk_env;
470         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
471         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
472         waitq                 *cur_queue  = new_waitq();
473         plist_element_t       *el;
474
475         assert(is_Block(block));
476
477         DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
478                 block, ba->n_interesting_nodes, ba->max_steps));
479
480         /* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
481         //phase_reinit_block_irn_data(&env->ph, block);
482
483         /* init start queue */
484         foreach_plist(ba->root_nodes, el) {
485                 waitq_put(cur_queue, plist_element_get_value(el));
486         }
487
488         /* repeat until all nodes are processed */
489         while (! waitq_empty(cur_queue)) {
490                 waitq *next_queue = new_waitq();
491
492                 /* process all nodes in current step */
493                 while (! waitq_empty(cur_queue)) {
494                         ir_node              *cur_irn = waitq_get(cur_queue);
495                         be_ilpsched_irn_t    *node    = get_ilpsched_irn(env, cur_irn);
496                         ilpsched_node_attr_t *na      = get_ilpsched_node_attr(node);
497                         int                  i;
498
499                         /* cur_node has no alap set -> it's a root node, set to max alap */
500                         if (na->alap == 0) {
501                                 na->alap = ba->max_steps;
502                                 DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
503                                         cur_irn, na->alap));
504                         }
505                         else {
506                                 DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",
507                                         cur_irn, na->alap));
508                         }
509
510                         /* set the alap's of all predecessors */
511                         for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
512                                 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(cur_irn, i));
513
514                                 /* check for greatest distance to bottom */
515                                 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
516                                         be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
517                                         ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
518                                         unsigned             lat;
519
520                                         /* mark the predecessor as visited by current irn */
521                                         if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
522                                                 continue;
523                                         pna->visit_idx = get_irn_idx(cur_irn);
524
525                                         lat = be_ilp_sched_latency(env->sel, pred, env->block_env);
526
527                                         /* set ALAP of current pred */
528                                         if (pna->alap == 0) {
529                                                 /* current ALAP is 0: node has not yet been visited */
530                                                 pna->alap_changed = 1;
531                                                 pna->alap         = na->alap - lat;
532                                         }
533                                         else if (pna->alap > na->alap - lat) {
534                                                 /* we found a longer path to root node: change ALAP */
535                                                 pna->alap         = na->alap - lat;
536                                                 pna->alap_changed = 1;
537                                         }
538                                         else {
539                                                 /* current ALAP is best found so far: keep it */
540                                                 pna->alap_changed = 0;
541                                         }
542
543                                         DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));
544
545                                         /* enqueue node for next iteration */
546                                         if (get_irn_ins_or_deps(pred) > 0)
547                                                 waitq_put(next_queue, pred);
548                                 }
549                         }
550                 }
551
552                 /* prepare for next iteration */
553                 del_waitq(cur_queue);
554                 cur_queue = next_queue;
555         }
556 }
557
558 /**
559  * We can free the list of root nodes here.
560  */
561 static void clear_unwanted_data(ir_node *block, void *walk_env) {
562         be_ilpsched_env_t     *env        = walk_env;
563         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
564         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
565
566         plist_free(ba->root_nodes);
567         ba->root_nodes = NULL;
568 }
569
570 /**
571  * Refine the {ASAP(n), ALAP(n)} interval for the nodes.
572  * Set the ASAP/ALAP times of Projs and Keeps to their ancestor ones.
573  */
574 static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
575         be_ilpsched_env_t    *env = walk_env;
576         be_ilpsched_irn_t    *node, *pred_node;
577         ilpsched_node_attr_t *na, *pna;
578         ir_node              *pred;
579
580         if (! consider_for_sched(env->arch_env->isa, irn))
581                 return;
582
583         if (! is_Proj(irn) && ! be_is_Keep(irn))
584                 return;
585
586         /* go to the ancestor */
587         if (be_is_Keep(irn))
588                 irn = get_irn_n(irn, 0);
589         pred = skip_Proj(irn);
590
591         node      = get_ilpsched_irn(env, irn);
592         pred_node = get_ilpsched_irn(env, pred);
593         na        = get_ilpsched_node_attr(node);
594         pna       = get_ilpsched_node_attr(pred_node);
595
596         na->asap = pna->asap;
597         na->alap = pna->alap;
598
599         DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
600 }
601
602 #if 0
603 /**
604  * Calculate the ASAP scheduling step for current irn.
605  */
606 static void calculate_irn_asap(ir_node *irn, void *walk_env) {
607         be_ilpsched_irn_t *node;
608         be_ilpsched_env_t *env = walk_env;
609         int      i;
610         ir_node  *block;
611         ilpsched_node_attr_t *na;
612
613         /* These nodes are handled separate */
614         if (! consider_for_sched(env->arch_env->isa, irn))
615                 return;
616
617         DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F\n", irn));
618
619         node  = get_ilpsched_irn(env, irn);
620         block = get_nodes_block(irn);
621         na    = get_ilpsched_node_attr(node);
622
623         /* accumulate all transitive predecessors of current node */
624         for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
625                 ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
626                 be_ilpsched_irn_t    *pred_node;
627                 ilpsched_node_attr_t *pna;
628                 unsigned             idx;
629
630                 if (be_is_Keep(pred))
631                         pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));
632
633                 if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))
634                         continue;
635
636                 pred_node = get_ilpsched_irn(env, pred);
637                 pna       = get_ilpsched_node_attr(pred_node);
638                 idx       = get_irn_idx(irn);
639
640                 assert(pna->asap && "missing ASAP of predecessor");
641
642                 /*
643                         We have not already visited this predecessor
644                         -> accumulate it's predecessors
645                 */
646                 if (pna->visit_idx != idx) {
647                         pna->visit_idx = idx;
648                         na->transitive_block_nodes = bitset_or(na->transitive_block_nodes, pna->transitive_block_nodes);
649                         DBG((env->dbg, LEVEL_3, "\taccumulating preds of %+F\n", pred));
650                 }
651         }
652
653         /* every node is it's own transitive predecessor in block */
654         bitset_set(na->transitive_block_nodes, na->block_idx);
655
656         /* asap = number of transitive predecessors in this block */
657         na->asap = bitset_popcnt(na->transitive_block_nodes);
658
659         DBG((env->dbg, LEVEL_2, "\tcalculated ASAP is %u\n", na->asap));
660 }
661
662 /**
663  * Calculate the ALAP scheduling step for current irn.
664  * @note: requires ASAP being calculated.
665  */
666 static void calculate_irn_alap(ir_node *irn, void *walk_env) {
667         be_ilpsched_env_t     *env = walk_env;
668         int                   i, is_head;
669         ir_node               *block;
670         be_ilpsched_irn_t     *node;
671         ilpsched_node_attr_t  *na;
672
673         /* These nodes are handled separate */
674         if (! consider_for_sched(env->arch_env->isa, irn))
675                 return;
676
677         DBG((env->dbg, LEVEL_2, "Calculating ALAP of node %+F ... ", irn));
678
679         block      = get_nodes_block(irn);
680         node       = get_ilpsched_irn(env, irn);
681         na         = get_ilpsched_node_attr(node);
682         is_head    = 1;
683
684         for (i = get_irn_ins_or_deps(irn) - 1; i >= i; --i) {
685                 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
686
687                 /* check, if we have a head node */
688                 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
689                         be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
690                         ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
691                         unsigned             lat;
692
693                         lat      = be_ilp_sched_latency(env->sel, pred, env->block_env);
694                         na->alap = MAX(na->alap, pna->alap + lat);
695                         is_head  = 0;
696                 }
697         }
698
699         /* handle head nodes (no predecessor in same block) */
700         if (is_head) {
701                 be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
702                 ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
703                 plist_element_t       *el;
704
705                 DB((env->dbg, LEVEL_2, "head node ... "));
706
707                 /*
708                 We have a head node here:
709                 ALAP(m) = sum_over_all_m(ASAP(m))
710                 where m is a root node and there is no path from m to n
711                 */
712                 foreach_plist(ba->root_nodes, el) {
713                         ir_node *root = plist_element_get_value(el);
714
715                         /* check if current root is independent from irn */
716                         if (! heights_reachable_in_block(env->height, root, irn)) {
717                                 be_ilpsched_irn_t     *root_node = get_ilpsched_irn(env, root);
718                                 ilpsched_node_attr_t  *rna       = get_ilpsched_node_attr(root_node);
719
720                                 na->alap = rna->asap + be_ilp_sched_latency(env->sel, root, env->block_env);
721                         }
722                 }
723         }
724
725         DB((env->dbg, LEVEL_2, "%u\n", na->alap));
726 }
727
728 /**
729  * Accumulate the successors of all nodes from irn on upwards.
730  */
731 static void accumulate_succs(be_ilpsched_env_t *env, ir_node *irn) {
732         unsigned             i, n;
733         be_ilpsched_irn_t    *node  = get_ilpsched_irn(env, irn);
734         ilpsched_node_attr_t *na    = get_ilpsched_node_attr(node);
735         ir_node              *block = get_nodes_block(irn);
736         waitq                *wq    = new_waitq();
737
738         DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F\n", irn));
739
740         /* enqueue node for final alap calculation */
741         if (! na->enqueued) {
742                 be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
743                 ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
744
745                 na->enqueued = 1;
746                 na->alap     = ba->max_steps;
747                 waitq_put(env->alap_queue, node);
748
749                 set_irn_link(irn, ba->head_ilp_nodes);
750                 ba->head_ilp_nodes = irn;
751                 DBG((env->dbg, LEVEL_5, "\t\tlinked %+F to ilp nodes of %+F, attr %p\n", irn, block, ba));
752                 DBG((env->dbg, LEVEL_4, "\t\tenqueueing %+F for final ALAP calculation\n", irn));
753         }
754
755         for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
756                 ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
757                 unsigned             idx;
758                 be_ilpsched_irn_t    *pred_node;
759                 ilpsched_node_attr_t *pna;
760
761                 if (be_is_Keep(pred))
762                         pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));
763
764                 if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))
765                         continue;
766
767                 pred_node = get_ilpsched_irn(env, pred);
768                 pna       = get_ilpsched_node_attr(pred_node);
769                 idx       = get_irn_idx(irn);
770
771                 /* accumulate the successors */
772                 if (pna->visit_idx != idx) {
773                         pna->visit_idx = idx;
774                         pna->transitive_block_nodes = bitset_or(pna->transitive_block_nodes, na->transitive_block_nodes);
775
776                         /* set current node as successor */
777                         bitset_set(pna->transitive_block_nodes, na->block_idx);
778                         waitq_put(wq, pred);
779
780                         DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F to %+F\n", irn, pred));
781                 }
782         }
783
784         /* process all predecessors */
785         while (! waitq_empty(wq)) {
786                 accumulate_succs(env, waitq_get(wq));
787         }
788
789         del_waitq(wq);
790 }
791
792 /**
793  * Calculate the ALAP scheduling step of all irns in current block.
794  * Depends on ASAP being calculated.
795  */
796 static void calculate_block_alap(ir_node *block, void *walk_env) {
797         be_ilpsched_env_t     *env        = walk_env;
798         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
799         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
800
801         assert(is_Block(block));
802
803         DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes)\n", block, ba->n_interesting_nodes));
804
805         /* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
806         phase_reinit_block_irn_data(&env->ph, block);
807
808         /* calculate the alap of all nodes, starting at collected roots upwards */
809         while (! waitq_empty(ba->root_nodes)) {
810                 accumulate_succs(env, waitq_get(ba->root_nodes));
811         }
812
813         /* we don't need it anymore */
814         del_waitq(ba->root_nodes);
815         ba->root_nodes = NULL;
816
817         /* all interesting nodes should have their successors accumulated now */
818         while (! waitq_empty(env->alap_queue)) {
819                 be_ilpsched_irn_t    *node = waitq_get(env->alap_queue);
820                 ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
821
822                 /* control flow ops must always be scheduled last */
823                 if (is_cfop(node->irn) && ! is_Start(node->irn) && get_irn_opcode(node->irn) != iro_End)
824                         na->asap = na->alap;
825                 else
826                         na->alap -= bitset_popcnt(na->transitive_block_nodes);
827                 DBG((env->dbg, LEVEL_2, "\tALAP of %+F is %u (%u succs, %u consumer)\n",
828                         node->irn, na->alap, bitset_popcnt(na->transitive_block_nodes), na->n_consumer));
829
830                 /* maximum block steps is maximum alap of all nodes */
831                 ba->max_steps = MAX(ba->max_steps, na->alap);
832         }
833 }
834 #endif /* if 0 */
835
836 /*******************************************
837  *           _              _       _
838  *          | |            | |     | |
839  *  ___  ___| |__   ___  __| |_   _| | ___
840  * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
841  * \__ \ (__| | | |  __/ (_| | |_| | |  __/
842  * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
843  *
844  *******************************************/
845
846 static INLINE void check_for_keeps(waitq *keeps, ir_node *block, ir_node *irn) {
847         const ir_edge_t *edge;
848
849         foreach_out_edge(irn, edge) {
850                 ir_node *user = get_edge_src_irn(edge);
851
852                 if (be_is_Keep(user)) {
853                         assert(get_nodes_block(user) == block && "Keep must not be in different block.");
854                         waitq_put(keeps, user);
855                 }
856         }
857 }
858
859 /**
860  * Inserts @p irn before @p before into schedule and notifies backend.
861  */
862 static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
863         ir_node *before, ir_node *irn, unsigned cycle)
864 {
865         be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
866         sched_add_before(before, irn);
867 }
868
869 /**
870  * Adds a node, it's Projs (in case of mode_T nodes) and
871  * it's Keeps to schedule.
872  */
873 static void add_to_sched(be_ilpsched_env_t *env, ir_node *block, ir_node *irn, unsigned cycle) {
874         const ir_edge_t *edge;
875         waitq           *keeps = new_waitq();
876
877         /* mode_M nodes are not scheduled */
878         if (get_irn_mode(irn) == mode_M)
879                 return;
880
881         if (! sched_is_scheduled(irn))
882                 notified_sched_add_before(env, block, irn, cycle);
883
884         /* add Projs */
885         if (get_irn_mode(irn) == mode_T) {
886                 foreach_out_edge(irn, edge) {
887                         ir_node *user = get_edge_src_irn(edge);
888
889                         if (to_appear_in_schedule(user))
890                                 notified_sched_add_before(env, block, user, cycle);
891
892                         check_for_keeps(keeps, block, user);
893                 }
894         }
895         else {
896                 check_for_keeps(keeps, block, irn);
897         }
898
899         /* add Keeps */
900         while (! waitq_empty(keeps)) {
901                 ir_node *keep = waitq_get(keeps);
902                 if (! sched_is_scheduled(keep))
903                         notified_sched_add_before(env, block, keep, cycle);
904         }
905
906         del_waitq(keeps);
907 }
908
909 /**
910  * Schedule all nodes in the given block, according to the ILP solution.
911  */
912 static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
913         be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
914         ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
915         sched_info_t          *info       = get_irn_sched_info(block);
916         be_ilpsched_irn_t     **sched_nodes;
917         unsigned              i, l;
918         ir_node               *cfop, *irn;
919         const ir_edge_t       *edge;
920
921         /* init block schedule list */
922         INIT_LIST_HEAD(&info->list);
923         info->scheduled = 1;
924
925         /* collect nodes and their scheduling time step */
926         sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
927         if (ba->n_interesting_nodes == 0) {
928                 /* ignore */
929         }
930         else if (ba->n_interesting_nodes == 1) {
931                 be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);
932
933                 /* add the single node */
934                 ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
935         }
936         else {
937                 /* check all nodes for their positive solution */
938                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
939                         be_ilpsched_irn_t    *node;
940                         ilpsched_node_attr_t *na;
941                         int                  tp_idx, found;
942                         unsigned             cur_var, t;
943
944                         node    = get_ilpsched_irn(env, irn);
945                         na      = get_ilpsched_node_attr(node);
946                         cur_var = 0;
947                         found   = 0;
948
949                         /* go over all variables of a node until the non-zero one is found */
950                         for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
951                                 for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
952                                         double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);
953
954                                         /* check, if variable is set to one (it's not zero then :) */
955                                         if (! LPP_VALUE_IS_0(val)) {
956                                                 na->sched_point = t;
957                                                 ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
958                                                 DBG((env->dbg, LEVEL_1, "Schedpoint of %+F is %u at unit type %s\n",
959                                                         irn, t, na->type_info[tp_idx].tp->name));
960                                                 found = 1;
961                                         }
962                                 }
963                         }
964                 }
965
966                 /* sort nodes ascending by scheduling time step */
967                 qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);
968         }
969
970         /* make all Phis ready and remember the single cf op */
971         cfop = NULL;
972         foreach_out_edge(block, edge) {
973                 irn = get_edge_src_irn(edge);
974
975                 switch (get_irn_opcode(irn)) {
976                         case iro_Phi:
977                                 add_to_sched(env, block, irn, 0);
978                                 break;
979                         case iro_Start:
980                         case iro_End:
981                         case iro_Proj:
982                         case iro_Bad:
983                                 break;
984                         default:
985                                 if (is_cfop(irn)) {
986                                         assert(cfop == NULL && "Highlander - there can be only one");
987                                         cfop = irn;
988                                 }
989                         break;
990                 }
991         }
992
993         /* add all nodes from list */
994         for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
995                 ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
996                 add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);
997         }
998
999         /* schedule control flow node if not already done */
1000         if (cfop && ! sched_is_scheduled(cfop))
1001                 add_to_sched(env, block, cfop, 0);
1002
1003         DEL_ARR_F(sched_nodes);
1004 }
1005
1006 /***************************************************************
1007  *   _____ _      _____     _____           _   _
1008  *  |_   _| |    |  __ \   / ____|         | | (_)
1009  *    | | | |    | |__) | | (___   ___  ___| |_ _  ___  _ __
1010  *    | | | |    |  ___/   \___ \ / _ \/ __| __| |/ _ \| '_ \
1011  *   _| |_| |____| |       ____) |  __/ (__| |_| | (_) | | | |
1012  *  |_____|______|_|      |_____/ \___|\___|\__|_|\___/|_| |_|
1013  *
1014  ***************************************************************/
1015
1016 /**
1017  * Check if node can be executed on given unit type.
1018  */
1019 static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
1020         int                  i;
1021         ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1022
1023         for (i = na->n_unit_types - 1; i >= 0; --i) {
1024                 if (na->type_info[i].tp == tp)
1025                         return i;
1026         }
1027
1028         return -1;
1029 }
1030
1031 /************************************************
1032  *                   _       _     _
1033  *                  (_)     | |   | |
1034  *  __   ____ _ _ __ _  __ _| |__ | | ___  ___
1035  *  \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
1036  *   \ V / (_| | |  | | (_| | |_) | |  __/\__ \
1037  *    \_/ \__,_|_|  |_|\__,_|_.__/|_|\___||___/
1038  *
1039  ************************************************/
1040
1041 /**
1042  * Create the following variables:
1043  * - x_{nt}^k    binary     weigthed with: t
1044  *      node n is scheduled at time step t to unit type k
1045  * ==>> These variables represent the schedule
1046  * TODO:
1047  *
1048  * - d_{nt}^k    binary     weighted with: t
1049  *      node n dies at time step t on unit type k
1050  *
1051  * - y_{nt}^k    binary     weighted with: num_nodes^2
1052  *      node n is scheduled at time step t to unit type k
1053  *      although all units of this type are occupied
1054  * ==>> These variables represent the register pressure
1055  */
1056 static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
1057         char                  buf[1024];
1058         ir_node               *irn;
1059         unsigned              num_block_var, num_nodes;
1060         ilpsched_block_attr_t *ba      = get_ilpsched_block_attr(block_node);
1061         unsigned              weigth_y = ba->n_interesting_nodes * ba->n_interesting_nodes;
1062 #ifdef WITH_LIBCORE
1063         lc_timer_t            *t_var   = lc_timer_register("beilpsched_var", "create ilp variables");
1064 #endif /* WITH_LIBCORE */
1065
1066         ilp_timer_push(t_var);
1067         num_block_var = num_nodes = 0;
1068         foreach_linked_irns(ba->head_ilp_nodes, irn) {
1069                 const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
1070                 be_ilpsched_irn_t         *node;
1071                 ilpsched_node_attr_t      *na;
1072                 unsigned                  n_unit_types, tp_idx, unit_idx, n_var, cur_unit;
1073                 unsigned                  cur_var_d, cur_var_x, cur_var_y, num_die;
1074
1075                 /* count number of available unit types for this node */
1076                 for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)
1077                         /* just count */ ;
1078
1079                 node = get_ilpsched_irn(env, irn);
1080                 na   = get_ilpsched_node_attr(node);
1081
1082                 na->n_unit_types = n_unit_types;
1083                 na->type_info    = NEW_ARR_D(unit_type_info_t, var_obst, n_unit_types);
1084
1085                 /* fill the type info array */
1086                 for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
1087                         for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
1088                                 /* beware: we also count number of available units here */
1089                                 if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
1090                                         na->is_dummy_node = 1;
1091                         }
1092
1093                         na->type_info[tp_idx].tp      = execunits[tp_idx][0]->tp;
1094                         na->type_info[tp_idx].n_units = unit_idx;
1095                 }
1096
1097                 /* allocate space for ilp variables */
1098                 na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
1099                 memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));
1100
1101                 /* we need these variables only for "real" nodes */
1102                 if (! na->is_dummy_node) {
1103                         na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
1104                         memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));
1105
1106                         num_die        = ba->max_steps - na->asap + 1;
1107                         na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_die);
1108                         memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));
1109                 }
1110
1111                 DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
1112                         irn, na->asap, na->alap, na->n_unit_types));
1113
1114                 cur_var_x = cur_var_d = cur_var_y = cur_unit = n_var = 0;
1115                 /* create variables */
1116                 for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
1117                         unsigned t;
1118
1119                         for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1120                                 /* x_{nt}^k variables */
1121                                 snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
1122                                         get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1123                                 na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1124                                 DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1125                                 /* variable counter */
1126                                 n_var++;
1127                                 num_block_var++;
1128
1129                                 if (! na->is_dummy_node) {
1130                                         /* y_{nt}^k variables */
1131                                         snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
1132                                                 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1133                                         na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_binary, (double)(weigth_y));
1134                                         DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1135
1136                                         /* variable counter */
1137                                         n_var++;
1138                                         num_block_var++;
1139                                 }
1140                         }
1141
1142                         /* a node can die at any step t: asap(n) <= t <= U */
1143                         if (! na->is_dummy_node) {
1144                                 for (t = na->asap - 1; t <= ba->max_steps; ++t) {
1145                                         /* d_{nt}^k variables */
1146                                         snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
1147                                                 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1148                                         na->ilp_vars.d[cur_var_d++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1149                                         DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1150
1151                                         /* variable counter */
1152                                         n_var++;
1153                                         num_block_var++;
1154                                 }
1155                         }
1156                 }
1157
1158                 DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));
1159                 num_nodes++;
1160         }
1161         ilp_timer_pop();
1162         DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
1163                 num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
1164 }
1165
1166 /*******************************************************
1167  *                      _             _       _
1168  *                     | |           (_)     | |
1169  *   ___ ___  _ __  ___| |_ _ __ __ _ _ _ __ | |_ ___
1170  *  / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
1171  * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
1172  *  \___\___/|_| |_|___/\__|_|  \__,_|_|_| |_|\__|___/
1173  *
1174  *******************************************************/
1175
1176 /**
1177  * Create following ILP constraints:
1178  * - the assignment constraints:
1179  *     assure each node is executed once by exactly one (allowed) execution unit
1180  * - the dead node assignment constraints:
1181  *     assure a node can only die at most once
1182  * - the precedence constraints:
1183  *     assure that no data dependencies are violated
1184  */
1185 static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1186         unsigned              num_cst_assign, num_cst_prec, num_cst_dead;
1187         char                  buf[1024];
1188         ir_node               *irn;
1189         ilpsched_block_attr_t *ba            = get_ilpsched_block_attr(block_node);
1190         bitset_t              *bs_block_irns = bitset_alloca(ba->block_last_idx);
1191 #ifdef WITH_LIBCORE
1192         lc_timer_t            *t_cst_assign  = lc_timer_register("beilpsched_cst_assign",      "create assignment constraints");
1193         lc_timer_t            *t_cst_dead    = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
1194         lc_timer_t            *t_cst_prec    = lc_timer_register("beilpsched_cst_prec",        "create precedence constraints");
1195 #endif /* WITH_LIBCORE */
1196
1197         num_cst_assign = num_cst_prec = num_cst_dead = 0;
1198         foreach_linked_irns(ba->head_ilp_nodes, irn) {
1199                 int                  cst, tp_idx, i;
1200                 unsigned             cur_var;
1201                 be_ilpsched_irn_t    *node;
1202                 ilpsched_node_attr_t *na;
1203
1204                 node    = get_ilpsched_irn(env, irn);
1205                 na      = get_ilpsched_node_attr(node);
1206                 cur_var = 0;
1207
1208                 /* the assignment constraint */
1209                 ilp_timer_push(t_cst_assign);
1210                 snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
1211                 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
1212                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1213                 num_cst_assign++;
1214
1215                 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
1216                 ilp_timer_pop();
1217
1218                 /* the dead node assignment constraint */
1219                 if (! na->is_dummy_node) {
1220                         ilp_timer_push(t_cst_dead);
1221                         snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
1222                         cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1223                         DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1224
1225                         lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);
1226                         ilp_timer_pop();
1227                 }
1228
1229                 /* the precedence constraints */
1230                 ilp_timer_push(t_cst_prec);
1231                 bs_block_irns = bitset_clear_all(bs_block_irns);
1232                 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
1233                         ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
1234                         unsigned             t_low, t_high, t;
1235                         be_ilpsched_irn_t    *pred_node;
1236                         ilpsched_node_attr_t *pna;
1237                         unsigned             delay;
1238
1239                         if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
1240                                 continue;
1241
1242                         pred_node = get_ilpsched_irn(env, pred);
1243                         pna       = get_ilpsched_node_attr(pred_node);
1244
1245                         assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
1246
1247                         if (! bitset_is_set(bs_block_irns, pna->block_idx))
1248                                 bitset_set(bs_block_irns, pna->block_idx);
1249                         else
1250                                 continue;
1251
1252                         /* irn = n, pred = m */
1253                         delay  = be_ilp_sched_latency(env->sel, pred, env->block_env);
1254                         t_low  = MAX(na->asap, pna->asap + delay - 1);
1255                         t_high = MIN(na->alap, pna->alap + delay - 1);
1256                         for (t = t_low - 1; t <= t_high - 1; ++t) {
1257                                 unsigned tn, tm;
1258                                 int      cur_idx, int_na, int_pna;
1259                                 int      *tmp_var_idx;
1260
1261                                 int_na      = (t >= na->asap - 1) ? MIN(t, na->alap - 1) - na->asap + 1 : 0;
1262                                 int_pna     = (t < pna->alap)     ? pna->alap - t                       : 0;
1263                                 tmp_var_idx = NEW_ARR_F(int, int_na * na->n_unit_types + int_pna * pna->n_unit_types);
1264
1265                                 snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
1266                                 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1267                                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1268                                 num_cst_prec++;
1269
1270                                 cur_idx = 0;
1271
1272                                 /* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
1273                                 if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
1274                                         /* node variables have smaller index than pred variables */
1275                                         for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1276                                                 for (tn = na->asap - 1; tn <= t; ++tn) {
1277                                                         unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1278                                                         tmp_var_idx[cur_idx++] = na->ilp_vars.x[idx];
1279                                                 }
1280                                         }
1281
1282                                         for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1283                                                 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1284                                                         unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1285                                                         tmp_var_idx[cur_idx++] = pna->ilp_vars.x[idx];
1286                                                 }
1287                                         }
1288                                 }
1289                                 else {
1290                                         /* pred variables have smaller index than node variables */
1291                                         for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1292                                                 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1293                                                         unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1294                                                         tmp_var_idx[cur_idx++] = pna->ilp_vars.x[idx];
1295                                                 }
1296                                         }
1297
1298                                         for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1299                                                 for (tn = na->asap - 1; tn <= t; ++tn) {
1300                                                         unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1301                                                         tmp_var_idx[cur_idx++] = na->ilp_vars.x[idx];
1302                                                 }
1303                                         }
1304                                 }
1305
1306                                 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1307
1308                                 DEL_ARR_F(tmp_var_idx);
1309                         }
1310                 }
1311                 ilp_timer_pop();
1312         }
1313         DBG((env->dbg, LEVEL_1, "\t%u assignement constraints (%g sec)\n",
1314                 num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
1315         DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
1316                 num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
1317 }
1318
1319 /**
1320  * Create ILP resource constraints:
1321  * - assure that for each time step not more instructions are scheduled
1322  *   to the same unit types as units of this type are available
1323  */
1324 static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1325         int                   glob_type_idx;
1326         char                  buf[1024];
1327         unsigned              num_cst_resrc = 0;
1328         ilpsched_block_attr_t *ba           = get_ilpsched_block_attr(block_node);
1329 #ifdef WITH_LIBCORE
1330         lc_timer_t            *t_cst_rsrc   = lc_timer_register("beilpsched_cst_rsrc",   "create resource constraints");
1331 #endif /* WITH_LIBCORE */
1332
1333         ilp_timer_push(t_cst_rsrc);
1334         for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1335                 unsigned                 t;
1336                 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1337
1338                 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1339
1340                 /* check each time step */
1341                 for (t = 0; t < ba->max_steps; ++t) {
1342                         ir_node *irn;
1343                         int     cst;
1344                         int     *tmp_var_idx = NEW_ARR_F(int, 0);
1345
1346                         snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
1347                         cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
1348                         DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1349                         num_cst_resrc++;
1350
1351                         foreach_linked_irns(ba->head_ilp_nodes, irn) {
1352                                 be_ilpsched_irn_t    *node = get_ilpsched_irn(env, irn);
1353                                 ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
1354                                 int                  tp_idx;
1355
1356                                 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1357
1358                                 if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
1359                                         int cur_var = ILPVAR_IDX(na, tp_idx, t);
1360                                         ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);
1361                                 }
1362                         }
1363
1364                         /* set constraints if we have some */
1365                         if (ARR_LEN(tmp_var_idx) > 0)
1366                                 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1367
1368                         DEL_ARR_F(tmp_var_idx);
1369                 }
1370         }
1371         ilp_timer_pop();
1372         DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
1373                 num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
1374 }
1375
1376 /**
1377  * Create ILP bundle constraints:
1378  * - assure, at most bundle_size * bundles_per_cycle instructions
1379  *   can be started at a certain point.
1380  */
1381 static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1382         char                  buf[1024];
1383         unsigned              t;
1384         unsigned              num_cst_bundle = 0;
1385         unsigned              n_instr_max    = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
1386         ilpsched_block_attr_t *ba            = get_ilpsched_block_attr(block_node);
1387 #ifdef WITH_LIBCORE
1388         lc_timer_t            *t_cst_bundle  = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
1389 #endif /* WITH_LIBCORE */
1390
1391         ilp_timer_push(t_cst_bundle);
1392         for (t = 0; t < ba->max_steps; ++t) {
1393                 ir_node *irn;
1394                 int     cst;
1395                 int     *tmp_var_idx = NEW_ARR_F(int, 0);
1396
1397                 snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
1398                 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
1399                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1400                 num_cst_bundle++;
1401
1402                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1403                         be_ilpsched_irn_t    *node;
1404                         ilpsched_node_attr_t *na;
1405                         int                  tp_idx;
1406
1407                         /* Projs and Keeps do not contribute to bundle size */
1408                         if (is_Proj(irn) || be_is_Keep(irn))
1409                                 continue;
1410
1411                         node = get_ilpsched_irn(env, irn);
1412                         na   = get_ilpsched_node_attr(node);
1413
1414                         /* nodes assigned to DUMMY unit do not contribute to bundle size */
1415                         if (na->is_dummy_node)
1416                                 continue;
1417
1418                         if (t >= na->asap - 1 && t <= na->alap - 1) {
1419                                 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1420                                         int idx = ILPVAR_IDX(na, tp_idx, t);
1421                                         ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1422                                 }
1423                         }
1424                 }
1425
1426                 if (ARR_LEN(tmp_var_idx) > 0)
1427                         lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1428
1429                 DEL_ARR_F(tmp_var_idx);
1430         }
1431         ilp_timer_pop();
1432         DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
1433                 num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
1434 }
1435
1436 /**
1437  * Create ILP dying nodes constraints:
1438  * - set variable d_{nt}^k to 1 if nodes n dies at step t on unit k
1439  */
1440 static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1441         char                  buf[1024];
1442         unsigned              t;
1443         unsigned              num_cst = 0;
1444         ilpsched_block_attr_t *ba     = get_ilpsched_block_attr(block_node);
1445 #ifdef WITH_LIBCORE
1446         lc_timer_t            *t_cst  = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
1447 #endif /* WITH_LIBCORE */
1448
1449         ilp_timer_push(t_cst);
1450         /* check all time_steps */
1451         for (t = 0; t < ba->max_steps; ++t) {
1452                 ir_node *irn;
1453
1454                 /* for all nodes */
1455                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1456                         be_ilpsched_irn_t    *node = get_ilpsched_irn(env, irn);
1457                         ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
1458
1459                         /* if node has no consumer within current block, it cannot die here */
1460                         /* we also ignore nodes assigned to dummy unit */
1461                         if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)
1462                                 continue;
1463
1464                         /* node can only die here if t at least asap(n) */
1465                         if (t >= na->asap - 1) {
1466                                 int node_tp_idx;
1467
1468                                 /* for all unit types */
1469                                 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1470                                         int tp_idx, i, cst;
1471                                         int *tmp_var_idx = NEW_ARR_F(int, 0);
1472
1473                                         snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
1474                                         cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
1475                                         DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1476                                         num_cst++;
1477
1478                                         /* number of consumer scheduled till t */
1479                                         for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1480                                                 be_ilpsched_irn_t    *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1481                                                 ilpsched_node_attr_t *ca   = get_ilpsched_node_attr(cons);
1482
1483                                                 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1484                                                         unsigned tm;
1485
1486                                                         for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
1487                                                                 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1488                                                                 ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);
1489                                                         }
1490                                                 }
1491                                         }
1492
1493                                         /* could be that no consumer can be scheduled at this point */
1494                                         if (ARR_LEN(tmp_var_idx)) {
1495                                                 int      idx;
1496                                                 unsigned tn;
1497
1498                                                 /* subtract possible prior kill points */
1499                                                 for (tn = na->asap - 1; tn < t; ++tn) {
1500                                                         idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
1501                                                         lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);
1502                                                 }
1503
1504                                                 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1505                                                 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
1506                                                 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1507                                         }
1508
1509                                         DEL_ARR_F(tmp_var_idx);
1510                                 }
1511                         }
1512
1513                 }
1514         }
1515         ilp_timer_pop();
1516         DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
1517                 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1518 }
1519
1520 /**
1521 * Create ILP pressure constraints:
1522 * - add additional costs to objective function if a node is scheduled
1523 *   on a unit although all units of this type are currently occupied
1524 */
1525 static void create_pressure_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1526         char                  buf[1024];
1527         ir_node               *cur_irn;
1528         unsigned              num_cst = 0;
1529         ilpsched_block_attr_t *ba     = get_ilpsched_block_attr(block_node);
1530 #ifdef WITH_LIBCORE
1531         lc_timer_t            *t_cst  = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1532 #endif /* WITH_LIBCORE */
1533
1534         ilp_timer_push(t_cst);
1535         /* y_{nt}^k is set for each node and timestep and unit type */
1536         foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1537                 unsigned             cur_idx   = get_irn_idx(cur_irn);
1538                 be_ilpsched_irn_t    *cur_node = get_ilpsched_irn(env, cur_irn);
1539                 ilpsched_node_attr_t *cur_na   = get_ilpsched_node_attr(cur_node);
1540                 int                  glob_type_idx;
1541
1542                 /* we ignore nodes assigned to DUMMY unit here */
1543                 if (cur_na->is_dummy_node)
1544                         continue;
1545
1546                 /* for all types */
1547                 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1548                         be_execution_unit_type_t *cur_tp   = &env->cpu->unit_types[glob_type_idx];
1549                         int                      cur_tp_idx;
1550                         unsigned                 t;
1551
1552                         /* BEWARE: the DUMMY unit types is not in CPU, so it's skipped automatically */
1553
1554                         /* check if node can be executed on this unit type */
1555                         cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1556                         if (cur_tp_idx < 0)
1557                                 continue;
1558
1559                         /* check all time_steps */
1560                         for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1561                                 int     cst, y_idx;
1562                                 ir_node *irn;
1563                                 int     *tmp_idx_1  = NEW_ARR_F(int, 0);
1564                                 int     *tmp_idx_m1 = NEW_ARR_F(int, 0);
1565
1566                                 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1567                                 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1568                                 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1569                                 num_cst++;
1570
1571                                 /*
1572                                         - accumulate all nodes scheduled on unit type k till t
1573                                         - subtract all nodes died on unit type k till t
1574                                 */
1575                                 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1576                                         be_ilpsched_irn_t    *node = get_ilpsched_irn(env, irn);
1577                                         ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
1578                                         unsigned             tn, tmax;
1579                                         int                  tp_idx;
1580
1581                                         tmax   = MIN(t, na->alap - 1);
1582                                         tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1583
1584                                         /* current unit type is not suitable for current node */
1585                                         if (tp_idx < 0)
1586                                                 continue;
1587
1588                                         for (tn = na->asap - 1; tn <= tmax; ++tn) {
1589                                                 int idx;
1590
1591                                                 /* node scheduled */
1592                                                 idx = ILPVAR_IDX(na, tp_idx, tn);
1593                                                 ARR_APP1(int, tmp_idx_1, na->ilp_vars.x[idx]);
1594
1595                                                 /* node dead */
1596                                                 idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, tn);
1597                                                 ARR_APP1(int, tmp_idx_m1, na->ilp_vars.d[idx]);
1598                                         }
1599                                 }
1600
1601                                 if (ARR_LEN(tmp_idx_1) > 0)
1602                                         lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_1, ARR_LEN(tmp_idx_1), 1.0);
1603
1604                                 if (ARR_LEN(tmp_idx_m1) > 0)
1605                                         lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_m1, ARR_LEN(tmp_idx_m1), -1.0);
1606
1607                                 /* BEWARE: t is unsigned, so (double)(-t) won't work */
1608                                 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1609                                 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], 0.0 - (double)(t));
1610
1611                                 DEL_ARR_F(tmp_idx_1);
1612                                 DEL_ARR_F(tmp_idx_m1);
1613                         }
1614                 }
1615         }
1616         ilp_timer_pop();
1617         DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1618                 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1619 }
1620
1621 /***************************************************
1622  *  _____ _      _____                    _
1623  * |_   _| |    |  __ \                  (_)
1624  *   | | | |    | |__) |  _ __ ___   __ _ _ _ __
1625  *   | | | |    |  ___/  | '_ ` _ \ / _` | | '_ \
1626  *  _| |_| |____| |      | | | | | | (_| | | | | |
1627  * |_____|______|_|      |_| |_| |_|\__,_|_|_| |_|
1628  *
1629  ***************************************************/
1630
1631 /**
1632  * Create the ilp (add variables, build constraints, solve, build schedule from solution).
1633  */
1634 static void create_ilp(ir_node *block, void *walk_env) {
1635         be_ilpsched_env_t     *env           = walk_env;
1636         be_ilpsched_irn_t     *block_node    = get_ilpsched_irn(env, block);
1637         ilpsched_block_attr_t *ba            = get_ilpsched_block_attr(block_node);
1638         FILE                  *logfile       = NULL;
1639         lpp_t                 *lpp           = NULL;
1640         struct obstack        var_obst;
1641
1642         DBG((env->dbg, 255, "\n\n\n=========================================\n"));
1643         DBG((env->dbg, 255, "  ILP Scheduling for %+F\n", block));
1644         DBG((env->dbg, 255, "=========================================\n\n"));
1645
1646         DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
1647                 block, ba->n_interesting_nodes, ba->max_steps));
1648
1649         /* notify backend and get block environment */
1650         env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);
1651
1652         /* if we have less than two interesting nodes, there is no need to create the ILP */
1653         if (ba->n_interesting_nodes > 1) {
1654                 double fact_var        = ba->n_interesting_nodes > 25 ? 1.1 : 1.2;
1655                 double fact_cst        = ba->n_interesting_nodes > 25 ? 0.7 : 1.5;
1656                 int    base_num        = ba->n_interesting_nodes * ba->n_interesting_nodes;
1657                 int    estimated_n_var = (int)((double)base_num * fact_var);
1658                 int    estimated_n_cst = (int)((double)base_num * fact_cst);
1659
1660                 DBG((env->dbg, LEVEL_1, "Creating LPP with estimed numbers: %d vars, %d cst\n",
1661                         estimated_n_var, estimated_n_cst));
1662
1663                 /* set up the LPP object */
1664                 lpp = new_lpp_userdef(
1665                         "be ilp scheduling",
1666                         lpp_minimize,
1667                         estimated_n_cst + 1,  /* num vars */
1668                         estimated_n_cst + 20, /* num cst */
1669                         1.2);                 /* grow factor */
1670                 obstack_init(&var_obst);
1671
1672                 /* create ILP variables */
1673                 create_variables(env, lpp, block_node, &var_obst);
1674
1675                 /* create ILP constraints */
1676                 DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
1677                 create_assignment_and_precedence_constraints(env, lpp, block_node);
1678                 create_ressource_constraints(env, lpp, block_node);
1679                 create_bundle_constraints(env, lpp, block_node);
1680                 create_dying_nodes_constraint(env, lpp, block_node);
1681                 create_pressure_constraint(env, lpp, block_node);
1682
1683                 DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp->var_next, lpp->cst_next));
1684
1685                 /* debug stuff, dump lpp when debugging is on  */
1686                 DEBUG_ONLY(
1687                         if (firm_dbg_get_mask(env->dbg) > 0) {
1688                                 char buf[1024];
1689                                 FILE *f;
1690
1691                                 snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
1692                                 f = fopen(buf, "w");
1693                                 lpp_dump_plain(lpp, f);
1694                                 fclose(f);
1695                                 snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));
1696                                 lpp_dump(lpp, buf);
1697                         }
1698                 );
1699
1700                 /* set solve time limit */
1701                 lpp_set_time_limit(lpp, env->opts->time_limit);
1702
1703                 /* set logfile if requested */
1704                 if (strlen(env->opts->log_file) > 0) {
1705                         if (strcasecmp(env->opts->log_file, "stdout") == 0)
1706                                 lpp_set_log(lpp, stdout);
1707                         else if (strcasecmp(env->opts->log_file, "stderr") == 0)
1708                                 lpp_set_log(lpp, stderr);
1709                         else {
1710                                 logfile = fopen(env->opts->log_file, "w");
1711                                 if (! logfile)
1712                                         fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
1713                                 else
1714                                         lpp_set_log(lpp, logfile);
1715                         }
1716                 }
1717
1718                 /* solve the ILP */
1719                 lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);
1720
1721                 if (logfile)
1722                         fclose(logfile);
1723
1724                 /* check for valid solution */
1725                 if (! lpp_is_sol_valid(lpp)) {
1726                         char buf[1024];
1727                         FILE *f;
1728
1729                         snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.txt", get_irn_node_nr(block));
1730                         f = fopen(buf, "w");
1731                         lpp_dump_plain(lpp, f);
1732                         fclose(f);
1733                         snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.mps", get_irn_node_nr(block));
1734                         lpp_dump(lpp, buf);
1735                         dump_ir_block_graph(env->irg, "-assert");
1736
1737                         assert(0 && "ILP solution is not feasible!");
1738                 }
1739
1740                 DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
1741                 DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
1742                 DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
1743                 DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp->iterations));
1744                 DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp->sol_time));
1745                 DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
1746                 DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));
1747
1748                 DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));
1749         }
1750
1751         /* apply solution */
1752         apply_solution(env, lpp, block);
1753
1754         if (lpp)
1755                 free_lpp(lpp);
1756
1757         /* notify backend */
1758         be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
1759 }
1760
1761 /**
1762  * Perform ILP scheduling on the given irg.
1763  */
1764 void be_ilp_sched(const be_irg_t *birg) {
1765         be_ilpsched_env_t          env;
1766         const char                 *name = "be ilp scheduling";
1767         arch_isa_t                 *isa  = birg->main_env->arch_env->isa;
1768         const ilp_sched_selector_t *sel  = isa->impl->get_ilp_sched_selector(isa);
1769
1770         FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");
1771
1772         //firm_dbg_set_mask(env.dbg, 31);
1773
1774         env.irg_env    = be_ilp_sched_init_irg_ilp_schedule(sel, birg->irg);
1775         env.sel        = sel;
1776         env.irg        = birg->irg;
1777         env.height     = heights_new(birg->irg);
1778         env.main_env   = birg->main_env;
1779         env.arch_env   = birg->main_env->arch_env;
1780         env.cpu        = arch_isa_get_machine(birg->main_env->arch_env->isa);
1781         env.opts       = &ilp_opts;
1782         phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn);
1783
1784         /* assign a unique per block number to all interesting nodes */
1785         irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
1786
1787         /*
1788                 The block indices are completely build after the walk,
1789                 now we can allocate the bitsets (size depends on block indices)
1790                 for all nodes.
1791         */
1792         phase_reinit_irn_data(&env.ph);
1793
1794         /* Collect all root nodes (having no user in their block) and calculate ASAP. */
1795         irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);
1796
1797         /* Calculate ALAP of all irns */
1798         irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);
1799
1800         /* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
1801         irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);
1802
1803         /* we don't need this information any longer */
1804         heights_free(env.height);
1805
1806         /* perform ILP scheduling */
1807         irg_block_walk_graph(env.irg, clear_unwanted_data, create_ilp, &env);
1808
1809         DEBUG_ONLY(
1810                 if (firm_dbg_get_mask(env.dbg)) {
1811                         phase_stat_t stat;
1812                         phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
1813
1814                         fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);
1815                 }
1816         );
1817
1818         /* free all allocated object */
1819         phase_free(&env.ph);
1820
1821         /* notify backend */
1822         be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
1823 }
1824
1825 #ifdef WITH_LIBCORE
1826 /**
1827  * Register ILP scheduler options.
1828  */
1829 void ilpsched_register_options(lc_opt_entry_t *grp) {
1830         static int     run_once = 0;
1831         lc_opt_entry_t *sched_grp;
1832
1833         if (! run_once) {
1834                 run_once  = 1;
1835                 sched_grp = lc_opt_get_grp(grp, "ilpsched");
1836
1837                 lc_opt_add_table(sched_grp, ilpsched_option_table);
1838         }
1839 }
1840 #endif /* WITH_LIBCORE */
1841
1842 #else /* WITH_ILP */
1843
1844 static int some_picky_compiler_do_not_allow_empty_files;
1845
1846 #endif /* WITH_ILP */