2 * Scheduling algorithms.
3 * An ILP scheduler based on
4 * "ILP-based Instruction Scheduling for IA-64"
5 * by Daniel Kaestner and Sebastian Winkel
8 * @author Christian Wuerdig
26 #include "irphase_t.h"
36 #include <lpp/lpp_net.h>
39 #include <libcore/lc_opts.h>
40 #include <libcore/lc_opts_enum.h>
41 #include <libcore/lc_timing.h>
42 #endif /* WITH_LIBCORE */
46 #include "besched_t.h"
47 #include "beilpsched.h"
49 typedef struct _ilpsched_options_t {
55 typedef struct _unit_type_info_t {
57 	const be_execution_unit_type_t *tp;
/* NOTE(review): listing gap — the n_units member used at L497 ("na->type_info[tp_idx].n_units") is declared here but not visible. */
61 * holding the ILP variables of the different types
63 typedef struct _ilp_var_types_t {
64 	int *x; /* x_{nt}^k variables: node n is scheduled at step t on unit type k (the schedule itself) */
65 	int *a; /* a_{nt}^k variables: node n is alive at step t on unit type k */
66 	int *d; /* d_{nt}^k variables: node n dies at step t on unit type k */
67 	int *y; /* y_{nt}^k variables: node n scheduled at step t although all units of type k are occupied */
70 /* attributes for a node */
71 typedef struct _ilpsched_node_attr_t {
72 	unsigned asap; /**< The ASAP scheduling control step */
73 	unsigned alap; /**< The ALAP scheduling control step */
74 	unsigned sched_point; /**< the step in which the node is finally scheduled */
75 	unsigned visit_idx; /**< Index of the node having visited this node last */
76 	unsigned consumer_idx; /**< Index of the node having counted this node as consumer last */
77 	unsigned n_consumer; /**< Number of consumers */
78 	ir_node **block_consumer; /**< List of consumers being in the same block */
79 	waitq *projkeeps; /**< A list of Projs and Keeps belonging to this node */
80 	unsigned block_idx : 30; /**< A unique per block index */
81 	unsigned alap_changed : 1; /**< the current ALAP has changed, revisit preds */
82 	unsigned is_dummy_node : 1; /**< this node is assigned to the DUMMY unit */
83 	bitset_t *transitive_block_nodes; /**< Set of transitive block nodes (predecessors
84 	for ASAP, successors for ALAP) */
85 	unsigned n_unit_types; /**< number of allowed execution unit types */
86 	unit_type_info_t *type_info; /**< list of allowed execution unit types */
87 	ilp_var_types_t ilp_vars; /**< the different ILP variables */
88 } ilpsched_node_attr_t;
90 /* attributes for a block */
91 typedef struct _ilpsched_block_attr_t {
92 	unsigned block_last_idx; /**< The highest node index in block so far */
93 	unsigned n_interesting_nodes; /**< The number of nodes interesting for scheduling */
94 	unsigned max_steps; /**< Upper bound for block execution (sum of node latencies, see calculate_irn_asap) */
95 	plist_t *root_nodes; /**< A list of nodes having no user in current block */
96 	ir_node *head_ilp_nodes; /**< A linked list (via irn link field) of nodes which will contribute to ILP */
97 } ilpsched_block_attr_t;
/* A phase irn is either a block or an ordinary node; the union holds the attributes of whichever it is. */
99 typedef union _ilpsched_attr_ {
100 	ilpsched_node_attr_t node_attr;
101 	ilpsched_block_attr_t block_attr;
104 /* An irn for the phase and its attributes (either node or block) */
107 	ilpsched_attr_t attr;
110 /* The ILP scheduling environment */
112 	phase_t ph; /**< The phase */
113 	ir_graph *irg; /**< The current irg */
114 	heights_t *height; /**< The heights object of the irg */
115 	void *irg_env; /**< An environment for the irg scheduling, provided by the backend */
116 	void *block_env; /**< An environment for scheduling a block, provided by the backend */
117 	const arch_env_t *arch_env; /**< The architecture environment (isa accessed via arch_env->isa) */
118 	const arch_isa_t *isa; /**< The ISA */
119 	const be_main_env_t *main_env; /**< The backend main environment */
120 	const be_machine_t *cpu; /**< the current abstract machine */
121 	ilpsched_options_t *opts; /**< the ilp options for current irg */
122 	const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
123 	DEBUG_ONLY(firm_dbg_module_t *dbg);
126 /* convenience macros to handle phase irn data */
127 #define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
128 #define is_ilpsched_block(node) (is_Block((node)->irn))
129 #define get_ilpsched_block_attr(block) (&(block)->attr.block_attr)
130 #define get_ilpsched_node_attr(node) (&(node)->attr.node_attr)
132 /* iterate over a list of ir_nodes linked by link field */
133 #define foreach_linked_irns(head, iter) for ((iter) = (head); (iter); (iter) = get_irn_link((iter)))
135 /* check if node is considered for ILP scheduling: blocks and "normal" Projs are excluded */
136 #define consider_for_sched(isa, irn) \
137 	(! (is_Block(irn) || \
138 		is_normal_Proj(isa, irn) || \
145 /* gives the valid scheduling time step interval for a node: [asap, alap], inclusive */
146 #define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)
148 /* gives the valid interval where a node can die: [asap, max_steps], inclusive */
149 #define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)
151 /* gives the corresponding ILP variable index for given node, unit and time step
   (row-major: one VALID_SCHED_INTERVAL-sized stripe of x variables per unit type) */
152 #define ILPVAR_IDX(na, unit, control_step) \
153 	((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)
155 /* gives the corresponding dead nodes ILP variable index for given node, unit and time step */
156 #define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
157 	((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)
159 /* check if a double value is within an epsilon environment of 0 */
160 #define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)
/* Timer macros: real timers with libcore, no-ops (elapsed time 0.0) without. */
163 #define ilp_timer_push(t) lc_timer_push((t))
164 #define ilp_timer_pop() lc_timer_pop()
165 #define ilp_timer_elapsed_usec(t) lc_timer_elapsed_usec((t))
166 #else /* WITH_LIBCORE */
167 #define ilp_timer_push(t)
168 #define ilp_timer_pop()
169 #define ilp_timer_elapsed_usec(t) 0.0
170 #endif /* WITH_LIBCORE */
172 /* option variable */
173 static ilpsched_options_t ilp_opts = {
174 	70, /* if we have more than 70 nodes: use alive nodes constraint */
175 	300, /* 300 sec per block time limit */
/* Command line options registered with libcore (only available WITH_LIBCORE). */
181 static const lc_opt_table_entry_t ilpsched_option_table[] = {
182 	LC_OPT_ENT_INT("limit_dead", "Upto how many nodes the dead node constraint should be used", &ilp_opts.limit_dead),
183 	LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
184 	LC_OPT_ENT_STR("lpp_log", "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
187 #endif /* WITH_LIBCORE */
190 We need this global variable as we compare nodes dependent on heights,
191 but we cannot pass any information to the qsort compare function.
/* Set around the qsort call in apply_solution, freed right after; not valid at other times. */
193 static heights_t *glob_heights;
196 * Check if irn is a Proj, which has no execution units assigned.
197 * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
199 static INLINE int is_normal_Proj(const arch_isa_t *isa, const ir_node *irn) {
200 	return is_Proj(irn) && (arch_isa_get_allowed_execution_units(isa, irn) == NULL);
204 * Skips normal Projs.
205 * @return predecessor if irn is a normal Proj, otherwise irn.
207 static INLINE ir_node *skip_normal_Proj(const arch_isa_t *isa, ir_node *irn) {
208 	if (is_normal_Proj(isa, irn))
209 		return get_Proj_pred(irn);
/* Query the backend for irn's latency; non-Proj, non-Keep nodes with latency 0
   get special treatment (return statements not visible in this listing —
   presumably the latency is clamped to at least 1; confirm against full source). */
213 static INLINE int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
214 	unsigned lat = be_ilp_sched_latency(sel, irn, env);
215 	if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
222 * Compare scheduling time steps of two be_ilpsched_irn's.
/* qsort comparator: primary key is sched_point; for equal points, block-local
   reachability (via glob_heights) decides the order so dependencies are kept. */
224 static int cmp_ilpsched_irn(const void *a, const void *b) {
225 	be_ilpsched_irn_t *n1 = *(be_ilpsched_irn_t **)a;
226 	be_ilpsched_irn_t *n2 = *(be_ilpsched_irn_t **)b;
227 	ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
228 	ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);
230 	if (n1_a->sched_point == n2_a->sched_point) {
231 		ir_node *irn_a = n1->irn;
232 		ir_node *irn_b = n2->irn;
234 		if (heights_reachable_in_block(glob_heights, irn_a, irn_b))
236 		if (heights_reachable_in_block(glob_heights, irn_b, irn_a))
241 	return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
245 * In case there is no phase information for irn, initialize it.
/* Phase data constructor/re-initializer. Called both on first access
   (old == NULL: allocate and zero) and on reinit (old != NULL: reset
   per-node data, keeping/creating the transitive block nodes bitset). */
247 static void *init_ilpsched_irn(phase_t *ph, ir_node *irn, void *old) {
248 	be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
251 	/* if we have already some data: check for reinitialization */
253 		if (! is_Block(irn)) {
254 			ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
256 			if (! na->transitive_block_nodes) {
257 				ir_node *block = get_nodes_block(irn);
258 				be_ilpsched_irn_t *block_node = phase_get_or_set_irn_data(ph, block);
259 				ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
261 				/* we are called after the block indices have been built: create bitset */
262 				na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);
265 				/* we are called from reinit block data: clear the bitset */
266 				bitset_clear_all(na->transitive_block_nodes);
268 				na->alap_changed = 1;
276 	/* set ilpsched irn attributes (either block or irn) */
278 		ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);
280 		ba->n_interesting_nodes = 0;
281 		ba->block_last_idx = 0;
282 		ba->root_nodes = plist_new();
283 		ba->head_ilp_nodes = NULL;
287 		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
288 		memset(na, 0, sizeof(*na));
295 * Assign a per block unique number to each node.
/* irg walker callback: numbers every schedulable node consecutively within
   its block (ba->block_last_idx doubles as the running counter). */
297 static void build_block_idx(ir_node *irn, void *walk_env) {
298 	be_ilpsched_env_t *env = walk_env;
299 	be_ilpsched_irn_t *node, *block_node;
300 	ilpsched_node_attr_t *na;
301 	ilpsched_block_attr_t *ba;
303 	if (! consider_for_sched(env->arch_env->isa, irn))
306 	node = get_ilpsched_irn(env, irn);
307 	na = get_ilpsched_node_attr(node);
308 	block_node = get_ilpsched_irn(env, get_nodes_block(irn));
309 	ba = get_ilpsched_block_attr(block_node);
311 	na->block_idx = ba->block_last_idx++;
314 /********************************************************
317 * __ _ ___ __ _ _ __ / / __ _| | __ _ _ __
318 * / _` / __|/ _` | '_ \ / / / _` | |/ _` | '_ \
319 * | (_| \__ \ (_| | |_) | / / | (_| | | (_| | |_) |
320 * \__,_|___/\__,_| .__/ /_/ \__,_|_|\__,_| .__/
323 ********************************************************/
326 * Add all nodes having no user in current block to last_nodes list.
/* irg walker callback: classifies irn as "root" (no user in its own block,
   seeds the ALAP computation) or "normal", counts its data consumers and
   records the consumers living in the same block. Normal Projs in between
   are looked through to find the real users. */
328 static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
330 	const ir_edge_t *edge;
331 	be_ilpsched_irn_t *block_node, *node;
332 	ilpsched_block_attr_t *ba;
333 	ilpsched_node_attr_t *na;
335 	be_ilpsched_env_t *env = walk_env;
336 	int has_block_user = 0;
337 	unsigned n_consumer = 0;
338 	ir_edge_kind_t ekind[2] = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
342 	if (! consider_for_sched(env->arch_env->isa, irn))
345 	block = get_nodes_block(irn);
346 	idx = get_irn_idx(irn);
347 	consumer = NEW_ARR_F(ir_node *, 0);
349 	DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));
351 	/* check data and dependency out edges */
352 	for (i = 0; i < 2 && ! has_block_user; ++i) {
353 		foreach_out_edge_kind(irn, edge, ekind[i]) {
354 			ir_node *user = get_edge_src_irn(edge);
356 			if (is_normal_Proj(env->arch_env->isa, user)) {
357 				const ir_edge_t *user_edge;
359 				if (get_irn_mode(user) == mode_X)
362 				/* The ABI ensures, that there will be no ProjT nodes in the graph. */
363 				for (j = 0; j < 2; ++j) {
364 					foreach_out_edge_kind(user, user_edge, ekind[j]) {
365 						ir_node *real_user = get_edge_src_irn(user_edge);
367 						if (! is_Phi(real_user) && ! is_Block(real_user)) {
368 							be_ilpsched_irn_t *node = get_ilpsched_irn(env, real_user);
369 							ilpsched_node_attr_t *ua = get_ilpsched_node_attr(node);
371 							/* skip already visited nodes */
372 							if (ua->consumer_idx == idx)
375 							/* check if node has user in this block and collect the user if it's a data user */
376 							if (get_nodes_block(real_user) == block) {
377 								if (i == 0 && j == 0)
378 									ARR_APP1(ir_node *, consumer, real_user);
382 							/* only count data consumer */
386 							/* mark user as visited by this node */
387 							ua->consumer_idx = idx;
392 			else if (is_Block(user)) {
395 			else if (! is_Phi(user)) {
396 				be_ilpsched_irn_t *node = get_ilpsched_irn(env, user);
397 				ilpsched_node_attr_t *ua = get_ilpsched_node_attr(node);
399 				/* skip already visited nodes */
400 				if (ua->consumer_idx == idx)
403 				/* check if node has user in this block and collect the user if it's a data user */
404 				if (get_nodes_block(user) == block) {
406 						ARR_APP1(ir_node *, consumer, user);
410 				/* only count data consumer */
414 				/* mark user visited by this node */
415 				ua->consumer_idx = idx;
420 	block_node = get_ilpsched_irn(env, block);
421 	ba = get_ilpsched_block_attr(block_node);
423 	ba->n_interesting_nodes++;
425 	/* current irn has no user inside this block, add to queue */
426 	if (! has_block_user) {
427 		DB((env->dbg, LEVEL_3, "root node\n"));
428 		plist_insert_back(ba->root_nodes, irn);
431 		DB((env->dbg, LEVEL_3, "normal node\n"));
434 	/* record number of all consumers and the consumers within the same block */
435 	node = get_ilpsched_irn(env, irn);
436 	na = get_ilpsched_node_attr(node);
437 	na->n_consumer = n_consumer;
438 	na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
439 	memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
444 * Calculate the ASAP scheduling step for current irn.
/* Topological walker callback: ASAP(n) = max over same-block predecessors p of
   ASAP(p) + latency(p). Also links irn into the block's ILP node list and
   accumulates the block's max_steps upper bound. */
446 static void calculate_irn_asap(ir_node *irn, void *walk_env) {
447 	be_ilpsched_env_t *env = walk_env;
450 	be_ilpsched_irn_t *node, *block_node;
451 	ilpsched_node_attr_t *na;
452 	ilpsched_block_attr_t *ba;
454 	/* These nodes are handled separate */
455 	if (! consider_for_sched(env->arch_env->isa, irn))
458 	DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));
460 	block = get_nodes_block(irn);
461 	node = get_ilpsched_irn(env, irn);
462 	na = get_ilpsched_node_attr(node);
465 	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
466 		ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
468 		/* check for greatest distance to top */
469 		if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
470 			be_ilpsched_irn_t *pred_node = get_ilpsched_irn(env, pred);
471 			ilpsched_node_attr_t *pna = get_ilpsched_node_attr(pred_node);
474 			lat = fixed_latency(env->sel, pred, env->block_env);
475 			na->asap = MAX(na->asap, pna->asap + lat);
479 	/* add node to ILP node list and update max_steps */
480 	block_node = get_ilpsched_irn(env, block);
481 	ba = get_ilpsched_block_attr(block_node);
483 	set_irn_link(irn, ba->head_ilp_nodes);
484 	ba->head_ilp_nodes = irn;
485 	ba->max_steps += fixed_latency(env->sel, irn, env->block_env);
487 	DB((env->dbg, LEVEL_2, "%u\n", na->asap));
491 * Calculate the ALAP scheduling step of all irns in current block.
492 * Depends on max_steps being calculated.
/* Backwards worklist pass starting at the block's root nodes:
   ALAP(root) = max_steps, ALAP(pred) = min over users u of ALAP(u) - latency(pred)
   (the "longer path" branch lowers the ALAP). Nodes are revisited whenever
   a user's ALAP changed (alap_changed flag). */
494 static void calculate_block_alap(ir_node *block, void *walk_env) {
495 	be_ilpsched_env_t *env = walk_env;
496 	be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
497 	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
498 	waitq *cur_queue = new_waitq();
501 	assert(is_Block(block));
503 	DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
504 		block, ba->n_interesting_nodes, ba->max_steps));
506 	/* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
507 	//phase_reinit_block_irn_data(&env->ph, block);
509 	/* init start queue */
510 	foreach_plist(ba->root_nodes, el) {
511 		waitq_put(cur_queue, plist_element_get_value(el));
514 	/* repeat until all nodes are processed */
515 	while (! waitq_empty(cur_queue)) {
516 		waitq *next_queue = new_waitq();
518 		/* process all nodes in current step */
519 		while (! waitq_empty(cur_queue)) {
520 			ir_node *cur_irn = waitq_get(cur_queue);
521 			be_ilpsched_irn_t *node = get_ilpsched_irn(env, cur_irn);
522 			ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
525 			/* cur_node has no alap set -> it's a root node, set to max alap */
527 				na->alap = ba->max_steps;
528 				DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
532 				DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",
536 			/* set the alap's of all predecessors */
537 			for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
538 				ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(cur_irn, i));
540 				/* check for greatest distance to bottom */
541 				if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
542 					be_ilpsched_irn_t *pred_node = get_ilpsched_irn(env, pred);
543 					ilpsched_node_attr_t *pna = get_ilpsched_node_attr(pred_node);
546 					/* mark the predecessor as visited by current irn */
547 					if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
549 					pna->visit_idx = get_irn_idx(cur_irn);
551 					lat = fixed_latency(env->sel, pred, env->block_env);
553 					/* set ALAP of current pred */
554 					if (pna->alap == 0) {
555 						/* current ALAP is 0: node has not yet been visited */
556 						pna->alap_changed = 1;
557 						pna->alap = na->alap - lat;
559 					else if (pna->alap > na->alap - lat) {
560 						/* we found a longer path to root node: change ALAP */
561 						pna->alap = na->alap - lat;
562 						pna->alap_changed = 1;
565 						/* current ALAP is best found so far: keep it */
566 						pna->alap_changed = 0;
569 					DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));
571 					/* enqueue node for next iteration */
572 					if (get_irn_ins_or_deps(pred) > 0)
573 						waitq_put(next_queue, pred);
578 		/* prepare for next iteration */
579 		del_waitq(cur_queue);
580 		cur_queue = next_queue;
585 * We can free the list of root nodes here.
/* Block walker callback, run after ALAP computation: the root node list is
   only needed to seed calculate_block_alap, so release it. */
587 static void clear_unwanted_data(ir_node *block, void *walk_env) {
588 	be_ilpsched_env_t *env = walk_env;
589 	be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
590 	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
592 	plist_free(ba->root_nodes);
593 	ba->root_nodes = NULL;
597 * Refine the {ASAP(n), ALAP(n)} interval for the nodes.
598 * Set the ASAP/ALAP times of Projs and Keeps to their ancestor ones.
/* irg walker callback: Projs/Keeps inherit the interval of the node they
   belong to and are registered in that node's projkeeps queue so they can
   be scheduled together with it later. */
600 static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
601 	be_ilpsched_env_t *env = walk_env;
603 	be_ilpsched_irn_t *node, *pred_node;
604 	ilpsched_node_attr_t *na, *pna;
606 	if (! consider_for_sched(env->arch_env->isa, irn))
609 	if (! is_Proj(irn) && ! be_is_Keep(irn))
612 	/* go to the ancestor */
614 	pred = get_irn_n(irn, 0);
615 	pred = skip_Proj(pred);
617 	node = get_ilpsched_irn(env, irn);
618 	pred_node = get_ilpsched_irn(env, pred);
619 	na = get_ilpsched_node_attr(node);
620 	pna = get_ilpsched_node_attr(pred_node);
622 	na->asap = pna->asap;
623 	na->alap = pna->alap;
625 	/* record all Projs and Keeps for this node */
626 	if (! pna->projkeeps)
627 		pna->projkeeps = new_waitq();
628 	waitq_put(pna->projkeeps, irn);
630 	DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
633 /*******************************************
636 * ___ ___| |__ ___ __| |_ _| | ___
637 * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
638 * \__ \ (__| | | | __/ (_| | |_| | | __/
639 * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
641 *******************************************/
/* Collect all Keep users of irn into the keeps queue; a Keep must live in the
   same block as the node it keeps. */
643 static INLINE void check_for_keeps(waitq *keeps, ir_node *block, ir_node *irn) {
644 	const ir_edge_t *edge;
646 	foreach_out_edge(irn, edge) {
647 		ir_node *user = get_edge_src_irn(edge);
649 		if (be_is_Keep(user)) {
650 			assert(get_nodes_block(user) == block && "Keep must not be in different block.");
651 			waitq_put(keeps, user);
657 * Inserts @p irn before @p before into schedule and notifies backend.
/* The backend callback is invoked first so it sees the cycle before the
   scheduling list is modified. */
659 static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
660 	ir_node *before, ir_node *irn, unsigned cycle)
662 	be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
663 	sched_add_before(before, irn);
667 * Adds a node, its Projs (in case of mode_T nodes) and
668 * its Keeps to schedule.
670 static void add_to_sched(be_ilpsched_env_t *env, ir_node *block, ir_node *irn, unsigned cycle) {
671 	const ir_edge_t *edge;
672 	waitq *keeps = new_waitq();
674 	/* mode_M nodes are not scheduled */
675 	if (get_irn_mode(irn) == mode_M)
678 	if (! sched_is_scheduled(irn))
679 		notified_sched_add_before(env, block, irn, cycle);
682 	if (get_irn_mode(irn) == mode_T) {
683 		foreach_out_edge(irn, edge) {
684 			ir_node *user = get_edge_src_irn(edge);
686 			if (to_appear_in_schedule(user) || get_irn_mode(user) == mode_b)
687 				notified_sched_add_before(env, block, user, cycle);
689 			check_for_keeps(keeps, block, user);
693 		check_for_keeps(keeps, block, irn);
	/* schedule the collected Keeps right after the node / its Projs */
697 	while (! waitq_empty(keeps)) {
698 		ir_node *keep = waitq_get(keeps);
699 		if (! sched_is_scheduled(keep))
700 			notified_sched_add_before(env, block, keep, cycle);
707 * Schedule all nodes in the given block, according to the ILP solution.
/* Reads the x_{nt}^k solution values back from the LPP: for every ILP node the
   single variable set to 1 gives its time step and unit type. The nodes are
   then sorted by time step (ties broken by intra-block reachability, hence the
   glob_heights setup around qsort) and emitted: Phis first, then the sorted
   list, the control flow op last. */
709 static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
710 	be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
711 	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
712 	sched_info_t *info = get_irn_sched_info(block);
713 	be_ilpsched_irn_t **sched_nodes;
716 	const ir_edge_t *edge;
718 	/* init block schedule list */
719 	INIT_LIST_HEAD(&info->list);
722 	/* collect nodes and their scheduling time step */
723 	sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
724 	if (ba->n_interesting_nodes == 0) {
727 	else if (ba->n_interesting_nodes == 1) {
728 		be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);
730 		/* add the single node */
731 		ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
734 		/* check all nodes for their positive solution */
735 		foreach_linked_irns(ba->head_ilp_nodes, irn) {
736 			be_ilpsched_irn_t *node;
737 			ilpsched_node_attr_t *na;
741 			node = get_ilpsched_irn(env, irn);
742 			na = get_ilpsched_node_attr(node);
746 			/* go over all variables of a node until the non-zero one is found */
747 			for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
748 				for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
749 					double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);
751 					/* check, if variable is set to one (it's not zero then :) */
752 					if (! LPP_VALUE_IS_0(val)) {
754 						ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
755 						DBG((env->dbg, LEVEL_1, "Schedpoint of %+F is %u at unit type %s\n",
756 							irn, t, na->type_info[tp_idx].tp->name));
763 	glob_heights = heights_new(env->irg);
764 	/* sort nodes ascending by scheduling time step */
765 	qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);
766 	heights_free(glob_heights);
769 	/* make all Phis ready and remember the single cf op */
771 	foreach_out_edge(block, edge) {
772 		irn = get_edge_src_irn(edge);
774 		switch (get_irn_opcode(irn)) {
776 				add_to_sched(env, block, irn, 0);
785 				assert(cfop == NULL && "Highlander - there can be only one");
792 	/* add all nodes from list */
793 	for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
794 		ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
795 		add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);
798 	/* schedule control flow node if not already done */
799 	if (cfop && ! sched_is_scheduled(cfop))
800 		add_to_sched(env, block, cfop, 0);
802 	DEL_ARR_F(sched_nodes);
805 /***************************************************************
806 * _____ _ _____ _____ _ _
807 * |_ _| | | __ \ / ____| | | (_)
808 * | | | | | |__) | | (___ ___ ___| |_ _ ___ _ __
809 * | | | | | ___/ \___ \ / _ \/ __| __| |/ _ \| '_ \
810 * _| |_| |____| | ____) | __/ (__| |_| | (_) | | | |
811 * |_____|______|_| |_____/ \___|\___|\__|_|\___/|_| |_|
813 ***************************************************************/
816 * Check if node can be executed on given unit type.
/* Linear scan over the node's allowed unit types; returns the index on a
   match (return statements not visible in this listing — confirm). */
818 static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
820 	ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
822 	for (i = na->n_unit_types - 1; i >= 0; --i) {
823 		if (na->type_info[i].tp == tp)
830 /************************************************
833 * __ ____ _ _ __ _ __ _| |__ | | ___ ___
834 * \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
835 * \ V / (_| | | | | (_| | |_) | | __/\__ \
836 * \_/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
838 ************************************************/
841 * Create the following variables:
842 * - x_{nt}^k binary weighted with: t
843 * node n is scheduled at time step t to unit type k
844 * ==>> These variables represent the schedule
846 * - d_{nt}^k binary weighted with: t
847 * node n dies at time step t on unit type k
848 * - a_{nt}^k binary weighted with num_nodes
849 * node n is alive at time step t on unit type k
851 * - y_{nt}^k binary weighted with: num_nodes^2
852 * node n is scheduled at time step t to unit type k
853 * although all units of this type are occupied
854 * ==>> These variables represent the register pressure
857 static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
860 	unsigned num_block_var, num_nodes;
861 	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* NOTE(review): "weigth_y" is a typo for "weight_y"; kept as-is since renaming is out of scope here */
862 	unsigned weigth_y = ba->n_interesting_nodes * ba->n_interesting_nodes;
864 	lc_timer_t *t_var = lc_timer_register("beilpsched_var", "create ilp variables");
865 #endif /* WITH_LIBCORE */
867 	ilp_timer_push(t_var);
868 	num_block_var = num_nodes = 0;
869 	foreach_linked_irns(ba->head_ilp_nodes, irn) {
870 		const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
871 		be_ilpsched_irn_t *node;
872 		ilpsched_node_attr_t *na;
873 		unsigned n_unit_types, tp_idx, unit_idx, n_var, cur_unit;
874 		unsigned cur_var_ad, cur_var_x, cur_var_y, num_ad;
876 		/* count number of available unit types for this node */
877 		for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)
880 		node = get_ilpsched_irn(env, irn);
881 		na = get_ilpsched_node_attr(node);
883 		na->n_unit_types = n_unit_types;
884 		na->type_info = NEW_ARR_D(unit_type_info_t, var_obst, n_unit_types);
886 		/* fill the type info array */
887 		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
888 			for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
889 				/* beware: we also count number of available units here */
890 				if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
891 					na->is_dummy_node = 1;
894 			na->type_info[tp_idx].tp = execunits[tp_idx][0]->tp;
895 			na->type_info[tp_idx].n_units = unit_idx;
898 		/* allocate space for ilp variables (initialized to -1 = "not created") */
899 		na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
900 		memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));
902 		/* we need these variables only for "real" nodes */
903 		if (! na->is_dummy_node) {
904 			na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
905 			memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));
907 			num_ad = ba->max_steps - na->asap + 1;
			/* a-variables only when the block is large enough for the alive nodes constraint */
909 			if (ba->n_interesting_nodes > env->opts->limit_dead) {
910 				na->ilp_vars.a = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
911 				memset(na->ilp_vars.a, -1, ARR_LEN(na->ilp_vars.a) * sizeof(na->ilp_vars.a[0]));
914 				na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
915 				memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));
919 		DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
920 			irn, na->asap, na->alap, na->n_unit_types));
922 		cur_var_x = cur_var_ad = cur_var_y = cur_unit = n_var = 0;
923 		/* create variables */
924 		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
			/* note: time steps are 0-based in the LPP (asap - 1 .. alap - 1) */
927 			for (t = na->asap - 1; t <= na->alap - 1; ++t) {
928 				/* x_{nt}^k variables */
929 				snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
930 					get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
931 				na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
932 				DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
933 				/* variable counter */
937 				if (! na->is_dummy_node) {
938 					/* y_{nt}^k variables */
939 					snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
940 						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
941 					na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_binary, (double)(weigth_y));
942 					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
944 					/* variable counter */
950 			/* a node can die at any step t: asap(n) <= t <= U */
951 			if (! na->is_dummy_node) {
952 				for (t = na->asap - 1; t <= ba->max_steps; ++t) {
954 					if (ba->n_interesting_nodes > env->opts->limit_dead) {
955 						/* a_{nt}^k variables */
956 						snprintf(buf, sizeof(buf), "a_n%u_%s_%u",
957 							get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
958 						na->ilp_vars.a[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(ba->n_interesting_nodes));
961 						/* d_{nt}^k variables */
962 						snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
963 							get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
964 						na->ilp_vars.d[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
966 					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
968 					/* variable counter */
975 		DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));
979 	DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
980 		num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
983 /*******************************************************
986 * ___ ___ _ __ ___| |_ _ __ __ _ _ _ __ | |_ ___
987 * / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
988 * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
989 * \___\___/|_| |_|___/\__|_| \__,_|_|_| |_|\__|___/
991 *******************************************************/
994 * Create following ILP constraints:
995 * - the assignment constraints:
996 * assure each node is executed once by exactly one (allowed) execution unit
997 * - the dead node assignment constraints:
998 * assure a node can only die at most once
999 * - the precedence constraints:
1000 * assure that no data dependencies are violated
1002 static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1003 	unsigned num_cst_assign, num_cst_prec, num_cst_dead;
1006 	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1007 	bitset_t *bs_block_irns = bitset_alloca(ba->block_last_idx);
1009 	lc_timer_t *t_cst_assign = lc_timer_register("beilpsched_cst_assign", "create assignment constraints");
1010 	lc_timer_t *t_cst_dead = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
1011 	lc_timer_t *t_cst_prec = lc_timer_register("beilpsched_cst_prec", "create precedence constraints");
1012 #endif /* WITH_LIBCORE */
1014 	num_cst_assign = num_cst_prec = num_cst_dead = 0;
1015 	foreach_linked_irns(ba->head_ilp_nodes, irn) {
1018 		be_ilpsched_irn_t *node;
1019 		ilpsched_node_attr_t *na;
1021 		node = get_ilpsched_irn(env, irn);
1022 		na = get_ilpsched_node_attr(node);
1025 		/* the assignment constraint: sum over all x variables of the node == 1 */
1026 		ilp_timer_push(t_cst_assign);
1027 		snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
1028 		cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
1029 		DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1032 		lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
1035 		/* the dead node assignment constraint: sum over all d variables <= 1 */
1036 		if (! na->is_dummy_node && ba->n_interesting_nodes <= env->opts->limit_dead) {
1037 			ilp_timer_push(t_cst_dead);
1038 			snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
1039 			cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1040 			DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1042 			lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);
1046 		/* We have separate constraints for Projs and Keeps */
1047 		// ILP becomes infeasible ?!?
1048 		// if (is_Proj(irn) || be_is_Keep(irn))
1051 		/* the precedence constraints */
1052 		ilp_timer_push(t_cst_prec);
1053 		bs_block_irns = bitset_clear_all(bs_block_irns);
1054 		for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
1055 			ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
1056 			unsigned t_low, t_high, t;
1057 			be_ilpsched_irn_t *pred_node;
1058 			ilpsched_node_attr_t *pna;
1061 			if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
1064 			pred_node = get_ilpsched_irn(env, pred);
1065 			pna = get_ilpsched_node_attr(pred_node);
1067 			assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
			/* handle each predecessor only once per node */
1069 			if (! bitset_is_set(bs_block_irns, pna->block_idx))
1070 				bitset_set(bs_block_irns, pna->block_idx);
1074 			/* irn = n, pred = m */
1075 			delay = fixed_latency(env->sel, pred, env->block_env);
1076 			t_low = MAX(na->asap, pna->asap + delay - 1);
1077 			t_high = MIN(na->alap, pna->alap + delay - 1);
			/* one constraint per time step in the overlapping window:
			   n scheduled at <= t and m scheduled late enough cannot both hold */
1078 			for (t = t_low - 1; t <= t_high - 1; ++t) {
1080 				int *tmp_var_idx = NEW_ARR_F(int, 0);
1082 				snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
1083 				cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1084 				DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1087 				/* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
1088 				if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
1089 					/* node variables have smaller index than pred variables */
1090 					for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1091 						for (tn = na->asap - 1; tn <= t; ++tn) {
1092 							unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1093 							ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1097 					for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1098 						for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1099 							unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1100 							ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1105 					/* pred variables have smaller index than node variables */
1106 					for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1107 						for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1108 							unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1109 							ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1113 					for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1114 						for (tn = na->asap - 1; tn <= t; ++tn) {
1115 							unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1116 							ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1121 				if (ARR_LEN(tmp_var_idx) > 0)
1122 					lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1124 				DEL_ARR_F(tmp_var_idx);
	/* NOTE(review): "assignement" below is a typo inside a runtime DBG string; left untouched to keep output byte-identical */
1129 	DBG((env->dbg, LEVEL_1, "\t%u assignement constraints (%g sec)\n",
1130 		num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
1131 	DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
1132 		num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
1136 * Create ILP resource constraints:
1137 * - assure that for each time step not more instructions are scheduled
1138 * to the same unit types as units of this type are available
1140 static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1143 unsigned num_cst_resrc = 0;
1144 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1146 lc_timer_t *t_cst_rsrc = lc_timer_register("beilpsched_cst_rsrc", "create resource constraints");
1147 #endif /* WITH_LIBCORE */
1149 ilp_timer_push(t_cst_rsrc);
	/* One constraint per (unit type, time step): the sum of the x_{nt}^k
	 * variables of all nodes that may execute on this unit type at step t
	 * is bounded above by the number of available units of that type. */
1150 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1152 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1154 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1156 /* check each time step */
1157 for (t = 0; t < ba->max_steps; ++t) {
1160 int *tmp_var_idx = NEW_ARR_F(int, 0);
1162 snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
1163 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
1164 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
	/* collect the x-variables of all nodes whose scheduling window covers t */
1167 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1168 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1169 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1172 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
	/* t is 0-based while asap/alap are 1-based, hence the -1 shift */
1174 if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
1175 int cur_var = ILPVAR_IDX(na, tp_idx, t);
1176 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);
1180 /* set constraints if we have some */
1181 if (ARR_LEN(tmp_var_idx) > 0)
1182 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1184 DEL_ARR_F(tmp_var_idx);
1188 DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
1189 num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
1193 * Create ILP bundle constraints:
1194 * - assure, at most bundle_size * bundles_per_cycle instructions
1195 * can be started at a certain point.
1197 static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1200 unsigned num_cst_bundle = 0;
	/* NOTE(review): "bundels_per_cycle" is the (misspelled) field name of the
	 * machine description structure, so the spelling must stay as-is here. */
1201 unsigned n_instr_max = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
1202 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1204 lc_timer_t *t_cst_bundle = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
1205 #endif /* WITH_LIBCORE */
1207 ilp_timer_push(t_cst_bundle);
	/* one constraint per time step: the number of bundle-relevant nodes
	 * started at t must not exceed n_instr_max */
1208 for (t = 0; t < ba->max_steps; ++t) {
1211 int *tmp_var_idx = NEW_ARR_F(int, 0);
1213 snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
1214 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
1215 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1218 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1219 be_ilpsched_irn_t *node;
1220 ilpsched_node_attr_t *na;
1223 /* Projs and Keeps do not contribute to bundle size */
1224 if (is_Proj(irn) || be_is_Keep(irn))
1227 node = get_ilpsched_irn(env, irn);
1228 na = get_ilpsched_node_attr(node);
1230 /* nodes assigned to DUMMY unit do not contribute to bundle size */
1231 if (na->is_dummy_node)
	/* t is 0-based, asap/alap are 1-based, hence the -1 shift */
1234 if (t >= na->asap - 1 && t <= na->alap - 1) {
1235 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1236 int idx = ILPVAR_IDX(na, tp_idx, t);
1237 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1242 if (ARR_LEN(tmp_var_idx) > 0)
1243 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1245 DEL_ARR_F(tmp_var_idx);
1248 DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
1249 num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
1253 * Create ILP dying nodes constraints:
1254 * - set variable d_{nt}^k to 1 if nodes n dies at step t on unit k
1256 static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1259 unsigned num_cst = 0;
1260 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1262 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
1263 #endif /* WITH_LIBCORE */
1265 ilp_timer_push(t_cst);
1266 /* check all time_steps */
1267 for (t = 0; t < ba->max_steps; ++t) {
1271 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1272 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1273 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1275 /* if node has no consumer within current block, it cannot die here */
1276 /* we also ignore nodes assigned to dummy unit */
1277 if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)
1280 /* node can only die here if t at least asap(n) */
1281 if (t >= na->asap - 1) {
1284 /* for all unit types */
1285 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1287 int *tmp_var_idx = NEW_ARR_F(int, 0);
	/* The constraint built below reads (per the coefficients set here):
	 *   sum(consumer x-vars up to t) - sum(earlier d-vars)
	 *     - n_consumer * d_t  <=  n_consumer - 1
	 * i.e. once all consumers of n are scheduled by step t, either an
	 * earlier kill point or d_t itself must be set to 1. */
1289 snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
1290 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
1291 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1294 /* number of consumer scheduled till t */
1295 for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1296 be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1297 ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
1299 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
	/* clamp the consumer's window to [asap-1, min(t, alap-1)] */
1302 for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
1303 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1304 ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);
1309 /* could be that no consumer can be scheduled at this point */
1310 if (ARR_LEN(tmp_var_idx)) {
1314 /* subtract possible prior kill points */
1315 for (tn = na->asap - 1; tn < t; ++tn) {
1316 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
1317 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);
1320 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1321 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
1322 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1325 DEL_ARR_F(tmp_var_idx);
1332 DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
1333 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1337 * Create ILP alive nodes constraints:
1338 * - set variable a_{nt}^k to 1 if nodes n is alive at step t on unit k
1340 static void create_alive_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1343 unsigned num_cst = 0;
1344 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1346 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_alive_nodes", "create alive nodes constraints");
1347 #endif /* WITH_LIBCORE */
1349 ilp_timer_push(t_cst);
1351 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1352 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1353 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1356 /* we ignore nodes assigned to dummy unit here */
1357 if (na->is_dummy_node)
1360 /* check all time steps: asap(n) <= t <= U */
1361 for (t = na->asap - 1; t < ba->max_steps; ++t) {
1364 /* for all unit types available for this node */
1365 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1366 unsigned tn, tn_max, idx;
1368 int *tmp_var_idx_n = NEW_ARR_F(int, 0);
1369 int *tmp_var_idx_m = NEW_ARR_F(int, 0);
	/* The constraint built below (rhs 0) reads:
	 *   n_consumer * (x-vars of n up to t) - (consumer x-vars up to t)
	 *     - n_consumer * a_t  <=  0
	 * so a_t must be 1 whenever n is already scheduled but not all of
	 * its consumers have been scheduled yet. */
1371 snprintf(buf, sizeof(buf), "alive_node_cst_%u_n%u_%s",
1372 t, get_irn_idx(irn), na->type_info[node_tp_idx].tp->name);
1373 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 0.0);
1374 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1377 tn_max = MIN(na->alap - 1, t);
1378 /* check if the node has been scheduled so far */
1379 for (tn = na->asap - 1; tn <= tn_max; ++tn) {
1380 int idx = ILPVAR_IDX(na, node_tp_idx, tn);
1381 ARR_APP1(int, tmp_var_idx_n, na->ilp_vars.x[idx]);
1384 if (ARR_LEN(tmp_var_idx_n) > 0)
1385 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_n, ARR_LEN(tmp_var_idx_n), (double)(na->n_consumer));
1386 DEL_ARR_F(tmp_var_idx_n);
1388 /* subtract the number of consumer scheduled so far */
1389 for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1390 be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1391 ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
1393 unsigned tm, tm_max;
1395 tm_max = MIN(ca->alap - 1, t);
1396 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1397 for (tm = ca->asap - 1; tm <= tm_max; ++tm) {
1398 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1399 ARR_APP1(int, tmp_var_idx_m, ca->ilp_vars.x[idx]);
1404 if (ARR_LEN(tmp_var_idx_m) > 0)
1405 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_m, ARR_LEN(tmp_var_idx_m), -1.0);
1406 DEL_ARR_F(tmp_var_idx_m);
	/* the a-variables apparently share the dead-variable index layout
	 * (one var per type and step up to max_steps), hence the use of
	 * ILPVAR_IDX_DEAD here — NOTE(review): confirm this is intended */
1409 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1410 lpp_set_factor_fast(lpp, cst, na->ilp_vars.a[idx], 0.0 - (double)(na->n_consumer));
1416 DBG((env->dbg, LEVEL_1, "\t%u alive nodes constraints (%g sec)\n",
1417 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1421 * Create ILP pressure constraints, based on dead nodes:
1422 * - add additional costs to objective function if a node is scheduled
1423 * on a unit although all units of this type are currently occupied
1425 static void create_pressure_dead_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1428 unsigned num_cst = 0;
1429 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1431 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1432 #endif /* WITH_LIBCORE */
1434 ilp_timer_push(t_cst);
1435 /* y_{nt}^k is set for each node and timestep and unit type */
1436 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1437 unsigned cur_idx = get_irn_idx(cur_irn);
1438 be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
1439 ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);
1442 /* we ignore nodes assigned to DUMMY unit here */
1443 if (cur_na->is_dummy_node)
1447 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1448 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1452 /* BEWARE: the DUMMY unit types is not in CPU, so it's skipped automatically */
1454 /* check if node can be executed on this unit type */
1455 cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1459 /* check all time_steps */
1460 for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1463 int *tmp_idx_1 = NEW_ARR_F(int, 0);
1464 int *tmp_idx_m1 = NEW_ARR_F(int, 0);
	/* rhs is n_units - 1: the y-variable only needs to become positive
	 * (adding cost in the objective) when more nodes occupy this unit
	 * type at step t than units of the type exist */
1466 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1467 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1468 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1472 - accumulate all nodes scheduled on unit type k till t
1473 - subtract all nodes died on unit type k till t
1475 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1476 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1477 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1481 tmax = MIN(t, na->alap - 1);
1482 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1484 /* current unit type is not suitable for current node */
1488 for (tn = na->asap - 1; tn <= tmax; ++tn) {
1491 /* node scheduled */
1492 idx = ILPVAR_IDX(na, tp_idx, tn);
1493 ARR_APP1(int, tmp_idx_1, na->ilp_vars.x[idx]);
1496 idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, tn);
1497 ARR_APP1(int, tmp_idx_m1, na->ilp_vars.d[idx]);
1501 if (ARR_LEN(tmp_idx_1) > 0)
1502 lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_1, ARR_LEN(tmp_idx_1), 1.0);
1504 if (ARR_LEN(tmp_idx_m1) > 0)
1505 lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_m1, ARR_LEN(tmp_idx_m1), -1.0);
1507 /* BEWARE: t is unsigned, so (double)(-t) won't work */
	/* NOTE(review): at t == 0 the y coefficient below is 0.0, so y cannot
	 * relax the constraint in the very first step — confirm intended */
1508 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1509 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], 0.0 - (double)(t));
1511 DEL_ARR_F(tmp_idx_1);
1512 DEL_ARR_F(tmp_idx_m1);
1517 DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1518 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1522 * Create ILP pressure constraints, based on alive nodes:
1523 * - add additional costs to objective function if a node is scheduled
1524 * on a unit although all units of this type are currently occupied
1526 static void create_pressure_alive_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1529 unsigned num_cst = 0;
1530 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1532 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1533 #endif /* WITH_LIBCORE */
1535 ilp_timer_push(t_cst);
1536 /* y_{nt}^k is set for each node and timestep and unit type */
1537 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1538 unsigned cur_idx = get_irn_idx(cur_irn);
1539 be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
1540 ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);
1543 /* we ignore nodes assigned to DUMMY unit here */
1544 if (cur_na->is_dummy_node)
1548 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1549 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1553 /* BEWARE: the DUMMY unit types is not in CPU, so it's skipped automatically */
1555 /* check if node can be executed on this unit type */
1556 cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1560 /* check all time_steps at which the current node can be scheduled */
1561 for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1564 int *tmp_var_idx = NEW_ARR_F(int, 0);
	/* rhs is n_units - 1: y only needs to kick in (at a cost in the
	 * objective) when more nodes are alive on this unit type at t
	 * than units of the type exist */
1566 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1567 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1568 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1571 /* - accumulate all nodes alive at point t on unit type k */
1572 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1573 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1574 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1577 /* check if node can be alive here */
1578 if (t < na->asap - 1)
1581 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1583 /* current type is not suitable */
	/* a-variables are laid out like the d-variables, hence ILPVAR_IDX_DEAD */
1587 a_idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, t);
1588 ARR_APP1(int, tmp_var_idx, na->ilp_vars.a[a_idx]);
1591 if (ARR_LEN(tmp_var_idx) > 0)
1592 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1593 DEL_ARR_F(tmp_var_idx);
1595 /* - num_nodes * y_{nt}^k */
	/* big-M style relaxation: weight n_interesting_nodes guarantees
	 * y = 1 satisfies the constraint regardless of how many nodes
	 * are alive at t */
1596 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1597 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], 0.0 - (double)(ba->n_interesting_nodes));
1602 DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1603 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
/**
 * Create ILP Proj and Keep constraints:
 * - force every mode_T node to be scheduled in the same time step as its
 *   attached Projs and Keeps: per step, ARR_LEN(pk) times the node's
 *   x-variables minus each Proj/Keep x-variable must equal 0.
 */
1606 static void create_proj_keep_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1609 unsigned num_cst = 0;
1610 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1612 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_projkeep", "create proj and keep constraints");
1613 #endif /* WITH_LIBCORE */
1615 ilp_timer_push(t_cst);
1616 /* check all nodes */
1617 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1618 be_ilpsched_irn_t *node;
1619 ilpsched_node_attr_t *na;
1623 /* only mode_T nodes can have Projs and Keeps assigned */
1624 if (get_irn_mode(irn) != mode_T)
1627 node = get_ilpsched_irn(env, irn);
1628 na = get_ilpsched_node_attr(node);
1630 /* check if has some Projs and Keeps assigned */
1631 if (! na->projkeeps)
1634 /* we can run only once over the queue, so preserve the nodes */
1635 pk = NEW_ARR_F(ir_node *, 0);
1636 while (! waitq_empty(na->projkeeps))
1637 ARR_APP1(ir_node *, pk, waitq_get(na->projkeeps));
	/* the queue is consumed and freed here, so this can only run once per node */
1638 del_waitq(na->projkeeps);
1639 na->projkeeps = NULL;
1641 /* for all time steps at which this node can be scheduled */
1642 for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1644 int *tmp_var_idx_n = NEW_ARR_F(int, 0);
1646 /* add the constraint, assure, that a node is always scheduled along with it's Projs and Keeps */
1647 snprintf(buf, sizeof(buf), "projkeep_cst_n%u_%u", get_irn_idx(irn), t);
1648 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 0.0);
1649 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1652 /* sum up scheduling variables for this time step */
1653 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1654 int idx = ILPVAR_IDX(na, tp_idx, t);
1655 ARR_APP1(int, tmp_var_idx_n, na->ilp_vars.x[idx]);
	/* the node's variables are weighted by the number of Projs/Keeps so the
	 * equality with coefficient -1 per Proj/Keep balances out exactly when
	 * all of them are scheduled in the same step */
1658 if (ARR_LEN(tmp_var_idx_n) > 0)
1659 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_n, ARR_LEN(tmp_var_idx_n), (double)(ARR_LEN(pk)));
1660 DEL_ARR_F(tmp_var_idx_n);
1662 /* subtract all Proj and Keep variables for this step */
1663 for (i = ARR_LEN(pk) - 1; i >= 0; --i) {
1664 be_ilpsched_irn_t *pk_node = get_ilpsched_irn(env, pk[i]);
1665 ilpsched_node_attr_t *pk_na = get_ilpsched_node_attr(pk_node);
1668 for (pk_tp_idx = pk_na->n_unit_types - 1; pk_tp_idx >= 0; --pk_tp_idx) {
1669 int idx = ILPVAR_IDX(pk_na, pk_tp_idx, t);
1670 lpp_set_factor_fast(lpp, cst, pk_na->ilp_vars.x[idx], -1.0);
1676 DBG((env->dbg, LEVEL_1, "\t%u Proj and Keep constraints (%g sec)\n",
1677 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1680 /***************************************************
1682 * |_ _| | | __ \ (_)
1683 * | | | | | |__) | _ __ ___ __ _ _ _ __
1684 * | | | | | ___/ | '_ ` _ \ / _` | | '_ \
1685 * _| |_| |____| | | | | | | | (_| | | | | |
1686 * |_____|______|_| |_| |_| |_|\__,_|_|_| |_|
1688 ***************************************************/
1691 * Create the ilp (add variables, build constraints, solve, build schedule from solution).
1693 static void create_ilp(ir_node *block, void *walk_env) {
1694 be_ilpsched_env_t *env = walk_env;
1695 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
1696 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1697 FILE *logfile = NULL;
1699 struct obstack var_obst;
1701 DBG((env->dbg, 255, "\n\n\n=========================================\n"));
1702 DBG((env->dbg, 255, " ILP Scheduling for %+F\n", block));
1703 DBG((env->dbg, 255, "=========================================\n\n"));
1705 DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
1706 block, ba->n_interesting_nodes, ba->max_steps));
1708 /* notify backend and get block environment */
1709 env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);
1711 /* if we have less than two interesting nodes, there is no need to create the ILP */
1712 if (ba->n_interesting_nodes > 1) {
	/* heuristic growth factors: larger blocks get tighter estimates */
1713 double fact_var = ba->n_interesting_nodes > 25 ? 1.1 : 1.2;
1714 double fact_cst = ba->n_interesting_nodes > 25 ? 0.7 : 1.5;
1715 int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
1716 int estimated_n_var = (int)((double)base_num * fact_var);
1717 int estimated_n_cst = (int)((double)base_num * fact_cst);
1719 DBG((env->dbg, LEVEL_1, "Creating LPP with estimed numbers: %d vars, %d cst\n",
1720 estimated_n_var, estimated_n_cst));
1722 /* set up the LPP object */
1723 lpp = new_lpp_userdef(
1724 "be ilp scheduling",
	/* NOTE(review): estimated_n_cst is passed as the variable count here;
	 * presumably this should be estimated_n_var + 1 — TODO confirm */
1726 estimated_n_cst + 1, /* num vars */
1727 estimated_n_cst + 20, /* num cst */
1728 1.2); /* grow factor */
1729 obstack_init(&var_obst);
1731 /* create ILP variables */
1732 create_variables(env, lpp, block_node, &var_obst);
1734 /* create ILP constraints */
1735 DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
1736 create_assignment_and_precedence_constraints(env, lpp, block_node);
1737 create_ressource_constraints(env, lpp, block_node);
1738 create_bundle_constraints(env, lpp, block_node);
	/* NOTE(review): the Proj/Keep constraints are disabled — presumably they
	 * rendered the ILP infeasible; confirm before re-enabling */
1739 //create_proj_keep_constraints(env, lpp, block_node);
	/* depending on block size, register pressure is modelled either via
	 * alive-node or via dying-node constraints */
1740 if (ba->n_interesting_nodes > env->opts->limit_dead) {
1741 create_alive_nodes_constraint(env, lpp, block_node);
1742 create_pressure_alive_constraint(env, lpp, block_node);
1744 create_dying_nodes_constraint(env, lpp, block_node);
1745 create_pressure_dead_constraint(env, lpp, block_node);
1748 DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp->var_next, lpp->cst_next));
1750 /* debug stuff, dump lpp when debugging is on */
1752 if (firm_dbg_get_mask(env->dbg) > 0) {
1756 snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
1757 f = fopen(buf, "w");
1758 lpp_dump_plain(lpp, f);
1760 snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));
1765 /* set solve time limit */
1766 lpp_set_time_limit(lpp, env->opts->time_limit);
1768 /* set logfile if requested */
1769 if (strlen(env->opts->log_file) > 0) {
1770 if (strcasecmp(env->opts->log_file, "stdout") == 0)
1771 lpp_set_log(lpp, stdout);
1772 else if (strcasecmp(env->opts->log_file, "stderr") == 0)
1773 lpp_set_log(lpp, stderr);
1775 logfile = fopen(env->opts->log_file, "w");
1777 fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
1779 lpp_set_log(lpp, logfile);
	/* hand the problem to the (possibly remote) solver */
1784 lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);
1789 /* check for valid solution */
	/* on infeasibility: dump the LP and the graph for post-mortem, then abort */
1790 if (! lpp_is_sol_valid(lpp)) {
1794 snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.txt", get_irn_node_nr(block));
1795 f = fopen(buf, "w");
1796 lpp_dump_plain(lpp, f);
1798 snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.mps", get_irn_node_nr(block));
1800 dump_ir_block_graph(env->irg, "-assert");
1802 assert(0 && "ILP solution is not feasible!");
1805 DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
1806 DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
1807 DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
1808 DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp->iterations));
1809 DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp->sol_time));
1810 DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
1811 DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));
1813 DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));
1816 /* apply solution */
1817 apply_solution(env, lpp, block);
1822 /* notify backend */
1823 be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
1827 * Perform ILP scheduling on the given irg.
1829 void be_ilp_sched(const be_irg_t *birg) {
1830 be_ilpsched_env_t env;
1831 const char *name = "be ilp scheduling";
1832 arch_isa_t *isa = birg->main_env->arch_env->isa;
1833 const ilp_sched_selector_t *sel = isa->impl->get_ilp_sched_selector(isa);
1835 FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");
1837 //firm_dbg_set_mask(env.dbg, 31);
	/* let the backend set up its per-irg scheduling state first */
1839 env.irg_env = be_ilp_sched_init_irg_ilp_schedule(sel, birg->irg);
1841 env.irg = birg->irg;
1842 env.height = heights_new(birg->irg);
1843 env.main_env = birg->main_env;
1844 env.arch_env = birg->main_env->arch_env;
1845 env.cpu = arch_isa_get_machine(birg->main_env->arch_env->isa);
1846 env.opts = &ilp_opts;
1847 phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn);
1849 /* assign a unique per block number to all interesting nodes */
1850 irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
1853 The block indices are completely build after the walk,
1854 now we can allocate the bitsets (size depends on block indices)
1857 phase_reinit_irn_data(&env.ph);
1859 /* Collect all root nodes (having no user in their block) and calculate ASAP. */
1860 irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);
1862 /* Calculate ALAP of all irns */
1863 irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);
1865 /* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
1866 irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);
1868 /* we don't need this information any longer */
	/* heights were only needed for the ASAP/ALAP computation above */
1869 heights_free(env.height);
1871 /* perform ILP scheduling */
	/* builds and solves one ILP per basic block (see create_ilp) */
1872 irg_block_walk_graph(env.irg, clear_unwanted_data, create_ilp, &env);
1875 if (firm_dbg_get_mask(env.dbg)) {
1877 phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
1879 fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);
1883 /* free all allocated object */
1884 phase_free(&env.ph);
1886 /* notify backend */
1887 be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
1892 * Register ILP scheduler options.
1894 void ilpsched_register_options(lc_opt_entry_t *grp) {
	/* presumably guards against registering the option table twice — confirm
	 * against the (not shown) check on this flag */
1895 static int run_once = 0;
1896 lc_opt_entry_t *sched_grp;
	/* options live under the "ilpsched" sub-group of the given group */
1900 sched_grp = lc_opt_get_grp(grp, "ilpsched");
1902 lc_opt_add_table(sched_grp, ilpsched_option_table);
1905 #endif /* WITH_LIBCORE */
1907 #else /* WITH_ILP */
/* ISO C forbids an empty translation unit, so emit one unused static symbol
 * when the ILP scheduler is compiled out. */
1909 static int some_picky_compiler_do_not_allow_empty_files;
1911 #endif /* WITH_ILP */