2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief ILP based instruction scheduling.
23 * @author Christian Wuerdig
27 * An ILP scheduler based on
28 * "ILP-based Instruction Scheduling for IA-64"
29 * by Daniel Kaestner and Sebastian Winkel
30 * extended with register pressure constraints by Christian Wuerdig
45 #include "irphase_t.h"
58 #include <lpp/lpp_net.h>
61 #include "lc_opts_enum.h"
66 #include "beilpsched.h"
70 #include "bemachine.h"
71 #include "belistsched.h"
74 typedef struct ilpsched_options_t {
80 typedef struct unit_type_info_t {
82 const be_execution_unit_type_t *tp;
86 * holding the ILP variables of the different types
88 typedef struct ilp_var_types_t {
89 int *x; /* x_{nt}^k variables */
90 int *a; /* a_{nt}^k variables */
91 int *y; /* y_{nt}^k variables */
95 * Holds alive variables for a node live-in to a block.
97 typedef struct ilp_livein_node_t {
99 unsigned max_alive_steps;
103 /* attributes for a node */
/* Per-node scheduling data kept in the phase. ASAP/ALAP bound the feasible
 * time-step interval of a node; the ILP variable arrays are indexed per
 * (unit type, time step), see ILPVAR_IDX. visit_idx/consumer_idx implement
 * cheap "already visited by node #idx" marks without per-walk bitsets. */
104 typedef struct ilpsched_node_attr_t {
105 	unsigned asap;                     /**< The ASAP scheduling control step */
106 	unsigned alap;                     /**< The ALAP scheduling control step */
107 	unsigned latency;                  /**< Latency of this node (needed for sorting) */
108 	unsigned sched_point;              /**< the step in which the node is finally scheduled */
109 	unsigned visit_idx;                /**< Index of the node having visited this node last */
110 	unsigned consumer_idx;             /**< Index of the node having counted this node as consumer last */
111 	unsigned n_consumer;               /**< Number of consumers */
112 	ir_node  **block_consumer;         /**< List of consumer being in the same block */
113 	waitq    *projkeeps;               /**< A List of Projs and Keeps belonging to this node */
114 	unsigned block_idx     : 30;       /**< A unique per block index */
115 	unsigned alap_changed  : 1;        /**< the current ALAP has changed, revisit preds */
116 	unsigned is_dummy_node : 1;        /**< this node is assigned to DUMMY unit */
117 	bitset_t *transitive_block_nodes;  /**< Set of transitive block nodes (predecessors
118 											for ASAP, successors for ALAP) */
119 	unsigned n_unit_types;             /**< number of allowed execution unit types */
120 	unit_type_info_t *type_info;       /**< list of allowed execution unit types */
121 	ilp_var_types_t  ilp_vars;         /**< the different ILP variables */
122 } ilpsched_node_attr_t;
124 /* attributes for a block */
/* Per-block scheduling data. max_steps is an upper bound on the block's
 * schedule length, accumulated as the sum of fixed latencies of all
 * interesting nodes (see calculate_irn_asap). */
125 typedef struct ilpsched_block_attr_t {
126 	unsigned block_last_idx;        /**< The highest node index in block so far */
127 	unsigned n_interesting_nodes;   /**< The number of nodes interesting for scheduling */
128 	unsigned max_steps;             /**< Upper bound for block execution */
129 	plist_t  *root_nodes;           /**< A list of nodes having no user in current block */
130 	ir_node  *head_ilp_nodes;       /**< A linked list of nodes which will contribute to ILP */
131 	pset     *livein_nodes;         /**< A set of nodes which are live-in to this block */
132 } ilpsched_block_attr_t;
134 typedef union ilpsched_attr_ {
135 ilpsched_node_attr_t node_attr;
136 ilpsched_block_attr_t block_attr;
139 /* A irn for the phase and it's attributes (either node or block) */
142 ilpsched_attr_t attr;
145 /* The ILP scheduling environment */
147 ir_phase ph; /**< The phase */
148 ir_graph *irg; /**< The current irg */
149 heights_t *height; /**< The heights object of the irg */
150 void *irg_env; /**< An environment for the irg scheduling, provided by the backend */
151 void *block_env; /**< An environment for scheduling a block, provided by the backend */
152 const arch_env_t *arch_env;
153 const be_machine_t *cpu; /**< the current abstract machine */
154 ilpsched_options_t *opts; /**< the ilp options for current irg */
155 const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
156 DEBUG_ONLY(firm_dbg_module_t *dbg);
159 /* convenience macros to handle phase irn data */
160 #define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
161 #define is_ilpsched_block(node) (is_Block((node)->irn))
162 #define get_ilpsched_block_attr(block) (&(block)->attr.block_attr)
163 #define get_ilpsched_node_attr(node) (&(node)->attr.node_attr)
165 /* check if node is considered for ILP scheduling */
166 #define consider_for_sched(env, irn) \
167 (! (is_Block(irn) || \
168 is_normal_Proj(env, irn) || \
175 /* gives the valid scheduling time step interval for a node */
/* Both ASAP and ALAP are inclusive bounds, hence the "+ 1". */
176 #define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)
178 /* gives the valid interval where a node can die */
/* A node may die anywhere from its ASAP step up to the block bound max_steps. */
179 #define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)
181 /* gives the corresponding ILP variable for given node, unit and time step */
/* Flattens (unit type, time step) into a 1-D index: the loops that fill these
 * arrays run t from asap-1 upwards, so t = asap - 1 maps to slot 0 of a unit's
 * sub-array. */
182 #define ILPVAR_IDX(na, unit, control_step) \
183 	((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)
185 /* gives the corresponding dead nodes ILP variable for given node, unit and time step */
186 #define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
187 	((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)
189 /* check if a double value is within an epsilon environment of 0 */
/* LP solvers return floating point values; treat |v| <= 1e-10 as "zero". */
190 #define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)
192 /* option variable */
193 static ilpsched_options_t ilp_opts = {
194 1, /* default is with register pressure constraints */
195 300, /* 300 sec per block time limit */
200 static const lc_opt_table_entry_t ilpsched_option_table[] = {
201 LC_OPT_ENT_BOOL("regpress", "Use register pressure constraints", &ilp_opts.regpress),
202 LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
203 LC_OPT_ENT_STR("lpp_log", "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
208 We need this global variable as we compare nodes dependent on heights,
209 but we cannot pass any information to the qsort compare function.
211 static heights_t *glob_heights;
214 * Check if irn is a Proj, which has no execution units assigned.
215 * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
217 static inline int is_normal_Proj(const arch_env_t *env, const ir_node *irn)
219 return is_Proj(irn) && (arch_env_get_allowed_execution_units(env, irn) == NULL);
223 * Skips normal Projs.
224 * @return predecessor if irn is a normal Proj, otherwise irn.
226 static inline ir_node *skip_normal_Proj(const arch_env_t *env, ir_node *irn)
228 if (is_normal_Proj(env, irn))
229 return get_Proj_pred(irn);
/* Queries the backend for the latency of @p irn.
 * NOTE(review): the tail of this function is not visible in this listing;
 * judging from the guard below it presumably raises a zero latency to some
 * minimum for ordinary (non-Proj, non-Keep) nodes before returning — confirm
 * against the complete source. */
233 static inline int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env)
235 	unsigned lat = be_ilp_sched_latency(sel, irn, env);
236 	if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
241 static int cmp_live_in_nodes(const void *a, const void *b)
243 const ilp_livein_node_t *n1 = a;
244 const ilp_livein_node_t *n2 = b;
246 return n1->irn != n2->irn;
250  * Compare scheduling time steps of two be_ilpsched_irn's.
/* qsort comparator: primary key is the scheduled time step (ascending).
 * Nodes on the same step are ordered by in-block reachability (a node must
 * come after anything that can reach it), then by latency, higher first.
 * NOTE(review): the return values for the two reachability branches are in
 * lines missing from this listing — confirm their signs in the full source. */
252 static int cmp_ilpsched_irn(const void *a, const void *b)
254 	be_ilpsched_irn_t    *n1   = *(be_ilpsched_irn_t **)a;
255 	be_ilpsched_irn_t    *n2   = *(be_ilpsched_irn_t **)b;
256 	ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
257 	ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);
259 	if (n1_a->sched_point == n2_a->sched_point) {
260 		const ir_node *irn_a = n1->irn;
261 		const ir_node *irn_b = n2->irn;
263 		if (heights_reachable_in_block(glob_heights, irn_a, irn_b))
265 		if (heights_reachable_in_block(glob_heights, irn_b, irn_a))
269 			Ok, timestep is equal and the nodes are parallel,
270 			so check latency and schedule high latency first.
272 		return QSORT_CMP(n2_a->latency, n1_a->latency);
275 	return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
/* Phase callback: re-initialize already existing per-irn data.
 * For non-block nodes it (re)creates or clears the transitive-block-nodes
 * bitset, whose size is the block's final node count (block indices must
 * already be built), and marks the ALAP as needing recomputation. */
278 static void *reinit_ilpsched_irn(ir_phase *ph, const ir_node *irn, void *old)
280 	be_ilpsched_irn_t *res = old;
282 	/* if we have already some data: check for reinitialization */
283 	if (! is_Block(irn)) {
284 		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
286 		if (! na->transitive_block_nodes) {
287 			ir_node               *block      = get_nodes_block(irn);
288 			be_ilpsched_irn_t     *block_node = phase_get_or_set_irn_data(ph, block);
289 			ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
291 			/* we are called after the block indices have been build: create bitset */
292 			na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);
294 			/* we are called from reinit block data: clear the bitset */
295 			bitset_clear_all(na->transitive_block_nodes);
297 			na->alap_changed = 1;
304  * In case there is no phase information for irn, initialize it.
/* Phase callback: allocate and initialize fresh per-irn data.
 * Blocks get a block attribute (empty root list, empty live-in set, zeroed
 * counters); all other nodes get a zero-initialized node attribute. */
306 static void *init_ilpsched_irn(ir_phase *phase, const ir_node *irn)
308 	be_ilpsched_irn_t *res = phase_alloc(phase, sizeof(res[0]));
311 	/* set ilpsched irn attributes (either block or irn) */
313 		ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);
315 		ba->n_interesting_nodes = 0;
316 		ba->block_last_idx      = 0;
317 		ba->root_nodes          = plist_new();
318 		ba->head_ilp_nodes      = NULL;
319 		ba->livein_nodes        = new_pset(cmp_live_in_nodes, 16);
322 		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
323 		memset(na, 0, sizeof(*na));
330  * Assign a per block unique number to each node.
/* irn walker: numbers every schedulable node consecutively within its block
 * (block_idx), also clearing the node's link field for later list building. */
332 static void build_block_idx(ir_node *irn, void *walk_env)
334 	be_ilpsched_env_t     *env = walk_env;
335 	be_ilpsched_irn_t     *node, *block_node;
336 	ilpsched_node_attr_t  *na;
337 	ilpsched_block_attr_t *ba;
339 	set_irn_link(irn, NULL);
340 	if (! consider_for_sched(env->arch_env, irn))
343 	node       = get_ilpsched_irn(env, irn);
344 	na         = get_ilpsched_node_attr(node);
345 	block_node = get_ilpsched_irn(env, get_nodes_block(irn));
346 	ba         = get_ilpsched_block_attr(block_node);
348 	na->block_idx = ba->block_last_idx++;
351 /********************************************************
354 * __ _ ___ __ _ _ __ / / __ _| | __ _ _ __
355 * / _` / __|/ _` | '_ \ / / / _` | |/ _` | '_ \
356 * | (_| \__ \ (_| | |_) | / / | (_| | | (_| | |_) |
357 * \__,_|___/\__,_| .__/ /_/ \__,_|_|\__,_| .__/
360 ********************************************************/
363  * Add all nodes having no user in current block to last_nodes list.
/* irn walker: classifies each schedulable node as "root" (no user inside its
 * own block → starting point for the ALAP backward walk) or "normal", counts
 * its data consumers, and records the same-block consumers. Normal Projs are
 * looked through so the real users behind them are examined instead. */
365 static void collect_alap_root_nodes(ir_node *irn, void *walk_env)
368 	const ir_edge_t       *edge;
369 	be_ilpsched_irn_t     *block_node, *node;
370 	ilpsched_block_attr_t *ba;
371 	ilpsched_node_attr_t  *na;
373 	be_ilpsched_env_t     *env            = walk_env;
374 	int                   has_block_user  = 0;
375 	unsigned              n_consumer      = 0;
376 	ir_edge_kind_t        ekind[2]        = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
380 	if (! consider_for_sched(env->arch_env, irn))
383 	block    = get_nodes_block(irn);
384 	idx      = get_irn_idx(irn);
385 	consumer = NEW_ARR_F(ir_node *, 0);
387 	DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));
389 	/* check data and dependency out edges */
390 	for (i = 0; i < 2 && ! has_block_user; ++i) {
391 		foreach_out_edge_kind(irn, edge, ekind[i]) {
392 			ir_node *user = get_edge_src_irn(edge);
394 			if (is_normal_Proj(env->arch_env, user)) {
395 				const ir_edge_t *user_edge;
397 				if (get_irn_mode(user) == mode_X)
400 				/* The ABI ensures, that there will be no ProjT nodes in the graph. */
401 				for (j = 0; j < 2; ++j) {
402 					foreach_out_edge_kind(user, user_edge, ekind[j]) {
403 						ir_node *real_user = get_edge_src_irn(user_edge);
405 						if (! is_Phi(real_user) && ! is_Block(real_user)) {
406 							be_ilpsched_irn_t    *node = get_ilpsched_irn(env, real_user);
407 							ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);
409 							/* skip already visited nodes */
410 							if (ua->consumer_idx == idx)
413 							/* check if node has user in this block and collect the user if it's a data user */
414 							if (get_nodes_block(real_user) == block) {
415 								if (i == 0 && j == 0)
416 									ARR_APP1(ir_node *, consumer, real_user);
420 							/* only count data consumer */
424 							/* mark user as visited by this node */
425 							ua->consumer_idx = idx;
430 			else if (is_Block(user)) {
433 			else if (! is_Phi(user)) {
434 				be_ilpsched_irn_t    *node = get_ilpsched_irn(env, user);
435 				ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);
437 				/* skip already visited nodes */
438 				if (ua->consumer_idx == idx)
441 				/* check if node has user in this block and collect the user if it's a data user */
442 				if (get_nodes_block(user) == block) {
444 					ARR_APP1(ir_node *, consumer, user);
448 				/* only count data consumer */
452 				/* mark user visited by this node */
453 				ua->consumer_idx = idx;
455 			else if (get_nodes_block(user) != block) {
461 	block_node = get_ilpsched_irn(env, block);
462 	ba         = get_ilpsched_block_attr(block_node);
464 	ba->n_interesting_nodes++;
466 	/* current irn has no user inside this block, add to queue */
467 	if (! has_block_user) {
468 		DB((env->dbg, LEVEL_3, "root node\n"));
469 		plist_insert_back(ba->root_nodes, irn);
472 		DB((env->dbg, LEVEL_3, "normal node\n"));
475 	/* record number of all consumer and the consumer within the same block */
476 	node               = get_ilpsched_irn(env, irn);
477 	na                 = get_ilpsched_node_attr(node);
478 	na->n_consumer     = n_consumer;
479 	na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
480 	memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
485  * Calculate the ASAP scheduling step for current irn.
/* Topological irn walker: ASAP(n) = max over same-block, non-Phi, non-NoMem
 * predecessors p of (ASAP(p) + latency(p)). Also threads the node onto the
 * block's ILP node list (via the link field) and grows the block's max_steps
 * upper bound by this node's fixed latency. */
487 static void calculate_irn_asap(ir_node *irn, void *walk_env)
489 	be_ilpsched_env_t *env = walk_env;
492 	be_ilpsched_irn_t     *node, *block_node;
493 	ilpsched_node_attr_t  *na;
494 	ilpsched_block_attr_t *ba;
496 	/* These nodes are handled separate */
497 	if (! consider_for_sched(env->arch_env, irn))
500 	DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));
502 	block = get_nodes_block(irn);
503 	node  = get_ilpsched_irn(env, irn);
504 	na    = get_ilpsched_node_attr(node);
507 	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
508 		ir_node *pred = skip_normal_Proj(env->arch_env, get_irn_in_or_dep(irn, i));
510 		/* check for greatest distance to top */
511 		if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
512 			be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
513 			ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
516 			lat = fixed_latency(env->sel, pred, env->block_env);
518 			na->asap = MAX(na->asap, pna->asap + lat);
522 	/* add node to ILP node list and update max_steps */
523 	block_node = get_ilpsched_irn(env, block);
524 	ba         = get_ilpsched_block_attr(block_node);
526 	set_irn_link(irn, ba->head_ilp_nodes);
527 	ba->head_ilp_nodes = irn;
528 	ba->max_steps     += fixed_latency(env->sel, irn, env->block_env);
530 	DB((env->dbg, LEVEL_2, "%u\n", na->asap));
534  * Calculate the ALAP scheduling step of all irns in current block.
535  * Depends on max_steps being calculated.
/* Block walker: computes ALAP by a backward BFS from the block's root nodes.
 * Roots start at max_steps; each predecessor's ALAP is tightened to
 * ALAP(succ) - latency(pred) whenever that gives a longer path to the root.
 * visit_idx plus alap_changed avoid re-processing a pred from the same
 * successor unless the successor's ALAP actually changed. The wavefront is
 * held in waitq's that are swapped between iterations. */
537 static void calculate_block_alap(ir_node *block, void *walk_env)
539 	be_ilpsched_env_t     *env        = walk_env;
540 	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
541 	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
542 	waitq                 *cur_queue  = new_waitq();
545 	assert(is_Block(block));
547 	DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
548 		block, ba->n_interesting_nodes, ba->max_steps));
550 	/* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
551 	//phase_reinit_block_irn_data(&env->ph, block);
553 	/* init start queue */
554 	foreach_plist(ba->root_nodes, el) {
555 		waitq_put(cur_queue, plist_element_get_value(el));
558 	/* repeat until all nodes are processed */
559 	while (! waitq_empty(cur_queue)) {
560 		waitq *next_queue = new_waitq();
562 		/* process all nodes in current step */
563 		while (! waitq_empty(cur_queue)) {
564 			ir_node              *cur_irn = waitq_get(cur_queue);
565 			be_ilpsched_irn_t    *node    = get_ilpsched_irn(env, cur_irn);
566 			ilpsched_node_attr_t *na      = get_ilpsched_node_attr(node);
569 			/* cur_node has no alap set -> it's a root node, set to max alap */
571 				na->alap = ba->max_steps;
572 				DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
576 				DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",
580 			/* set the alap's of all predecessors */
581 			for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
582 				ir_node *pred = skip_normal_Proj(env->arch_env, get_irn_in_or_dep(cur_irn, i));
584 				/* check for greatest distance to bottom */
585 				if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
586 					be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
587 					ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);
590 					/* mark the predecessor as visited by current irn */
591 					if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
593 					pna->visit_idx = get_irn_idx(cur_irn);
595 					lat = fixed_latency(env->sel, pred, env->block_env);
597 					/* set ALAP of current pred */
598 					if (pna->alap == 0) {
599 						/* current ALAP is 0: node has not yet been visited */
600 						pna->alap_changed = 1;
601 						pna->alap         = na->alap - lat;
603 					else if (pna->alap > na->alap - lat) {
604 						/* we found a longer path to root node: change ALAP */
605 						pna->alap         = na->alap - lat;
606 						pna->alap_changed = 1;
609 						/* current ALAP is best found so far: keep it */
610 						pna->alap_changed = 0;
613 					DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));
615 					/* enqueue node for next iteration */
616 					if (get_irn_ins_or_deps(pred) > 0)
617 						waitq_put(next_queue, pred);
622 		/* prepare for next iteration */
623 		del_waitq(cur_queue);
624 		cur_queue = next_queue;
629 * Free list of root nodes and the set of live-in nodes.
631 static void clear_unwanted_data(ir_node *block, void *walk_env)
633 be_ilpsched_env_t *env = walk_env;
634 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
635 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
637 plist_free(ba->root_nodes);
638 ba->root_nodes = NULL;
639 del_pset(ba->livein_nodes);
640 ba->livein_nodes = NULL;
644  * Refine the {ASAP(n), ALAP(n)} interval for the nodes.
645  * Set the ASAP/ALAP times of Projs and Keeps to their ancestor ones.
/* irn walker: Projs and Keeps are scheduled together with their producer, so
 * they simply inherit the producer's ASAP/ALAP and are queued on the
 * producer's projkeeps list (created lazily here). */
647 static void refine_asap_alap_times(ir_node *irn, void *walk_env)
649 	be_ilpsched_env_t *env  = walk_env;
651 	be_ilpsched_irn_t *node, *pred_node;
652 	ilpsched_node_attr_t *na, *pna;
654 	if (! consider_for_sched(env->arch_env, irn))
657 	if (! is_Proj(irn) && ! be_is_Keep(irn))
660 	/* go to the ancestor */
662 		pred = get_irn_n(irn, 0);
663 	pred = skip_Proj(pred);
665 	node      = get_ilpsched_irn(env, irn);
666 	pred_node = get_ilpsched_irn(env, pred);
667 	na        = get_ilpsched_node_attr(node);
668 	pna       = get_ilpsched_node_attr(pred_node);
670 	na->asap = pna->asap;
671 	na->alap = pna->alap;
673 	/* record all Projs and Keeps for this node */
674 	if (! pna->projkeeps)
675 		pna->projkeeps = new_waitq();
676 	waitq_put(pna->projkeeps, irn);
678 	DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
681 /*******************************************
684 * ___ ___| |__ ___ __| |_ _| | ___
685 * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
686 * \__ \ (__| | | | __/ (_| | |_| | | __/
687 * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
689 *******************************************/
/* Collects all Keep users of @p irn into @p keeps so they can be scheduled
 * right after it; a Keep must live in the same block as the kept node. */
691 static inline void check_for_keeps(waitq *keeps, const ir_node *block, const ir_node *irn)
693 	const ir_edge_t *edge;
696 	foreach_out_edge(irn, edge) {
697 		ir_node *user = get_edge_src_irn(edge);
699 		if (be_is_Keep(user)) {
700 			assert(get_nodes_block(user) == block && "Keep must not be in different block.");
701 			waitq_put(keeps, user);
707  * Inserts @p irn before @p before into schedule and notifies backend.
/* First informs the backend selector of the node's cycle, then inserts it
 * into the list schedule. The casts drop const for the sched API only. */
709 static inline void notified_sched_add_before(be_ilpsched_env_t *env,
710 	const ir_node *before, const ir_node *irn, unsigned cycle)
712 	be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
713 	sched_add_before((ir_node*) before, (ir_node*) irn);
717  * Adds a node, it's Projs (in case of mode_T nodes) and
718  * it's Keeps to schedule.
/* mode_M nodes are skipped entirely; for a mode_T node every schedulable Proj
 * with at least one user is emitted in the same cycle, and any Keeps found on
 * the node or its Projs are appended afterwards. */
720 static void add_to_sched(be_ilpsched_env_t *env, const ir_node *block, const ir_node *irn, unsigned cycle)
722 	const ir_edge_t *edge;
723 	waitq           *keeps = new_waitq();
725 	/* mode_M nodes are not scheduled */
726 	if (get_irn_mode(irn) == mode_M)
729 	if (! sched_is_scheduled(irn))
730 		notified_sched_add_before(env, block, irn, cycle);
733 	if (get_irn_mode(irn) == mode_T) {
734 		foreach_out_edge(irn, edge) {
735 			ir_node *user = get_edge_src_irn(edge);
737 			if ((to_appear_in_schedule(user) || get_irn_mode(user) == mode_b) &&
738 				get_irn_n_edges(user) > 0)
740 				notified_sched_add_before(env, block, user, cycle);
743 			check_for_keeps(keeps, block, user);
747 		check_for_keeps(keeps, block, irn);
751 	while (! waitq_empty(keeps)) {
752 		ir_node *keep = waitq_get(keeps);
753 		if (! sched_is_scheduled(keep))
754 			notified_sched_add_before(env, block, keep, cycle);
761  * Schedule all nodes in the given block, according to the ILP solution.
/* Reads the solved LP back: for each ILP node it scans the x_{nt}^k variables
 * until the (single) non-zero one identifies the node's unit type and time
 * step, logs any non-zero y (register pressure) costs, sorts the nodes by
 * time step and emits them into the block schedule. Phis go first at cycle 0
 * and the block's single control flow op is forced to the end. */
763 static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block)
765 	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
766 	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
767 	be_ilpsched_irn_t     **sched_nodes;
770 	const ir_edge_t       *edge;
772 	/* init block schedule list */
773 	sched_init_block(block);
775 	/* collect nodes and their scheduling time step */
776 	sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
777 	if (ba->n_interesting_nodes == 0) {
780 	else if (ba->n_interesting_nodes == 1) {
781 		be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);
783 		/* add the single node */
784 		ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
787 		/* check all nodes for their positive solution */
788 		foreach_linked_irns(ba->head_ilp_nodes, irn) {
789 			be_ilpsched_irn_t    *node;
790 			ilpsched_node_attr_t *na;
794 			node = get_ilpsched_irn(env, irn);
795 			na   = get_ilpsched_node_attr(node);
799 			if (! na->is_dummy_node) {
800 				for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
801 					for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
802 						double cost = lpp_get_var_sol(lpp, na->ilp_vars.y[cur_var]);
804 						if (! LPP_VALUE_IS_0(cost)) {
805 							DBG((env->dbg, LEVEL_3, "%+F has additional regpressure costs of %f\n", irn, cost));
813 			/* go over all variables of a node until the non-zero one is found */
814 			for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
815 				for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
816 					double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);
818 					/* check, if variable is set to one (it's not zero then :) */
819 					if (! LPP_VALUE_IS_0(val)) {
821 						ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
822 						DBG((env->dbg, LEVEL_2, "Schedpoint of %+F is %u at unit type %s\n",
823 							irn, t, na->type_info[tp_idx].tp->name));
830 	glob_heights = env->height;
831 	/* sort nodes ascending by scheduling time step */
832 	qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);
835 	/* make all Phis ready and remember the single cf op */
837 	foreach_out_edge(block, edge) {
838 		irn = get_edge_src_irn(edge);
840 		switch (get_irn_opcode(irn)) {
842 				add_to_sched(env, block, irn, 0);
852 				assert(cfop == NULL && "Highlander - there can be only one");
859 	/* add all nodes from list */
860 	for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
861 		ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
862 		if (sched_nodes[i]->irn != cfop)
863 			add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);
866 	/* schedule control flow node if not already done */
867 	if (cfop && ! sched_is_scheduled(cfop))
868 		add_to_sched(env, block, cfop, 0);
870 	DEL_ARR_F(sched_nodes);
873 /***************************************************************
874 * _____ _ _____ _____ _ _
875 * |_ _| | | __ \ / ____| | | (_)
876 * | | | | | |__) | | (___ ___ ___| |_ _ ___ _ __
877 * | | | | | ___/ \___ \ / _ \/ __| __| |/ _ \| '_ \
878 * _| |_| |____| | ____) | __/ (__| |_| | (_) | | | |
879 * |_____|______|_| |_____/ \___|\___|\__|_|\___/|_| |_|
881 ***************************************************************/
884  * Check if node can be executed on given unit type.
/* Linear scan over the node's allowed unit types; the return values live in
 * lines missing from this listing (presumably the matching index on hit and
 * a negative value otherwise — confirm against the full source). */
886 static inline int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node)
889 	ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
891 	for (i = na->n_unit_types - 1; i >= 0; --i) {
892 		if (na->type_info[i].tp == tp)
899 /************************************************
902 * __ ____ _ _ __ _ __ _| |__ | | ___ ___
903 * \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
904 * \ V / (_| | | | | (_| | |_) | | __/\__ \
905 * \_/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
907 ************************************************/
/* Queries the backend for the execution unit types allowed for @p irn and,
 * on first call per node, fills the node's type_info array (unit type plus
 * available unit count per type). Flags the node as dummy if any allowed
 * unit is the machine's DUMMY unit. Returns the number of unit types
 * (the visible tail of the function is cut off in this listing). */
909 static int be_ilpsched_set_type_info(be_ilpsched_env_t *env, ir_node *irn, struct obstack *obst)
911 	const be_execution_unit_t ***execunits = arch_env_get_allowed_execution_units(env->arch_env, irn);
912 	unsigned                  n_unit_types = 0;
913 	be_ilpsched_irn_t         *node;
914 	ilpsched_node_attr_t      *na;
915 	unsigned                  unit_idx, tp_idx;
917 	/* count number of available unit types for this node */
918 	for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)
921 	node = get_ilpsched_irn(env, irn);
922 	na   = get_ilpsched_node_attr(node);
924 	if (! na->type_info) {
925 		na->n_unit_types = n_unit_types;
926 		na->type_info    = NEW_ARR_D(unit_type_info_t, obst, n_unit_types);
928 		/* fill the type info array */
929 		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
930 			for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
931 				/* beware: we also count number of available units here */
932 				if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
933 					na->is_dummy_node = 1;
936 			na->type_info[tp_idx].tp      = execunits[tp_idx][0]->tp;
937 			na->type_info[tp_idx].n_units = unit_idx;
945  * Returns the largest alap time of a user of @p irn.
946  * The user must be in block @p block.
/* Scans all out-edge users in @p block and takes the maximum ALAP; asserts
 * that at least one such user exists (the final return is in a line missing
 * from this listing). */
948 static unsigned be_ilpsched_get_max_alap_user(be_ilpsched_env_t *env, const ir_node *irn, const ir_node *block)
950 	const ir_edge_t *edge;
951 	unsigned        max_alap = 0;
953 	foreach_out_edge(irn, edge) {
954 		ir_node *user = get_edge_src_irn(edge);
956 		if (get_nodes_block(user) == block) {
957 			be_ilpsched_irn_t    *node = get_ilpsched_irn(env, user);
958 			ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);
960 			max_alap = MAX(max_alap, na->alap);
964 	assert(max_alap > 0);
969  * Create the following variables:
970  * - x_{nt}^k    binary     weigthed with: t
971  *      node n is scheduled at time step t to unit type k
972  * ==>> These variables represent the schedule
974  * - a_{nt}^k    binary     weighted with num_nodes
975  *      node n is alive at time step t on unit type k
977  * - y_{nt}^k    continuous weighted with: num_nodes^2
978  *      register pressure over limit for unit type k
979  * ==>> These variables represent the register pressure
/* For every ILP node of the block, allocates the x/y/a index arrays on the
 * variable obstack (initialized to -1 = "no variable") and registers the LP
 * variables: x over [asap-1, alap-1] per unit type, y likewise (real nodes
 * only), a over [asap-1, max_steps] (real nodes only). Out-of-block
 * predecessors of real nodes are collected as live-ins, for which alive
 * variables al_{nt}^k are created in a second pass, bounded by the largest
 * user ALAP. Dummy-unit nodes get only x variables. */
982 static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst)
986 	unsigned              num_block_var, num_nodes;
987 	ilp_livein_node_t     *livein;
988 	ilpsched_block_attr_t *ba       = get_ilpsched_block_attr(block_node);
989 	unsigned              weigth_y  = ba->n_interesting_nodes * ba->n_interesting_nodes;
993 	num_block_var = num_nodes = 0;
994 	foreach_linked_irns(ba->head_ilp_nodes, irn) {
995 		be_ilpsched_irn_t    *node;
996 		ilpsched_node_attr_t *na;
997 		unsigned             n_unit_types, tp_idx, n_var, cur_unit;
998 		unsigned             cur_var_ad, cur_var_x, cur_var_y, num_ad;
1001 		node         = get_ilpsched_irn(env, irn);
1002 		na           = get_ilpsched_node_attr(node);
1003 		n_unit_types = be_ilpsched_set_type_info(env, irn, var_obst);
1005 		/* allocate space for ilp variables */
1006 		na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
1007 		memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));
1009 		/* we need these variables only for "real" nodes */
1010 		if (! na->is_dummy_node) {
1011 			na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
1012 			memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));
1014 			num_ad         = ba->max_steps - na->asap + 1;
1015 			na->ilp_vars.a = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
1016 			memset(na->ilp_vars.a, -1, ARR_LEN(na->ilp_vars.a) * sizeof(na->ilp_vars.a[0]));
1019 		DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
1020 			irn, na->asap, na->alap, na->n_unit_types));
1022 		cur_var_x = cur_var_ad = cur_var_y = cur_unit = n_var = 0;
1023 		/* create variables */
1024 		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
1027 			for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1028 				/* x_{nt}^k variables */
1029 				snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
1030 					get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1031 				na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1032 				DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1033 				/* variable counter */
1037 				if (! na->is_dummy_node) {
1038 					/* y_{nt}^k variables */
1039 					snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
1040 						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1041 					na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_continous, (double)(weigth_y));
1042 					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1044 					/* variable counter */
1050 			/* a node can die at any step t: asap(n) <= t <= U */
1051 			if (! na->is_dummy_node) {
1052 				for (t = na->asap - 1; t <= ba->max_steps; ++t) {
1054 					/* a_{nt}^k variables */
1055 					snprintf(buf, sizeof(buf), "a_n%u_%s_%u",
1056 						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1057 					na->ilp_vars.a[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(ba->n_interesting_nodes));
1058 					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1060 					/* variable counter */
1066 		/* collect live-in nodes */
1067 		for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1068 			ir_node *pred = get_irn_n(irn, i);
1070 			if (get_nodes_block(pred) != block_node->irn && consider_for_sched(env->arch_env, pred)) {
1071 				be_ilpsched_set_type_info(env, pred, var_obst);
1072 				if (! na->is_dummy_node) {
1073 					ilp_livein_node_t *entry = OALLOC(var_obst, ilp_livein_node_t);
1076 					pset_insert(ba->livein_nodes, entry, (unsigned)get_irn_idx(pred));
1082 	DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));
1086 	/* create alive variables a_{nt}^k for live-ins */
1087 	foreach_pset(ba->livein_nodes, livein) {
1088 		be_ilpsched_irn_t    *node;
1089 		ilpsched_node_attr_t *na;
1090 		unsigned             tp_idx, var_idx;
1094 		node = get_ilpsched_irn(env, irn);
1095 		na   = get_ilpsched_node_attr(node);
1097 		livein->max_alive_steps = be_ilpsched_get_max_alap_user(env, irn, block_node->irn);
1099 		livein->a = NEW_ARR_D(int, var_obst, na->n_unit_types * livein->max_alive_steps);
1102 		/* create variables */
1103 		for (tp_idx = 0; tp_idx < na->n_unit_types; ++tp_idx) {
1106 			for (t = 0; t < livein->max_alive_steps; ++t) {
1107 				/* a_{nt}^k variables */
1108 				snprintf(buf, sizeof(buf), "al_n%u_%s_%u",
1109 					get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1110 				livein->a[var_idx++] = lpp_add_var(lpp, buf, lpp_binary, (double)(ba->n_interesting_nodes));
1111 				DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1117 	stat_ev_tim_pop("beilpsched_var");
1120 /*******************************************************
1123 * ___ ___ _ __ ___| |_ _ __ __ _ _ _ __ | |_ ___
1124 * / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
1125 * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
1126 * \___\___/|_| |_|___/\__|_| \__,_|_|_| |_|\__|___/
1128 *******************************************************/
1131  * Collect all operands and nodes @p irn depends on.
1132  * If there is a Proj within the dependencies, all other Projs of the parent node are added as well.
/* Walks the data inputs and dependency edges of @p irn; for a Proj input it
 * inserts every out-edge user of the Proj's predecessor (i.e. all sibling
 * Projs) into @p deps, otherwise the input itself. */
1134 static void sta_collect_in_deps(ir_node *irn, ir_nodeset_t *deps)
1138 	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
1139 		ir_node *p = get_irn_in_or_dep(irn, i);
1142 			const ir_edge_t *edge;
1144 			p = get_Proj_pred(p);
1145 			foreach_out_edge(p, edge) {
1146 				ir_node *src = get_edge_src_irn(edge);
1147 				ir_nodeset_insert(deps, src);
1151 			ir_nodeset_insert(deps, p);
1157 * Create following ILP constraints:
1158 * - the assignment constraints:
1159 * assure each node is executed once by exactly one (allowed) execution unit
1160 * - the dead node assignment constraints:
1161 * assure a node can only die at most once
1162 * - the precedence constraints:
1163 * assure that no data dependencies are violated
1165 static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1167 unsigned num_cst_assign, num_cst_prec, num_cst_dead;
1170 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1171 bitset_t *bs_block_irns = bitset_alloca(ba->block_last_idx);
1173 num_cst_assign = num_cst_prec = num_cst_dead = 0;
	/* walk all ILP nodes of this block and emit per-node constraints */
1174 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1177 be_ilpsched_irn_t *node;
1178 ilpsched_node_attr_t *na;
1181 ir_nodeset_iterator_t iter;
1183 ir_nodeset_init(&deps);
1185 node = get_ilpsched_irn(env, irn);
1186 na = get_ilpsched_node_attr(node);
1189 /* the assignment constraint: sum over all x_{nt}^k of this node == 1 */
1191 snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
1192 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
1193 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1196 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
1197 stat_ev_tim_pop("beilpsched_cst_assign");
1199 /* We have separate constraints for Projs and Keeps */
1200 // ILP becomes infeasible ?!?
1201 // if (is_Proj(irn) || be_is_Keep(irn))
1204 /* the precedence constraints */
1206 bitset_clear_all(bs_block_irns);
1208 sta_collect_in_deps(irn, &deps);
1209 foreach_ir_nodeset(&deps, pred, iter) {
1210 unsigned t_low, t_high, t;
1211 be_ilpsched_irn_t *pred_node;
1212 ilpsched_node_attr_t *pna;
	/* only real, same-block, non-Phi predecessors constrain the schedule */
1215 pred = skip_normal_Proj(env->arch_env, pred);
1216 if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
1219 pred_node = get_ilpsched_irn(env, pred);
1220 pna = get_ilpsched_node_attr(pred_node);
1222 assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
1224 if (! bitset_is_set(bs_block_irns, pna->block_idx))
1225 bitset_set(bs_block_irns, pna->block_idx);
1229 /* irn = n, pred = m */
1230 delay = fixed_latency(env->sel, pred, env->block_env);
	/* ASAP/ALAP are 1-based (see assert above) while ILP timesteps t are
	   0-based, hence the "- 1" offsets below; unsigned underflow is ruled
	   out by asap > 0. Overlap window of n and m shifted by the latency: */
1231 t_low = MAX(na->asap, pna->asap + delay - 1);
1232 t_high = MIN(na->alap, pna->alap + delay - 1);
1233 for (t = t_low - 1; t <= t_high - 1; ++t) {
1235 int *tmp_var_idx = NEW_ARR_F(int, 0);
1237 snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
1238 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1239 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1242 /* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
1243 if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
1244 /* node variables have smaller index than pred variables */
1245 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1246 for (tn = na->asap - 1; tn <= t; ++tn) {
1247 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1248 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1252 for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1253 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1254 unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1255 ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1260 /* pred variables have smaller index than node variables */
1261 for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1262 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1263 unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1264 ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1268 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1269 for (tn = na->asap - 1; tn <= t; ++tn) {
1270 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1271 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1276 if (ARR_LEN(tmp_var_idx) > 0)
1277 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1279 DEL_ARR_F(tmp_var_idx);
1282 ir_nodeset_destroy(&deps);
1283 stat_ev_tim_pop("beilpsched_cst_prec");
1288 * Create ILP resource constraints:
1289 * - assure that for each time step not more instructions are scheduled
1290 * to the same unit types as units of this type are available
1292 static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1296 unsigned num_cst_resrc = 0;
1297 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* one constraint per (unit type, time step): sum of x_{nt}^k <= n_units(k) */
1300 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1302 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1304 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1306 /* check each time step */
1307 for (t = 0; t < ba->max_steps; ++t) {
1310 int *tmp_var_idx = NEW_ARR_F(int, 0);
1312 snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
1313 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
1314 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
	/* collect the x variables of all nodes that may run on this unit type at t */
1317 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1318 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1319 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1322 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
	/* only if node is schedulable at t (within its [asap-1, alap-1] window) */
1324 if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
1325 int cur_var = ILPVAR_IDX(na, tp_idx, t);
1326 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);
1330 /* set constraints if we have some */
1331 if (ARR_LEN(tmp_var_idx) > 0)
1332 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1334 DEL_ARR_F(tmp_var_idx);
1337 stat_ev_tim_pop("beilpsched_cst_rsrc");
1341 * Create ILP bundle constraints:
1342 * - assure, at most bundle_size * bundles_per_cycle instructions
1343 * can be started at a certain point.
1345 static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1349 unsigned num_cst_bundle = 0;
1350 unsigned n_instr_max = env->cpu->bundle_size * env->cpu->bundels_per_cycle; /* sic: field spelling from the machine description (bemachine.h) */
1351 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* one constraint per time step: sum of started instructions <= n_instr_max */
1354 for (t = 0; t < ba->max_steps; ++t) {
1357 int *tmp_var_idx = NEW_ARR_F(int, 0);
1359 snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
1360 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
1361 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1364 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1365 be_ilpsched_irn_t *node;
1366 ilpsched_node_attr_t *na;
1369 /* Projs and Keeps do not contribute to bundle size */
1370 if (is_Proj(irn) || be_is_Keep(irn))
1373 node = get_ilpsched_irn(env, irn);
1374 na = get_ilpsched_node_attr(node);
1376 /* nodes assigned to DUMMY unit do not contribute to bundle size */
1377 if (na->is_dummy_node)
	/* add x_{nt}^k for every unit type if t lies in the node's schedule window */
1380 if (t >= na->asap - 1 && t <= na->alap - 1) {
1381 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1382 int idx = ILPVAR_IDX(na, tp_idx, t);
1383 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1388 if (ARR_LEN(tmp_var_idx) > 0)
1389 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1391 DEL_ARR_F(tmp_var_idx);
1393 stat_ev_tim_pop("beilpsched_cst_bundle");
1397 * Create ILP alive nodes constraints:
1398 * - set variable a_{nt}^k to 1 if node n is alive at step t on unit k
1400 static void create_alive_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1404 unsigned num_cst = 0;
1405 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* constraint shape: n_consumer * sum(x scheduled so far)
	   - sum(consumer x scheduled so far) - n_consumer * a_{nt}^k <= 0,
	   which forces a_{nt}^k = 1 while any consumer is still unscheduled */
1409 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1410 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1411 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1414 /* we ignore nodes assigned to dummy unit here */
1415 if (na->is_dummy_node)
1418 /* check all time steps: asap(n) <= t <= U */
1419 for (t = na->asap - 1; t < ba->max_steps; ++t) {
1422 /* for all unit types available for this node */
1423 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1424 unsigned tn, tn_max, idx;
1426 int *tmp_var_idx_n = NEW_ARR_F(int, 0);
1427 int *tmp_var_idx_m = NEW_ARR_F(int, 0);
1429 snprintf(buf, sizeof(buf), "alive_node_cst_%u_n%u_%s",
1430 t, get_irn_idx(irn), na->type_info[node_tp_idx].tp->name);
1431 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 0.0);
1432 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1435 tn_max = MIN(na->alap - 1, t);
1436 /* check if the node has been scheduled so far */
1437 for (tn = na->asap - 1; tn <= tn_max; ++tn) {
1438 int idx = ILPVAR_IDX(na, node_tp_idx, tn);
1439 ARR_APP1(int, tmp_var_idx_n, na->ilp_vars.x[idx]);
1442 if (ARR_LEN(tmp_var_idx_n) > 0)
1443 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_n, ARR_LEN(tmp_var_idx_n), (double)(na->n_consumer));
1444 DEL_ARR_F(tmp_var_idx_n);
1446 /* subtract the number of consumer scheduled so far */
1447 for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1448 be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1449 ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
1451 unsigned tm, tm_max;
1453 tm_max = MIN(ca->alap - 1, t);
1454 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1455 for (tm = ca->asap - 1; tm <= tm_max; ++tm) {
1456 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1457 ARR_APP1(int, tmp_var_idx_m, ca->ilp_vars.x[idx]);
1462 if (ARR_LEN(tmp_var_idx_m) > 0)
1463 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_m, ARR_LEN(tmp_var_idx_m), -1.0);
1464 DEL_ARR_F(tmp_var_idx_m);
	/* finally couple the alive variable itself with weight -n_consumer */
1467 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1468 lpp_set_factor_fast(lpp, cst, na->ilp_vars.a[idx], 0.0 - (double)(na->n_consumer));
1473 stat_ev_tim_pop("beilpsched_cst_alive_nodes");
1477 * Create ILP alive nodes constraints for live-in nodes:
1478 * - set variable a_{nt}^k to 1 if node n is alive at step t on unit k
1480 static void create_alive_livein_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1483 ilp_livein_node_t *livein;
1484 unsigned num_cst = 0;
1485 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* live-in nodes were scheduled in a predecessor block, so only their
	   in-block users determine how long they stay alive here */
1489 foreach_pset(ba->livein_nodes, livein) {
1490 ir_node *irn = livein->irn;
1491 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1492 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1495 /* check all time steps: 0 <= t < max_alive_steps */
1496 for (t = 0; t < livein->max_alive_steps; ++t) {
1499 /* for all unit types available for this node */
1500 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1501 const ir_edge_t *edge;
1503 int cst, num_block_user;
1504 int *tmp_var_idx_m = NEW_ARR_F(int, 0);
1506 /* check the number of consumer scheduled so far */
1508 foreach_out_edge(irn, edge) {
1509 ir_node *user = get_edge_src_irn(edge);
1510 be_ilpsched_irn_t *cons;
1511 ilpsched_node_attr_t *ca;
1513 unsigned tm, tm_max;
1515 /* check only users within current block */
1516 if (get_nodes_block(user) != block_node->irn)
1520 cons = get_ilpsched_irn(env, user);
1521 ca = get_ilpsched_node_attr(cons);
1523 tm_max = MIN(ca->alap - 1, t);
1524 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1525 for (tm = ca->asap - 1; tm <= tm_max; ++tm) {
1526 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1527 ARR_APP1(int, tmp_var_idx_m, ca->ilp_vars.x[idx]);
	/* constraint: sum(scheduled users) + num_block_user * a_{nt}^k >= num_block_user,
	   i.e. the alive variable must be set until all block users are scheduled */
1532 snprintf(buf, sizeof(buf), "alive_livein_node_cst_%u_n%u_%s",
1533 t, get_irn_idx(irn), na->type_info[node_tp_idx].tp->name);
1534 cst = lpp_add_cst_uniq(lpp, buf, lpp_greater, (double)num_block_user);
1535 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1538 /* sum(scheduled users) */
1539 if (ARR_LEN(tmp_var_idx_m) > 0)
1540 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_m, ARR_LEN(tmp_var_idx_m), 1.0);
1541 DEL_ARR_F(tmp_var_idx_m);
1543 /* + c * a_{nt}^k */
1544 idx = node_tp_idx * livein->max_alive_steps + t;
1545 lpp_set_factor_fast(lpp, cst, livein->a[idx], (double)(num_block_user));
1549 stat_ev_tim_pop("beilpsched_cst_alive_livein_nodes");
1553 * Create ILP pressure constraints, based on alive nodes:
1554 * - add additional costs to objective function if a node is scheduled
1555 * on a unit although all units of this type are currently occupied
1557 static void create_pressure_alive_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1561 unsigned num_cst = 0;
1562 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1565 /* y_{nt}^k is set for each node and timestep and unit type */
1566 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1567 unsigned cur_idx = get_irn_idx(cur_irn);
1568 be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
1569 ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);
1572 /* we ignore nodes assigned to DUMMY unit here */
1573 if (cur_na->is_dummy_node)
1577 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1578 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1582 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1584 /* check if node can be executed on this unit type */
1585 cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1589 /* check all time_steps at which the current node can be scheduled */
1590 for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1593 int *tmp_var_idx = NEW_ARR_F(int, 0);
1594 ilp_livein_node_t *livein;
	/* constraint: sum(alive vars at t on k) - y_{nt}^k <= n_units(k) - 1,
	   so y becomes 1 (and is penalized in the objective) when scheduling
	   here would exceed the register/unit capacity */
1596 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1597 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1598 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1601 /* - accumulate all nodes alive at point t on unit type k */
1602 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1603 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1604 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1607 /* check if node can be alive here */
1608 if (t < na->asap - 1)
1611 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1613 /* current type is not suitable */
1617 a_idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, t);
1618 ARR_APP1(int, tmp_var_idx, na->ilp_vars.a[a_idx]);
1620 /* do the same for livein nodes */
1621 foreach_pset(ba->livein_nodes, livein) {
1622 ir_node *irn = livein->irn;
1623 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1626 /* check if node can be alive here */
1627 if (t >= livein->max_alive_steps)
1630 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1632 /* current type is not suitable */
1636 a_idx = tp_idx * livein->max_alive_steps + t;
1637 ARR_APP1(int, tmp_var_idx, livein->a[a_idx]);
1640 if (ARR_LEN(tmp_var_idx) > 0)
1641 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1642 DEL_ARR_F(tmp_var_idx);
1644 /* - num_nodes * y_{nt}^k */
1645 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1646 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], -1.0);
1650 stat_ev_tim_pop("beilpsched_cst_pressure");
1654 * Create ILP branch constraints:
1655 * Assure all nodes are scheduled prior to the cfg op.
1657 static void create_branch_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1660 ir_node *cur_irn, *cfop;
1661 unsigned num_cst = 0;
1662 unsigned num_non_branches = 0;
1663 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1667 /* determine number of non-branch nodes and the one and only branch node */
1668 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1669 switch (get_irn_opcode(cur_irn)) {
1679 if (is_cfop(cur_irn)) {
1680 assert(cfop == NULL && "Highlander - there can be only one to be constrained");
1691 be_ilpsched_irn_t *cf_node = get_ilpsched_irn(env, cfop);
1692 ilpsched_node_attr_t *cf_na = get_ilpsched_node_attr(cf_node);
1695 /* for each time step in the cfop's schedule window */
1696 for (t = cf_na->asap - 1; t <= cf_na->alap - 1; ++t) {
1697 int *non_branch_vars, *branch_vars;
1700 snprintf(buf, sizeof(buf), "branch_cst_%u_n%u", t, get_irn_idx(cfop));
1701 cst = lpp_add_cst_uniq(lpp, buf, lpp_greater, 0.0);
1702 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1705 /* sum(overall non branches: n)x_{nt}^k - sum(overall branches: b)(num_non_branches * x_{bt}^k >= 0) */
1706 non_branch_vars = NEW_ARR_F(int, 0);
1707 branch_vars = NEW_ARR_F(int, 0);
1708 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1709 be_ilpsched_irn_t *node = get_ilpsched_irn(env, cur_irn);
1710 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1713 if (cur_irn == cfop) {
1714 /* for all unit types available for this node */
1715 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1716 unsigned idx = ILPVAR_IDX(na, tp_idx, t);
1717 ARR_APP1(int, branch_vars, na->ilp_vars.x[idx]);
1721 /* sum up all possible schedule points for this node upto current timestep */
1722 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1724 unsigned tmax = MIN(t, na->alap - 1);
1726 for (tn = na->asap - 1; tn <= tmax; ++tn) {
1727 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1728 ARR_APP1(int, non_branch_vars, na->ilp_vars.x[idx]);
	/* if the branch is placed at t, all num_non_branches other nodes must
	   already be scheduled at or before t for the constraint to hold */
1735 if (ARR_LEN(non_branch_vars) > 0)
1736 lpp_set_factor_fast_bulk(lpp, cst, non_branch_vars, ARR_LEN(non_branch_vars), 1.0);
1737 if (ARR_LEN(branch_vars) > 0)
1738 lpp_set_factor_fast_bulk(lpp, cst, branch_vars, ARR_LEN(branch_vars), 0.0 - (double)num_non_branches);
1740 DEL_ARR_F(branch_vars);
1741 DEL_ARR_F(non_branch_vars);
1744 stat_ev_tim_pop("beilpsched_cst_branch");
/*
 * Create ILP Proj/Keep constraints:
 * force every mode_T node to be scheduled in the same time step as its
 * attached Projs and Keeps.
 * NOTE(review): the only call site visible in this file (in create_ilp)
 * is commented out, so this function appears to be currently unused.
 */
1748 static void create_proj_keep_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node)
1752 unsigned num_cst = 0;
1753 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1756 /* check all nodes */
1757 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1758 be_ilpsched_irn_t *node;
1759 ilpsched_node_attr_t *na;
1763 /* only mode_T nodes can have Projs and Keeps assigned */
1764 if (get_irn_mode(irn) != mode_T)
1767 node = get_ilpsched_irn(env, irn);
1768 na = get_ilpsched_node_attr(node);
1770 /* check if has some Projs and Keeps assigned */
1771 if (! na->projkeeps)
1774 /* we can run only once over the queue, so preserve the nodes */
1775 pk = NEW_ARR_F(ir_node *, 0);
1776 while (! waitq_empty(na->projkeeps))
1777 ARR_APP1(ir_node *, pk, waitq_get(na->projkeeps));
1778 del_waitq(na->projkeeps);
1779 na->projkeeps = NULL;
1781 /* for all time steps at which this node can be scheduled */
1782 for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1784 int *tmp_var_idx_n = NEW_ARR_F(int, 0);
1786 /* add the constraint, assure, that a node is always scheduled along with it's Projs and Keeps */
1787 snprintf(buf, sizeof(buf), "projkeep_cst_n%u_%u", get_irn_idx(irn), t);
1788 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 0.0);
1789 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1792 /* sum up scheduling variables for this time step, weighted by |pk| */
1793 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1794 int idx = ILPVAR_IDX(na, tp_idx, t);
1795 ARR_APP1(int, tmp_var_idx_n, na->ilp_vars.x[idx]);
1798 if (ARR_LEN(tmp_var_idx_n) > 0)
1799 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_n, ARR_LEN(tmp_var_idx_n), (double)(ARR_LEN(pk)));
1800 DEL_ARR_F(tmp_var_idx_n);
1802 /* subtract all Proj and Keep variables for this step */
1803 for (i = ARR_LEN(pk) - 1; i >= 0; --i) {
1804 be_ilpsched_irn_t *pk_node = get_ilpsched_irn(env, pk[i]);
1805 ilpsched_node_attr_t *pk_na = get_ilpsched_node_attr(pk_node);
1808 for (pk_tp_idx = pk_na->n_unit_types - 1; pk_tp_idx >= 0; --pk_tp_idx) {
1809 int idx = ILPVAR_IDX(pk_na, pk_tp_idx, t);
1810 lpp_set_factor_fast(lpp, cst, pk_na->ilp_vars.x[idx], -1.0);
1815 stat_ev_tim_pop("beilpsched_cst_projkeep");
1819 /***************************************************
1821 * |_ _| | | __ \ (_)
1822 * | | | | | |__) | _ __ ___ __ _ _ _ __
1823 * | | | | | ___/ | '_ ` _ \ / _` | | '_ \
1824 * _| |_| |____| | | | | | | | (_| | | | | |
1825 * |_____|______|_| |_| |_| |_|\__,_|_|_| |_|
1827 ***************************************************/
1830 * Create the ilp (add variables, build constraints, solve, build schedule from solution).
1832 static void create_ilp(ir_node *block, void *walk_env)
1834 be_ilpsched_env_t *env = walk_env;
1835 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
1836 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1837 FILE *logfile = NULL;
1840 struct obstack var_obst;
1843 DBG((env->dbg, 255, "\n\n\n=========================================\n"));
1844 DBG((env->dbg, 255, " ILP Scheduling for %+F\n", block));
1845 DBG((env->dbg, 255, "=========================================\n\n"));
1847 DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
1848 block, ba->n_interesting_nodes, ba->max_steps));
1850 /* notify backend and get block environment */
1851 env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);
1853 /* if we have less than two interesting nodes, there is no need to create the ILP */
1854 if (ba->n_interesting_nodes > 1) {
	/* heuristic sizing factors for the LPP matrix, relative to n^2 */
1855 double fact_var = ba->n_interesting_nodes > 25 ? 2.3 : 3;
1856 double fact_cst = ba->n_interesting_nodes > 25 ? 3 : 4.5;
1857 int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
1858 int estimated_n_var = (int)((double)base_num * fact_var);
1859 int estimated_n_cst = (int)((double)base_num * fact_cst);
1860 be_options_t *options = be_get_irg_options(env->irg);
1862 DBG((env->dbg, LEVEL_1, "Creating LPP with estimated numbers: %d vars, %d cst\n",
1863 estimated_n_var, estimated_n_cst));
1864 (void) estimated_n_var;
1866 /* set up the LPP object */
1867 snprintf(name, sizeof(name), "ilp scheduling IRG %s", get_entity_ld_name(get_irg_entity(env->irg)));
1869 lpp = new_lpp_userdef(
1872 estimated_n_cst, /* num vars -- NOTE(review): estimated_n_cst is passed here while estimated_n_var is computed above and only silenced via (void); confirm whether estimated_n_var was intended */
1873 estimated_n_cst + 1, /* num cst */
1874 1.3); /* grow factor */
1875 obstack_init(&var_obst);
1877 /* create ILP variables */
1878 create_variables(env, lpp, block_node, &var_obst);
1880 /* create ILP constraints */
1881 DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
1882 create_assignment_and_precedence_constraints(env, lpp, block_node);
1883 create_ressource_constraints(env, lpp, block_node);
1884 create_bundle_constraints(env, lpp, block_node);
1885 create_branch_constraint(env, lpp, block_node);
1886 //create_proj_keep_constraints(env, lpp, block_node);
1888 if (env->opts->regpress) {
1889 create_alive_nodes_constraint(env, lpp, block_node);
1890 create_alive_livein_nodes_constraint(env, lpp, block_node);
1891 create_pressure_alive_constraint(env, lpp, block_node);
1894 DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp_get_var_count(lpp), lpp_get_cst_count(lpp)));
1896 /* debug stuff, dump lpp when debugging is on */
1898 if (firm_dbg_get_mask(env->dbg) > 1) {
1902 snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
1903 f = fopen(buf, "w"); /* NOTE(review): fopen result is not checked before lpp_dump_plain */
1904 lpp_dump_plain(lpp, f);
1906 snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));
1911 /* set solve time limit */
1912 lpp_set_time_limit(lpp, env->opts->time_limit);
1914 /* set logfile if requested */
1915 if (strlen(env->opts->log_file) > 0) {
1916 if (strcasecmp(env->opts->log_file, "stdout") == 0)
1917 lpp_set_log(lpp, stdout);
1918 else if (strcasecmp(env->opts->log_file, "stderr") == 0)
1919 lpp_set_log(lpp, stderr);
1921 logfile = fopen(env->opts->log_file, "w");
1923 fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
1925 lpp_set_log(lpp, logfile);
1930 lpp_solve_net(lpp, options->ilp_server, options->ilp_solver);
1935 /* check for valid solution */
1936 if (! lpp_is_sol_valid(lpp)) {
	/* no solution: dump the infeasible problem for inspection ... */
1941 if (firm_dbg_get_mask(env->dbg) >= 2) {
1942 snprintf(buf, sizeof(buf), "lpp_block_%lu.infeasible.txt", get_irn_node_nr(block));
1943 f = fopen(buf, "w");
1944 lpp_dump_plain(lpp, f);
1946 snprintf(buf, sizeof(buf), "lpp_block_%lu.infeasible.mps", get_irn_node_nr(block));
1948 dump_ir_graph(env->irg, "infeasible");
1952 ir_fprintf(stderr, "ILP found no solution within time (%+F, %+F), falling back to heuristics.\n", block, env->irg);
1956 DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
1957 //DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
1958 //DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
1959 //DBG((env->dbg, LEVEL_1, "\tmatrix: %u elements, density %.2f%%, size %.2fMB\n", lpp->n_elems, lpp->density, (double)lpp->matrix_mem / 1024.0 / 1024.0));
1960 DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp_get_iter_cnt(lpp)));
1961 DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp_get_sol_time(lpp)));
1962 //DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
1963 //DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));
1965 //DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));
1968 /* apply solution */
1969 be_stat_ev("nodes", ba->block_last_idx);
1970 be_stat_ev("vars", lpp ? lpp_get_var_count(lpp) : 0);
1971 be_stat_ev("csts", lpp ? lpp_get_cst_count(lpp) : 0);
	/* ... and fall back to the heuristic list scheduler for this block */
1973 be_stat_ev("time", -1);
1974 be_stat_ev_dbl("opt", 0.0);
1975 list_sched_single_block(env->irg, block);
1979 double opt = lpp_get_sol_state(lpp) == lpp_optimal ? 100.0 : 100.0 * lpp->best_bound / lpp->objval; /* NOTE(review): divides by lpp->objval -- confirm objval cannot be 0 here */
1980 be_stat_ev_dbl("time", lpp->sol_time);
1981 be_stat_ev_dbl("opt", opt);
1983 be_stat_ev_dbl("time", 0.0);
1984 be_stat_ev_dbl("opt", 100.0);
1987 apply_solution(env, lpp, block);
1993 /* notify backend */
1994 be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
1998 * Perform ILP scheduling on the given irg.
2000 void be_ilp_sched(ir_graph *irg)
2002 be_ilpsched_env_t env;
2003 const arch_env_t *arch_env = be_get_irg_arch_env(irg);
2004 const ilp_sched_selector_t *sel = arch_env->impl->get_ilp_sched_selector(arch_env);
2006 FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");
2008 stat_ev_ctx_push("ilpsched");
	/* let the backend set up its per-irg scheduling environment */
2010 env.irg_env = be_ilp_sched_init_irg_ilp_schedule(sel, irg);
2013 env.height = heights_new(irg);
2014 env.arch_env = arch_env;
2015 env.cpu = arch_env_get_machine(arch_env);
2016 env.opts = &ilp_opts;
2017 phase_init(&env.ph, env.irg, init_ilpsched_irn);
2019 /* assign a unique per block number to all interesting nodes */
2020 irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
2023 The block indices are completely built after the walk,
2024 now we can allocate the bitsets (size depends on block indices)
2027 phase_reinit_irn_data(&env.ph, reinit_ilpsched_irn);
2029 /* Collect all root nodes (having no user in their block) and calculate ASAP. */
2030 irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);
2032 /* Calculate ALAP of all irns */
2033 irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);
2035 /* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
2036 irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);
2038 /* perform ILP scheduling: one ILP per block */
2039 irg_block_walk_graph(env.irg, NULL, create_ilp, &env);
2042 if (firm_dbg_get_mask(env.dbg)) {
2044 phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
2046 fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);
2050 /* free data allocated dynamically */
2051 irg_block_walk_graph(env.irg, NULL, clear_unwanted_data, &env);
2053 /* free all allocated objects */
2054 phase_deinit(&env.ph);
2055 heights_free(env.height);
2057 /* notify backend */
2058 be_ilp_sched_finish_irg_ilp_schedule(sel, irg, env.irg_env);
2060 stat_ev_ctx_pop("ilpsched");
2064 * Register ILP scheduler options.
2066 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_ilpsched);
2067 void be_init_ilpsched(void)
	/* hook the option table into the "be.ilpsched" option group */
2069 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2070 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "ilpsched");
2072 lc_opt_add_table(sched_grp, ilpsched_option_table);
2075 #else /* WITH_ILP */
/* ISO C forbids an empty translation unit; provide a dummy when ILP support is disabled. */
2077 static inline void some_picky_compiler_do_not_allow_empty_files(void)
2080 #endif /* WITH_ILP */