2 * Scheduling algorithms.
3 * An ILP scheduler based on
4 * "ILP-based Instruction Scheduling for IA-64"
5 * by Daniel Kaestner and Sebastian Winkel
8 * @author Christian Wuerdig
26 #include "irphase_t.h"
36 #include <lpp/lpp_net.h>
39 #include <libcore/lc_opts.h>
40 #include <libcore/lc_opts_enum.h>
41 #include <libcore/lc_timing.h>
42 #endif /* WITH_LIBCORE */
46 #include "besched_t.h"
47 #include "beilpsched.h"
49 typedef struct _ilpsched_options_t {
54 typedef struct _unit_type_info_t {
56 const be_execution_unit_type_t *tp;
60 * holding the ILP variables of the different types
62 typedef struct _ilp_var_types_t {
63 int *x; /* x_{nt}^k variables */
64 int *d; /* d_{nt}^k variables */
65 int *y; /* y_{nt}^k variables */
/* Per-node attributes kept by the scheduling phase. */
typedef struct _ilpsched_node_attr_t {
	unsigned asap;                     /**< The ASAP scheduling control step */
	unsigned alap;                     /**< The ALAP scheduling control step */
	unsigned sched_point;              /**< the step in which the node is finally scheduled */
	unsigned visit_idx;                /**< Index of the node having visited this node last */
	unsigned consumer_idx;             /**< Index of the node having counted this node as consumer last */
	unsigned n_consumer;               /**< Number of consumers */
	ir_node  **block_consumer;         /**< List of consumer being in the same block */
	unsigned block_idx : 30;           /**< A unique per block index */
	unsigned alap_changed : 1;         /**< the current ALAP has changed, revisit preds */
	unsigned is_dummy_node : 1;        /**< this node is assigned to DUMMY unit */
	bitset_t *transitive_block_nodes;  /**< Set of transitive block nodes (predecessors
	                                        for ASAP, successors for ALAP) */
	unsigned n_unit_types;             /**< number of allowed execution unit types */
	unit_type_info_t *type_info;       /**< list of allowed execution unit types */
	ilp_var_types_t ilp_vars;          /**< the different ILP variables (x, d, y arrays) */
} ilpsched_node_attr_t;
/* Per-block attributes kept by the scheduling phase. */
typedef struct _ilpsched_block_attr_t {
	unsigned block_last_idx;        /**< The highest node index in block so far */
	unsigned n_interesting_nodes;   /**< The number of nodes interesting for scheduling */
	unsigned max_steps;             /**< Upper bound for block execution (number of time steps) */
	plist_t  *root_nodes;           /**< A list of nodes having no user in current block */
	ir_node  *head_ilp_nodes;       /**< A linked list (via irn link field) of nodes which will contribute to ILP */
} ilpsched_block_attr_t;
96 typedef union _ilpsched_attr_ {
97 ilpsched_node_attr_t node_attr;
98 ilpsched_block_attr_t block_attr;
101 /* A irn for the phase and it's attributes (either node or block) */
104 ilpsched_attr_t attr;
107 /* The ILP scheduling environment */
109 phase_t ph; /**< The phase */
110 ir_graph *irg; /**< The current irg */
111 heights_t *height; /**< The heights object of the irg */
112 void *irg_env; /**< An environment for the irg scheduling, provided by the backend */
113 void *block_env; /**< An environment for scheduling a block, provided by the backend */
114 const arch_env_t *arch_env;
115 const arch_isa_t *isa; /**< The ISA */
116 const be_main_env_t *main_env;
117 const be_machine_t *cpu; /**< the current abstract machine */
118 ilpsched_options_t *opts; /**< the ilp options for current irg */
119 const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
120 DEBUG_ONLY(firm_dbg_module_t *dbg);
123 /* convenience macros to handle phase irn data */
124 #define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
125 #define is_ilpsched_block(node) (is_Block((node)->irn))
126 #define get_ilpsched_block_attr(block) (&(block)->attr.block_attr)
127 #define get_ilpsched_node_attr(node) (&(node)->attr.node_attr)
129 /* iterate over a list of ir_nodes linked by link field */
130 #define foreach_linked_irns(head, iter) for ((iter) = (head); (iter); (iter) = get_irn_link((iter)))
132 /* check if node is considered for ILP scheduling */
133 #define consider_for_sched(isa, irn) \
134 (! (is_Block(irn) || \
135 is_normal_Proj(isa, irn) || \
142 /* gives the valid scheduling time step interval for a node */
143 #define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)
145 /* gives the valid interval where a node can die */
146 #define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)
148 /* gives the corresponding ILP variable for given node, unit and time step */
149 #define ILPVAR_IDX(na, unit, control_step) \
150 ((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)
152 /* gives the corresponding dead nodes ILP variable for given node, unit and time step */
153 #define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
154 ((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)
156 /* check if a double value is within an epsilon environment of 0 */
157 #define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)
160 #define ilp_timer_push(t) lc_timer_push((t))
161 #define ilp_timer_pop() lc_timer_pop()
162 #define ilp_timer_elapsed_usec(t) lc_timer_elapsed_usec((t))
163 #else /* WITH_LIBCORE */
164 #define ilp_timer_push(t)
165 #define ilp_timer_pop()
166 #define ilp_timer_elapsed_usec(t) 0.0
167 #endif /* WITH_LIBCORE */
169 /* option variable */
170 static ilpsched_options_t ilp_opts = {
171 120, /* 120 sec per block time limit */
177 static const lc_opt_table_entry_t ilpsched_option_table[] = {
178 LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
179 LC_OPT_ENT_STR("lpp_log", "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
182 #endif /* WITH_LIBCORE */
185 * Check if irn is a Proj, which has no execution units assigned.
186 * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
188 static INLINE int is_normal_Proj(const arch_isa_t *isa, const ir_node *irn) {
189 return is_Proj(irn) && (arch_isa_get_allowed_execution_units(isa, irn) == NULL);
193 * Skips normal Projs.
194 * @return predecessor if irn is a normal Proj, otherwise irn.
196 static INLINE ir_node *skip_normal_Proj(const arch_isa_t *isa, ir_node *irn) {
197 if (is_normal_Proj(isa, irn))
198 return get_Proj_pred(irn);
202 static INLINE fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
203 unsigned lat = be_ilp_sched_latency(sel, irn, env);
204 if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
/**
 * Compare scheduling time steps of two be_ilpsched_irn's.
 * qsort callback: orders nodes ascending by their final sched_point.
 * NOTE(review): listing truncated — the closing brace is on a line not visible here.
 */
static int cmp_ilpsched_irn(const void *a, const void *b) {
	be_ilpsched_irn_t    *n1   = *(be_ilpsched_irn_t **)a;
	be_ilpsched_irn_t    *n2   = *(be_ilpsched_irn_t **)b;
	ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
	ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);

	/* QSORT_CMP yields <0 / 0 / >0 as required by qsort */
	return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
/**
 * Phase data constructor: in case there is no phase information for irn,
 * initialize it; otherwise reinitialize the existing data.
 * NOTE(review): listing truncated — several branch/brace lines are not
 * visible here; the else-paths are inferred from the surviving comments.
 */
static void *init_ilpsched_irn(phase_t *ph, ir_node *irn, void *old) {
	be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));

	/* if we have already some data: check for reinitialization */
	if (! is_Block(irn)) {
		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);

		if (! na->transitive_block_nodes) {
			ir_node               *block      = get_nodes_block(irn);
			be_ilpsched_irn_t     *block_node = phase_get_or_set_irn_data(ph, block);
			ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

			/* we are called after the block indices have been build: create bitset */
			na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);
		/* we are called from reinit block data: clear the bitset */
			bitset_clear_all(na->transitive_block_nodes);
			na->alap_changed = 1;
	/* set ilpsched irn attributes (either block or irn) */
		ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);

		ba->n_interesting_nodes = 0;
		ba->block_last_idx      = 0;
		ba->root_nodes          = plist_new();
		ba->head_ilp_nodes      = NULL;
		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
		/* node attributes start out all-zero */
		memset(na, 0, sizeof(*na));
/**
 * Graph walker: assign a per-block unique number to each node considered
 * for ILP scheduling (used later as bitset index).
 * NOTE(review): listing truncated — the early "return;" after the guard is
 * on a line not visible here.
 */
static void build_block_idx(ir_node *irn, void *walk_env) {
	be_ilpsched_env_t     *env = walk_env;
	be_ilpsched_irn_t     *node, *block_node;
	ilpsched_node_attr_t  *na;
	ilpsched_block_attr_t *ba;

	/* nodes not participating in ILP scheduling get no index */
	if (! consider_for_sched(env->arch_env->isa, irn))

	node       = get_ilpsched_irn(env, irn);
	na         = get_ilpsched_node_attr(node);
	block_node = get_ilpsched_irn(env, get_nodes_block(irn));
	ba         = get_ilpsched_block_attr(block_node);

	/* hand out the next free index of this node's block */
	na->block_idx = ba->block_last_idx++;
292 /********************************************************
295 * __ _ ___ __ _ _ __ / / __ _| | __ _ _ __
296 * / _` / __|/ _` | '_ \ / / / _` | |/ _` | '_ \
297 * | (_| \__ \ (_| | |_) | / / | (_| | | (_| | |_) |
298 * \__,_|___/\__,_| .__/ /_/ \__,_|_|\__,_| .__/
301 ********************************************************/
/**
 * Graph walker: add all nodes having no user in the current block to the
 * block's root_nodes list and record each node's consumers.
 * NOTE(review): listing truncated — declarations of block/idx/consumer and
 * several brace/continue lines are not visible here.
 */
static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
	const ir_edge_t       *edge;
	be_ilpsched_irn_t     *block_node, *node;
	ilpsched_block_attr_t *ba;
	ilpsched_node_attr_t  *na;
	be_ilpsched_env_t     *env            = walk_env;
	int                   has_block_user  = 0;
	unsigned              n_consumer      = 0;
	/* walk both data and dependency edges */
	ir_edge_kind_t        ekind[2]        = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };

	if (! consider_for_sched(env->arch_env->isa, irn))

	block    = get_nodes_block(irn);
	idx      = get_irn_idx(irn);
	consumer = NEW_ARR_F(ir_node *, 0);

	DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));

	/* check data and dependency out edges */
	for (i = 0; i < 2 && ! has_block_user; ++i) {
		foreach_out_edge_kind(irn, edge, ekind[i]) {
			ir_node *user = get_edge_src_irn(edge);

			/* normal Projs are transparent: look through them at their users */
			if (is_normal_Proj(env->arch_env->isa, user)) {
				const ir_edge_t *user_edge;

				if (get_irn_mode(user) == mode_X)

				/* The ABI ensures, that there will be no ProjT nodes in the graph. */
				for (j = 0; j < 2; ++j) {
					foreach_out_edge_kind(user, user_edge, ekind[j]) {
						ir_node *real_user = get_edge_src_irn(user_edge);

						if (! is_Phi(real_user) && ! is_Block(real_user)) {
							be_ilpsched_irn_t    *node = get_ilpsched_irn(env, real_user);
							ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);

							/* skip already visited nodes */
							if (ua->consumer_idx == idx)

							/* check if node has user in this block and collect the user if it's a data user */
							if (get_nodes_block(real_user) == block) {
								if (i == 0 && j == 0)
									ARR_APP1(ir_node *, consumer, real_user);

							/* only count data consumer */

							/* mark user as visited by this node */
							ua->consumer_idx = idx;
			else if (is_Block(user)) {
			else if (! is_Phi(user)) {
				be_ilpsched_irn_t    *node = get_ilpsched_irn(env, user);
				ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);

				/* skip already visited nodes */
				if (ua->consumer_idx == idx)

				/* check if node has user in this block and collect the user if it's a data user */
				if (get_nodes_block(user) == block) {
					ARR_APP1(ir_node *, consumer, user);

				/* only count data consumer */

				/* mark user visited by this node */
				ua->consumer_idx = idx;

	block_node = get_ilpsched_irn(env, block);
	ba         = get_ilpsched_block_attr(block_node);

	ba->n_interesting_nodes++;

	/* current irn has no user inside this block, add to queue */
	if (! has_block_user) {
		DB((env->dbg, LEVEL_3, "root node\n"));
		plist_insert_back(ba->root_nodes, irn);
		DB((env->dbg, LEVEL_3, "normal node\n"));

	/* record number of all consumer and the consumer within the same block */
	node               = get_ilpsched_irn(env, irn);
	na                 = get_ilpsched_node_attr(node);
	na->n_consumer     = n_consumer;
	na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
	memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
/**
 * Graph walker: calculate the ASAP scheduling step for the given irn as the
 * maximum over all same-block predecessors of ASAP(pred) + latency(pred),
 * and link the node into the block's ILP node list.
 * NOTE(review): listing truncated — the early "return;", loop-closing braces
 * and some declarations are on lines not visible here.
 */
static void calculate_irn_asap(ir_node *irn, void *walk_env) {
	be_ilpsched_env_t     *env = walk_env;
	be_ilpsched_irn_t     *node, *block_node;
	ilpsched_node_attr_t  *na;
	ilpsched_block_attr_t *ba;

	/* These nodes are handled separate */
	if (! consider_for_sched(env->arch_env->isa, irn))

	DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));

	block = get_nodes_block(irn);
	node  = get_ilpsched_irn(env, irn);
	na    = get_ilpsched_node_attr(node);

	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));

		/* check for greatest distance to top */
		if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
			be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
			ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);

			lat      = fixed_latency(env->sel, pred, env->block_env);
			na->asap = MAX(na->asap, pna->asap + lat);

	/* add node to ILP node list and update max_steps */
	block_node = get_ilpsched_irn(env, block);
	ba         = get_ilpsched_block_attr(block_node);

	set_irn_link(irn, ba->head_ilp_nodes);
	ba->head_ilp_nodes = irn;
	/* each node contributes its latency to the block's step upper bound */
	ba->max_steps += fixed_latency(env->sel, irn, env->block_env);

	DB((env->dbg, LEVEL_2, "%u\n", na->asap));
/**
 * Block walker: calculate the ALAP scheduling step of all irns in the
 * current block via a backward worklist sweep starting at the root nodes.
 * Depends on max_steps being calculated.
 * NOTE(review): listing truncated — some declarations (el, i, lat), the
 * "continue" after the visited check and several closing braces are on
 * lines not visible here.
 */
static void calculate_block_alap(ir_node *block, void *walk_env) {
	be_ilpsched_env_t     *env        = walk_env;
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
	waitq                 *cur_queue  = new_waitq();

	assert(is_Block(block));

	DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
		block, ba->n_interesting_nodes, ba->max_steps));

	/* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
	//phase_reinit_block_irn_data(&env->ph, block);

	/* init start queue */
	foreach_plist(ba->root_nodes, el) {
		waitq_put(cur_queue, plist_element_get_value(el));

	/* repeat until all nodes are processed */
	while (! waitq_empty(cur_queue)) {
		waitq *next_queue = new_waitq();

		/* process all nodes in current step */
		while (! waitq_empty(cur_queue)) {
			ir_node              *cur_irn = waitq_get(cur_queue);
			be_ilpsched_irn_t    *node    = get_ilpsched_irn(env, cur_irn);
			ilpsched_node_attr_t *na      = get_ilpsched_node_attr(node);

			/* cur_node has no alap set -> it's a root node, set to max alap */
				na->alap = ba->max_steps;
				DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
				DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",

			/* set the alap's of all predecessors */
			for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
				ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(cur_irn, i));

				/* check for greatest distance to bottom */
				if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
					be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
					ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);

					/* mark the predecessor as visited by current irn */
					if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
					pna->visit_idx = get_irn_idx(cur_irn);

					lat = fixed_latency(env->sel, pred, env->block_env);

					/* set ALAP of current pred */
					if (pna->alap == 0) {
						/* current ALAP is 0: node has not yet been visited */
						pna->alap_changed = 1;
						pna->alap         = na->alap - lat;
					else if (pna->alap > na->alap - lat) {
						/* we found a longer path to root node: change ALAP */
						pna->alap         = na->alap - lat;
						pna->alap_changed = 1;
						/* current ALAP is best found so far: keep it */
						pna->alap_changed = 0;

					DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));

					/* enqueue node for next iteration */
					if (get_irn_ins_or_deps(pred) > 0)
						waitq_put(next_queue, pred);

		/* prepare for next iteration */
		del_waitq(cur_queue);
		cur_queue = next_queue;
/**
 * Block walker: free per-block data that is no longer needed after
 * ASAP/ALAP computation (currently the list of root nodes).
 */
static void clear_unwanted_data(ir_node *block, void *walk_env) {
	be_ilpsched_env_t     *env        = walk_env;
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

	plist_free(ba->root_nodes);
	/* NULL marks the list as released for later passes */
	ba->root_nodes = NULL;
/**
 * Graph walker: refine the {ASAP(n), ALAP(n)} interval for the nodes.
 * Sets the ASAP/ALAP times of Projs and Keeps to their ancestor's ones.
 * NOTE(review): listing truncated — early "return;" lines after the two
 * guards and the declaration of pred are not visible here.
 */
static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
	be_ilpsched_env_t    *env = walk_env;
	be_ilpsched_irn_t    *node, *pred_node;
	ilpsched_node_attr_t *na, *pna;

	if (! consider_for_sched(env->arch_env->isa, irn))

	/* only Projs and Keeps inherit their times */
	if (! is_Proj(irn) && ! be_is_Keep(irn))

	/* go to the ancestor */
	irn  = get_irn_n(irn, 0);
	pred = skip_Proj(irn);

	node      = get_ilpsched_irn(env, irn);
	pred_node = get_ilpsched_irn(env, pred);
	na        = get_ilpsched_node_attr(node);
	pna       = get_ilpsched_node_attr(pred_node);

	/* copy the ancestor's scheduling interval */
	na->asap = pna->asap;
	na->alap = pna->alap;

	DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
/**
 * Graph walker: calculate the ASAP scheduling step for the given irn as the
 * number of its transitive same-block predecessors (bitset-based variant).
 * NOTE(review): listing truncated — the early "return;", "continue" lines
 * and closing braces are not visible here.
 */
static void calculate_irn_asap(ir_node *irn, void *walk_env) {
	be_ilpsched_irn_t    *node;
	be_ilpsched_env_t    *env = walk_env;
	ilpsched_node_attr_t *na;

	/* These nodes are handled separate */
	if (! consider_for_sched(env->arch_env->isa, irn))

	DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F\n", irn));

	node  = get_ilpsched_irn(env, irn);
	block = get_nodes_block(irn);
	na    = get_ilpsched_node_attr(node);

	/* accumulate all transitive predecessors of current node */
	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
		be_ilpsched_irn_t    *pred_node;
		ilpsched_node_attr_t *pna;

		/* Keeps are transparent: look at what they keep */
		if (be_is_Keep(pred))
			pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));

		if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))

		pred_node = get_ilpsched_irn(env, pred);
		pna       = get_ilpsched_node_attr(pred_node);
		idx       = get_irn_idx(irn);

		assert(pna->asap && "missing ASAP of predecessor");

		/*
			We have not already visited this predecessor
			-> accumulate it's predecessors
		*/
		if (pna->visit_idx != idx) {
			pna->visit_idx = idx;
			na->transitive_block_nodes = bitset_or(na->transitive_block_nodes, pna->transitive_block_nodes);
			DBG((env->dbg, LEVEL_3, "\taccumulating preds of %+F\n", pred));

	/* every node is it's own transitive predecessor in block */
	bitset_set(na->transitive_block_nodes, na->block_idx);

	/* asap = number of transitive predecessors in this block */
	na->asap = bitset_popcnt(na->transitive_block_nodes);

	DBG((env->dbg, LEVEL_2, "\tcalculated ASAP is %u\n", na->asap));
668 * Calculate the ALAP scheduling step for current irn.
669 * @note: requires ASAP being calculated.
671 static void calculate_irn_alap(ir_node *irn, void *walk_env) {
672 be_ilpsched_env_t *env = walk_env;
675 be_ilpsched_irn_t *node;
676 ilpsched_node_attr_t *na;
678 /* These nodes are handled separate */
679 if (! consider_for_sched(env->arch_env->isa, irn))
682 DBG((env->dbg, LEVEL_2, "Calculating ALAP of node %+F ... ", irn));
684 block = get_nodes_block(irn);
685 node = get_ilpsched_irn(env, irn);
686 na = get_ilpsched_node_attr(node);
689 for (i = get_irn_ins_or_deps(irn) - 1; i >= i; --i) {
690 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
692 /* check, if we have a head node */
693 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
694 be_ilpsched_irn_t *pred_node = get_ilpsched_irn(env, pred);
695 ilpsched_node_attr_t *pna = get_ilpsched_node_attr(pred_node);
698 lat = fixed_latency(env->sel, pred, env->block_env);
699 na->alap = MAX(na->alap, pna->alap + lat);
704 /* handle head nodes (no predecessor in same block) */
706 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
707 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
710 DB((env->dbg, LEVEL_2, "head node ... "));
713 We have a head node here:
714 ALAP(m) = sum_over_all_m(ASAP(m))
715 where m is a root node and there is no path from m to n
717 foreach_plist(ba->root_nodes, el) {
718 ir_node *root = plist_element_get_value(el);
720 /* check if current root is independent from irn */
721 if (! heights_reachable_in_block(env->height, root, irn)) {
722 be_ilpsched_irn_t *root_node = get_ilpsched_irn(env, root);
723 ilpsched_node_attr_t *rna = get_ilpsched_node_attr(root_node);
725 na->alap = rna->asap + fixed_latency(env->sel, root, env->block_env);
730 DB((env->dbg, LEVEL_2, "%u\n", na->alap));
/**
 * Recursively accumulate the successors of all nodes from irn on upwards
 * (towards predecessors), filling each node's transitive_block_nodes bitset
 * and enqueueing nodes for the final ALAP calculation.
 * NOTE(review): listing truncated — declarations (i, n, idx), the enqueue
 * guard's closing lines, a waitq_put into wq and several closing braces are
 * not visible here.
 */
static void accumulate_succs(be_ilpsched_env_t *env, ir_node *irn) {
	be_ilpsched_irn_t    *node  = get_ilpsched_irn(env, irn);
	ilpsched_node_attr_t *na    = get_ilpsched_node_attr(node);
	ir_node              *block = get_nodes_block(irn);
	waitq                *wq    = new_waitq();

	DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F\n", irn));

	/* enqueue node for final alap calculation */
	if (! na->enqueued) {
		be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
		ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

		/* preset ALAP to the block's upper bound; corrected later */
		na->alap = ba->max_steps;
		waitq_put(env->alap_queue, node);

		set_irn_link(irn, ba->head_ilp_nodes);
		ba->head_ilp_nodes = irn;
		DBG((env->dbg, LEVEL_5, "\t\tlinked %+F to ilp nodes of %+F, attr %p\n", irn, block, ba));
		DBG((env->dbg, LEVEL_4, "\t\tenqueueing %+F for final ALAP calculation\n", irn));

	for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
		ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
		be_ilpsched_irn_t    *pred_node;
		ilpsched_node_attr_t *pna;

		/* Keeps are transparent: look at what they keep */
		if (be_is_Keep(pred))
			pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));

		if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))

		pred_node = get_ilpsched_irn(env, pred);
		pna       = get_ilpsched_node_attr(pred_node);
		idx       = get_irn_idx(irn);

		/* accumulate the successors */
		if (pna->visit_idx != idx) {
			pna->visit_idx = idx;
			pna->transitive_block_nodes = bitset_or(pna->transitive_block_nodes, na->transitive_block_nodes);

			/* set current node as successor */
			bitset_set(pna->transitive_block_nodes, na->block_idx);

			DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F to %+F\n", irn, pred));

	/* process all predecessors */
	while (! waitq_empty(wq)) {
		accumulate_succs(env, waitq_get(wq));
/**
 * Block walker: calculate the ALAP scheduling step of all irns in the
 * current block (bitset-based variant).
 * Depends on ASAP being calculated.
 * NOTE(review): listing truncated — the cfop special-case body and several
 * closing braces are not visible here.
 */
static void calculate_block_alap(ir_node *block, void *walk_env) {
	be_ilpsched_env_t     *env        = walk_env;
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

	assert(is_Block(block));

	DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes)\n", block, ba->n_interesting_nodes));

	/* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
	phase_reinit_block_irn_data(&env->ph, block);

	/* calculate the alap of all nodes, starting at collected roots upwards */
	while (! waitq_empty(ba->root_nodes)) {
		accumulate_succs(env, waitq_get(ba->root_nodes));

	/* we don't need it anymore */
	del_waitq(ba->root_nodes);
	ba->root_nodes = NULL;

	/* all interesting nodes should have their successors accumulated now */
	while (! waitq_empty(env->alap_queue)) {
		be_ilpsched_irn_t    *node = waitq_get(env->alap_queue);
		ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);

		/* control flow ops must always be scheduled last */
		if (is_cfop(node->irn) && ! is_Start(node->irn) && get_irn_opcode(node->irn) != iro_End)

		/* ALAP = upper bound minus number of transitive successors */
		na->alap -= bitset_popcnt(na->transitive_block_nodes);
		DBG((env->dbg, LEVEL_2, "\tALAP of %+F is %u (%u succs, %u consumer)\n",
			node->irn, na->alap, bitset_popcnt(na->transitive_block_nodes), na->n_consumer));

		/* maximum block steps is maximum alap of all nodes */
		ba->max_steps = MAX(ba->max_steps, na->alap);
841 /*******************************************
844 * ___ ___| |__ ___ __| |_ _| | ___
845 * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
846 * \__ \ (__| | | | __/ (_| | |_| | | __/
847 * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
849 *******************************************/
/**
 * Collect all Keep users of irn into the keeps waitq so they can be
 * scheduled right after their keep-alive targets.
 * NOTE(review): listing truncated — closing braces are not visible here.
 */
static INLINE void check_for_keeps(waitq *keeps, ir_node *block, ir_node *irn) {
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);

		if (be_is_Keep(user)) {
			assert(get_nodes_block(user) == block && "Keep must not be in different block.");
			waitq_put(keeps, user);
/**
 * Inserts @p irn before @p before into the schedule and notifies the
 * backend selector about the placement.
 *
 * @param env     The ILP scheduling environment
 * @param before  The node to insert in front of
 * @param irn     The node being scheduled
 * @param cycle   The time step assigned by the ILP solution
 */
static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
	ir_node *before, ir_node *irn, unsigned cycle)
{
	/* tell the backend first, then do the actual insertion */
	be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
	sched_add_before(before, irn);
/**
 * Adds a node, it's Projs (in case of mode_T nodes) and
 * it's Keeps to the schedule.
 * NOTE(review): listing truncated — early "return;" lines, "else" lines and
 * closing braces (including del_waitq(keeps)) are not visible here.
 */
static void add_to_sched(be_ilpsched_env_t *env, ir_node *block, ir_node *irn, unsigned cycle) {
	const ir_edge_t *edge;
	waitq           *keeps = new_waitq();

	/* mode_M nodes are not scheduled */
	if (get_irn_mode(irn) == mode_M)

	if (! sched_is_scheduled(irn))
		notified_sched_add_before(env, block, irn, cycle);

	/* for mode_T nodes, schedule the Projs right after the node */
	if (get_irn_mode(irn) == mode_T) {
		foreach_out_edge(irn, edge) {
			ir_node *user = get_edge_src_irn(edge);

			if (to_appear_in_schedule(user) || get_irn_mode(user) == mode_b)
				notified_sched_add_before(env, block, user, cycle);

			check_for_keeps(keeps, block, user);
		check_for_keeps(keeps, block, irn);

	/* schedule collected Keeps directly after their targets */
	while (! waitq_empty(keeps)) {
		ir_node *keep = waitq_get(keeps);
		if (! sched_is_scheduled(keep))
			notified_sched_add_before(env, block, keep, cycle);
/**
 * Schedule all nodes in the given block, according to the ILP solution.
 * NOTE(review): listing truncated — declarations (irn, cfop, cur_var, found,
 * tp_idx, t, i, l), switch case labels and several closing braces are not
 * visible here.
 */
static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
	sched_info_t          *info       = get_irn_sched_info(block);
	be_ilpsched_irn_t     **sched_nodes;
	const ir_edge_t       *edge;

	/* init block schedule list */
	INIT_LIST_HEAD(&info->list);

	/* collect nodes and their scheduling time step */
	sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
	if (ba->n_interesting_nodes == 0) {
	else if (ba->n_interesting_nodes == 1) {
		be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);

		/* add the single node */
		ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
		/* check all nodes for their positive solution */
		foreach_linked_irns(ba->head_ilp_nodes, irn) {
			be_ilpsched_irn_t    *node;
			ilpsched_node_attr_t *na;

			node = get_ilpsched_irn(env, irn);
			na   = get_ilpsched_node_attr(node);

			/* go over all variables of a node until the non-zero one is found */
			for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
				for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
					double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);

					/* check, if variable is set to one (it's not zero then :) */
					if (! LPP_VALUE_IS_0(val)) {
						ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
						DBG((env->dbg, LEVEL_1, "Schedpoint of %+F is %u at unit type %s\n",
							irn, t, na->type_info[tp_idx].tp->name));

	/* sort nodes ascending by scheduling time step */
	qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);

	/* make all Phis ready and remember the single cf op */
	foreach_out_edge(block, edge) {
		irn = get_edge_src_irn(edge);

		switch (get_irn_opcode(irn)) {
				add_to_sched(env, block, irn, 0);
				assert(cfop == NULL && "Highlander - there can be only one");

	/* add all nodes from list */
	for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
		ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
		add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);

	/* schedule control flow node if not already done */
	if (cfop && ! sched_is_scheduled(cfop))
		add_to_sched(env, block, cfop, 0);

	DEL_ARR_F(sched_nodes);
1011 /***************************************************************
1012 * _____ _ _____ _____ _ _
1013 * |_ _| | | __ \ / ____| | | (_)
1014 * | | | | | |__) | | (___ ___ ___| |_ _ ___ _ __
1015 * | | | | | ___/ \___ \ / _ \/ __| __| |/ _ \| '_ \
1016 * _| |_| |____| | ____) | __/ (__| |_| | (_) | | | |
1017 * |_____|______|_| |_____/ \___|\___|\__|_|\___/|_| |_|
1019 ***************************************************************/
/**
 * Check if the given node can be executed on the given unit type.
 * NOTE(review): listing truncated — the returns (presumably the matching
 * index or a not-found sentinel) are on lines not visible here.
 */
static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
	ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);

	/* linear scan over the node's allowed unit types */
	for (i = na->n_unit_types - 1; i >= 0; --i) {
		if (na->type_info[i].tp == tp)
1036 /************************************************
1039 * __ ____ _ _ __ _ __ _| |__ | | ___ ___
1040 * \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
1041 * \ V / (_| | | | | (_| | |_) | | __/\__ \
1042 * \_/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
1044 ************************************************/
1047 * Create the following variables:
1048 * - x_{nt}^k binary weigthed with: t
1049 * node n is scheduled at time step t to unit type k
1050 * ==>> These variables represent the schedule
1053 * - d_{nt}^k binary weighted with: t
1054 * node n dies at time step t on unit type k
1056 * - y_{nt}^k binary weighted with: num_nodes^2
1057 * node n is scheduled at time step t to unit type k
1058 * although all units of this type are occupied
1059 * ==>> These variables represent the register pressure
static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
	unsigned num_block_var, num_nodes;
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* objective weight for the y (register pressure) variables: num_nodes^2,
	   see the variable description in the function's header comment */
	unsigned weigth_y = ba->n_interesting_nodes * ba->n_interesting_nodes;
	lc_timer_t *t_var = lc_timer_register("beilpsched_var", "create ilp variables");
#endif /* WITH_LIBCORE */

	ilp_timer_push(t_var);
	num_block_var = num_nodes = 0;
	/* walk all "interesting" ILP nodes collected for this block */
	foreach_linked_irns(ba->head_ilp_nodes, irn) {
		const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
		be_ilpsched_irn_t *node;
		ilpsched_node_attr_t *na;
		unsigned n_unit_types, tp_idx, unit_idx, n_var, cur_unit;
		unsigned cur_var_d, cur_var_x, cur_var_y, num_die;

		/* count number of available unit types for this node */
		for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)

		node = get_ilpsched_irn(env, irn);
		na = get_ilpsched_node_attr(node);

		na->n_unit_types = n_unit_types;
		na->type_info = NEW_ARR_D(unit_type_info_t, var_obst, n_unit_types);

		/* fill the type info array */
		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
			for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
				/* beware: we also count number of available units here */
				if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
					na->is_dummy_node = 1;

			na->type_info[tp_idx].tp = execunits[tp_idx][0]->tp;
			na->type_info[tp_idx].n_units = unit_idx;

		/* allocate space for ilp variables; indices are initialized to -1
		   so unassigned slots are recognizable */
		na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
		memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));

		/* we need these variables only for "real" nodes (not DUMMY-unit ones) */
		if (! na->is_dummy_node) {
			na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
			memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));

			/* a node may die in any step from asap(n) up to the block's last step */
			num_die = ba->max_steps - na->asap + 1;
			na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_die);
			memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));

		DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
			irn, na->asap, na->alap, na->n_unit_types));

		cur_var_x = cur_var_d = cur_var_y = cur_unit = n_var = 0;
		/* create variables; note that the internal time index t is 0-based:
		   scheduling step s corresponds to t = s - 1 */
		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
			for (t = na->asap - 1; t <= na->alap - 1; ++t) {
				/* x_{nt}^k variables, weighted with t+1 in the objective */
				snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
					get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
				na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
				DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
				/* variable counter */

				if (! na->is_dummy_node) {
					/* y_{nt}^k variables, weighted with num_nodes^2 */
					snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
					na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_binary, (double)(weigth_y));
					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));

					/* variable counter */

			/* a node can die at any step t: asap(n) <= t <= U */
			if (! na->is_dummy_node) {
				/* NOTE(review): d was sized with num_die = max_steps - asap + 1 entries
				   per unit type, but t here takes asap-1 .. max_steps inclusive, i.e.
				   max_steps - asap + 2 values — confirm the intended loop bound
				   (t < ba->max_steps?) to rule out an off-by-one overflow of d. */
				for (t = na->asap - 1; t <= ba->max_steps; ++t) {
					/* d_{nt}^k variables, weighted with t+1 in the objective */
					snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
					na->ilp_vars.d[cur_var_d++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));

					/* variable counter */

		DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));

	DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
		num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
1171 /*******************************************************
1174 * ___ ___ _ __ ___| |_ _ __ __ _ _ _ __ | |_ ___
1175 * / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
1176 * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
1177 * \___\___/|_| |_|___/\__|_| \__,_|_|_| |_|\__|___/
1179 *******************************************************/
1182 * Create following ILP constraints:
1183 * - the assignment constraints:
1184 * assure each node is executed once by exactly one (allowed) execution unit
1185 * - the dead node assignment constraints:
1186 * assure a node can only die at most once
1187 * - the precedence constraints:
1188 * assure that no data dependencies are violated
static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
	unsigned num_cst_assign, num_cst_prec, num_cst_dead;
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	/* scratch bitset over per-block node indices, reused for every node below */
	bitset_t *bs_block_irns = bitset_alloca(ba->block_last_idx);
	lc_timer_t *t_cst_assign = lc_timer_register("beilpsched_cst_assign", "create assignment constraints");
	lc_timer_t *t_cst_dead = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
	lc_timer_t *t_cst_prec = lc_timer_register("beilpsched_cst_prec", "create precedence constraints");
#endif /* WITH_LIBCORE */

	num_cst_assign = num_cst_prec = num_cst_dead = 0;
	foreach_linked_irns(ba->head_ilp_nodes, irn) {
		be_ilpsched_irn_t *node;
		ilpsched_node_attr_t *na;

		node = get_ilpsched_irn(env, irn);
		na = get_ilpsched_node_attr(node);

		/* the assignment constraint: sum over all x vars of this node == 1,
		   i.e. the node is scheduled exactly once on exactly one unit type */
		ilp_timer_push(t_cst_assign);
		snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
		cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
		DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

		lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);

		/* the dead node assignment constraint: sum over all d vars <= 1,
		   i.e. a node can die at most once */
		if (! na->is_dummy_node) {
			ilp_timer_push(t_cst_dead);
			snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
			cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
			DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

			lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);

		/* the precedence constraints: walk all in-edges/dependencies of irn */
		ilp_timer_push(t_cst_prec);
		bs_block_irns = bitset_clear_all(bs_block_irns);
		for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
			ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
			unsigned t_low, t_high, t;
			be_ilpsched_irn_t *pred_node;
			ilpsched_node_attr_t *pna;

			/* Phis, NoMem and predecessors outside this block impose no constraint */
			if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))

			pred_node = get_ilpsched_irn(env, pred);
			pna = get_ilpsched_node_attr(pred_node);

			assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");

			/* remember which preds were already handled (avoids duplicates) */
			if (! bitset_is_set(bs_block_irns, pna->block_idx))
				bitset_set(bs_block_irns, pna->block_idx);

			/* irn = n, pred = m */
			delay = fixed_latency(env->sel, pred, env->block_env);
			/* overlap of both intervals, shifted by the pred's latency */
			t_low = MAX(na->asap, pna->asap + delay - 1);
			t_high = MIN(na->alap, pna->alap + delay - 1);
			for (t = t_low - 1; t <= t_high - 1; ++t) {
				int *tmp_var_idx = NEW_ARR_F(int, 0);

				snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
				cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
				DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

				/* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
				if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
					/* node variables have smaller index than pred variables */
					for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
						for (tn = na->asap - 1; tn <= t; ++tn) {
							unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
							ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);

					for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
						for (tm = t - delay + 1; tm < pna->alap; ++tm) {
							unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
							ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);

					/* pred variables have smaller index than node variables */
					for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
						for (tm = t - delay + 1; tm < pna->alap; ++tm) {
							unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
							ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);

					for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
						for (tn = na->asap - 1; tn <= t; ++tn) {
							unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
							ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);

				if (ARR_LEN(tmp_var_idx) > 0)
					lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);

				DEL_ARR_F(tmp_var_idx);

	DBG((env->dbg, LEVEL_1, "\t%u assignement constraints (%g sec)\n",
		num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
	DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
		num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
1319 * Create ILP resource constraints:
 * - assure that at each time step no more instructions are scheduled
 *   to a unit type than there are units of this type available
static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
	unsigned num_cst_resrc = 0;
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	lc_timer_t *t_cst_rsrc = lc_timer_register("beilpsched_cst_rsrc", "create resource constraints");
#endif /* WITH_LIBCORE */

	ilp_timer_push(t_cst_rsrc);
	/* one constraint per (unit type, time step):
	   sum of x vars scheduled there <= number of units of that type */
	for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
		be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];

		/* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */

		/* check each time step */
		for (t = 0; t < ba->max_steps; ++t) {
			int *tmp_var_idx = NEW_ARR_F(int, 0);

			snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
			cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
			DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

			foreach_linked_irns(ba->head_ilp_nodes, irn) {
				be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
				ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);

				tp_idx = is_valid_unit_type_for_node(cur_tp, node);

				/* collect the x var only if the node may run on this type and
				   t lies inside the node's [asap, alap] interval (0-based) */
				if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
					int cur_var = ILPVAR_IDX(na, tp_idx, t);
					ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);

			/* set constraints if we have some */
			if (ARR_LEN(tmp_var_idx) > 0)
				lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);

			DEL_ARR_F(tmp_var_idx);

	DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
		num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
1376 * Create ILP bundle constraints:
 * - assure that at most bundle_size * bundles_per_cycle instructions
1378 * can be started at a certain point.
static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
	unsigned num_cst_bundle = 0;
	/* max. instructions issuable per cycle
	   ('bundels_per_cycle' is the field's spelling in the machine description) */
	unsigned n_instr_max = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	lc_timer_t *t_cst_bundle = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
#endif /* WITH_LIBCORE */

	ilp_timer_push(t_cst_bundle);
	/* one constraint per time step: sum of x vars started at t <= n_instr_max */
	for (t = 0; t < ba->max_steps; ++t) {
		int *tmp_var_idx = NEW_ARR_F(int, 0);

		snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
		cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
		DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

		foreach_linked_irns(ba->head_ilp_nodes, irn) {
			be_ilpsched_irn_t *node;
			ilpsched_node_attr_t *na;

			/* Projs and Keeps do not contribute to bundle size */
			if (is_Proj(irn) || be_is_Keep(irn))

			node = get_ilpsched_irn(env, irn);
			na = get_ilpsched_node_attr(node);

			/* nodes assigned to DUMMY unit do not contribute to bundle size */
			if (na->is_dummy_node)

			/* x vars exist only inside the node's [asap, alap] interval (0-based) */
			if (t >= na->asap - 1 && t <= na->alap - 1) {
				for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
					int idx = ILPVAR_IDX(na, tp_idx, t);
					ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);

		if (ARR_LEN(tmp_var_idx) > 0)
			lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);

		DEL_ARR_F(tmp_var_idx);

	DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
		num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
1436 * Create ILP dying nodes constraints:
 * - set variable d_{nt}^k to 1 if node n dies at step t on unit k
static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
	unsigned num_cst = 0;
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
#endif /* WITH_LIBCORE */

	ilp_timer_push(t_cst);
	/* check all time_steps */
	for (t = 0; t < ba->max_steps; ++t) {
		foreach_linked_irns(ba->head_ilp_nodes, irn) {
			be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
			ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);

			/* if node has no consumer within current block, it cannot die here */
			/* we also ignore nodes assigned to dummy unit */
			if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)

			/* node can only die here if t at least asap(n) */
			if (t >= na->asap - 1) {
				/* for all unit types */
				for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
					int *tmp_var_idx = NEW_ARR_F(int, 0);

					snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
					/* right-hand side n_consumer - 1: all but one consumer may be
					   scheduled by t without forcing d to 1 */
					cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
					DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

					/* number of consumer scheduled till t */
					for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
						be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
						ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);

						for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
							/* clip the consumer's interval at both t and its alap */
							for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
								int idx = ILPVAR_IDX(ca, tp_idx, tm);
								ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);

					/* could be that no consumer can be scheduled at this point */
					if (ARR_LEN(tmp_var_idx)) {
						/* subtract possible prior kill points */
						for (tn = na->asap - 1; tn < t; ++tn) {
							idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
							lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);

						/* d at step t enters with factor -n_consumer, so setting it
						   to 1 always satisfies the constraint */
						idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
						lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
						lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);

					DEL_ARR_F(tmp_var_idx);

	DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
		num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1520 * Create ILP pressure constraints:
1521 * - add additional costs to objective function if a node is scheduled
1522 * on a unit although all units of this type are currently occupied
static void create_pressure_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
	unsigned num_cst = 0;
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
#endif /* WITH_LIBCORE */

	ilp_timer_push(t_cst);
	/* y_{nt}^k is set for each node and timestep and unit type */
	foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
		unsigned cur_idx = get_irn_idx(cur_irn);
		be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
		ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);

		/* we ignore nodes assigned to DUMMY unit here */
		if (cur_na->is_dummy_node)

		for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
			be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];

			/* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */

			/* check if node can be executed on this unit type */
			cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);

			/* check all time_steps of the node's interval (0-based indices) */
			for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
				int *tmp_idx_1 = NEW_ARR_F(int, 0);  /* vars entering with factor +1 */
				int *tmp_idx_m1 = NEW_ARR_F(int, 0); /* vars entering with factor -1 */

				snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
				cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
				DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));

				/*
				 - accumulate all nodes scheduled on unit type k till t
				 - subtract all nodes died on unit type k till t
				*/
				foreach_linked_irns(ba->head_ilp_nodes, irn) {
					be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
					ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);

					tmax = MIN(t, na->alap - 1);
					tp_idx = is_valid_unit_type_for_node(cur_tp, node);

					/* current unit type is not suitable for current node */

					for (tn = na->asap - 1; tn <= tmax; ++tn) {
						/* node scheduled */
						idx = ILPVAR_IDX(na, tp_idx, tn);
						ARR_APP1(int, tmp_idx_1, na->ilp_vars.x[idx]);

						/* node died */
						idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, tn);
						ARR_APP1(int, tmp_idx_m1, na->ilp_vars.d[idx]);

				if (ARR_LEN(tmp_idx_1) > 0)
					lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_1, ARR_LEN(tmp_idx_1), 1.0);

				if (ARR_LEN(tmp_idx_m1) > 0)
					lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_m1, ARR_LEN(tmp_idx_m1), -1.0);

				/* BEWARE: t is unsigned, so (double)(-t) won't work */
				y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
				lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], 0.0 - (double)(t));

				DEL_ARR_F(tmp_idx_1);
				DEL_ARR_F(tmp_idx_m1);

	DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
		num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1620 /***************************************************
1622 * |_ _| | | __ \ (_)
1623 * | | | | | |__) | _ __ ___ __ _ _ _ __
1624 * | | | | | ___/ | '_ ` _ \ / _` | | '_ \
1625 * _| |_| |____| | | | | | | | (_| | | | | |
1626 * |_____|______|_| |_| |_| |_|\__,_|_|_| |_|
1628 ***************************************************/
1631 * Create the ilp (add variables, build constraints, solve, build schedule from solution).
static void create_ilp(ir_node *block, void *walk_env) {
	be_ilpsched_env_t *env = walk_env;
	be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
	FILE *logfile = NULL;
	struct obstack var_obst;

	DBG((env->dbg, 255, "\n\n\n=========================================\n"));
	DBG((env->dbg, 255, " ILP Scheduling for %+F\n", block));
	DBG((env->dbg, 255, "=========================================\n\n"));

	DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
		block, ba->n_interesting_nodes, ba->max_steps));

	/* notify backend and get block environment */
	env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);

	/* if we have less than two interesting nodes, there is no need to create the ILP */
	if (ba->n_interesting_nodes > 1) {
		/* heuristic size estimates for the LPP, tuned by block size */
		double fact_var = ba->n_interesting_nodes > 25 ? 1.1 : 1.2;
		double fact_cst = ba->n_interesting_nodes > 25 ? 0.7 : 1.5;
		int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
		int estimated_n_var = (int)((double)base_num * fact_var);
		int estimated_n_cst = (int)((double)base_num * fact_cst);

		DBG((env->dbg, LEVEL_1, "Creating LPP with estimed numbers: %d vars, %d cst\n",
			estimated_n_var, estimated_n_cst));

		/* set up the LPP object */
		/* NOTE(review): the "num vars" argument is fed estimated_n_cst + 1 —
		   this looks like it should be estimated_n_var + 1; confirm against
		   the new_lpp_userdef() signature. */
		lpp = new_lpp_userdef(
			"be ilp scheduling",
			estimated_n_cst + 1, /* num vars */
			estimated_n_cst + 20, /* num cst */
			1.2); /* grow factor */
		obstack_init(&var_obst);

		/* create ILP variables */
		create_variables(env, lpp, block_node, &var_obst);

		/* create ILP constraints */
		DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
		create_assignment_and_precedence_constraints(env, lpp, block_node);
		create_ressource_constraints(env, lpp, block_node);
		create_bundle_constraints(env, lpp, block_node);
		create_dying_nodes_constraint(env, lpp, block_node);
		create_pressure_constraint(env, lpp, block_node);

		DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp->var_next, lpp->cst_next));

		/* debug stuff, dump lpp when debugging is on */
		if (firm_dbg_get_mask(env->dbg) > 0) {
			snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
			/* NOTE(review): fopen result is not checked before use; a failing
			   open would hand NULL to lpp_dump_plain — confirm/guard. */
			f = fopen(buf, "w");
			lpp_dump_plain(lpp, f);

			snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));

		/* set solve time limit */
		lpp_set_time_limit(lpp, env->opts->time_limit);

		/* set logfile if requested ("stdout"/"stderr" select the std streams) */
		if (strlen(env->opts->log_file) > 0) {
			if (strcasecmp(env->opts->log_file, "stdout") == 0)
				lpp_set_log(lpp, stdout);
			else if (strcasecmp(env->opts->log_file, "stderr") == 0)
				lpp_set_log(lpp, stderr);

				logfile = fopen(env->opts->log_file, "w");
					fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
					lpp_set_log(lpp, logfile);

		/* solve the ILP remotely via the configured server/solver */
		lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);

		/* check for valid solution; on failure dump the LPP and graph for analysis */
		if (! lpp_is_sol_valid(lpp)) {
			snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.txt", get_irn_node_nr(block));
			f = fopen(buf, "w");
			lpp_dump_plain(lpp, f);

			snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.mps", get_irn_node_nr(block));
			dump_ir_block_graph(env->irg, "-assert");

			assert(0 && "ILP solution is not feasible!");

		DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
		DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
		DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
		DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp->iterations));
		DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp->sol_time));
		DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
		DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));

		DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));

	/* apply solution: build the final schedule from the ILP result */
	apply_solution(env, lpp, block);

	/* notify backend */
	be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
1761 * Perform ILP scheduling on the given irg.
void be_ilp_sched(const be_irg_t *birg) {
	be_ilpsched_env_t env;
	const char *name = "be ilp scheduling";
	arch_isa_t *isa = birg->main_env->arch_env->isa;
	const ilp_sched_selector_t *sel = isa->impl->get_ilp_sched_selector(isa);

	FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");

	//firm_dbg_set_mask(env.dbg, 31);

	/* notify the backend and set up the scheduling environment */
	env.irg_env = be_ilp_sched_init_irg_ilp_schedule(sel, birg->irg);
	env.irg = birg->irg;
	env.height = heights_new(birg->irg);
	env.main_env = birg->main_env;
	env.arch_env = birg->main_env->arch_env;
	env.cpu = arch_isa_get_machine(birg->main_env->arch_env->isa);
	env.opts = &ilp_opts;
	phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn);

	/* assign a unique per block number to all interesting nodes */
	irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);

	/* The block indices are completely built after the walk,
	   now we can allocate the bitsets (size depends on block indices). */
	phase_reinit_irn_data(&env.ph);

	/* Collect all root nodes (having no user in their block) and calculate ASAP. */
	irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);

	/* Calculate ALAP of all irns */
	irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);

	/* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
	irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);

	/* we don't need this information any longer */
	heights_free(env.height);

	/* perform ILP scheduling (per block) */
	irg_block_walk_graph(env.irg, clear_unwanted_data, create_ilp, &env);

	/* print phase memory statistics when debugging is enabled */
	if (firm_dbg_get_mask(env.dbg)) {
		phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
		fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);

	/* free all allocated object */
	phase_free(&env.ph);

	/* notify backend */
	be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
1826 * Register ILP scheduler options.
void ilpsched_register_options(lc_opt_entry_t *grp) {
	/* guard so the option table is registered only once
	   (NOTE(review): the check of run_once is not visible here — confirm) */
	static int run_once = 0;
	lc_opt_entry_t *sched_grp;

	sched_grp = lc_opt_get_grp(grp, "ilpsched");

	lc_opt_add_table(sched_grp, ilpsched_option_table);
#endif /* WITH_LIBCORE */
#else /* WITH_ILP */

/* Keep this translation unit non-empty when ILP support is compiled out
   (ISO C forbids an empty translation unit). */
static int some_picky_compiler_do_not_allow_empty_files;

#endif /* WITH_ILP */