2 * Scheduling algorithms.
3 * An ILP scheduler based on
4 * "ILP-based Instruction Scheduling for IA-64"
5 * by Daniel Kaestner and Sebastian Winkel
8 * @author Christian Wuerdig
26 #include "irphase_t.h"
36 #include <lpp/lpp_net.h>
39 #include <libcore/lc_opts.h>
40 #include <libcore/lc_opts_enum.h>
41 #include <libcore/lc_timing.h>
42 #endif /* WITH_LIBCORE */
46 #include "besched_t.h"
47 #include "beilpsched.h"
/* Command-line adjustable options for the ILP scheduler
   (fields such as the per-block time limit; body partially outside this view). */
typedef struct _ilpsched_options_t {

/* Describes one execution unit type a node may be scheduled on. */
typedef struct _unit_type_info_t {
	const be_execution_unit_type_t *tp;  /**< the execution unit type */

/**
 * Container holding the ILP variables of the different types
 * for one node (indexed per (unit type, time step) pair).
 */
typedef struct _ilp_var_types_t {
	int *x; /* x_{nt}^k variables: node n scheduled at step t on unit type k */
	int *d; /* d_{nt}^k variables: node n dies at step t on unit type k */
	int *y; /* y_{nt}^k variables: register pressure penalty variables */
/* attributes for a node */
typedef struct _ilpsched_node_attr_t {
	unsigned asap;                    /**< The ASAP scheduling control step */
	unsigned alap;                    /**< The ALAP scheduling control step */
	unsigned sched_point;             /**< the step in which the node is finally scheduled */
	unsigned visit_idx;               /**< Index of the node having visited this node last */
	unsigned consumer_idx;            /**< Index of the node having counted this node as consumer last */
	unsigned n_consumer;              /**< Number of consumers */
	ir_node **block_consumer;         /**< List of consumers being in the same block */
	unsigned block_idx : 30;          /**< A unique per block index */
	unsigned alap_changed : 1;        /**< the current ALAP has changed, revisit preds */
	unsigned is_dummy_node : 1;       /**< this node is assigned to the DUMMY unit */
	/* NOTE(review): accumulate_succs() reads na->enqueued, but no such
	   bit-field is declared in this struct -- confirm a field was not dropped. */
	bitset_t *transitive_block_nodes; /**< Set of transitive block nodes (predecessors
	                                       for ASAP, successors for ALAP) */
	unsigned n_unit_types;            /**< number of allowed execution unit types */
	unit_type_info_t *type_info;      /**< list of allowed execution unit types */
	ilp_var_types_t ilp_vars;         /**< the different ILP variables */
} ilpsched_node_attr_t;
/* attributes for a block */
typedef struct _ilpsched_block_attr_t {
	unsigned block_last_idx;      /**< The highest node index in block so far */
	unsigned n_interesting_nodes; /**< The number of nodes interesting for scheduling */
	unsigned max_steps;           /**< Upper bound for block execution (number of steps) */
	/* NOTE(review): declared plist_t*, but the accumulate_succs-based
	   calculate_block_alap treats root_nodes as a waitq (waitq_empty/waitq_get/
	   del_waitq) -- the two ALAP implementations appear to expect different
	   types here; confirm which variant is compiled in. */
	plist_t *root_nodes;          /**< A list of nodes having no user in current block */
	ir_node *head_ilp_nodes;      /**< A linked list of nodes which will contribute to ILP */
} ilpsched_block_attr_t;
/* A node carries either node or block attributes, never both. */
typedef union _ilpsched_attr_ {
	ilpsched_node_attr_t  node_attr;  /**< used for ordinary nodes */
	ilpsched_block_attr_t block_attr; /**< used for block nodes */

/* An irn for the phase and its attributes (either node or block) */
	ilpsched_attr_t attr;
/* The ILP scheduling environment */
	phase_t ph;                      /**< The phase */
	ir_graph *irg;                   /**< The current irg */
	heights_t *height;               /**< The heights object of the irg */
	void *irg_env;                   /**< An environment for the irg scheduling, provided by the backend */
	void *block_env;                 /**< An environment for scheduling a block, provided by the backend */
	const arch_env_t *arch_env;      /**< The architecture environment */
	const arch_isa_t *isa;           /**< The ISA */
	const be_main_env_t *main_env;   /**< The backend main environment */
	const be_machine_t *cpu;         /**< the current abstract machine */
	ilpsched_options_t *opts;        /**< the ilp options for current irg */
	const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
	DEBUG_ONLY(firm_dbg_module_t *dbg);
/* convenience macros to handle phase irn data */
#define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
#define is_ilpsched_block(node) (is_Block((node)->irn))
#define get_ilpsched_block_attr(block) (&(block)->attr.block_attr)
#define get_ilpsched_node_attr(node) (&(node)->attr.node_attr)

/* iterate over a list of ir_nodes linked by link field */
/* NOTE: (iter) is evaluated more than once per step -- pass only a plain variable */
#define foreach_linked_irns(head, iter) for ((iter) = (head); (iter); (iter) = get_irn_link((iter)))

/* check if node is considered for ILP scheduling */
#define consider_for_sched(isa, irn) \
	(! (is_Block(irn)            ||  \
		is_normal_Proj(isa, irn) ||  \

/* gives the valid scheduling time step interval for a node */
#define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)

/* gives the valid interval where a node can die */
#define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)

/* gives the corresponding ILP variable for given node, unit and time step */
#define ILPVAR_IDX(na, unit, control_step) \
	((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)

/* gives the corresponding dead nodes ILP variable for given node, unit and time step */
#define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
	((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)

/* check if a double value is within an epsilon environment of 0 */
#define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)

/* timing helpers: real libcore timers when WITH_LIBCORE is set, no-ops otherwise */
#define ilp_timer_push(t)         lc_timer_push((t))
#define ilp_timer_pop()           lc_timer_pop()
#define ilp_timer_elapsed_usec(t) lc_timer_elapsed_usec((t))
#else /* WITH_LIBCORE */
#define ilp_timer_push(t)
#define ilp_timer_pop()
#define ilp_timer_elapsed_usec(t) 0.0
#endif /* WITH_LIBCORE */
/* option variable */
static ilpsched_options_t ilp_opts = {
	120, /* 120 sec per block time limit */

/* libcore option table exposing the ILP scheduler options */
static const lc_opt_table_entry_t ilpsched_option_table[] = {
	LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
	LC_OPT_ENT_STR("lpp_log", "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
#endif /* WITH_LIBCORE */
/**
 * Check if irn is a Proj, which has no execution units assigned.
 * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
 */
static INLINE int is_normal_Proj(const arch_isa_t *isa, const ir_node *irn) {
	return is_Proj(irn) && (arch_isa_get_allowed_execution_units(isa, irn) == NULL);
/**
 * Skips normal Projs (Projs without assigned execution units).
 * @return predecessor if irn is a normal Proj, otherwise irn.
 */
static INLINE ir_node *skip_normal_Proj(const arch_isa_t *isa, ir_node *irn) {
	if (is_normal_Proj(isa, irn))
		return get_Proj_pred(irn);
/**
 * Compare scheduling time steps of two be_ilpsched_irn's.
 * qsort callback: orders nodes ascending by their final sched_point.
 */
static int cmp_ilpsched_irn(const void *a, const void *b) {
	be_ilpsched_irn_t    *n1   = *(be_ilpsched_irn_t **)a;
	be_ilpsched_irn_t    *n2   = *(be_ilpsched_irn_t **)b;
	ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
	ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);

	return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
/**
 * Phase data-init callback.
 * In case there is no phase information for irn, initialize it;
 * otherwise reinitialize the parts that depend on a previous pass.
 */
static void *init_ilpsched_irn(phase_t *ph, ir_node *irn, void *old) {
	be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));

	/* if we have already some data: check for reinitialization */

	if (! is_Block(irn)) {
		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);

		if (! na->transitive_block_nodes) {
			ir_node               *block      = get_nodes_block(irn);
			be_ilpsched_irn_t     *block_node = phase_get_or_set_irn_data(ph, block);
			ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

			/* we are called after the block indices have been build: create bitset */
			na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);

			/* we are called from reinit block data: clear the bitset */
			bitset_clear_all(na->transitive_block_nodes);
			na->alap_changed = 1;

	/* set ilpsched irn attributes (either block or irn) */
		ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);

		/* fresh block: no interesting nodes seen yet, empty root list */
		ba->n_interesting_nodes = 0;
		ba->block_last_idx      = 0;
		ba->root_nodes          = plist_new();
		ba->head_ilp_nodes      = NULL;
		ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
		memset(na, 0, sizeof(*na));
/**
 * Irg walker: assign a per block unique number to each interesting node.
 * The index is later used to address the node's bit in the block's
 * transitive-nodes bitsets.
 */
static void build_block_idx(ir_node *irn, void *walk_env) {
	be_ilpsched_env_t     *env = walk_env;
	be_ilpsched_irn_t     *node, *block_node;
	ilpsched_node_attr_t  *na;
	ilpsched_block_attr_t *ba;

	/* blocks, normal Projs etc. are not scheduled by the ILP */
	if (! consider_for_sched(env->arch_env->isa, irn))

	node       = get_ilpsched_irn(env, irn);
	na         = get_ilpsched_node_attr(node);
	block_node = get_ilpsched_irn(env, get_nodes_block(irn));
	ba         = get_ilpsched_block_attr(block_node);

	na->block_idx = ba->block_last_idx++;
282 /********************************************************
285 * __ _ ___ __ _ _ __ / / __ _| | __ _ _ __
286 * / _` / __|/ _` | '_ \ / / / _` | |/ _` | '_ \
287 * | (_| \__ \ (_| | |_) | / / | (_| | | (_| | |_) |
288 * \__,_|___/\__,_| .__/ /_/ \__,_|_|\__,_| .__/
291 ********************************************************/
/**
 * Irg walker: add all nodes having no user in current block to the
 * block's root_nodes list, and record every node's consumers
 * (all of them, plus the subset inside the same block).
 */
static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
	const ir_edge_t       *edge;
	be_ilpsched_irn_t     *block_node, *node;
	ilpsched_block_attr_t *ba;
	ilpsched_node_attr_t  *na;
	be_ilpsched_env_t     *env            = walk_env;
	int                   has_block_user  = 0;
	unsigned              n_consumer      = 0;
	/* examine both normal data edges and dependency edges */
	ir_edge_kind_t        ekind[2]        = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };

	if (! consider_for_sched(env->arch_env->isa, irn))

	block    = get_nodes_block(irn);
	idx      = get_irn_idx(irn);
	consumer = NEW_ARR_F(ir_node *, 0);

	DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));

	/* check data and dependency out edges */
	for (i = 0; i < 2 && ! has_block_user; ++i) {
		foreach_out_edge_kind(irn, edge, ekind[i]) {
			ir_node *user = get_edge_src_irn(edge);

			if (is_normal_Proj(env->arch_env->isa, user)) {
				const ir_edge_t *user_edge;

				if (get_irn_mode(user) == mode_X)

				/* The ABI ensures, that there will be no ProjT nodes in the graph. */
				for (j = 0; j < 2; ++j) {
					foreach_out_edge_kind(user, user_edge, ekind[j]) {
						ir_node *real_user = get_edge_src_irn(user_edge);

						if (! is_Phi(real_user) && ! is_Block(real_user)) {
							be_ilpsched_irn_t    *node = get_ilpsched_irn(env, real_user);
							ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);

							/* skip already visited nodes */
							if (ua->consumer_idx == idx)

							/* check if node has user in this block and collect the user if it's a data user */
							if (get_nodes_block(real_user) == block) {
								if (i == 0 && j == 0)
									ARR_APP1(ir_node *, consumer, real_user);

							/* only count data consumer */

							/* mark user as visited by this node */
							ua->consumer_idx = idx;
			else if (is_Block(user)) {
			else if (! is_Phi(user)) {
				be_ilpsched_irn_t    *node = get_ilpsched_irn(env, user);
				ilpsched_node_attr_t *ua   = get_ilpsched_node_attr(node);

				/* skip already visited nodes */
				if (ua->consumer_idx == idx)

				/* check if node has user in this block and collect the user if it's a data user */
				if (get_nodes_block(user) == block) {
					ARR_APP1(ir_node *, consumer, user);

				/* only count data consumer */

				/* mark user visited by this node */
				ua->consumer_idx = idx;

	block_node = get_ilpsched_irn(env, block);
	ba         = get_ilpsched_block_attr(block_node);

	ba->n_interesting_nodes++;

	/* current irn has no user inside this block, add to queue */
	if (! has_block_user) {
		DB((env->dbg, LEVEL_3, "root node\n"));
		plist_insert_back(ba->root_nodes, irn);
		DB((env->dbg, LEVEL_3, "normal node\n"));

	/* record number of all consumer and the consumer within the same block */
	node               = get_ilpsched_irn(env, irn);
	na                 = get_ilpsched_node_attr(node);
	na->n_consumer     = n_consumer;
	na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
	memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
/**
 * Topological irg walker: calculate the ASAP scheduling step for current irn
 * as the maximum over (pred ASAP + pred latency) of all same-block
 * predecessors; also link the node into the block's ILP node list and
 * raise the block's max_steps bound by the node's latency.
 */
static void calculate_irn_asap(ir_node *irn, void *walk_env) {
	be_ilpsched_env_t     *env = walk_env;
	be_ilpsched_irn_t     *node, *block_node;
	ilpsched_node_attr_t  *na;
	ilpsched_block_attr_t *ba;

	/* These nodes are handled separate */
	if (! consider_for_sched(env->arch_env->isa, irn))

	DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));

	block = get_nodes_block(irn);
	node  = get_ilpsched_irn(env, irn);
	na    = get_ilpsched_node_attr(node);

	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));

		/* check for greatest distance to top */
		if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
			be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
			ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);

			lat      = be_ilp_sched_latency(env->sel, pred, env->block_env);
			na->asap = MAX(na->asap, pna->asap + lat);

	/* add node to ILP node list and update max_steps */
	block_node = get_ilpsched_irn(env, block);
	ba         = get_ilpsched_block_attr(block_node);

	set_irn_link(irn, ba->head_ilp_nodes);
	ba->head_ilp_nodes = irn;
	lat                = be_ilp_sched_latency(env->sel, irn, env->block_env);

	/* zero-latency ops still need a slot unless they are Projs/Keeps */
	if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))

	ba->max_steps += lat;

	DB((env->dbg, LEVEL_2, "%u\n", na->asap));
/**
 * Calculate the ALAP scheduling step of all irns in current block
 * via a backward BFS-style sweep from the root nodes.
 * Depends on max_steps being calculated.
 */
static void calculate_block_alap(ir_node *block, void *walk_env) {
	be_ilpsched_env_t     *env        = walk_env;
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
	waitq                 *cur_queue  = new_waitq();

	assert(is_Block(block));

	DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
		block, ba->n_interesting_nodes, ba->max_steps));

	/* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
	//phase_reinit_block_irn_data(&env->ph, block);

	/* init start queue */
	foreach_plist(ba->root_nodes, el) {
		waitq_put(cur_queue, plist_element_get_value(el));

	/* repeat until all nodes are processed */
	while (! waitq_empty(cur_queue)) {
		waitq *next_queue = new_waitq();

		/* process all nodes in current step */
		while (! waitq_empty(cur_queue)) {
			ir_node              *cur_irn = waitq_get(cur_queue);
			be_ilpsched_irn_t    *node    = get_ilpsched_irn(env, cur_irn);
			ilpsched_node_attr_t *na      = get_ilpsched_node_attr(node);

			/* cur_node has no alap set -> it's a root node, set to max alap */
				na->alap = ba->max_steps;
				DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
				DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",

			/* set the alap's of all predecessors */
			for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
				ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(cur_irn, i));

				/* check for greatest distance to bottom */
				if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
					be_ilpsched_irn_t    *pred_node = get_ilpsched_irn(env, pred);
					ilpsched_node_attr_t *pna       = get_ilpsched_node_attr(pred_node);

					/* mark the predecessor as visited by current irn */
					if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
					pna->visit_idx = get_irn_idx(cur_irn);

					lat = be_ilp_sched_latency(env->sel, pred, env->block_env);

					/* set ALAP of current pred */
					if (pna->alap == 0) {
						/* current ALAP is 0: node has not yet been visited */
						pna->alap_changed = 1;
						pna->alap         = na->alap - lat;
					else if (pna->alap > na->alap - lat) {
						/* we found a longer path to root node: change ALAP */
						pna->alap         = na->alap - lat;
						pna->alap_changed = 1;

						/* current ALAP is best found so far: keep it */
						pna->alap_changed = 0;

					DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));

					/* enqueue node for next iteration */
					if (get_irn_ins_or_deps(pred) > 0)
						waitq_put(next_queue, pred);

		/* prepare for next iteration */
		del_waitq(cur_queue);
		cur_queue = next_queue;
/**
 * Block walker: release per-block data no longer needed after ASAP/ALAP.
 * We can free the list of root nodes here.
 */
static void clear_unwanted_data(ir_node *block, void *walk_env) {
	be_ilpsched_env_t     *env        = walk_env;
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

	plist_free(ba->root_nodes);
	/* NULL the pointer so later passes can detect the list is gone */
	ba->root_nodes = NULL;
/**
 * Refine the {ASAP(n), ALAP(n)} interval for the nodes.
 * Set the ASAP/ALAP times of Projs and Keeps to their ancestor ones.
 */
static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
	be_ilpsched_env_t    *env = walk_env;
	be_ilpsched_irn_t    *node, *pred_node;
	ilpsched_node_attr_t *na, *pna;

	if (! consider_for_sched(env->arch_env->isa, irn))

	/* only Projs and Keeps get their times copied from an ancestor */
	if (! is_Proj(irn) && ! be_is_Keep(irn))

	/* go to the ancestor */
		irn = get_irn_n(irn, 0);
	pred = skip_Proj(irn);

	node      = get_ilpsched_irn(env, irn);
	pred_node = get_ilpsched_irn(env, pred);
	na        = get_ilpsched_node_attr(node);
	pna       = get_ilpsched_node_attr(pred_node);

	na->asap = pna->asap;
	na->alap = pna->alap;

	DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
/**
 * Calculate the ASAP scheduling step for current irn
 * (alternative implementation: ASAP = number of transitive same-block
 * predecessors, accumulated via per-node bitsets).
 */
static void calculate_irn_asap(ir_node *irn, void *walk_env) {
	be_ilpsched_irn_t    *node;
	be_ilpsched_env_t    *env = walk_env;
	ilpsched_node_attr_t *na;

	/* These nodes are handled separate */
	if (! consider_for_sched(env->arch_env->isa, irn))

	DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F\n", irn));

	node  = get_ilpsched_irn(env, irn);
	block = get_nodes_block(irn);
	na    = get_ilpsched_node_attr(node);

	/* accumulate all transitive predecessors of current node */
	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
		be_ilpsched_irn_t    *pred_node;
		ilpsched_node_attr_t *pna;

		/* a Keep counts as its kept ancestor */
		if (be_is_Keep(pred))
			pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));

		if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))

		pred_node = get_ilpsched_irn(env, pred);
		pna       = get_ilpsched_node_attr(pred_node);
		idx       = get_irn_idx(irn);

		assert(pna->asap && "missing ASAP of predecessor");

		/*
		We have not already visited this predecessor
		-> accumulate it's predecessors
		*/
		if (pna->visit_idx != idx) {
			pna->visit_idx = idx;
			na->transitive_block_nodes = bitset_or(na->transitive_block_nodes, pna->transitive_block_nodes);
			DBG((env->dbg, LEVEL_3, "\taccumulating preds of %+F\n", pred));

	/* every node is it's own transitive predecessor in block */
	bitset_set(na->transitive_block_nodes, na->block_idx);

	/* asap = number of transitive predecessors in this block */
	na->asap = bitset_popcnt(na->transitive_block_nodes);

	DBG((env->dbg, LEVEL_2, "\tcalculated ASAP is %u\n", na->asap));
663 * Calculate the ALAP scheduling step for current irn.
664 * @note: requires ASAP being calculated.
666 static void calculate_irn_alap(ir_node *irn, void *walk_env) {
667 be_ilpsched_env_t *env = walk_env;
670 be_ilpsched_irn_t *node;
671 ilpsched_node_attr_t *na;
673 /* These nodes are handled separate */
674 if (! consider_for_sched(env->arch_env->isa, irn))
677 DBG((env->dbg, LEVEL_2, "Calculating ALAP of node %+F ... ", irn));
679 block = get_nodes_block(irn);
680 node = get_ilpsched_irn(env, irn);
681 na = get_ilpsched_node_attr(node);
684 for (i = get_irn_ins_or_deps(irn) - 1; i >= i; --i) {
685 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
687 /* check, if we have a head node */
688 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
689 be_ilpsched_irn_t *pred_node = get_ilpsched_irn(env, pred);
690 ilpsched_node_attr_t *pna = get_ilpsched_node_attr(pred_node);
693 lat = be_ilp_sched_latency(env->sel, pred, env->block_env);
694 na->alap = MAX(na->alap, pna->alap + lat);
699 /* handle head nodes (no predecessor in same block) */
701 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
702 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
705 DB((env->dbg, LEVEL_2, "head node ... "));
708 We have a head node here:
709 ALAP(m) = sum_over_all_m(ASAP(m))
710 where m is a root node and there is no path from m to n
712 foreach_plist(ba->root_nodes, el) {
713 ir_node *root = plist_element_get_value(el);
715 /* check if current root is independent from irn */
716 if (! heights_reachable_in_block(env->height, root, irn)) {
717 be_ilpsched_irn_t *root_node = get_ilpsched_irn(env, root);
718 ilpsched_node_attr_t *rna = get_ilpsched_node_attr(root_node);
720 na->alap = rna->asap + be_ilp_sched_latency(env->sel, root, env->block_env);
725 DB((env->dbg, LEVEL_2, "%u\n", na->alap));
/**
 * Accumulate the successors of all nodes from irn on upwards:
 * recursively OR this node's transitive-successor bitset into all
 * same-block predecessors, enqueueing each node once for the final
 * ALAP computation.
 */
static void accumulate_succs(be_ilpsched_env_t *env, ir_node *irn) {
	be_ilpsched_irn_t    *node  = get_ilpsched_irn(env, irn);
	ilpsched_node_attr_t *na    = get_ilpsched_node_attr(node);
	ir_node              *block = get_nodes_block(irn);
	waitq                *wq    = new_waitq();

	DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F\n", irn));

	/* enqueue node for final alap calculation */
	if (! na->enqueued) {
		be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
		ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

		/* start with the block upper bound, reduced later by the successor count */
		na->alap = ba->max_steps;
		waitq_put(env->alap_queue, node);

		set_irn_link(irn, ba->head_ilp_nodes);
		ba->head_ilp_nodes = irn;
		DBG((env->dbg, LEVEL_5, "\t\tlinked %+F to ilp nodes of %+F, attr %p\n", irn, block, ba));
		DBG((env->dbg, LEVEL_4, "\t\tenqueueing %+F for final ALAP calculation\n", irn));

	for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
		ir_node              *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
		be_ilpsched_irn_t    *pred_node;
		ilpsched_node_attr_t *pna;

		/* a Keep counts as its kept ancestor */
		if (be_is_Keep(pred))
			pred = skip_normal_Proj(env->arch_env->isa, get_irn_n(pred, 0));

		if (is_Phi(pred) || block != get_nodes_block(pred) || is_NoMem(pred))

		pred_node = get_ilpsched_irn(env, pred);
		pna       = get_ilpsched_node_attr(pred_node);
		idx       = get_irn_idx(irn);

		/* accumulate the successors */
		if (pna->visit_idx != idx) {
			pna->visit_idx = idx;
			pna->transitive_block_nodes = bitset_or(pna->transitive_block_nodes, na->transitive_block_nodes);

			/* set current node as successor */
			bitset_set(pna->transitive_block_nodes, na->block_idx);

			DBG((env->dbg, LEVEL_3, "\taccumulating succs of %+F to %+F\n", irn, pred));

	/* process all predecessors */
	while (! waitq_empty(wq)) {
		accumulate_succs(env, waitq_get(wq));
/**
 * Calculate the ALAP scheduling step of all irns in current block
 * (alternative implementation: ALAP = max_steps - number of transitive
 * same-block successors).
 * Depends on ASAP being calculated.
 */
static void calculate_block_alap(ir_node *block, void *walk_env) {
	be_ilpsched_env_t     *env        = walk_env;
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);

	assert(is_Block(block));

	DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes)\n", block, ba->n_interesting_nodes));

	/* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
	phase_reinit_block_irn_data(&env->ph, block);

	/* calculate the alap of all nodes, starting at collected roots upwards */
	/* NOTE(review): root_nodes is used as a waitq here, but is declared as a
	   plist_t* in ilpsched_block_attr_t -- confirm which variant applies */
	while (! waitq_empty(ba->root_nodes)) {
		accumulate_succs(env, waitq_get(ba->root_nodes));

	/* we don't need it anymore */
	del_waitq(ba->root_nodes);
	ba->root_nodes = NULL;

	/* all interesting nodes should have their successors accumulated now */
	while (! waitq_empty(env->alap_queue)) {
		be_ilpsched_irn_t    *node = waitq_get(env->alap_queue);
		ilpsched_node_attr_t *na   = get_ilpsched_node_attr(node);

		/* control flow ops must always be scheduled last */
		if (is_cfop(node->irn) && ! is_Start(node->irn) && get_irn_opcode(node->irn) != iro_End)

		na->alap -= bitset_popcnt(na->transitive_block_nodes);
		DBG((env->dbg, LEVEL_2, "\tALAP of %+F is %u (%u succs, %u consumer)\n",
			node->irn, na->alap, bitset_popcnt(na->transitive_block_nodes), na->n_consumer));

		/* maximum block steps is maximum alap of all nodes */
		ba->max_steps = MAX(ba->max_steps, na->alap);
836 /*******************************************
839 * ___ ___| |__ ___ __| |_ _| | ___
840 * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
841 * \__ \ (__| | | | __/ (_| | |_| | | __/
842 * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
844 *******************************************/
/**
 * Collect all Keep users of @p irn into the @p keeps queue,
 * so they can be scheduled right after their kept node.
 */
static INLINE void check_for_keeps(waitq *keeps, ir_node *block, ir_node *irn) {
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);

		if (be_is_Keep(user)) {
			assert(get_nodes_block(user) == block && "Keep must not be in different block.");
			waitq_put(keeps, user);
/**
 * Inserts @p irn before @p before into schedule and notifies backend
 * (backend callback first, so it sees the assigned cycle).
 */
static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
	ir_node *before, ir_node *irn, unsigned cycle)
{
	be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
	sched_add_before(before, irn);
/**
 * Adds a node, it's Projs (in case of mode_T nodes) and
 * it's Keeps to schedule.
 */
static void add_to_sched(be_ilpsched_env_t *env, ir_node *block, ir_node *irn, unsigned cycle) {
	const ir_edge_t *edge;
	waitq           *keeps = new_waitq();

	/* mode_M nodes are not scheduled */
	if (get_irn_mode(irn) == mode_M)

	if (! sched_is_scheduled(irn))
		notified_sched_add_before(env, block, irn, cycle);

	/* mode_T nodes: schedule their Projs right after them */
	if (get_irn_mode(irn) == mode_T) {
		foreach_out_edge(irn, edge) {
			ir_node *user = get_edge_src_irn(edge);

			if (to_appear_in_schedule(user))
				notified_sched_add_before(env, block, user, cycle);

			check_for_keeps(keeps, block, user);

		check_for_keeps(keeps, block, irn);

	/* schedule all collected Keeps immediately as well */
	while (! waitq_empty(keeps)) {
		ir_node *keep = waitq_get(keeps);
		if (! sched_is_scheduled(keep))
			notified_sched_add_before(env, block, keep, cycle);
/**
 * Schedule all nodes in the given block, according to the ILP solution:
 * read back the x_{nt}^k variables, sort nodes by their scheduling step,
 * and emit Phis first, the control flow op last.
 */
static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
	be_ilpsched_irn_t     *block_node = get_ilpsched_irn(env, block);
	ilpsched_block_attr_t *ba         = get_ilpsched_block_attr(block_node);
	sched_info_t          *info       = get_irn_sched_info(block);
	be_ilpsched_irn_t     **sched_nodes;
	const ir_edge_t       *edge;

	/* init block schedule list */
	INIT_LIST_HEAD(&info->list);

	/* collect nodes and their scheduling time step */
	sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
	if (ba->n_interesting_nodes == 0) {
	else if (ba->n_interesting_nodes == 1) {
		be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);

		/* add the single node */
		ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);

		/* check all nodes for their positive solution */
		foreach_linked_irns(ba->head_ilp_nodes, irn) {
			be_ilpsched_irn_t    *node;
			ilpsched_node_attr_t *na;

			node = get_ilpsched_irn(env, irn);
			na   = get_ilpsched_node_attr(node);

			/* go over all variables of a node until the non-zero one is found */
			for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
				for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
					double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);

					/* check, if variable is set to one (it's not zero then :) */
					if (! LPP_VALUE_IS_0(val)) {
						ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
						DBG((env->dbg, LEVEL_1, "Schedpoint of %+F is %u at unit type %s\n",
							irn, t, na->type_info[tp_idx].tp->name));

	/* sort nodes ascending by scheduling time step */
	qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);

	/* make all Phis ready and remember the single cf op */
	foreach_out_edge(block, edge) {
		irn = get_edge_src_irn(edge);

		switch (get_irn_opcode(irn)) {
				add_to_sched(env, block, irn, 0);
				assert(cfop == NULL && "Highlander - there can be only one");

	/* add all nodes from list */
	for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
		ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
		add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);

	/* schedule control flow node if not already done */
	if (cfop && ! sched_is_scheduled(cfop))
		add_to_sched(env, block, cfop, 0);

	DEL_ARR_F(sched_nodes);
1006 /***************************************************************
1007 * _____ _ _____ _____ _ _
1008 * |_ _| | | __ \ / ____| | | (_)
1009 * | | | | | |__) | | (___ ___ ___| |_ _ ___ _ __
1010 * | | | | | ___/ \___ \ / _ \/ __| __| |/ _ \| '_ \
1011 * _| |_| |____| | ____) | __/ (__| |_| | (_) | | | |
1012 * |_____|______|_| |_____/ \___|\___|\__|_|\___/|_| |_|
1014 ***************************************************************/
/**
 * Check if node can be executed on given unit type by scanning the
 * node's allowed-type list.
 */
static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
	ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);

	for (i = na->n_unit_types - 1; i >= 0; --i) {
		if (na->type_info[i].tp == tp)
1031 /************************************************
1034 * __ ____ _ _ __ _ __ _| |__ | | ___ ___
1035 * \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
1036 * \ V / (_| | | | | (_| | |_) | | __/\__ \
1037 * \_/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
1039 ************************************************/
1042 * Create the following variables:
 * - x_{nt}^k binary weighted with: t
1044 * node n is scheduled at time step t to unit type k
1045 * ==>> These variables represent the schedule
1048 * - d_{nt}^k binary weighted with: t
1049 * node n dies at time step t on unit type k
1051 * - y_{nt}^k binary weighted with: num_nodes^2
1052 * node n is scheduled at time step t to unit type k
1053 * although all units of this type are occupied
1054 * ==>> These variables represent the register pressure
/* Create all ILP variables (x, y, d) for the nodes of one block.
   See the comment block above for the meaning of each variable family. */
static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
	unsigned              num_block_var, num_nodes;
	ilpsched_block_attr_t *ba       = get_ilpsched_block_attr(block_node);
	/* y variables are weighted quadratically in the node count, see header comment */
	unsigned              weigth_y  = ba->n_interesting_nodes * ba->n_interesting_nodes;
	lc_timer_t            *t_var    = lc_timer_register("beilpsched_var", "create ilp variables");
#endif /* WITH_LIBCORE */

	ilp_timer_push(t_var);
	num_block_var = num_nodes = 0;
	foreach_linked_irns(ba->head_ilp_nodes, irn) {
		const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
		be_ilpsched_irn_t    *node;
		ilpsched_node_attr_t *na;
		unsigned             n_unit_types, tp_idx, unit_idx, n_var, cur_unit;
		unsigned             cur_var_d, cur_var_x, cur_var_y, num_die;

		/* count number of available unit types for this node */
		for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)

		node = get_ilpsched_irn(env, irn);
		na   = get_ilpsched_node_attr(node);

		na->n_unit_types = n_unit_types;
		na->type_info    = NEW_ARR_D(unit_type_info_t, var_obst, n_unit_types);

		/* fill the type info array */
		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
			for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
				/* beware: we also count number of available units here */
				if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
					na->is_dummy_node = 1;

			na->type_info[tp_idx].tp      = execunits[tp_idx][0]->tp;
			na->type_info[tp_idx].n_units = unit_idx;

		/* allocate space for ilp variables */
		na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
		memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));

		/* we need these variables only for "real" nodes */
		if (! na->is_dummy_node) {
			na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
			memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));

			/* NOTE(review): num_die = max_steps - asap + 1, but the d-variable
			   loop below runs t from asap-1 through max_steps inclusive, i.e.
			   max_steps - asap + 2 iterations -- looks like an off-by-one
			   between allocation and loop bound; confirm against the original. */
			num_die        = ba->max_steps - na->asap + 1;
			na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_die);
			memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));

		DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
			irn, na->asap, na->alap, na->n_unit_types));

		cur_var_x = cur_var_d = cur_var_y = cur_unit = n_var = 0;
		/* create variables */
		for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {

			for (t = na->asap - 1; t <= na->alap - 1; ++t) {
				/* x_{nt}^k variables */
				snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
					get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
				na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
				DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
				/* variable counter */

				if (! na->is_dummy_node) {
					/* y_{nt}^k variables */
					snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
					na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_binary, (double)(weigth_y));
					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));

					/* variable counter */

			/* a node can die at any step t: asap(n) <= t <= U */
			if (! na->is_dummy_node) {
				for (t = na->asap - 1; t <= ba->max_steps; ++t) {
					/* d_{nt}^k variables */
					snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
						get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
					na->ilp_vars.d[cur_var_d++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
					DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));

					/* variable counter */

		DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));

	DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
1163 num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
1166 /*******************************************************
1169 * ___ ___ _ __ ___| |_ _ __ __ _ _ _ __ | |_ ___
1170 * / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
1171 * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
1172 * \___\___/|_| |_|___/\__|_| \__,_|_|_| |_|\__|___/
1174 *******************************************************/
1177 * Create following ILP constraints:
1178 * - the assignment constraints:
1179 * assure each node is executed once by exactly one (allowed) execution unit
1180 * - the dead node assignment constraints:
1181 * assure a node can only die at most once
1182 * - the precedence constraints:
1183 * assure that no data dependencies are violated
1185 static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1186 unsigned num_cst_assign, num_cst_prec, num_cst_dead;
1189 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
/* one bit per block-local node index, used below to process each predecessor only once per node */
1190 bitset_t *bs_block_irns = bitset_alloca(ba->block_last_idx);
1192 lc_timer_t *t_cst_assign = lc_timer_register("beilpsched_cst_assign", "create assignment constraints");
1193 lc_timer_t *t_cst_dead = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
1194 lc_timer_t *t_cst_prec = lc_timer_register("beilpsched_cst_prec", "create precedence constraints");
1195 #endif /* WITH_LIBCORE */
1197 num_cst_assign = num_cst_prec = num_cst_dead = 0;
1198 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1201 be_ilpsched_irn_t *node;
1202 ilpsched_node_attr_t *na;
1204 node = get_ilpsched_irn(env, irn);
1205 na = get_ilpsched_node_attr(node);
/* assignment: sum over all x_{nt}^k of this node == 1 (scheduled exactly once) */
1208 /* the assignment constraint */
1209 ilp_timer_push(t_cst_assign);
1210 snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
1211 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
1212 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1215 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
/* dead node: sum over all d_{nt}^k of this node <= 1 (dies at most once) */
1218 /* the dead node assignment constraint */
1219 if (! na->is_dummy_node) {
1220 ilp_timer_push(t_cst_dead);
1221 snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
1222 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1223 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1225 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);
1229 /* the precedence constraints */
1230 ilp_timer_push(t_cst_prec);
1231 bs_block_irns = bitset_clear_all(bs_block_irns);
1232 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
1233 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
1234 unsigned t_low, t_high, t;
1235 be_ilpsched_irn_t *pred_node;
1236 ilpsched_node_attr_t *pna;
/* only predecessors scheduled within the same block are constrained */
1239 if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
1242 pred_node = get_ilpsched_irn(env, pred);
1243 pna = get_ilpsched_node_attr(pred_node);
1245 assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
/* NOTE(review): presumably duplicate predecessors are skipped via an else-branch
   not visible in this excerpt -- TODO confirm */
1247 if (! bitset_is_set(bs_block_irns, pna->block_idx))
1248 bitset_set(bs_block_irns, pna->block_idx);
1252 /* irn = n, pred = m */
1253 delay = be_ilp_sched_latency(env->sel, pred, env->block_env);
/* steps where a precedence violation between m and n is possible */
1254 t_low = MAX(na->asap, pna->asap + delay - 1);
1255 t_high = MIN(na->alap, pna->alap + delay - 1);
1256 for (t = t_low - 1; t <= t_high - 1; ++t) {
1258 int cur_idx, int_na, int_pna;
/* number of x-variable indices of n resp. m contributing to this constraint */
1261 int_na = (t >= na->asap - 1) ? MIN(t, na->alap - 1) - na->asap + 1 : 0;
1262 int_pna = (t < pna->alap) ? pna->alap - t : 0;
1263 tmp_var_idx = NEW_ARR_F(int, int_na * na->n_unit_types + int_pna * pna->n_unit_types);
1265 snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
1266 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1267 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1272 /* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
1273 if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
1274 /* node variables have smaller index than pred variables */
1275 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1276 for (tn = na->asap - 1; tn <= t; ++tn) {
1277 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1278 tmp_var_idx[cur_idx++] = na->ilp_vars.x[idx];
1282 for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1283 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1284 unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1285 tmp_var_idx[cur_idx++] = pna->ilp_vars.x[idx];
1290 /* pred variables have smaller index than node variables */
1291 for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1292 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1293 unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1294 tmp_var_idx[cur_idx++] = pna->ilp_vars.x[idx];
1298 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1299 for (tn = na->asap - 1; tn <= t; ++tn) {
1300 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1301 tmp_var_idx[cur_idx++] = na->ilp_vars.x[idx];
1306 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1308 DEL_ARR_F(tmp_var_idx);
/* NOTE(review): stats for the dead-node timer (t_cst_dead) are not printed in the
   visible lines -- possibly in an elided part; verify against the full file */
1313 DBG((env->dbg, LEVEL_1, "\t%u assignement constraints (%g sec)\n",
1314 num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
1315 DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
1316 num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
1320 * Create ILP resource constraints:
1321 * - assure that for each time step not more instructions are scheduled
1322 * to the same unit types as units of this type are available
/* NOTE(review): "ressource" (sic) is the function's established name; kept for callers. */
1324 static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1327 unsigned num_cst_resrc = 0;
1328 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1330 lc_timer_t *t_cst_rsrc = lc_timer_register("beilpsched_cst_rsrc", "create resource constraints");
1331 #endif /* WITH_LIBCORE */
1333 ilp_timer_push(t_cst_rsrc);
/* for each globally known unit type: sum of x_{nt}^k over all nodes at step t <= n_units(k) */
1334 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1336 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1338 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1340 /* check each time step */
1341 for (t = 0; t < ba->max_steps; ++t) {
1344 int *tmp_var_idx = NEW_ARR_F(int, 0);
1346 snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
1347 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
1348 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1351 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1352 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1353 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1356 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
/* only nodes that may run on this unit type and whose interval contains t contribute;
   t is 0-based while asap/alap are 1-based, hence the -1 */
1358 if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
1359 int cur_var = ILPVAR_IDX(na, tp_idx, t);
1360 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);
1364 /* set constraints if we have some */
1365 if (ARR_LEN(tmp_var_idx) > 0)
1366 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1368 DEL_ARR_F(tmp_var_idx);
1372 DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
1373 num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
1377 * Create ILP bundle constraints:
1378 * - assure, at most bundle_size * bundles_per_cycle instructions
1379 * can be started at a certain point.
1381 static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1384 unsigned num_cst_bundle = 0;
/* NOTE(review): "bundels_per_cycle" is presumably the field's declared spelling
   (defined outside this excerpt) -- confirm before renaming */
1385 unsigned n_instr_max = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
1386 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1388 lc_timer_t *t_cst_bundle = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
1389 #endif /* WITH_LIBCORE */
1391 ilp_timer_push(t_cst_bundle);
/* one constraint per time step: sum of all x_{nt}^k started at t <= n_instr_max */
1392 for (t = 0; t < ba->max_steps; ++t) {
1395 int *tmp_var_idx = NEW_ARR_F(int, 0);
1397 snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
1398 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
1399 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1402 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1403 be_ilpsched_irn_t *node;
1404 ilpsched_node_attr_t *na;
1407 /* Projs and Keeps do not contribute to bundle size */
1408 if (is_Proj(irn) || be_is_Keep(irn))
1411 node = get_ilpsched_irn(env, irn);
1412 na = get_ilpsched_node_attr(node);
1414 /* nodes assigned to DUMMY unit do not contribute to bundle size */
1415 if (na->is_dummy_node)
/* t is 0-based, asap/alap are 1-based */
1418 if (t >= na->asap - 1 && t <= na->alap - 1) {
1419 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1420 int idx = ILPVAR_IDX(na, tp_idx, t);
1421 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1426 if (ARR_LEN(tmp_var_idx) > 0)
1427 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1429 DEL_ARR_F(tmp_var_idx);
1432 DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
1433 num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
1437 * Create ILP dying nodes constraints:
1438 * - set variable d_{nt}^k to 1 if nodes n dies at step t on unit k
/*
 * The factors set below implement, per node n, step t and unit type:
 *   (#consumer x-vars scheduled up to t) - (prior kill-point d-vars)
 *     - n_consumer * d_{nt} <= n_consumer - 1
 * so once all consumers of n are scheduled by step t, the constraint can
 * only be satisfied by setting d_{nt} = 1 (or an earlier kill point).
 */
1440 static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1443 unsigned num_cst = 0;
1444 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1446 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
1447 #endif /* WITH_LIBCORE */
1449 ilp_timer_push(t_cst);
1450 /* check all time_steps */
1451 for (t = 0; t < ba->max_steps; ++t) {
1455 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1456 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1457 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1459 /* if node has no consumer within current block, it cannot die here */
1460 /* we also ignore nodes assigned to dummy unit */
1461 if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)
1464 /* node can only die here if t at least asap(n) */
1465 if (t >= na->asap - 1) {
1468 /* for all unit types */
1469 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1471 int *tmp_var_idx = NEW_ARR_F(int, 0);
1473 snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
1474 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
1475 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1478 /* number of consumer scheduled till t */
1479 for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1480 be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1481 ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
1483 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
/* collect all x-vars of the consumer whose step lies in [asap(c)-1, min(t, alap(c)-1)] */
1486 for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
1487 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1488 ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);
1493 /* could be that no consumer can be scheduled at this point */
1494 if (ARR_LEN(tmp_var_idx)) {
1498 /* subtract possible prior kill points */
1499 for (tn = na->asap - 1; tn < t; ++tn) {
1500 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
1501 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);
/* the d-var of the current step gets weight -n_consumer (written as 0.0 - ... to avoid negating an unsigned) */
1504 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1505 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
1506 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1509 DEL_ARR_F(tmp_var_idx);
1516 DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
1517 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1521 * Create ILP pressure constraints:
1522 * - add additional costs to objective function if a node is scheduled
1523 * on a unit although all units of this type are currently occupied
/*
 * Per node n, unit type k and step t in n's interval, the factors set
 * below implement:
 *   sum(x of nodes scheduled on k up to t) - sum(d of nodes died on k
 *   up to t) - t * y_{nt}^k <= n_units(k) - 1
 * If more nodes are live on unit type k at step t than units exist,
 * y_{nt}^k must become 1, which is penalized in the objective function
 * (the y variables carry a large weight, assigned at variable creation).
 */
1525 static void create_pressure_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
1528 unsigned num_cst = 0;
1529 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1531 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1532 #endif /* WITH_LIBCORE */
1534 ilp_timer_push(t_cst);
1535 /* y_{nt}^k is set for each node and timestep and unit type */
1536 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1537 unsigned cur_idx = get_irn_idx(cur_irn);
1538 be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
1539 ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);
1542 /* we ignore nodes assigned to DUMMY unit here */
1543 if (cur_na->is_dummy_node)
1547 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1548 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1552 /* BEWARE: the DUMMY unit types is not in CPU, so it's skipped automatically */
1554 /* check if node can be executed on this unit type */
1555 cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1559 /* check all time_steps */
1560 for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
/* collected lpp indices: tmp_idx_1 gets factor +1, tmp_idx_m1 gets factor -1 */
1563 int *tmp_idx_1 = NEW_ARR_F(int, 0);
1564 int *tmp_idx_m1 = NEW_ARR_F(int, 0);
1566 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1567 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1568 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1572 - accumulate all nodes scheduled on unit type k till t
1573 - subtract all nodes died on unit type k till t
1575 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1576 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1577 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1581 tmax = MIN(t, na->alap - 1);
1582 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1584 /* current unit type is not suitable for current node */
1588 for (tn = na->asap - 1; tn <= tmax; ++tn) {
1591 /* node scheduled */
1592 idx = ILPVAR_IDX(na, tp_idx, tn);
1593 ARR_APP1(int, tmp_idx_1, na->ilp_vars.x[idx]);
/* node possibly died at tn on this unit type */
1596 idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, tn);
1597 ARR_APP1(int, tmp_idx_m1, na->ilp_vars.d[idx]);
1601 if (ARR_LEN(tmp_idx_1) > 0)
1602 lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_1, ARR_LEN(tmp_idx_1), 1.0);
1604 if (ARR_LEN(tmp_idx_m1) > 0)
1605 lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_m1, ARR_LEN(tmp_idx_m1), -1.0);
1607 /* BEWARE: t is unsigned, so (double)(-t) won't work */
1608 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1609 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], 0.0 - (double)(t));
1611 DEL_ARR_F(tmp_idx_1);
1612 DEL_ARR_F(tmp_idx_m1);
1617 DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1618 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1621 /***************************************************
1623 * |_ _| | | __ \ (_)
1624 * | | | | | |__) | _ __ ___ __ _ _ _ __
1625 * | | | | | ___/ | '_ ` _ \ / _` | | '_ \
1626 * _| |_| |____| | | | | | | | (_| | | | | |
1627 * |_____|______|_| |_| |_| |_|\__,_|_|_| |_|
1629 ***************************************************/
1632 * Create the ilp (add variables, build constraints, solve, build schedule from solution).
/*
 * Block walker callback: builds the full ILP for one block (variables plus
 * all five constraint families), hands it to the net solver, verifies the
 * solution and applies it as the block schedule.  Skipped for blocks with
 * fewer than two interesting nodes.
 */
1634 static void create_ilp(ir_node *block, void *walk_env) {
1635 be_ilpsched_env_t *env = walk_env;
1636 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
1637 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1638 FILE *logfile = NULL;
1640 struct obstack var_obst;
1642 DBG((env->dbg, 255, "\n\n\n=========================================\n"));
1643 DBG((env->dbg, 255, " ILP Scheduling for %+F\n", block));
1644 DBG((env->dbg, 255, "=========================================\n\n"));
1646 DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
1647 block, ba->n_interesting_nodes, ba->max_steps));
1649 /* notify backend and get block environment */
1650 env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);
1652 /* if we have less than two interesting nodes, there is no need to create the ILP */
1653 if (ba->n_interesting_nodes > 1) {
/* heuristic preallocation sizes: roughly n^2 variables/constraints, scaled */
1654 double fact_var = ba->n_interesting_nodes > 25 ? 1.1 : 1.2;
1655 double fact_cst = ba->n_interesting_nodes > 25 ? 0.7 : 1.5;
1656 int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
1657 int estimated_n_var = (int)((double)base_num * fact_var);
1658 int estimated_n_cst = (int)((double)base_num * fact_cst);
1660 DBG((env->dbg, LEVEL_1, "Creating LPP with estimed numbers: %d vars, %d cst\n",
1661 estimated_n_var, estimated_n_cst));
1663 /* set up the LPP object */
1664 lpp = new_lpp_userdef(
1665 "be ilp scheduling",
/* FIX: pass the variable estimate here; previously estimated_n_cst was
   used for the "num vars" slot and estimated_n_var was never used */
1667 estimated_n_var + 1, /* num vars */
1668 estimated_n_cst + 20, /* num cst */
1669 1.2); /* grow factor */
1670 obstack_init(&var_obst);
1672 /* create ILP variables */
1673 create_variables(env, lpp, block_node, &var_obst);
1675 /* create ILP constraints */
1676 DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
1677 create_assignment_and_precedence_constraints(env, lpp, block_node);
1678 create_ressource_constraints(env, lpp, block_node);
1679 create_bundle_constraints(env, lpp, block_node);
1680 create_dying_nodes_constraint(env, lpp, block_node);
1681 create_pressure_constraint(env, lpp, block_node);
1683 DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp->var_next, lpp->cst_next));
1685 /* debug stuff, dump lpp when debugging is on */
1687 if (firm_dbg_get_mask(env->dbg) > 0) {
1691 snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
/* NOTE(review): fopen result is passed to lpp_dump_plain without a NULL
   check; fclose is not visible in this excerpt either -- verify */
1692 f = fopen(buf, "w");
1693 lpp_dump_plain(lpp, f);
1695 snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));
1700 /* set solve time limit */
1701 lpp_set_time_limit(lpp, env->opts->time_limit);
1703 /* set logfile if requested */
1704 if (strlen(env->opts->log_file) > 0) {
1705 if (strcasecmp(env->opts->log_file, "stdout") == 0)
1706 lpp_set_log(lpp, stdout);
1707 else if (strcasecmp(env->opts->log_file, "stderr") == 0)
1708 lpp_set_log(lpp, stderr);
1710 logfile = fopen(env->opts->log_file, "w");
1712 fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
1714 lpp_set_log(lpp, logfile);
/* solve the ILP remotely via the configured server/solver */
1719 lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);
1724 /* check for valid solution */
1725 if (! lpp_is_sol_valid(lpp)) {
/* dump LPP and graph for post-mortem analysis, then abort */
1729 snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.txt", get_irn_node_nr(block));
1730 f = fopen(buf, "w");
1731 lpp_dump_plain(lpp, f);
1733 snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.mps", get_irn_node_nr(block));
1735 dump_ir_block_graph(env->irg, "-assert");
1737 assert(0 && "ILP solution is not feasible!");
1740 DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
1741 DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
1742 DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
1743 DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp->iterations));
1744 DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp->sol_time));
1745 DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
1746 DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));
1748 DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));
1751 /* apply solution */
1752 apply_solution(env, lpp, block);
1757 /* notify backend */
1758 be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
1762 * Perform ILP scheduling on the given irg.
/*
 * Driver: sets up the scheduling environment, indexes all interesting
 * nodes per block, computes ASAP/ALAP intervals, runs the per-block ILP
 * (create_ilp) over the whole graph and cleans up afterwards.
 */
1764 void be_ilp_sched(const be_irg_t *birg) {
1765 be_ilpsched_env_t env;
1766 const char *name = "be ilp scheduling";
1767 arch_isa_t *isa = birg->main_env->arch_env->isa;
1768 const ilp_sched_selector_t *sel = isa->impl->get_ilp_sched_selector(isa);
1770 FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");
/* debugging aid, intentionally disabled */
1772 //firm_dbg_set_mask(env.dbg, 31);
1774 env.irg_env = be_ilp_sched_init_irg_ilp_schedule(sel, birg->irg);
1776 env.irg = birg->irg;
1777 env.height = heights_new(birg->irg);
1778 env.main_env = birg->main_env;
1779 env.arch_env = birg->main_env->arch_env;
1780 env.cpu = arch_isa_get_machine(birg->main_env->arch_env->isa);
1781 env.opts = &ilp_opts;
1782 phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn);
1784 /* assign a unique per block number to all interesting nodes */
1785 irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
1788 The block indices are completely build after the walk,
1789 now we can allocate the bitsets (size depends on block indices)
1792 phase_reinit_irn_data(&env.ph);
1794 /* Collect all root nodes (having no user in their block) and calculate ASAP. */
1795 irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);
1797 /* Calculate ALAP of all irns */
1798 irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);
1800 /* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
1801 irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);
1803 /* we don't need this information any longer */
1804 heights_free(env.height);
1806 /* perform ILP scheduling */
1807 irg_block_walk_graph(env.irg, clear_unwanted_data, create_ilp, &env);
/* report phase memory usage when any debug level is active */
1810 if (firm_dbg_get_mask(env.dbg)) {
1812 phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
1814 fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);
1818 /* free all allocated object */
1819 phase_free(&env.ph);
1821 /* notify backend */
1822 be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
1827 * Register ILP scheduler options.
/*
 * Adds the ilpsched option table to the given option group.
 * run_once presumably guards against double registration -- the guard
 * logic is not visible in this excerpt; TODO confirm.
 */
1829 void ilpsched_register_options(lc_opt_entry_t *grp) {
1830 static int run_once = 0;
1831 lc_opt_entry_t *sched_grp;
1835 sched_grp = lc_opt_get_grp(grp, "ilpsched");
1837 lc_opt_add_table(sched_grp, ilpsched_option_table);
1840 #endif /* WITH_LIBCORE */
1842 #else /* WITH_ILP */
/* keep the translation unit non-empty when ILP support is compiled out */
1844 static int some_picky_compiler_do_not_allow_empty_files;
1846 #endif /* WITH_ILP */