2 * Scheduling algorithms.
3 * An ILP scheduler based on
4 * "ILP-based Instruction Scheduling for IA-64"
5 * by Daniel Kaestner and Sebastian Winkel
8 * @author Christian Wuerdig
26 #include "irphase_t.h"
36 #include <lpp/lpp_net.h>
38 #include <libcore/lc_opts.h>
39 #include <libcore/lc_opts_enum.h>
40 #include <libcore/lc_timing.h>
44 #include "besched_t.h"
45 #include "beilpsched.h"
/* NOTE(review): this chunk is a sampled excerpt; several struct bodies below
 * are partially elided (member lists and closing braces missing from view). */
48 typedef struct _ilpsched_options_t {
/* Pairs an execution unit type with bookkeeping data (n_units member elided here). */
55 typedef struct _unit_type_info_t {
57 const be_execution_unit_type_t *tp;
61  * holding the ILP variables of the different types
/* Per-node arrays of LPP variable indices; one entry per (unit type, time step). */
63 typedef struct _ilp_var_types_t {
64 int *x; /* x_{nt}^k variables */
65 int *a; /* a_{nt}^k variables */
66 int *d; /* d_{nt}^k variables */
67 int *y; /* y_{nt}^k variables */
71  * Holds alive variables for a node live-in to a block.
/* Live-in entry: an irn member and an `a` variable array are used elsewhere
 * (see create_variables) but elided from this view. */
73 typedef struct _ilp_livein_node_t {
75 unsigned max_alive_steps;
79 /* attributes for a node */
80 typedef struct _ilpsched_node_attr_t {
81 unsigned asap; /**< The ASAP scheduling control step */
82 unsigned alap; /**< The ALAP scheduling control step */
83 unsigned latency; /**< Latency of this node (needed for sorting) */
84 unsigned sched_point; /**< the step in which the node is finally scheduled */
85 unsigned visit_idx; /**< Index of the node having visited this node last */
86 unsigned consumer_idx; /**< Index of the node having counted this node as consumer last */
87 unsigned n_consumer; /**< Number of consumers */
88 ir_node **block_consumer; /**< List of consumer being in the same block */
89 waitq *projkeeps; /**< A List of Projs and Keeps belonging to this node */
90 unsigned block_idx : 30; /**< A unique per block index */
91 unsigned alap_changed : 1; /**< the current ALAP has changed, revisit preds */
92 unsigned is_dummy_node : 1; /**< this node is assigned to DUMMY unit */
93 bitset_t *transitive_block_nodes; /**< Set of transitive block nodes (predecessors
94 for ASAP, successors for ALAP) */
95 unsigned n_unit_types; /**< number of allowed execution unit types */
96 unit_type_info_t *type_info; /**< list of allowed execution unit types */
97 ilp_var_types_t ilp_vars; /**< the different ILP variables */
98 } ilpsched_node_attr_t;
100 /* attributes for a block */
101 typedef struct _ilpsched_block_attr_t {
102 unsigned block_last_idx; /**< The highest node index in block so far */
103 unsigned n_interesting_nodes; /**< The number of nodes interesting for scheduling */
104 unsigned max_steps; /**< Upper bound for block execution */
105 plist_t *root_nodes; /**< A list of nodes having no user in current block */
106 ir_node *head_ilp_nodes; /**< A linked list of nodes which will contribute to ILP */
107 pset *livein_nodes; /**< A set of nodes which are live-in to this block */
108 } ilpsched_block_attr_t;
/* A phase-data entry is either a node or a block; the union selects the view. */
110 typedef union _ilpsched_attr_ {
111 ilpsched_node_attr_t node_attr;
112 ilpsched_block_attr_t block_attr;
115 /* An irn for the phase and its attributes (either node or block) */
/* NOTE(review): the struct header and its irn member are elided from this view. */
118 ilpsched_attr_t attr;
121 /* The ILP scheduling environment */
123 phase_t ph; /**< The phase */
124 ir_graph *irg; /**< The current irg */
125 heights_t *height; /**< The heights object of the irg */
126 void *irg_env; /**< An environment for the irg scheduling, provided by the backend */
127 void *block_env; /**< An environment for scheduling a block, provided by the backend */
128 const arch_env_t *arch_env; /**< The architecture environment */
129 const arch_isa_t *isa; /**< The ISA */
130 const be_main_env_t *main_env; /**< The backend main environment */
131 const be_machine_t *cpu; /**< the current abstract machine */
132 ilpsched_options_t *opts; /**< the ilp options for current irg */
133 const ilp_sched_selector_t *sel; /**< The ILP sched selector provided by the backend */
134 DEBUG_ONLY(firm_dbg_module_t *dbg);
137 /* convenience macros to handle phase irn data */
138 #define get_ilpsched_irn(ilpsched_env, irn) (phase_get_or_set_irn_data(&(ilpsched_env)->ph, (irn)))
139 #define is_ilpsched_block(node) (is_Block((node)->irn))
140 #define get_ilpsched_block_attr(block) (&(block)->attr.block_attr)
141 #define get_ilpsched_node_attr(node) (&(node)->attr.node_attr)
143 /* check if node is considered for ILP scheduling */
/* NOTE(review): trailing disjuncts of this macro (and the closing parens) are
 * elided from this view — it excludes at least Blocks and "normal" Projs. */
144 #define consider_for_sched(isa, irn) \
145 (! (is_Block(irn) || \
146 is_normal_Proj(isa, irn) || \
153 /* gives the valid scheduling time step interval for a node */
154 #define VALID_SCHED_INTERVAL(na) ((na)->alap - (na)->asap + 1)
156 /* gives the valid interval where a node can die */
157 #define VALID_KILL_INTERVAL(ba, na) ((ba)->max_steps - (na)->asap + 1)
159 /* gives the corresponding ILP variable for given node, unit and time step */
160 #define ILPVAR_IDX(na, unit, control_step) \
161 ((unit) * VALID_SCHED_INTERVAL((na)) + (control_step) - (na)->asap + 1)
163 /* gives the corresponding dead nodes ILP variable for given node, unit and time step */
164 #define ILPVAR_IDX_DEAD(ba, na, unit, control_step) \
165 ((unit) * VALID_KILL_INTERVAL((ba), (na)) + (control_step) - (na)->asap + 1)
167 /* check if a double value is within an epsilon environment of 0 */
168 #define LPP_VALUE_IS_0(dbl) (fabs((dbl)) <= 1e-10)
/* thin wrappers so the timing backend can be swapped out in one place */
170 #define ilp_timer_push(t) lc_timer_push((t))
171 #define ilp_timer_pop() lc_timer_pop()
172 #define ilp_timer_elapsed_usec(t) lc_timer_elapsed_usec((t))
174 /* option variable */
175 static ilpsched_options_t ilp_opts = {
176 1, /* default is with register pressure constraints */
177 120, /* if we have more than 120 nodes: use alive nodes constraint */
178 300, /* 300 sec per block time limit */
/* command line options registered with libcore; bound to ilp_opts above.
 * NOTE(review): the terminating NULL entry of this table is elided from view. */
183 static const lc_opt_table_entry_t ilpsched_option_table[] = {
184 LC_OPT_ENT_BOOL("regpress", "Use register pressure constraints", &ilp_opts.regpress),
185 LC_OPT_ENT_INT("limit_dead", "Upto how many nodes the dead node constraint should be used", &ilp_opts.limit_dead),
186 LC_OPT_ENT_INT("time_limit", "ILP time limit per block", &ilp_opts.time_limit),
187 LC_OPT_ENT_STR("lpp_log", "LPP logfile (stderr and stdout are supported)", ilp_opts.log_file, sizeof(ilp_opts.log_file)),
192 We need this global variable as we compare nodes dependent on heights,
193 but we cannot pass any information to the qsort compare function.
/* set in apply_solution() right before qsort(); read by cmp_ilpsched_irn() */
195 static heights_t *glob_heights;
198 * Check if irn is a Proj, which has no execution units assigned.
199 * @return 1 if irn is a Proj having no execution units assigned, 0 otherwise
201 static INLINE int is_normal_Proj(const arch_isa_t *isa, const ir_node *irn) {
202 return is_Proj(irn) && (arch_isa_get_allowed_execution_units(isa, irn) == NULL);
206 * Skips normal Projs.
207 * @return predecessor if irn is a normal Proj, otherwise irn.
209 static INLINE ir_node *skip_normal_Proj(const arch_isa_t *isa, ir_node *irn) {
210 if (is_normal_Proj(isa, irn))
211 return get_Proj_pred(irn);
/* Returns the backend-reported latency of @p irn, adjusted for scheduling.
 * NOTE(review): the body of the if below and the return statement are elided
 * from this view — presumably a zero latency is raised to 1 for real nodes
 * (non-Proj, non-Keep); confirm against the full source. */
215 static INLINE int fixed_latency(const ilp_sched_selector_t *sel, ir_node *irn, void *env) {
216 unsigned lat = be_ilp_sched_latency(sel, irn, env);
217 if (lat == 0 && ! is_Proj(irn) && ! be_is_Keep(irn))
222 static int cmp_live_in_nodes(const void *a, const void *b) {
223 const ilp_livein_node_t *n1 = a;
224 const ilp_livein_node_t *n2 = b;
226 return n1->irn != n2->irn;
230 * Compare scheduling time steps of two be_ilpsched_irn's.
/* qsort comparator: primary key is sched_point (ascending); ties are broken by
 * reachability within the block, then by latency (higher latency first). */
232 static int cmp_ilpsched_irn(const void *a, const void *b) {
233 be_ilpsched_irn_t *n1 = *(be_ilpsched_irn_t **)a;
234 be_ilpsched_irn_t *n2 = *(be_ilpsched_irn_t **)b;
235 ilpsched_node_attr_t *n1_a = get_ilpsched_node_attr(n1);
236 ilpsched_node_attr_t *n2_a = get_ilpsched_node_attr(n2);
238 if (n1_a->sched_point == n2_a->sched_point) {
239 ir_node *irn_a = n1->irn;
240 ir_node *irn_b = n2->irn;
/* NOTE(review): the return statements of the two reachability checks below are
 * elided from this view — presumably dependent nodes are ordered after their
 * producers; confirm the signs against the full source. */
242 if (heights_reachable_in_block(glob_heights, irn_a, irn_b))
244 if (heights_reachable_in_block(glob_heights, irn_b, irn_a))
248 Ok, timestep is equal and the nodes are parallel,
249 so check latency and schedule high latency first.
251 return QSORT_CMP(n2_a->latency, n1_a->latency);
254 return QSORT_CMP(n1_a->sched_point, n2_a->sched_point);
258 * In case there is no phase information for irn, initialize it.
/* Phase data constructor/re-initializer: allocates a be_ilpsched_irn_t on the
 * phase obstack on first call; on re-init, refreshes the per-node bitset and
 * ALAP flag (nodes) or resets the block attributes (blocks).
 * NOTE(review): several control-flow lines (else branches, returns, closing
 * braces) are elided from this view. */
260 static void *init_ilpsched_irn(phase_t *ph, ir_node *irn, void *old) {
261 be_ilpsched_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
264 /* if we have already some data: check for reinitialization */
266 if (! is_Block(irn)) {
267 ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
269 if (! na->transitive_block_nodes) {
270 ir_node *block = get_nodes_block(irn);
271 be_ilpsched_irn_t *block_node = phase_get_or_set_irn_data(ph, block);
272 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
274 /* we are called after the block indices have been built: create bitset */
275 na->transitive_block_nodes = bitset_obstack_alloc(phase_obst(ph), ba->block_last_idx);
278 /* we are called from reinit block data: clear the bitset */
279 bitset_clear_all(na->transitive_block_nodes);
281 na->alap_changed = 1;
289 /* set ilpsched irn attributes (either block or irn) */
291 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(res);
293 ba->n_interesting_nodes = 0;
294 ba->block_last_idx = 0;
295 ba->root_nodes = plist_new();
296 ba->head_ilp_nodes = NULL;
297 ba->livein_nodes = new_pset(cmp_live_in_nodes, 16);
301 ilpsched_node_attr_t *na = get_ilpsched_node_attr(res);
302 memset(na, 0, sizeof(*na));
309 * Assign a per block unique number to each node.
/* irg walker callback: hands out consecutive block-local indices via the
 * block's block_last_idx counter. */
311 static void build_block_idx(ir_node *irn, void *walk_env) {
312 be_ilpsched_env_t *env = walk_env;
313 be_ilpsched_irn_t *node, *block_node;
314 ilpsched_node_attr_t *na;
315 ilpsched_block_attr_t *ba;
/* NOTE(review): the guard's body (an early return) is elided from this view */
317 if (! consider_for_sched(env->arch_env->isa, irn))
320 node = get_ilpsched_irn(env, irn);
321 na = get_ilpsched_node_attr(node);
322 block_node = get_ilpsched_irn(env, get_nodes_block(irn));
323 ba = get_ilpsched_block_attr(block_node);
325 na->block_idx = ba->block_last_idx++;
328 /********************************************************
331 * __ _ ___ __ _ _ __ / / __ _| | __ _ _ __
332 * / _` / __|/ _` | '_ \ / / / _` | |/ _` | '_ \
333 * | (_| \__ \ (_| | |_) | / / | (_| | | (_| | |_) |
334 * \__,_|___/\__,_| .__/ /_/ \__,_|_|\__,_| .__/
337 ********************************************************/
340 * Add all nodes having no user in current block to last_nodes list.
/* irg walker callback: examines all data and dependency out edges of irn
 * (looking through "normal" Projs) to count its consumers, collect the
 * same-block data consumers, and classify irn as a root node (no user in its
 * own block) or a normal node. Also bumps the block's n_interesting_nodes.
 * NOTE(review): many interior lines (declarations of block/idx/consumer/i/j,
 * early returns, else branches, closing braces, DEL_ARR_F cleanup) are elided
 * from this view. */
342 static void collect_alap_root_nodes(ir_node *irn, void *walk_env) {
344 const ir_edge_t *edge;
345 be_ilpsched_irn_t *block_node, *node;
346 ilpsched_block_attr_t *ba;
347 ilpsched_node_attr_t *na;
349 be_ilpsched_env_t *env = walk_env;
350 int has_block_user = 0;
351 unsigned n_consumer = 0;
352 ir_edge_kind_t ekind[2] = { EDGE_KIND_NORMAL, EDGE_KIND_DEP };
356 if (! consider_for_sched(env->arch_env->isa, irn))
359 block = get_nodes_block(irn);
360 idx = get_irn_idx(irn);
361 consumer = NEW_ARR_F(ir_node *, 0);
363 DBG((env->dbg, LEVEL_3, "%+F (%+F) is interesting, examining ... ", irn, block));
365 /* check data and dependency out edges */
366 for (i = 0; i < 2 && ! has_block_user; ++i) {
367 foreach_out_edge_kind(irn, edge, ekind[i]) {
368 ir_node *user = get_edge_src_irn(edge);
370 if (is_normal_Proj(env->arch_env->isa, user)) {
371 const ir_edge_t *user_edge;
373 if (get_irn_mode(user) == mode_X)
376 /* The ABI ensures, that there will be no ProjT nodes in the graph. */
377 for (j = 0; j < 2; ++j) {
378 foreach_out_edge_kind(user, user_edge, ekind[j]) {
379 ir_node *real_user = get_edge_src_irn(user_edge);
381 if (! is_Phi(real_user) && ! is_Block(real_user)) {
382 be_ilpsched_irn_t *node = get_ilpsched_irn(env, real_user);
383 ilpsched_node_attr_t *ua = get_ilpsched_node_attr(node);
385 /* skip already visited nodes */
386 if (ua->consumer_idx == idx)
389 /* check if node has user in this block and collect the user if it's a data user */
390 if (get_nodes_block(real_user) == block) {
391 if (i == 0 && j == 0)
392 ARR_APP1(ir_node *, consumer, real_user);
396 /* only count data consumer */
400 /* mark user as visited by this node */
401 ua->consumer_idx = idx;
406 else if (is_Block(user)) {
409 else if (! is_Phi(user)) {
410 be_ilpsched_irn_t *node = get_ilpsched_irn(env, user);
411 ilpsched_node_attr_t *ua = get_ilpsched_node_attr(node);
413 /* skip already visited nodes */
414 if (ua->consumer_idx == idx)
417 /* check if node has user in this block and collect the user if it's a data user */
418 if (get_nodes_block(user) == block) {
420 ARR_APP1(ir_node *, consumer, user);
424 /* only count data consumer */
428 /* mark user visited by this node */
429 ua->consumer_idx = idx;
431 else if (get_nodes_block(user) != block) {
437 block_node = get_ilpsched_irn(env, block);
438 ba = get_ilpsched_block_attr(block_node);
440 ba->n_interesting_nodes++;
442 /* current irn has no user inside this block, add to queue */
443 if (! has_block_user) {
444 DB((env->dbg, LEVEL_3, "root node\n"));
445 plist_insert_back(ba->root_nodes, irn);
448 DB((env->dbg, LEVEL_3, "normal node\n"));
451 /* record number of all consumer and the consumer within the same block */
452 node = get_ilpsched_irn(env, irn);
453 na = get_ilpsched_node_attr(node);
454 na->n_consumer = n_consumer;
455 na->block_consumer = NEW_ARR_D(ir_node *, phase_obst(&env->ph), ARR_LEN(consumer));
456 memcpy(na->block_consumer, consumer, ARR_LEN(consumer) * sizeof(na->block_consumer[0]));
461 * Calculate the ASAP scheduling step for current irn.
/* Topological walker callback: ASAP(irn) = max over same-block, non-Phi,
 * non-NoMem predecessors of (ASAP(pred) + latency(pred)). Also links irn
 * into the block's ILP node list and accumulates max_steps.
 * NOTE(review): declarations of block/i/lat, the early return after the guard,
 * and closing braces are elided from this view. */
463 static void calculate_irn_asap(ir_node *irn, void *walk_env) {
464 be_ilpsched_env_t *env = walk_env;
467 be_ilpsched_irn_t *node, *block_node;
468 ilpsched_node_attr_t *na;
469 ilpsched_block_attr_t *ba;
471 /* These nodes are handled separate */
472 if (! consider_for_sched(env->arch_env->isa, irn))
475 DBG((env->dbg, LEVEL_2, "Calculating ASAP of node %+F ... ", irn));
477 block = get_nodes_block(irn);
478 node = get_ilpsched_irn(env, irn);
479 na = get_ilpsched_node_attr(node);
482 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
483 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
485 /* check for greatest distance to top */
486 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
487 be_ilpsched_irn_t *pred_node = get_ilpsched_irn(env, pred);
488 ilpsched_node_attr_t *pna = get_ilpsched_node_attr(pred_node);
491 lat = fixed_latency(env->sel, pred, env->block_env);
493 na->asap = MAX(na->asap, pna->asap + lat);
497 /* add node to ILP node list and update max_steps */
498 block_node = get_ilpsched_irn(env, block);
499 ba = get_ilpsched_block_attr(block_node);
501 set_irn_link(irn, ba->head_ilp_nodes);
502 ba->head_ilp_nodes = irn;
503 ba->max_steps += fixed_latency(env->sel, irn, env->block_env);
505 DB((env->dbg, LEVEL_2, "%u\n", na->asap));
509 * Calculate the ALAP scheduling step of all irns in current block.
510 * Depends on max_steps being calculated.
/* Block walker: worklist-based backward pass. Roots start at ALAP = max_steps;
 * each pass relaxes ALAP(pred) = min over users of (ALAP(user) - latency(pred))
 * until the queue drains. alap_changed forces re-visits after an ALAP update.
 * NOTE(review): several lines (declarations of el/i/lat, continue statements,
 * else keywords, closing braces, final del_waitq) are elided from this view. */
512 static void calculate_block_alap(ir_node *block, void *walk_env) {
513 be_ilpsched_env_t *env = walk_env;
514 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
515 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
516 waitq *cur_queue = new_waitq();
519 assert(is_Block(block));
521 DBG((env->dbg, LEVEL_2, "Calculating ALAP for nodes in %+F (%u nodes, %u max steps)\n",
522 block, ba->n_interesting_nodes, ba->max_steps));
524 /* TODO: Might be faster to use out edges and call phase_reinit_single_irn_data */
525 //phase_reinit_block_irn_data(&env->ph, block);
527 /* init start queue */
528 foreach_plist(ba->root_nodes, el) {
529 waitq_put(cur_queue, plist_element_get_value(el));
532 /* repeat until all nodes are processed */
533 while (! waitq_empty(cur_queue)) {
534 waitq *next_queue = new_waitq();
536 /* process all nodes in current step */
537 while (! waitq_empty(cur_queue)) {
538 ir_node *cur_irn = waitq_get(cur_queue);
539 be_ilpsched_irn_t *node = get_ilpsched_irn(env, cur_irn);
540 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
543 /* cur_node has no alap set -> it's a root node, set to max alap */
545 na->alap = ba->max_steps;
546 DBG((env->dbg, LEVEL_2, "setting ALAP of node %+F to %u, handling preds:\n",
550 DBG((env->dbg, LEVEL_2, "ALAP of node %+F is %u, handling preds:\n",
554 /* set the alap's of all predecessors */
555 for (i = get_irn_ins_or_deps(cur_irn) - 1; i >= 0; --i) {
556 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(cur_irn, i));
558 /* check for greatest distance to bottom */
559 if (! is_Phi(pred) && ! is_NoMem(pred) && get_nodes_block(pred) == block) {
560 be_ilpsched_irn_t *pred_node = get_ilpsched_irn(env, pred);
561 ilpsched_node_attr_t *pna = get_ilpsched_node_attr(pred_node);
564 /* mark the predecessor as visited by current irn */
565 if (pna->visit_idx == get_irn_idx(cur_irn) && ! na->alap_changed)
567 pna->visit_idx = get_irn_idx(cur_irn);
569 lat = fixed_latency(env->sel, pred, env->block_env);
571 /* set ALAP of current pred */
572 if (pna->alap == 0) {
573 /* current ALAP is 0: node has not yet been visited */
574 pna->alap_changed = 1;
575 pna->alap = na->alap - lat;
577 else if (pna->alap > na->alap - lat) {
578 /* we found a longer path to root node: change ALAP */
579 pna->alap = na->alap - lat;
580 pna->alap_changed = 1;
583 /* current ALAP is best found so far: keep it */
584 pna->alap_changed = 0;
587 DBG((env->dbg, LEVEL_2, "\tsetting ALAP of node %+F to %u\n", pred, pna->alap));
589 /* enqueue node for next iteration */
590 if (get_irn_ins_or_deps(pred) > 0)
591 waitq_put(next_queue, pred);
596 /* prepare for next iteration */
597 del_waitq(cur_queue);
598 cur_queue = next_queue;
603 * Free list of root nodes and the set of live-in nodes.
605 static void clear_unwanted_data(ir_node *block, void *walk_env) {
606 be_ilpsched_env_t *env = walk_env;
607 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
608 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
610 plist_free(ba->root_nodes);
611 ba->root_nodes = NULL;
612 del_pset(ba->livein_nodes);
613 ba->livein_nodes = NULL;
617 * Refine the {ASAP(n), ALAP(n)} interval for the nodes.
618 * Set the ASAP/ALAP times of Projs and Keeps to their ancestor ones.
/* irg walker callback: Projs and Keeps inherit the scheduling interval of the
 * node they belong to, and are registered in that node's projkeeps queue.
 * NOTE(review): the pred declaration, the early returns after the two guards,
 * and the closing brace are elided from this view. */
620 static void refine_asap_alap_times(ir_node *irn, void *walk_env) {
621 be_ilpsched_env_t *env = walk_env;
623 be_ilpsched_irn_t *node, *pred_node;
624 ilpsched_node_attr_t *na, *pna;
626 if (! consider_for_sched(env->arch_env->isa, irn))
629 if (! is_Proj(irn) && ! be_is_Keep(irn))
632 /* go to the ancestor */
634 pred = get_irn_n(irn, 0);
635 pred = skip_Proj(pred);
637 node = get_ilpsched_irn(env, irn);
638 pred_node = get_ilpsched_irn(env, pred);
639 na = get_ilpsched_node_attr(node);
640 pna = get_ilpsched_node_attr(pred_node);
642 na->asap = pna->asap;
643 na->alap = pna->alap;
645 /* record all Projs and Keeps for this node */
646 if (! pna->projkeeps)
647 pna->projkeeps = new_waitq();
648 waitq_put(pna->projkeeps, irn);
650 DBG((env->dbg, LEVEL_2, "fixing ASAP/ALAP of %+F to %u/%u\n", irn, na->asap, na->alap));
653 /*******************************************
656 * ___ ___| |__ ___ __| |_ _| | ___
657 * / __|/ __| '_ \ / _ \/ _` | | | | |/ _ \
658 * \__ \ (__| | | | __/ (_| | |_| | | __/
659 * |___/\___|_| |_|\___|\__,_|\__,_|_|\___|
661 *******************************************/
663 static INLINE void check_for_keeps(waitq *keeps, ir_node *block, ir_node *irn) {
664 const ir_edge_t *edge;
666 foreach_out_edge(irn, edge) {
667 ir_node *user = get_edge_src_irn(edge);
669 if (be_is_Keep(user)) {
670 assert(get_nodes_block(user) == block && "Keep must not be in different block.");
671 waitq_put(keeps, user);
677 * Inserts @p irn before @p before into schedule and notifies backend.
/* The backend selector is informed of the chosen cycle before the node is
 * linked into the schedule list. */
679 static INLINE void notified_sched_add_before(be_ilpsched_env_t *env,
680 ir_node *before, ir_node *irn, unsigned cycle)
682 be_ilp_sched_node_scheduled(env->sel, irn, cycle, env->block_env);
683 sched_add_before(before, irn);
687 * Adds a node, its Projs (in case of mode_T nodes) and
688 * its Keeps to schedule.
/* mode_M nodes are skipped entirely; for mode_T nodes all schedulable Proj
 * users are inserted at the same cycle; finally all collected Keeps are
 * scheduled. NOTE(review): several lines (early return after the mode_M
 * check, else branch, closing braces, del_waitq(keeps)) are elided from view. */
690 static void add_to_sched(be_ilpsched_env_t *env, ir_node *block, ir_node *irn, unsigned cycle) {
691 const ir_edge_t *edge;
692 waitq *keeps = new_waitq();
694 /* mode_M nodes are not scheduled */
695 if (get_irn_mode(irn) == mode_M)
698 if (! sched_is_scheduled(irn))
699 notified_sched_add_before(env, block, irn, cycle);
702 if (get_irn_mode(irn) == mode_T) {
703 foreach_out_edge(irn, edge) {
704 ir_node *user = get_edge_src_irn(edge);
706 if ((to_appear_in_schedule(user) || get_irn_mode(user) == mode_b) &&
707 get_irn_n_edges(user) > 0)
709 notified_sched_add_before(env, block, user, cycle);
712 check_for_keeps(keeps, block, user);
716 check_for_keeps(keeps, block, irn);
720 while (! waitq_empty(keeps)) {
721 ir_node *keep = waitq_get(keeps);
722 if (! sched_is_scheduled(keep))
723 notified_sched_add_before(env, block, keep, cycle);
730 * Schedule all nodes in the given block, according to the ILP solution.
/* Reads the solved LPP: for each ILP node finds the x_{nt}^k variable set to 1
 * to recover its sched_point, reports non-zero y (register pressure) costs,
 * sorts nodes by time step and emits the final schedule (Phis first, the
 * single control flow op last). NOTE(review): many lines (declarations of
 * irn/cfop/i/l/t/tp_idx/found/cur_var, case labels, sched_point assignment,
 * break statements, closing braces) are elided from this view. */
732 static void apply_solution(be_ilpsched_env_t *env, lpp_t *lpp, ir_node *block) {
733 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
734 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
735 sched_info_t *info = get_irn_sched_info(block);
736 be_ilpsched_irn_t **sched_nodes;
739 const ir_edge_t *edge;
741 /* init block schedule list */
742 INIT_LIST_HEAD(&info->list);
745 /* collect nodes and their scheduling time step */
746 sched_nodes = NEW_ARR_F(be_ilpsched_irn_t *, 0);
747 if (ba->n_interesting_nodes == 0) {
750 else if (ba->n_interesting_nodes == 1) {
751 be_ilpsched_irn_t *node = get_ilpsched_irn(env, ba->head_ilp_nodes);
753 /* add the single node */
754 ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
757 /* check all nodes for their positive solution */
758 foreach_linked_irns(ba->head_ilp_nodes, irn) {
759 be_ilpsched_irn_t *node;
760 ilpsched_node_attr_t *na;
764 node = get_ilpsched_irn(env, irn);
765 na = get_ilpsched_node_attr(node);
769 if (! na->is_dummy_node) {
770 for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
771 for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
772 double cost = lpp_get_var_sol(lpp, na->ilp_vars.y[cur_var]);
774 if (! LPP_VALUE_IS_0(cost)) {
775 ir_fprintf(stderr, "\t\t%+F has additional regpressure costs of %f\n", irn, cost);
783 /* go over all variables of a node until the non-zero one is found */
784 for (tp_idx = na->n_unit_types - 1; ! found && tp_idx >= 0; --tp_idx) {
785 for (t = na->asap - 1; ! found && t <= na->alap - 1; ++t) {
786 double val = lpp_get_var_sol(lpp, na->ilp_vars.x[cur_var++]);
788 /* check, if variable is set to one (it's not zero then :) */
789 if (! LPP_VALUE_IS_0(val)) {
791 ARR_APP1(be_ilpsched_irn_t *, sched_nodes, node);
792 DBG((env->dbg, LEVEL_2, "Schedpoint of %+F is %u at unit type %s\n",
793 irn, t, na->type_info[tp_idx].tp->name));
800 glob_heights = env->height;
801 /* sort nodes ascending by scheduling time step */
802 qsort(sched_nodes, ARR_LEN(sched_nodes), sizeof(sched_nodes[0]), cmp_ilpsched_irn);
805 /* make all Phis ready and remember the single cf op */
807 foreach_out_edge(block, edge) {
808 irn = get_edge_src_irn(edge);
810 switch (get_irn_opcode(irn)) {
812 add_to_sched(env, block, irn, 0);
822 assert(cfop == NULL && "Highlander - there can be only one");
829 /* add all nodes from list */
830 for (i = 0, l = ARR_LEN(sched_nodes); i < l; ++i) {
831 ilpsched_node_attr_t *na = get_ilpsched_node_attr(sched_nodes[i]);
832 if (sched_nodes[i]->irn != cfop)
833 add_to_sched(env, block, sched_nodes[i]->irn, na->sched_point);
836 /* schedule control flow node if not already done */
837 if (cfop && ! sched_is_scheduled(cfop))
838 add_to_sched(env, block, cfop, 0);
840 DEL_ARR_F(sched_nodes);
843 /***************************************************************
844 * _____ _ _____ _____ _ _
845 * |_ _| | | __ \ / ____| | | (_)
846 * | | | | | |__) | | (___ ___ ___| |_ _ ___ _ __
847 * | | | | | ___/ \___ \ / _ \/ __| __| |/ _ \| '_ \
848 * _| |_| |____| | ____) | __/ (__| |_| | (_) | | | |
849 * |_____|______|_| |_____/ \___|\___|\__|_|\___/|_| |_|
851 ***************************************************************/
854 * Check if node can be executed on given unit type.
/* Scans the node's type_info array for @p tp.
 * NOTE(review): the declaration of i and both return statements are elided
 * from this view — presumably the matching index is returned on success and a
 * negative value otherwise; confirm against the full source. */
856 static INLINE int is_valid_unit_type_for_node(const be_execution_unit_type_t *tp, be_ilpsched_irn_t *node) {
858 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
860 for (i = na->n_unit_types - 1; i >= 0; --i) {
861 if (na->type_info[i].tp == tp)
868 /************************************************
871 * __ ____ _ _ __ _ __ _| |__ | | ___ ___
872 * \ \ / / _` | '__| |/ _` | '_ \| |/ _ \/ __|
873 * \ V / (_| | | | | (_| | |_) | | __/\__ \
874 * \_/ \__,_|_| |_|\__,_|_.__/|_|\___||___/
876 ************************************************/
/* Queries the ISA for the execution unit types allowed for @p irn, fills the
 * node's type_info array (on first call only) with type pointers and unit
 * counts, and flags nodes that may run on a DUMMY unit.
 * NOTE(review): the final return (presumably n_unit_types) and some closing
 * braces are elided from this view. */
878 static int be_ilpsched_set_type_info(be_ilpsched_env_t *env, ir_node *irn, struct obstack *obst) {
879 const be_execution_unit_t ***execunits = arch_isa_get_allowed_execution_units(env->arch_env->isa, irn);
880 unsigned n_unit_types = 0;
881 be_ilpsched_irn_t *node;
882 ilpsched_node_attr_t *na;
883 unsigned unit_idx, tp_idx;
885 /* count number of available unit types for this node */
886 for (n_unit_types = 0; execunits[n_unit_types]; ++n_unit_types)
889 node = get_ilpsched_irn(env, irn);
890 na = get_ilpsched_node_attr(node);
892 if (! na->type_info) {
893 na->n_unit_types = n_unit_types;
894 na->type_info = NEW_ARR_D(unit_type_info_t, obst, n_unit_types);
896 /* fill the type info array */
897 for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
898 for (unit_idx = 0; execunits[tp_idx][unit_idx]; ++unit_idx) {
899 /* beware: we also count number of available units here */
900 if (be_machine_is_dummy_unit(execunits[tp_idx][unit_idx]))
901 na->is_dummy_node = 1;
904 na->type_info[tp_idx].tp = execunits[tp_idx][0]->tp;
905 na->type_info[tp_idx].n_units = unit_idx;
913 * Returns the largest alap time of a user of @p irn.
914 * The user must be in block @p block.
/* NOTE(review): the return statement (presumably `return max_alap;`) and
 * closing braces are elided from this view. */
916 static unsigned be_ilpsched_get_max_alap_user(be_ilpsched_env_t *env, ir_node *irn, ir_node *block) {
917 const ir_edge_t *edge;
918 unsigned max_alap = 0;
920 foreach_out_edge(irn, edge) {
921 ir_node *user = get_edge_src_irn(edge);
923 if (get_nodes_block(user) == block) {
924 be_ilpsched_irn_t *node = get_ilpsched_irn(env, user);
925 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
927 max_alap = MAX(max_alap, na->alap);
/* a node queried here must have at least one same-block user */
931 assert(max_alap > 0);
936 * Create the following variables:
937 * - x_{nt}^k binary weighted with: t
938 * node n is scheduled at time step t to unit type k
939 * ==>> These variables represent the schedule
941 * - d_{nt}^k binary weighted with: t
942 * node n dies at time step t on unit type k
943 * - a_{nt}^k binary weighted with num_nodes
944 * node n is alive at time step t on unit type k
946 * - y_{nt}^k binary weighted with: num_nodes^2
947 * node n is scheduled at time step t to unit type k
948 * although all units of this type are occupied
949 * ==>> These variables represent the register pressure
/* Creates all ILP variables for one block (see the comment above for the
 * variable classes x, d, a, y): per node one x (and, for non-dummy nodes,
 * one y) per (unit type, time step) in [asap, alap], plus either a or d
 * variables over [asap, max_steps] depending on the limit_dead option; then
 * alive variables for live-in nodes. NOTE(review): numerous lines
 * (declarations of irn/buf/i/t, else branches, counter updates, closing
 * braces, timer pop) are elided from this view. */
952 static void create_variables(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node, struct obstack *var_obst) {
955 unsigned num_block_var, num_nodes;
956 ilp_livein_node_t *livein;
957 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
958 unsigned weigth_y = ba->n_interesting_nodes * ba->n_interesting_nodes;
959 lc_timer_t *t_var = lc_timer_register("beilpsched_var", "create ilp variables");
961 ilp_timer_push(t_var);
962 num_block_var = num_nodes = 0;
963 foreach_linked_irns(ba->head_ilp_nodes, irn) {
964 be_ilpsched_irn_t *node;
965 ilpsched_node_attr_t *na;
966 unsigned n_unit_types, tp_idx, n_var, cur_unit;
967 unsigned cur_var_ad, cur_var_x, cur_var_y, num_ad;
970 node = get_ilpsched_irn(env, irn);
971 na = get_ilpsched_node_attr(node);
972 n_unit_types = be_ilpsched_set_type_info(env, irn, var_obst);
974 /* allocate space for ilp variables */
975 na->ilp_vars.x = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
976 memset(na->ilp_vars.x, -1, ARR_LEN(na->ilp_vars.x) * sizeof(na->ilp_vars.x[0]));
978 /* we need these variables only for "real" nodes */
979 if (! na->is_dummy_node) {
980 na->ilp_vars.y = NEW_ARR_D(int, var_obst, n_unit_types * VALID_SCHED_INTERVAL(na));
981 memset(na->ilp_vars.y, -1, ARR_LEN(na->ilp_vars.y) * sizeof(na->ilp_vars.y[0]));
983 num_ad = ba->max_steps - na->asap + 1;
985 if (ba->n_interesting_nodes > env->opts->limit_dead) {
986 na->ilp_vars.a = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
987 memset(na->ilp_vars.a, -1, ARR_LEN(na->ilp_vars.a) * sizeof(na->ilp_vars.a[0]));
990 na->ilp_vars.d = NEW_ARR_D(int, var_obst, n_unit_types * num_ad);
991 memset(na->ilp_vars.d, -1, ARR_LEN(na->ilp_vars.d) * sizeof(na->ilp_vars.d[0]));
995 DBG((env->dbg, LEVEL_3, "\thandling %+F (asap %u, alap %u, unit types %u):\n",
996 irn, na->asap, na->alap, na->n_unit_types));
998 cur_var_x = cur_var_ad = cur_var_y = cur_unit = n_var = 0;
999 /* create variables */
1000 for (tp_idx = 0; tp_idx < n_unit_types; ++tp_idx) {
1003 for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1004 /* x_{nt}^k variables */
1005 snprintf(buf, sizeof(buf), "x_n%u_%s_%u",
1006 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1007 na->ilp_vars.x[cur_var_x++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1008 DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1009 /* variable counter */
1013 if (! na->is_dummy_node) {
1014 /* y_{nt}^k variables */
1015 snprintf(buf, sizeof(buf), "y_n%u_%s_%u",
1016 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1017 na->ilp_vars.y[cur_var_y++] = lpp_add_var(lpp, buf, lpp_continous, (double)(weigth_y));
1018 DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1020 /* variable counter */
1026 /* a node can die at any step t: asap(n) <= t <= U */
1027 if (! na->is_dummy_node) {
1028 for (t = na->asap - 1; t <= ba->max_steps; ++t) {
1030 if (ba->n_interesting_nodes > env->opts->limit_dead) {
1031 /* a_{nt}^k variables */
1032 snprintf(buf, sizeof(buf), "a_n%u_%s_%u",
1033 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1034 na->ilp_vars.a[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(ba->n_interesting_nodes));
1037 /* d_{nt}^k variables */
1038 snprintf(buf, sizeof(buf), "d_n%u_%s_%u",
1039 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1040 na->ilp_vars.d[cur_var_ad++] = lpp_add_var(lpp, buf, lpp_binary, (double)(t + 1));
1042 DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1044 /* variable counter */
1050 /* collect live-in nodes */
1051 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
1052 ir_node *pred = get_irn_n(irn, i);
1054 if (get_nodes_block(pred) != block_node->irn && consider_for_sched(env->arch_env->isa, pred)) {
1055 be_ilpsched_set_type_info(env, pred, var_obst);
1056 if (! na->is_dummy_node) {
1057 ilp_livein_node_t *entry = obstack_alloc(var_obst, sizeof(*entry));
1060 pset_insert(ba->livein_nodes, entry, (unsigned)get_irn_idx(pred));
1066 DB((env->dbg, LEVEL_3, "%u variables created\n", n_var));
1070 /* create alive variables a_{nt}^k for live-ins */
1071 foreach_pset(ba->livein_nodes, livein) {
1072 be_ilpsched_irn_t *node;
1073 ilpsched_node_attr_t *na;
1074 unsigned tp_idx, var_idx, max_alap;
1078 node = get_ilpsched_irn(env, irn);
1079 na = get_ilpsched_node_attr(node);
1081 livein->max_alive_steps = be_ilpsched_get_max_alap_user(env, irn, block_node->irn);
1083 livein->a = NEW_ARR_D(int, var_obst, na->n_unit_types * livein->max_alive_steps);
1086 /* create variables */
1087 for (tp_idx = 0; tp_idx < na->n_unit_types; ++tp_idx) {
1090 for (t = 0; t < livein->max_alive_steps; ++t) {
1091 /* a_{nt}^k variables */
1092 snprintf(buf, sizeof(buf), "al_n%u_%s_%u",
1093 get_irn_idx(irn), na->type_info[tp_idx].tp->name, t);
1094 livein->a[var_idx++] = lpp_add_var(lpp, buf, lpp_binary, (double)(ba->n_interesting_nodes));
1095 DBG((env->dbg, LEVEL_4, "\t\tcreated ILP variable %s\n", buf));
1102 DBG((env->dbg, LEVEL_1, "... %u variables for %u nodes created (%g sec)\n",
1103 num_block_var, num_nodes, ilp_timer_elapsed_usec(t_var) / 1000000.0));
1106 /*******************************************************
1109 * ___ ___ _ __ ___| |_ _ __ __ _ _ _ __ | |_ ___
1110 * / __/ _ \| '_ \/ __| __| '__/ _` | | '_ \| __/ __|
1111 * | (_| (_) | | | \__ \ |_| | | (_| | | | | | |_\__ \
1112 * \___\___/|_| |_|___/\__|_| \__,_|_|_| |_|\__|___/
1114 *******************************************************/
1117 * Create following ILP constraints:
1118 * - the assignment constraints:
1119 * assure each node is executed once by exactly one (allowed) execution unit
1120 * - the dead node assignment constraints:
1121 * assure a node can only die at most once
1122 * - the precedence constraints:
1123 * assure that no data dependencies are violated
1125 static void create_assignment_and_precedence_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Emits three constraint families for every ILP node of the block:
 *  - assignment: the x_{nt}^k of a node sum to exactly 1 (scheduled once, on one unit type)
 *  - dead-node assignment: the d_{nt}^k of a node sum to at most 1 (a node dies at most once)
 *  - precedence: a consumer may not be scheduled before its in-block operands' results are ready
 * env:        ILP scheduling environment (dbg, arch_env, opts, sel, block_env)
 * lpp:        the LP problem being populated
 * block_node: scheduler wrapper of the block whose nodes are constrained
 */
1126 unsigned num_cst_assign, num_cst_prec, num_cst_dead;
1129 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
/* one bit per block-local node index: visit each predecessor only once per node */
1130 bitset_t *bs_block_irns = bitset_alloca(ba->block_last_idx);
1131 lc_timer_t *t_cst_assign = lc_timer_register("beilpsched_cst_assign", "create assignment constraints");
1132 lc_timer_t *t_cst_dead = lc_timer_register("beilpsched_cst_assign_dead", "create dead node assignment constraints");
1133 lc_timer_t *t_cst_prec = lc_timer_register("beilpsched_cst_prec", "create precedence constraints");
1135 num_cst_assign = num_cst_prec = num_cst_dead = 0;
1136 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1139 be_ilpsched_irn_t *node;
1140 ilpsched_node_attr_t *na;
1142 node = get_ilpsched_irn(env, irn);
1143 na = get_ilpsched_node_attr(node);
1146 /* the assignment constraint: sum of all x variables of this node == 1 */
1147 ilp_timer_push(t_cst_assign);
1148 snprintf(buf, sizeof(buf), "assignment_cst_n%u", get_irn_idx(irn));
1149 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 1.0);
1150 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1153 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.x, ARR_LEN(na->ilp_vars.x), 1.0);
1156 /* the dead node assignment constraint: only for real (non-DUMMY) nodes and
   only while the block is small enough for dead-node handling (limit_dead) */
1157 if (! na->is_dummy_node && ba->n_interesting_nodes <= env->opts->limit_dead) {
1158 ilp_timer_push(t_cst_dead);
1159 snprintf(buf, sizeof(buf), "dead_node_assign_cst_n%u", get_irn_idx(irn));
1160 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1161 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1163 lpp_set_factor_fast_bulk(lpp, cst, na->ilp_vars.d, ARR_LEN(na->ilp_vars.d), 1.0);
1167 /* We have separate constraints for Projs and Keeps */
1168 // NOTE(review): skipping Projs/Keeps here reportedly made the ILP infeasible, so the check stays disabled
1169 // if (is_Proj(irn) || be_is_Keep(irn))
1172 /* the precedence constraints */
1173 ilp_timer_push(t_cst_prec);
1174 bs_block_irns = bitset_clear_all(bs_block_irns);
1175 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
1176 ir_node *pred = skip_normal_Proj(env->arch_env->isa, get_irn_in_or_dep(irn, i));
1177 unsigned t_low, t_high, t;
1178 be_ilpsched_irn_t *pred_node;
1179 ilpsched_node_attr_t *pna;
/* Phis, out-of-block predecessors and NoMem impose no intra-block ordering */
1182 if (is_Phi(pred) || block_node->irn != get_nodes_block(pred) || is_NoMem(pred))
1185 pred_node = get_ilpsched_irn(env, pred);
1186 pna = get_ilpsched_node_attr(pred_node);
1188 assert(pna->asap > 0 && pna->alap >= pna->asap && "Invalid scheduling interval.");
1190 if (! bitset_is_set(bs_block_irns, pna->block_idx))
1191 bitset_set(bs_block_irns, pna->block_idx);
1195 /* irn = n, pred = m */
1196 delay = fixed_latency(env->sel, pred, env->block_env);
/* time window in which the dependency n-after-m can be violated;
 * asap/alap are 1-based (see assert above), ILP steps are 0-based, hence the "- 1" shifts */
1197 t_low = MAX(na->asap, pna->asap + delay - 1);
1198 t_high = MIN(na->alap, pna->alap + delay - 1);
1199 for (t = t_low - 1; t <= t_high - 1; ++t) {
1201 int *tmp_var_idx = NEW_ARR_F(int, 0);
1203 snprintf(buf, sizeof(buf), "precedence_n%u_n%u_%u", get_irn_idx(pred), get_irn_idx(irn), t);
1204 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 1.0);
1205 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1208 /* lpp_set_factor_fast_bulk needs variables sorted ascending by index */
1209 if (na->ilp_vars.x[0] < pna->ilp_vars.x[0]) {
1210 /* node variables have smaller index than pred variables */
1211 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1212 for (tn = na->asap - 1; tn <= t; ++tn) {
1213 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1214 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1218 for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1219 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1220 unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1221 ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1226 /* pred variables have smaller index than node variables */
1227 for (tp_idx = pna->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1228 for (tm = t - delay + 1; tm < pna->alap; ++tm) {
1229 unsigned idx = ILPVAR_IDX(pna, tp_idx, tm);
1230 ARR_APP1(int, tmp_var_idx, pna->ilp_vars.x[idx]);
1234 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1235 for (tn = na->asap - 1; tn <= t; ++tn) {
1236 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1237 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1242 if (ARR_LEN(tmp_var_idx) > 0)
1243 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1245 DEL_ARR_F(tmp_var_idx);
/* NOTE(review): "assignement" below is a typo in the log string; left untouched here */
1250 DBG((env->dbg, LEVEL_1, "\t%u assignement constraints (%g sec)\n",
1251 num_cst_assign, ilp_timer_elapsed_usec(t_cst_assign) / 1000000.0));
1252 DBG((env->dbg, LEVEL_1, "\t%u precedence constraints (%g sec)\n",
1253 num_cst_prec, ilp_timer_elapsed_usec(t_cst_prec) / 1000000.0));
1257 * Create ILP resource constraints:
1258 * - assure that for each time step not more instructions are scheduled
1259 * to the same unit types as units of this type are available
1261 static void create_ressource_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * For every real execution unit type k and every time step t, constrain the
 * number of nodes scheduled on k at t to the number of available units:
 *   sum(x_{nt}^k over eligible nodes n) <= n_units(k)
 */
1264 unsigned num_cst_resrc = 0;
1265 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1266 lc_timer_t *t_cst_rsrc = lc_timer_register("beilpsched_cst_rsrc", "create resource constraints");
1268 ilp_timer_push(t_cst_rsrc);
1269 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1271 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1273 /* BEWARE: the DUMMY unit type is not in CPU, so it's skipped automatically */
1275 /* check each time step */
1276 for (t = 0; t < ba->max_steps; ++t) {
1279 int *tmp_var_idx = NEW_ARR_F(int, 0);
1281 snprintf(buf, sizeof(buf), "resource_cst_%s_%u", cur_tp->name, t);
1282 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)cur_tp->n_units);
1283 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1286 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1287 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1288 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
/* collect x_{nt}^k only for nodes that may run on this unit type
 * and whose 0-based schedule window [asap-1, alap-1] covers t */
1291 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1293 if (tp_idx >= 0 && t >= na->asap - 1 && t <= na->alap - 1) {
1294 int cur_var = ILPVAR_IDX(na, tp_idx, t);
1295 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[cur_var]);
1299 /* set constraints if we have some */
1300 if (ARR_LEN(tmp_var_idx) > 0)
1301 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1303 DEL_ARR_F(tmp_var_idx);
1307 DBG((env->dbg, LEVEL_1, "\t%u resource constraints (%g sec)\n",
1308 num_cst_resrc, ilp_timer_elapsed_usec(t_cst_rsrc) / 1000000.0));
1312 * Create ILP bundle constraints:
1313 * - assure, at most bundle_size * bundles_per_cycle instructions
1314 * can be started at a certain point.
1316 static void create_bundle_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * For every time step t, constrain the total number of instructions issued
 * (over all unit types) to bundle_size * bundles_per_cycle.
 * Projs, Keeps and DUMMY-unit nodes are not counted.
 */
1319 unsigned num_cst_bundle = 0;
/* "bundels_per_cycle" is the (misspelled) field name of the machine description */
1320 unsigned n_instr_max = env->cpu->bundle_size * env->cpu->bundels_per_cycle;
1321 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1322 lc_timer_t *t_cst_bundle = lc_timer_register("beilpsched_cst_bundle", "create bundle constraints");
1324 ilp_timer_push(t_cst_bundle);
1325 for (t = 0; t < ba->max_steps; ++t) {
1328 int *tmp_var_idx = NEW_ARR_F(int, 0);
1330 snprintf(buf, sizeof(buf), "bundle_cst_%u", t);
1331 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)n_instr_max);
1332 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1335 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1336 be_ilpsched_irn_t *node;
1337 ilpsched_node_attr_t *na;
1340 /* Projs and Keeps do not contribute to bundle size */
1341 if (is_Proj(irn) || be_is_Keep(irn))
1344 node = get_ilpsched_irn(env, irn);
1345 na = get_ilpsched_node_attr(node);
1347 /* nodes assigned to DUMMY unit do not contribute to bundle size */
1348 if (na->is_dummy_node)
/* only steps inside the node's 0-based schedule window matter */
1351 if (t >= na->asap - 1 && t <= na->alap - 1) {
1352 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1353 int idx = ILPVAR_IDX(na, tp_idx, t);
1354 ARR_APP1(int, tmp_var_idx, na->ilp_vars.x[idx]);
1359 if (ARR_LEN(tmp_var_idx) > 0)
1360 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1362 DEL_ARR_F(tmp_var_idx);
1365 DBG((env->dbg, LEVEL_1, "\t%u bundle constraints (%g sec)\n",
1366 num_cst_bundle, ilp_timer_elapsed_usec(t_cst_bundle) / 1000000.0));
1370 * Create ILP dying nodes constraints:
1371 * - set variable d_{nt}^k to 1 if node n dies at step t on unit k
1373 static void create_dying_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Forces d_{nt}^k to 1 when node n's last in-block consumer is scheduled by
 * step t: (#consumers scheduled till t) - (prior kills) - n_consumer * d_{nt}^k
 * <= n_consumer - 1.  Nodes without in-block consumers and DUMMY nodes are skipped.
 */
1376 unsigned num_cst = 0;
1377 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1378 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_dying_nodes", "create dying nodes constraints");
1380 ilp_timer_push(t_cst);
1381 /* check all time_steps */
1382 for (t = 0; t < ba->max_steps; ++t) {
1386 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1387 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1388 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1390 /* if node has no consumer within current block, it cannot die here */
1391 /* we also ignore nodes assigned to dummy unit */
1392 if (ARR_LEN(na->block_consumer) < 1 || na->is_dummy_node)
1395 /* node can only die here if t at least asap(n) */
1396 if (t >= na->asap - 1) {
1399 /* for all unit types */
1400 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1402 int *tmp_var_idx = NEW_ARR_F(int, 0);
1404 snprintf(buf, sizeof(buf), "dying_node_cst_%u_n%u", t, get_irn_idx(irn));
1405 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(na->n_consumer - 1));
1406 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1409 /* number of consumer scheduled till t */
1410 for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1411 be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1412 ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
1414 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
/* consumer schedule steps up to min(t, alap(consumer)-1), 0-based */
1417 for (tm = ca->asap - 1; tm <= t && tm <= ca->alap - 1; ++tm) {
1418 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1419 ARR_APP1(int, tmp_var_idx, ca->ilp_vars.x[idx]);
1424 /* could be that no consumer can be scheduled at this point */
1425 if (ARR_LEN(tmp_var_idx)) {
1429 /* subtract possible prior kill points */
1430 for (tn = na->asap - 1; tn < t; ++tn) {
1431 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, tn);
1432 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], -1.0);
/* -n_consumer * d_{nt}^k: setting d releases the constraint */
1435 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1436 lpp_set_factor_fast(lpp, cst, na->ilp_vars.d[idx], 0.0 - (double)(na->n_consumer));
1437 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1440 DEL_ARR_F(tmp_var_idx);
1447 DBG((env->dbg, LEVEL_1, "\t%u dying nodes constraints (%g sec)\n",
1448 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1452 * Create ILP alive nodes constraints:
1453 * - set variable a_{nt}^k to 1 if node n is alive at step t on unit k
1455 static void create_alive_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Forces a_{nt}^k to 1 while node n (scheduled by step t) still has unscheduled
 * in-block consumers:
 *   n_consumer * (scheduled(n, <=t)) - (#consumers scheduled <=t) - n_consumer * a_{nt}^k <= 0
 * DUMMY-unit nodes are skipped.
 */
1458 unsigned num_cst = 0;
1459 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1460 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_alive_nodes", "create alive nodes constraints");
1462 ilp_timer_push(t_cst);
1464 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1465 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1466 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1469 /* we ignore nodes assigned to dummy unit here */
1470 if (na->is_dummy_node)
1473 /* check all time steps: asap(n) <= t <= U */
1474 for (t = na->asap - 1; t < ba->max_steps; ++t) {
1477 /* for all unit types available for this node */
1478 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1479 unsigned tn, tn_max, idx;
1481 int *tmp_var_idx_n = NEW_ARR_F(int, 0);
1482 int *tmp_var_idx_m = NEW_ARR_F(int, 0);
1484 snprintf(buf, sizeof(buf), "alive_node_cst_%u_n%u_%s",
1485 t, get_irn_idx(irn), na->type_info[node_tp_idx].tp->name);
1486 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, 0.0);
1487 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1490 tn_max = MIN(na->alap - 1, t);
1491 /* check if the node has been scheduled so far */
1492 for (tn = na->asap - 1; tn <= tn_max; ++tn) {
1493 int idx = ILPVAR_IDX(na, node_tp_idx, tn);
1494 ARR_APP1(int, tmp_var_idx_n, na->ilp_vars.x[idx]);
/* weight n_consumer: all consumers must be scheduled to cancel it out */
1497 if (ARR_LEN(tmp_var_idx_n) > 0)
1498 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_n, ARR_LEN(tmp_var_idx_n), (double)(na->n_consumer));
1499 DEL_ARR_F(tmp_var_idx_n);
1501 /* subtract the number of consumer scheduled so far */
1502 for (i = ARR_LEN(na->block_consumer) - 1; i >= 0; --i) {
1503 be_ilpsched_irn_t *cons = get_ilpsched_irn(env, na->block_consumer[i]);
1504 ilpsched_node_attr_t *ca = get_ilpsched_node_attr(cons);
1506 unsigned tm, tm_max;
1508 tm_max = MIN(ca->alap - 1, t);
1509 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1510 for (tm = ca->asap - 1; tm <= tm_max; ++tm) {
1511 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1512 ARR_APP1(int, tmp_var_idx_m, ca->ilp_vars.x[idx]);
1517 if (ARR_LEN(tmp_var_idx_m) > 0)
1518 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_m, ARR_LEN(tmp_var_idx_m), -1.0);
1519 DEL_ARR_F(tmp_var_idx_m);
/* NOTE(review): the alive variable a is indexed via ILPVAR_IDX_DEAD —
 * apparently a and d share the same layout; confirm against the macro definitions */
1522 idx = ILPVAR_IDX_DEAD(ba, na, node_tp_idx, t);
1523 lpp_set_factor_fast(lpp, cst, na->ilp_vars.a[idx], 0.0 - (double)(na->n_consumer));
1529 DBG((env->dbg, LEVEL_1, "\t%u alive nodes constraints (%g sec)\n",
1530 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1534 * Create ILP alive nodes constraints for live-in nodes:
1535 * - set variable a_{nt}^k to 1 if node n is alive at step t on unit k
1537 static void create_alive_livein_nodes_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Same idea as create_alive_nodes_constraint, but for nodes that are live-in
 * to the block: the live-in node is alive (a_{nt}^k = 1) until all of its
 * users inside this block have been scheduled:
 *   sum(scheduled users <=t) + num_block_user * a_{nt}^k >= num_block_user
 */
1539 ilp_livein_node_t *livein;
1540 unsigned num_cst = 0;
1541 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1542 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_alive_livein_nodes", "create alive livein nodes constraints");
1544 ilp_timer_push(t_cst);
1546 foreach_pset(ba->livein_nodes, livein) {
1547 ir_node *irn = livein->irn;
1548 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1549 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1552 /* check all time steps: 0 <= t < max_alive_steps */
1553 for (t = 0; t < livein->max_alive_steps; ++t) {
1556 /* for all unit types available for this node */
1557 for (node_tp_idx = na->n_unit_types - 1; node_tp_idx >= 0; --node_tp_idx) {
1558 const ir_edge_t *edge;
1559 unsigned tn, tn_max, idx;
1560 int cst, i, num_block_user;
1561 int *tmp_var_idx_m = NEW_ARR_F(int, 0);
1563 /* check the number of consumer scheduled so far */
1565 foreach_out_edge(irn, edge) {
1566 ir_node *user = get_edge_src_irn(edge);
1567 be_ilpsched_irn_t *cons;
1568 ilpsched_node_attr_t *ca;
1570 unsigned tm, tm_max;
1572 /* check only users within current block */
1573 if (get_nodes_block(user) != block_node->irn)
1577 cons = get_ilpsched_irn(env, user);
1578 ca = get_ilpsched_node_attr(cons);
/* collect x variables of the user for all steps up to min(t, alap-1) */
1580 tm_max = MIN(ca->alap - 1, t);
1581 for (tp_idx = ca->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1582 for (tm = ca->asap - 1; tm <= tm_max; ++tm) {
1583 int idx = ILPVAR_IDX(ca, tp_idx, tm);
1584 ARR_APP1(int, tmp_var_idx_m, ca->ilp_vars.x[idx]);
1589 snprintf(buf, sizeof(buf), "alive_livein_node_cst_%u_n%u_%s",
1590 t, get_irn_idx(irn), na->type_info[node_tp_idx].tp->name);
1591 cst = lpp_add_cst_uniq(lpp, buf, lpp_greater, (double)num_block_user);
1592 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1595 /* sum(scheduled users) */
1596 if (ARR_LEN(tmp_var_idx_m) > 0)
1597 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_m, ARR_LEN(tmp_var_idx_m), 1.0);
1598 DEL_ARR_F(tmp_var_idx_m);
1600 /* + c * a_{nt}^k */
/* live-in alive variables are laid out [unit type][step] in livein->a */
1601 idx = node_tp_idx * livein->max_alive_steps + t;
1602 lpp_set_factor_fast(lpp, cst, livein->a[idx], (double)(num_block_user));
1607 DBG((env->dbg, LEVEL_1, "\t%u alive livein nodes constraints (%g sec)\n",
1608 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1612 * Create ILP pressure constraints, based on dead nodes:
1613 * - add additional costs to objective function if a node is scheduled
1614 * on a unit although all units of this type are currently occupied
1616 static void create_pressure_dead_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Register-pressure penalty (dead-node variant): forces y_{nt}^k to 1 when
 * more values occupy unit type k at step t than there are units:
 *   sum(scheduled on k <=t) - sum(died on k <=t) - y_{nt}^k <= n_units(k) - 1
 * The y variables carry cost in the objective function (see variable creation).
 */
1619 unsigned num_cst = 0;
1620 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1621 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1623 ilp_timer_push(t_cst);
1624 /* y_{nt}^k is set for each node and timestep and unit type */
1625 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1626 unsigned cur_idx = get_irn_idx(cur_irn);
1627 be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
1628 ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);
1631 /* we ignore nodes assigned to DUMMY unit here */
1632 if (cur_na->is_dummy_node)
1636 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1637 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1641 /* BEWARE: the DUMMY unit types is not in CPU, so it's skipped automatically */
1643 /* check if node can be executed on this unit type */
1644 cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1648 /* check all time_steps */
1649 for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1652 int *tmp_idx_1 = NEW_ARR_F(int, 0);
1653 int *tmp_idx_m1 = NEW_ARR_F(int, 0);
1655 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1656 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1657 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1661 - accumulate all nodes scheduled on unit type k till t
1662 - subtract all nodes died on unit type k till t
1664 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1665 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1666 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1670 tmax = MIN(t, na->alap - 1);
1671 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1673 /* current unit type is not suitable for current node */
1677 for (tn = na->asap - 1; tn <= tmax; ++tn) {
1680 /* node scheduled */
1681 idx = ILPVAR_IDX(na, tp_idx, tn);
1682 ARR_APP1(int, tmp_idx_1, na->ilp_vars.x[idx]);
/* node died at tn: compensates the +1 above */
1685 idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, tn);
1686 ARR_APP1(int, tmp_idx_m1, na->ilp_vars.d[idx]);
1690 if (ARR_LEN(tmp_idx_1) > 0)
1691 lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_1, ARR_LEN(tmp_idx_1), 1.0);
1693 if (ARR_LEN(tmp_idx_m1) > 0)
1694 lpp_set_factor_fast_bulk(lpp, cst, tmp_idx_m1, ARR_LEN(tmp_idx_m1), -1.0);
1696 /* BEWARE: t is unsigned, so (double)(-t) won't work */
1697 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1698 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], -1.0);
1700 DEL_ARR_F(tmp_idx_1);
1701 DEL_ARR_F(tmp_idx_m1);
1706 DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1707 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1711 * Create ILP pressure constraints, based on alive nodes:
1712 * - add additional costs to objective function if a node is scheduled
1713 * on a unit although all units of this type are currently occupied
1715 static void create_pressure_alive_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Register-pressure penalty (alive-node variant): forces y_{nt}^k to 1 when
 * more values are alive on unit type k at step t than there are units:
 *   sum(a_{mt}^k over block nodes and live-ins m) - y_{nt}^k <= n_units(k) - 1
 */
1718 unsigned num_cst = 0;
1719 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1720 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_pressure", "create pressure constraints");
1722 ilp_timer_push(t_cst);
1723 /* y_{nt}^k is set for each node and timestep and unit type */
1724 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1725 unsigned cur_idx = get_irn_idx(cur_irn);
1726 be_ilpsched_irn_t *cur_node = get_ilpsched_irn(env, cur_irn);
1727 ilpsched_node_attr_t *cur_na = get_ilpsched_node_attr(cur_node);
1730 /* we ignore nodes assigned to DUMMY unit here */
1731 if (cur_na->is_dummy_node)
1735 for (glob_type_idx = env->cpu->n_unit_types - 1; glob_type_idx >= 0; --glob_type_idx) {
1736 be_execution_unit_type_t *cur_tp = &env->cpu->unit_types[glob_type_idx];
1740 /* BEWARE: the DUMMY unit types is not in CPU, so it's skipped automatically */
1742 /* check if node can be executed on this unit type */
1743 cur_tp_idx = is_valid_unit_type_for_node(cur_tp, cur_node);
1747 /* check all time_steps at which the current node can be scheduled */
1748 for (t = cur_na->asap - 1; t <= cur_na->alap - 1; ++t) {
1751 int *tmp_var_idx = NEW_ARR_F(int, 0);
1752 ilp_livein_node_t *livein;
1754 snprintf(buf, sizeof(buf), "pressure_cst_n%u_%u_%s", cur_idx, t, cur_tp->name);
1755 cst = lpp_add_cst_uniq(lpp, buf, lpp_less, (double)(cur_tp->n_units - 1));
1756 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1759 /* - accumulate all nodes alive at point t on unit type k */
1760 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1761 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1762 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1765 /* check if node can be alive here */
1766 if (t < na->asap - 1)
1769 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1771 /* current type is not suitable */
/* NOTE(review): alive variable a indexed via ILPVAR_IDX_DEAD —
 * a and d apparently share the same layout; confirm against the macros */
1775 a_idx = ILPVAR_IDX_DEAD(ba, na, tp_idx, t);
1776 ARR_APP1(int, tmp_var_idx, na->ilp_vars.a[a_idx]);
1778 /* do the same for livein nodes */
1779 foreach_pset(ba->livein_nodes, livein) {
1780 ir_node *irn = livein->irn;
1781 be_ilpsched_irn_t *node = get_ilpsched_irn(env, irn);
1784 /* check if node can be alive here */
1785 if (t >= livein->max_alive_steps)
1788 tp_idx = is_valid_unit_type_for_node(cur_tp, node);
1790 /* current type is not suitable */
/* live-in alive variables are laid out [unit type][step] */
1794 a_idx = tp_idx * livein->max_alive_steps + t;
1795 ARR_APP1(int, tmp_var_idx, livein->a[a_idx]);
1798 if (ARR_LEN(tmp_var_idx) > 0)
1799 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx, ARR_LEN(tmp_var_idx), 1.0);
1800 DEL_ARR_F(tmp_var_idx);
1802 /* - num_nodes * y_{nt}^k */
1803 y_idx = ILPVAR_IDX(cur_na, cur_tp_idx, t);
1804 lpp_set_factor_fast(lpp, cst, cur_na->ilp_vars.y[y_idx], -1.0);
1809 DBG((env->dbg, LEVEL_1, "\t%u pressure constraints (%g sec)\n",
1810 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1814 * Create ILP branch constraints:
1815 * Assure all nodes are scheduled prior to the cfg op.
1817 static void create_branch_constraint(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Forces the block's single control-flow op to be scheduled last: for each
 * step t in the branch's window,
 *   sum(non-branch x vars <=t) - num_non_branches * x_{branch,t}^k >= 0
 * i.e. the branch can only go at t once every other node has been placed.
 */
1819 ir_node *cur_irn, *cfop;
1820 unsigned num_cst = 0;
1821 unsigned num_non_branches = 0;
1822 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1823 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_branch", "create branch constraints");
1825 ilp_timer_push(t_cst);
1827 /* determine number of non-branch nodes and the one and only branch node */
1828 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1829 switch (get_irn_opcode(cur_irn)) {
1839 if (is_cfop(cur_irn)) {
1840 assert(cfop == NULL && "Highlander - there can be only one to be constrained");
1851 be_ilpsched_irn_t *cf_node = get_ilpsched_irn(env, cfop);
1852 ilpsched_node_attr_t *cf_na = get_ilpsched_node_attr(cf_node);
1855 /* for each time step in the branch's (0-based) schedule window */
1856 for (t = cf_na->asap - 1; t <= cf_na->alap - 1; ++t) {
1857 int *non_branch_vars, *branch_vars;
1860 snprintf(buf, sizeof(buf), "branch_cst_%u_n%u", t, get_irn_idx(cfop));
1861 cst = lpp_add_cst_uniq(lpp, buf, lpp_greater, 0.0);
1862 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1865 /* sum(overall non branches: n)x_{nt}^k - sum(overall branches: b)(num_non_branches * x_{bt}^k >= 0) */
1866 non_branch_vars = NEW_ARR_F(int, 0);
1867 branch_vars = NEW_ARR_F(int, 0);
1868 foreach_linked_irns(ba->head_ilp_nodes, cur_irn) {
1869 be_ilpsched_irn_t *node = get_ilpsched_irn(env, cur_irn);
1870 ilpsched_node_attr_t *na = get_ilpsched_node_attr(node);
1873 if (cur_irn == cfop) {
1874 /* for all unit types available for this node */
1875 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1876 unsigned idx = ILPVAR_IDX(na, tp_idx, t);
1877 ARR_APP1(int, branch_vars, na->ilp_vars.x[idx]);
1881 /* sum up all possible schedule points for this node upto current timestep */
1882 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1884 unsigned tmax = MIN(t, na->alap - 1);
1886 for (tn = na->asap - 1; tn <= tmax; ++tn) {
1887 unsigned idx = ILPVAR_IDX(na, tp_idx, tn);
1888 ARR_APP1(int, non_branch_vars, na->ilp_vars.x[idx]);
1895 if (ARR_LEN(non_branch_vars) > 0)
1896 lpp_set_factor_fast_bulk(lpp, cst, non_branch_vars, ARR_LEN(non_branch_vars), 1.0);
1897 if (ARR_LEN(branch_vars) > 0)
1898 lpp_set_factor_fast_bulk(lpp, cst, branch_vars, ARR_LEN(branch_vars), 0.0 - (double)num_non_branches);
1900 DEL_ARR_F(branch_vars);
1901 DEL_ARR_F(non_branch_vars);
1905 DBG((env->dbg, LEVEL_1, "\t%u branch constraints (%g sec)\n",
1906 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1910 static void create_proj_keep_constraints(be_ilpsched_env_t *env, lpp_t *lpp, be_ilpsched_irn_t *block_node) {
/*
 * Ties each mode_T node to its attached Projs/Keeps: for every step t,
 *   |pk| * (x vars of node at t) - sum(x vars of each Proj/Keep at t) == 0
 * so they are always scheduled in the same step.
 * NOTE: the call site in create_ilp is currently commented out.
 */
1913 unsigned num_cst = 0;
1914 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
1915 lc_timer_t *t_cst = lc_timer_register("beilpsched_cst_projkeep", "create proj and keep constraints");
1917 ilp_timer_push(t_cst);
1918 /* check all nodes */
1919 foreach_linked_irns(ba->head_ilp_nodes, irn) {
1920 be_ilpsched_irn_t *node;
1921 ilpsched_node_attr_t *na;
1925 /* only mode_T nodes can have Projs and Keeps assigned */
1926 if (get_irn_mode(irn) != mode_T)
1929 node = get_ilpsched_irn(env, irn);
1930 na = get_ilpsched_node_attr(node);
1932 /* check if has some Projs and Keeps assigned */
1933 if (! na->projkeeps)
1936 /* we can run only once over the queue, so preserve the nodes */
1937 pk = NEW_ARR_F(ir_node *, 0);
1938 while (! waitq_empty(na->projkeeps))
1939 ARR_APP1(ir_node *, pk, waitq_get(na->projkeeps));
/* the waitq is drained and freed — the Projs/Keeps now live only in pk */
1940 del_waitq(na->projkeeps);
1941 na->projkeeps = NULL;
1943 /* for all time steps at which this node can be scheduled */
1944 for (t = na->asap - 1; t <= na->alap - 1; ++t) {
1946 int *tmp_var_idx_n = NEW_ARR_F(int, 0);
1948 /* add the constraint, assure, that a node is always scheduled along with it's Projs and Keeps */
1949 snprintf(buf, sizeof(buf), "projkeep_cst_n%u_%u", get_irn_idx(irn), t);
1950 cst = lpp_add_cst_uniq(lpp, buf, lpp_equal, 0.0);
1951 DBG((env->dbg, LEVEL_2, "added constraint %s\n", buf));
1954 /* sum up scheduling variables for this time step */
1955 for (tp_idx = na->n_unit_types - 1; tp_idx >= 0; --tp_idx) {
1956 int idx = ILPVAR_IDX(na, tp_idx, t);
1957 ARR_APP1(int, tmp_var_idx_n, na->ilp_vars.x[idx]);
/* weight |pk|: one unit per attached Proj/Keep to cancel the -1 factors below */
1960 if (ARR_LEN(tmp_var_idx_n) > 0)
1961 lpp_set_factor_fast_bulk(lpp, cst, tmp_var_idx_n, ARR_LEN(tmp_var_idx_n), (double)(ARR_LEN(pk)));
1962 DEL_ARR_F(tmp_var_idx_n);
1964 /* subtract all Proj and Keep variables for this step */
1965 for (i = ARR_LEN(pk) - 1; i >= 0; --i) {
1966 be_ilpsched_irn_t *pk_node = get_ilpsched_irn(env, pk[i]);
1967 ilpsched_node_attr_t *pk_na = get_ilpsched_node_attr(pk_node);
1970 for (pk_tp_idx = pk_na->n_unit_types - 1; pk_tp_idx >= 0; --pk_tp_idx) {
1971 int idx = ILPVAR_IDX(pk_na, pk_tp_idx, t);
1972 lpp_set_factor_fast(lpp, cst, pk_na->ilp_vars.x[idx], -1.0);
1978 DBG((env->dbg, LEVEL_1, "\t%u Proj and Keep constraints (%g sec)\n",
1979 num_cst, ilp_timer_elapsed_usec(t_cst) / 1000000.0));
1983 /***************************************************
1985 * |_ _| | | __ \ (_)
1986 * | | | | | |__) | _ __ ___ __ _ _ _ __
1987 * | | | | | ___/ | '_ ` _ \ / _` | | '_ \
1988 * _| |_| |____| | | | | | | | (_| | | | | |
1989 * |_____|______|_| |_| |_| |_|\__,_|_|_| |_|
1991 ***************************************************/
1994 * Create the ilp (add variables, build constraints, solve, build schedule from solution).
1996 static void create_ilp(ir_node *block, void *walk_env) {
/*
 * Block-walker callback: builds the ILP for one block (variables + all
 * constraint families), solves it over the LPP net interface and applies
 * the solution as the block schedule.  Blocks with fewer than two
 * interesting nodes are trivial and skip the ILP entirely.
 * block:    the basic block being scheduled
 * walk_env: the be_ilpsched_env_t of this scheduling run
 */
1997 be_ilpsched_env_t *env = walk_env;
1998 be_ilpsched_irn_t *block_node = get_ilpsched_irn(env, block);
1999 ilpsched_block_attr_t *ba = get_ilpsched_block_attr(block_node);
2000 FILE *logfile = NULL;
2002 struct obstack var_obst;
2005 DBG((env->dbg, 255, "\n\n\n=========================================\n"));
2006 DBG((env->dbg, 255, " ILP Scheduling for %+F\n", block));
2007 DBG((env->dbg, 255, "=========================================\n\n"));
2009 DBG((env->dbg, LEVEL_1, "Creating ILP Variables for nodes in %+F (%u interesting nodes, %u max steps)\n",
2010 block, ba->n_interesting_nodes, ba->max_steps));
2012 /* notify backend and get block environment */
2013 env->block_env = be_ilp_sched_init_block_ilp_schedule(env->sel, block);
2015 /* if we have less than two interesting nodes, there is no need to create the ILP */
2016 if (ba->n_interesting_nodes > 1) {
/* empirical sizing factors for the LPP pre-allocation; smaller blocks get
 * relatively more headroom */
2017 double fact_var = ba->n_interesting_nodes > 25 ? 2.3 : 3;
2018 double fact_cst = ba->n_interesting_nodes > 25 ? 3 : 4.5;
2019 int base_num = ba->n_interesting_nodes * ba->n_interesting_nodes;
2020 int estimated_n_var = (int)((double)base_num * fact_var);
2021 int estimated_n_cst = (int)((double)base_num * fact_cst);
2023 DBG((env->dbg, LEVEL_1, "Creating LPP with estimated numbers: %d vars, %d cst\n",
2024 estimated_n_var, estimated_n_cst));
2026 /* set up the LPP object */
2027 snprintf(name, sizeof(name), "ilp scheduling IRG %s", get_entity_ld_name(get_irg_entity(env->irg)));
2029 lpp = new_lpp_userdef(
2032 estimated_n_cst, /* num vars -- NOTE(review): estimated_n_cst is passed where the comment says "num vars"; estimated_n_var seems intended — confirm against new_lpp_userdef's parameter order */
2033 estimated_n_cst + 1, /* num cst */
2034 1.3); /* grow factor */
2035 obstack_init(&var_obst);
2037 /* create ILP variables */
2038 create_variables(env, lpp, block_node, &var_obst);
2040 /* create ILP constraints */
2041 DBG((env->dbg, LEVEL_1, "Creating constraints for nodes in %+F:\n", block));
2042 create_assignment_and_precedence_constraints(env, lpp, block_node);
2043 create_ressource_constraints(env, lpp, block_node);
2044 create_bundle_constraints(env, lpp, block_node);
2045 create_branch_constraint(env, lpp, block_node);
2046 //create_proj_keep_constraints(env, lpp, block_node);
/* register-pressure constraints: the alive-node model for large blocks,
 * the cheaper dying-node model below the limit_dead threshold */
2048 if (env->opts->regpress) {
2049 if (ba->n_interesting_nodes > env->opts->limit_dead) {
2050 create_alive_nodes_constraint(env, lpp, block_node);
2051 create_alive_livein_nodes_constraint(env, lpp, block_node);
2052 create_pressure_alive_constraint(env, lpp, block_node);
2054 create_dying_nodes_constraint(env, lpp, block_node);
2055 create_pressure_dead_constraint(env, lpp, block_node);
2059 DBG((env->dbg, LEVEL_1, "ILP to solve: %u variables, %u constraints\n", lpp->var_next, lpp->cst_next));
2061 /* debug stuff, dump lpp when debugging is on */
2063 if (firm_dbg_get_mask(env->dbg) > 1) {
2067 snprintf(buf, sizeof(buf), "lpp_block_%lu.txt", get_irn_node_nr(block));
/* NOTE(review): fopen result is not checked before lpp_dump_plain — verify
 * lpp_dump_plain tolerates NULL, or add a check */
2068 f = fopen(buf, "w");
2069 lpp_dump_plain(lpp, f);
2071 snprintf(buf, sizeof(buf), "lpp_block_%lu.mps", get_irn_node_nr(block));
2076 /* set solve time limit */
2077 lpp_set_time_limit(lpp, env->opts->time_limit);
2079 /* set logfile if requested */
2080 if (strlen(env->opts->log_file) > 0) {
2081 if (strcasecmp(env->opts->log_file, "stdout") == 0)
2082 lpp_set_log(lpp, stdout);
2083 else if (strcasecmp(env->opts->log_file, "stderr") == 0)
2084 lpp_set_log(lpp, stderr);
2086 logfile = fopen(env->opts->log_file, "w");
2088 fprintf(stderr, "Could not open logfile '%s'! Logging disabled.\n", env->opts->log_file);
2090 lpp_set_log(lpp, logfile);
2095 lpp_solve_net(lpp, env->main_env->options->ilp_server, env->main_env->options->ilp_solver);
2100 /* check for valid solution: dump the infeasible problem and the graph for
 * post-mortem analysis, then abort via assert */
2101 if (! lpp_is_sol_valid(lpp)) {
2105 snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.txt", get_irn_node_nr(block));
2106 f = fopen(buf, "w");
2107 lpp_dump_plain(lpp, f);
2109 snprintf(buf, sizeof(buf), "lpp_block_%lu.assert.mps", get_irn_node_nr(block));
2111 dump_ir_block_graph(env->irg, "-assert");
2113 assert(0 && "ILP solution is not feasible!");
2116 DBG((env->dbg, LEVEL_1, "\nSolution:\n"));
2117 DBG((env->dbg, LEVEL_1, "\tsend time: %g sec\n", lpp->send_time / 1000000.0));
2118 DBG((env->dbg, LEVEL_1, "\treceive time: %g sec\n", lpp->recv_time / 1000000.0));
2119 DBG((env->dbg, LEVEL_1, "\tmatrix: %u elements, density %.2f%%, size %.2fMB\n", lpp->n_elems, lpp->density, (double)lpp->matrix_mem / 1024.0 / 1024.0));
2120 DBG((env->dbg, LEVEL_1, "\titerations: %d\n", lpp->iterations));
2121 DBG((env->dbg, LEVEL_1, "\tsolution time: %g\n", lpp->sol_time));
2122 DBG((env->dbg, LEVEL_1, "\tobjective function: %g\n", LPP_VALUE_IS_0(lpp->objval) ? 0.0 : lpp->objval));
2123 DBG((env->dbg, LEVEL_1, "\tbest bound: %g\n", LPP_VALUE_IS_0(lpp->best_bound) ? 0.0 : lpp->best_bound));
2125 DBG((env->dbg, LEVEL_1, "variables used %u bytes\n", obstack_memory_used(&var_obst)));
2128 /* apply solution */
2129 apply_solution(env, lpp, block);
2134 /* notify backend */
2135 be_ilp_sched_finish_block_ilp_schedule(env->sel, block, env->block_env);
2139 * Perform ILP scheduling on the given irg.
/**
 * Driver for ILP scheduling of one irg: computes ASAP/ALAP intervals,
 * then solves one ILP per block (via create_ilp as block-walker callback)
 * and notifies the backend before/after the whole run.
 */
2141 void be_ilp_sched(const be_irg_t *birg) {
2142 be_ilpsched_env_t env;
2143 const char *name = "be ilp scheduling";
2144 arch_isa_t *isa = birg->main_env->arch_env->isa;
/* the ISA supplies the selector callbacks driving the ILP construction */
2145 const ilp_sched_selector_t *sel = isa->impl->get_ilp_sched_selector(isa);
2147 FIRM_DBG_REGISTER(env.dbg, "firm.be.sched.ilp");
2149 // firm_dbg_set_mask(env.dbg, 1);
/* let the backend prepare per-irg state before scheduling starts */
2151 env.irg_env = be_ilp_sched_init_irg_ilp_schedule(sel, birg->irg);
2153 env.irg = birg->irg;
2154 env.height = heights_new(birg->irg);
2155 env.main_env = birg->main_env;
2156 env.arch_env = birg->main_env->arch_env;
2157 env.cpu = arch_isa_get_machine(birg->main_env->arch_env->isa);
2158 env.opts = &ilp_opts;
/* phase attaches per-irn scheduling attributes, lazily via init_ilpsched_irn */
2159 phase_init(&env.ph, name, env.irg, PHASE_DEFAULT_GROWTH, init_ilpsched_irn);
2161 /* assign a unique per block number to all interesting nodes */
2162 irg_walk_in_or_dep_graph(env.irg, NULL, build_block_idx, &env);
2165 The block indices are completely build after the walk,
2166 now we can allocate the bitsets (size depends on block indices)
2169 phase_reinit_irn_data(&env.ph);
2171 /* Collect all root nodes (having no user in their block) and calculate ASAP. */
2172 irg_walk_in_or_dep_blkwise_graph(env.irg, collect_alap_root_nodes, calculate_irn_asap, &env);
2174 /* Calculate ALAP of all irns */
2175 irg_block_walk_graph(env.irg, NULL, calculate_block_alap, &env);
2177 /* We refine the {ASAP(n), ALAP(n)} interval and fix the time steps for Projs and Keeps */
2178 irg_walk_in_or_dep_blkwise_graph(env.irg, NULL, refine_asap_alap_times, &env);
2180 /* perform ILP scheduling */
2181 irg_block_walk_graph(env.irg, NULL, create_ilp, &env);
/* when debugging is enabled, report phase memory consumption */
2184 if (firm_dbg_get_mask(env.dbg)) {
2186 phase_stat_t *stat_ptr = phase_stat(&env.ph, &stat);
2188 fprintf(stderr, "Phase used: %u bytes\n", stat_ptr->overall_bytes);
2192 /* free data allocated dynamically */
2193 irg_block_walk_graph(env.irg, NULL, clear_unwanted_data, &env);
2195 /* free all allocated object */
2196 phase_free(&env.ph);
2197 heights_free(env.height);
2199 /* notify backend */
2200 be_ilp_sched_finish_irg_ilp_schedule(sel, birg->irg, env.irg_env);
2204 * Register ILP scheduler options.
/**
 * Register the ILP scheduler's command line options under "be.ilpsched".
 */
2206 void be_init_ilpsched(void)
2208 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
2209 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "ilpsched");
2211 lc_opt_add_table(sched_grp, ilpsched_option_table);
2214 #else /* WITH_ILP */
/* ISO C forbids an empty translation unit; this dummy symbol keeps the
 * file non-empty when the ILP scheduler is compiled out. */
2216 static INLINE void some_picky_compiler_do_not_allow_empty_files(void)
2219 #endif /* WITH_ILP */