2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
24 #include "iredges_t.h"
29 #include "irprintf_t.h"
35 #include "besched_t.h"
38 #include "belistsched.h"
39 #include "beschedmris.h"
40 #include "beschedrss.h"
44 #include <libcore/lc_opts.h>
45 #include <libcore/lc_opts_enum.h>
/** Debug module handle; only present in debug builds (see DEBUG_ONLY). */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);

/** Backend nodes that must always get a schedule slot, regardless of the
 *  generic predicate — used by must_appear_in_schedule() below. */
#define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
	/* Node selector variants; the string names map to these values via
	 * sched_select_items below (enum header/footer lines not visible here). */
	BE_SCHED_SELECT_TRIVIAL  = 0,    /**< the "trivial" selector */
	BE_SCHED_SELECT_REGPRESS = 1,    /**< the "regpress" selector */
	BE_SCHED_SELECT_MUCHNIK  = 2,    /**< the "muchnik" selector */
	BE_SCHED_SELECT_HEUR     = 3,    /**< the "heur" selector (default, see list_sched_options) */
	BE_SCHED_SELECT_HMUCHNIK = 4,    /**< the "hmuchnik" selector */
	BE_SCHED_SELECT_RANDOM   = 5     /**< the "random" selector */

	/* Schedule-preparation variants; mapped by sched_prep_items below.
	 * NOTE(review): BE_SCHED_PREP_RSS also belongs to this enum (it is
	 * referenced below) but its definition line is not visible here. */
	BE_SCHED_PREP_NONE = 0,          /**< no preparation pass */
	BE_SCHED_PREP_MRIS = 2,          /**< MRIS preprocessing (be_sched_mris_preprocess) */
/**
 * Command-line options of the list scheduler; filled in from the
 * lc_opt table registered in be_init_listsched().
 */
typedef struct _list_sched_options_t {
	int select;               /**< the node selector (a BE_SCHED_SELECT_* value) */
	int prep;                 /**< schedule preparation (a BE_SCHED_PREP_* value) */
} list_sched_options_t;

/* option defaults */
static list_sched_options_t list_sched_options = {
	BE_SCHED_SELECT_HEUR,     /* mueller heuristic selector */
	BE_SCHED_PREP_NONE,       /* no scheduling preparation */
/* schedule selector options (string name -> BE_SCHED_SELECT_* value). */
static const lc_opt_enum_int_items_t sched_select_items[] = {
	{ "trivial",  BE_SCHED_SELECT_TRIVIAL },
	{ "random",   BE_SCHED_SELECT_RANDOM },
	{ "regpress", BE_SCHED_SELECT_REGPRESS },
	{ "muchnik",  BE_SCHED_SELECT_MUCHNIK },
	{ "heur",     BE_SCHED_SELECT_HEUR },
	{ "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },

/* schedule preparation options (string name -> BE_SCHED_PREP_* value). */
static const lc_opt_enum_int_items_t sched_prep_items[] = {
	{ "none", BE_SCHED_PREP_NONE },
	{ "mris", BE_SCHED_PREP_MRIS },
	{ "rss",  BE_SCHED_PREP_RSS },

/* backing variable for the "select" option */
static lc_opt_enum_int_var_t sched_select_var = {
	&list_sched_options.select, sched_select_items

/* backing variable for the "prep" option */
static lc_opt_enum_int_var_t sched_prep_var = {
	&list_sched_options.prep, sched_prep_items

/* option table registered under be.listsched in be_init_listsched() */
static const lc_opt_table_entry_t list_sched_option_table[] = {
	LC_OPT_ENT_ENUM_PTR("prep",   "schedule preparation", &sched_prep_var),
	LC_OPT_ENT_ENUM_PTR("select", "node selector",        &sched_select_var),
/**
 * All scheduling info needed per node, kept in an array indexed by node index.
 */
typedef struct _sched_irn_t {
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled */
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
	sched_irn_t *sched_info;               /**< scheduling info per node, indexed by node index */
	const list_sched_selector_t *selector; /**< The node selector. */
	const arch_env_t *arch_env;            /**< The architecture environment. */
	const ir_graph *irg;                   /**< The graph to schedule. */
	void *selector_env;                    /**< A pointer to give to the selector. */
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
	sched_irn_t *sched_info;               /**< scheduling info per node, copied from the global scheduler object */
	ir_nodeset_t cands;                    /**< the set of candidates */
	ir_node *block;                        /**< the current block */
	sched_env_t *sched_env;                /**< the scheduler environment */
	ir_nodeset_t live;                     /**< simple liveness during scheduling */
	const list_sched_selector_t *selector; /**< the node selector, taken from the graph environment */
	void *selector_block_env;              /**< per-block state returned by selector->init_block() */
142 * Returns non-zero if the node is already scheduled
144 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
146 int idx = get_irn_idx(n);
148 assert(idx < ARR_LEN(env->sched_info));
149 return env->sched_info[idx].already_sched;
153 * Mark a node as already scheduled
155 static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
157 int idx = get_irn_idx(n);
159 assert(idx < ARR_LEN(env->sched_info));
160 env->sched_info[idx].already_sched = 1;
/**
 * Try to put a node in the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previous scheduled node.
 * @param irn  The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
	/* Blocks cannot be scheduled; nodes without users need no slot either. */
	if (is_Block(irn) || get_irn_n_edges(irn) == 0)

	/*
	 * Check, if the given ir node is in a different block as the
	 * currently scheduled one. If that is so, don't make the node ready.
	 */
	if (env->block != get_nodes_block(irn))

	/* check every operand, both regular ins and dependency edges */
	for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
		ir_node *op = get_irn_in_or_dep(irn, i);

		/* if irn is an End we have keep-alives and op might be a block, skip that */
			assert(get_irn_op(irn) == op_End);

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this nodes cannot be made ready, so exit. */
		if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)

	/* all operands satisfied: the node becomes a scheduling candidate */
	ir_nodeset_insert(&env->cands, irn);

	/* Notify selector about the ready node. */
	if (env->selector->node_ready)
		env->selector->node_ready(env->selector_block_env, irn, pred);

	DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
/**
 * Try to make all users of a node ready.
 * In fact, a usage node can only be made ready, if all its operands
 * have already been scheduled; this is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node, which usages (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
	const ir_edge_t *edge;

	/* users reached via regular out-edges */
	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);
		make_ready(env, irn, user);

	/* users reached only via dependency edges */
	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *user = get_edge_src_irn(edge);
		make_ready(env, irn, user);
236 * Returns the number of not yet schedules users.
238 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
239 int idx = get_irn_idx(n);
241 assert(idx < ARR_LEN(env->sched_info));
242 return env->sched_info[idx].num_not_sched_user;
246 * Sets the number of not yet schedules users.
248 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
249 int idx = get_irn_idx(n);
251 assert(idx < ARR_LEN(env->sched_info));
252 env->sched_info[idx].num_not_sched_user = num;
256 * Add @p num to the number of not yet schedules users and returns the result.
258 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
259 int idx = get_irn_idx(n);
261 assert(idx < ARR_LEN(env->sched_info));
262 env->sched_info[idx].num_not_sched_user += num;
263 return env->sched_info[idx].num_not_sched_user;
/**
 * Returns the number of users of a node having mode datab.
 */
static int get_num_successors(ir_node *irn) {
	const ir_edge_t *edge;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);

			/* nested tuples are counted recursively */
				sum += get_num_successors(proj);
			else if (mode_is_datab(mode))
				sum += get_irn_n_edges(proj);

		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
/**
 * Adds irn to @p live, updates all inputs that this user is scheduled
 * and counts all of its non-scheduled users.
 */
static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
	/* walk all operands (regular ins and dependency edges) of irn */
	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node *in = get_irn_in_or_dep(irn, i);

		/* if in is a proj: update predecessor */
			in = get_Proj_pred(in);

		/* if in is still in the live set: reduce number of users by one */
		if (ir_nodeset_contains(&env->live, in)) {
			if (add_irn_not_sched_user(env, in, -1) <= 0)
				ir_nodeset_remove(&env->live, in);

	/*
	get_num_successors returns the number of all users. This includes
	users in different blocks as well. As each block is scheduled separately
	the liveness info of those users will not be updated and so these
	users will keep up the register pressure as it is desired.
	*/
	i = get_num_successors(irn);
	set_irn_not_sched_user(env, irn, i);
	ir_nodeset_insert(&env->live, irn);
/**
 * Decides whether @p irn gets a slot in the schedule list.
 * A selector may override the decision via its to_appear_in_schedule()
 * callback; a negative callback result falls back to the generic rule below.
 */
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
	/* nodes without any users never appear in the schedule */
	if (get_irn_n_edges(irn) < 1)

	/* ask the selector first, if it provides a callback */
	if (sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	/* fall back to the generic predicate when the callback declined (res < 0) */
	return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
/**
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise, it is not put into the list */
	if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		update_sched_liveness(env, irn);
		sched_add_before(env->block, irn);

		DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));

	/* notify the selector about the finally selected node. */
	if (env->selector->node_selected)
		env->selector->node_selected(env->selector_block_env, irn);

	/* Insert the node in the set of all already scheduled nodes. */
	mark_already_scheduled(env, irn);

	/* Remove the node from the ready set */
	ir_nodeset_remove(&env->cands, irn);
/**
 * Add the proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-moded irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-moded irn and
 * one of its projs. This should render a realistic image of a
 * tuple-moded irn, which in fact models a node which defines multiple
 * values.
 *
 * @param irn The tuple-moded irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	/* non-proj nodes can have dependency edges to tuple nodes. */
	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *out = get_edge_src_irn(edge);
		make_ready(env, irn, out);

	/* schedule the normal projs */
	foreach_out_edge(irn, edge) {
		ir_node *out = get_edge_src_irn(edge);

		assert(is_Proj(out) && "successor of a modeT node must be a proj");

		/* nested tuple projs are handled by recursing */
		if (get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);

		add_to_sched(env, out);
		make_users_ready(env, out);
/**
 * Perform list scheduling on a block.
 *
 * Note, that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
	sched_env_t *env                      = env_ptr;
	const list_sched_selector_t *selector = env->selector;
	ir_node *start_node                   = get_irg_start(get_irn_irg(block));

	block_sched_env_t be;
	const ir_edge_t *edge;

	/* Initialize the block's list head that will hold the schedule. */
	sched_init_block(block);

	/* Initialize the block scheduling environment */
	be.sched_info = env->sched_info;
	/* size the candidate/live sets by the block's out-edge count */
	ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
	ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
	be.selector = selector;

	DBG((dbg, LEVEL_1, "scheduling %+F\n", block));

	if (selector->init_block)
		be.selector_block_env = selector->init_block(env->selector_env, block);

	/* Add all nodes that are already ready to the candidate set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the end node because of keepalive edges. */
		if (get_irn_opcode(irn) == iro_End)

		/* nodes without users do not need a schedule slot */
		if (get_irn_n_edges(irn) == 0)

		/*
		Phi functions are scheduled immediately, since they only
		transfer data flow from the predecessors to this block.
		*/
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);

		else if (irn == start_node) {
			/* The start block will be scheduled as the first node */
			add_to_sched(&be, irn);
			add_tuple_projs(&be, irn);

		/* Other nodes must have all operands in other blocks to be made
		/* Check, if the operands of a node are not local to this block */
		for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
			ir_node *operand = get_irn_in_or_dep(irn, j);

			if (get_nodes_block(operand) == block) {

			/* live in values increase register pressure */
			update_sched_liveness(&be, operand);

	/* Make the node ready, if all operands live in a foreign block */
		DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
		make_ready(&be, NULL, irn);

	/* Iterate over all remaining nodes */
	while (ir_nodeset_size(&be.cands) > 0) {
		ir_nodeset_iterator_t iter;
		/* collect statistics about amount of ready nodes */
		be_do_stat_sched_ready(block, &be.cands);

		/* Keeps must be scheduled immediately */
		foreach_ir_nodeset(&be.cands, irn, iter) {
			if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {

		/* otherwise ask the selector to pick the next node */
		irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);

		DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		/* pin the projs of a tuple node right behind it */
		if (get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);

		make_users_ready(&be, irn);

		/* remove the scheduled node from the ready list. */
		ir_nodeset_remove(&be.cands, irn);

	if (selector->finish_block)
		selector->finish_block(be.selector_block_env);

	ir_nodeset_destroy(&be.cands);
	ir_nodeset_destroy(&be.live);
/* List schedule a graph. */
void list_sched(const be_irg_t *birg, be_options_t *be_opts)
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;

	mris_env_t *mris = NULL;
	list_sched_selector_t sel;

	/* Select a scheduler based on backend options */
	switch (list_sched_options.select) {
		case BE_SCHED_SELECT_TRIVIAL:
			memcpy(&sel, trivial_selector, sizeof(sel));
		case BE_SCHED_SELECT_RANDOM:
			memcpy(&sel, random_selector, sizeof(sel));
		case BE_SCHED_SELECT_REGPRESS:
			memcpy(&sel, reg_pressure_selector, sizeof(sel));
		case BE_SCHED_SELECT_MUCHNIK:
			memcpy(&sel, muchnik_selector, sizeof(sel));
		case BE_SCHED_SELECT_HEUR:
			memcpy(&sel, heuristic_selector, sizeof(sel));
		case BE_SCHED_SELECT_HMUCHNIK:
			/* currently backed by the trivial selector */
			memcpy(&sel, trivial_selector, sizeof(sel));

	/* Matze: This is very slow, we should avoid it to improve backend speed,
	 * we just have to make sure that we have no dangling out-edges at this
	 * point.
	 */
	/* Assure, that we have no dangling out-edges to deleted stuff */
	edges_deactivate(birg->irg);
	edges_activate(birg->irg);

	/* run the requested preparation pass, if any */
	switch (list_sched_options.prep) {
		case BE_SCHED_PREP_MRIS:
			mris = be_sched_mris_preprocess(birg);
		case BE_SCHED_PREP_RSS:
			rss_schedule_preparation(birg);

	num_nodes = get_irg_last_idx(irg);

	/* initialize environment for list scheduler */
	memset(&env, 0, sizeof(env));
	/* let the ISA wrap/override the chosen selector */
	env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
	env.arch_env = arch_env;

	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	/* free the MRIS preprocessing data, if it was created above */
	if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
		be_sched_mris_free(mris);

	DEL_ARR_F(env.sched_info);
/* List schedule a block. */
void list_sched_single_block(const be_irg_t *birg, ir_node *block, be_options_t *be_opts)
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;

	list_sched_selector_t sel;

	/* Select a scheduler based on backend options (mirrors list_sched()) */
	switch (list_sched_options.select) {
		case BE_SCHED_SELECT_TRIVIAL:
			memcpy(&sel, trivial_selector, sizeof(sel));
		case BE_SCHED_SELECT_RANDOM:
			memcpy(&sel, random_selector, sizeof(sel));
		case BE_SCHED_SELECT_REGPRESS:
			memcpy(&sel, reg_pressure_selector, sizeof(sel));
		case BE_SCHED_SELECT_MUCHNIK:
			memcpy(&sel, muchnik_selector, sizeof(sel));
		case BE_SCHED_SELECT_HEUR:
			memcpy(&sel, heuristic_selector, sizeof(sel));
		case BE_SCHED_SELECT_HMUCHNIK:
			/* currently backed by the trivial selector */
			memcpy(&sel, trivial_selector, sizeof(sel));

	/* Assure, that the out edges are computed */
	edges_deactivate(birg->irg);
	edges_activate(birg->irg);

	num_nodes = get_irg_last_idx(irg);

	/* initialize environment for list scheduler */
	memset(&env, 0, sizeof(env));
	/* let the ISA wrap/override the chosen selector */
	env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
	env.arch_env = arch_env;

	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule block. */
	list_sched_block(block, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	DEL_ARR_F(env.sched_info);
/**
 * Register list scheduler options under the be.listsched option group
 * and set up the debug module. Run as a module constructor.
 */
void be_init_listsched(void) {
	lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");

	lc_opt_add_table(sched_grp, list_sched_option_table);

	FIRM_DBG_REGISTER(dbg, "firm.be.sched");

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);