/**
 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is here.
 *
 * @author Sebastian Hack
 */
#include "iredges_t.h"
#include "irprintf_t.h"

#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
#include "beschedrss.h"
#ifdef WITH_LIBCORE
#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
#endif /* WITH_LIBCORE */
#define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
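/* BE_SCHED_NODE() matches the backend pseudo nodes (Keep, CopyKeep,
 * RegParams) that produce no ordinary data result but must nevertheless
 * appear in the schedule; see must_appear_in_schedule() below. */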
enum {
    BE_SCHED_SELECT_TRIVIAL  = 0,
    BE_SCHED_SELECT_REGPRESS = 1,
    BE_SCHED_SELECT_MUCHNIK  = 2,
    BE_SCHED_SELECT_HEUR     = 3,
    BE_SCHED_SELECT_HMUCHNIK = 4,
    BE_SCHED_SELECT_RANDOM   = 5
};
enum {
    BE_SCHED_PREP_NONE = 0,
    BE_SCHED_PREP_MRIS = 2,
    BE_SCHED_PREP_RSS  = 3
};
typedef struct _list_sched_options_t {
    int select; /**< the node selector */
    int prep;   /**< schedule preparation */
} list_sched_options_t;

static list_sched_options_t list_sched_options = {
    BE_SCHED_SELECT_HEUR, /* Mueller heuristic selector */
    BE_SCHED_PREP_NONE,   /* no scheduling preparation */
};
#ifdef WITH_LIBCORE
/* schedule selector options. */
static const lc_opt_enum_int_items_t sched_select_items[] = {
    { "trivial",  BE_SCHED_SELECT_TRIVIAL  },
    { "random",   BE_SCHED_SELECT_RANDOM   },
    { "regpress", BE_SCHED_SELECT_REGPRESS },
    { "muchnik",  BE_SCHED_SELECT_MUCHNIK  },
    { "heur",     BE_SCHED_SELECT_HEUR     },
    { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
    { NULL,       0 }
};
/* schedule preparation options. */
static const lc_opt_enum_int_items_t sched_prep_items[] = {
    { "none", BE_SCHED_PREP_NONE },
    { "mris", BE_SCHED_PREP_MRIS },
    { "rss",  BE_SCHED_PREP_RSS  },
    { NULL,   0 }
};
static lc_opt_enum_int_var_t sched_select_var = {
    &list_sched_options.select, sched_select_items
};

static lc_opt_enum_int_var_t sched_prep_var = {
    &list_sched_options.prep, sched_prep_items
};
static const lc_opt_table_entry_t list_sched_option_table[] = {
    LC_OPT_ENT_ENUM_PTR("prep",   "schedule preparation", &sched_prep_var),
    LC_OPT_ENT_ENUM_PTR("select", "node selector",        &sched_select_var),
    { NULL }
};
#endif /* WITH_LIBCORE */
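/* These options are registered under the "be" group by be_init_listsched()
 * at the end of this file, so they are reachable as be:listsched:select and
 * be:listsched:prep. The exact command-line spelling (something like
 * -be:listsched:select=regpress) depends on the driver; see the libcore
 * option documentation. */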
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
    unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
    unsigned already_sched : 1;  /**< Set if this node is already scheduled */
} sched_irn_t;
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
    sched_irn_t *sched_info;               /**< scheduling info per node */
    const list_sched_selector_t *selector; /**< The node selector. */
    const arch_env_t *arch_env;            /**< The architecture environment. */
    const ir_graph *irg;                   /**< The graph to schedule. */
    void *selector_env;                    /**< A pointer to give to the selector. */
} sched_env_t;
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
    sched_irn_t *sched_info;               /**< scheduling info per node, copied from the global scheduler object */
    nodeset *cands;                        /**< the set of candidates */
    ir_node *block;                        /**< the current block */
    sched_env_t *sched_env;                /**< the scheduler environment */
    nodeset *live;                         /**< simple liveness during scheduling */
    const list_sched_selector_t *selector; /**< the node selector */
    void *selector_block_env;              /**< the selector's per-block environment */
    DEBUG_ONLY(firm_dbg_module_t *dbg;)    /**< the debug module */
} block_sched_env_t;
/**
 * Returns non-zero if the node is already scheduled.
 */
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].already_sched;
}
/**
 * Marks a node as already scheduled.
 */
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
{
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].already_sched = 1;
}
/**
 * Tries to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1 if the node could be made ready, 0 otherwise.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
    int i, n;

    /* Blocks cannot be scheduled; neither can nodes without users. */
    if (is_Block(irn) || get_irn_n_edges(irn) == 0)
        return 0;

    /*
     * Check if the given node is in a different block than the one
     * currently being scheduled. If so, don't make the node ready.
     */
    if (env->block != get_nodes_block(irn))
        return 0;

    for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
        ir_node *op = get_irn_in_or_dep(irn, i);

        /* if irn is an End node we have keep-alive edges and op might be a block: skip it */
        if (is_Block(op)) {
            assert(get_irn_op(irn) == op_End);
            continue;
        }

        /* If the operand is local to the scheduled block and not yet
         * scheduled, this node cannot be made ready, so exit. */
        if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
            return 0;
    }

    nodeset_insert(env->cands, irn);

    /* Notify the selector about the ready node. */
    if (env->selector->node_ready)
        env->selector->node_ready(env->selector_block_env, irn, pred);

    DB((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));

    return 1;
}
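/* Invariant maintained by make_ready(): a node sits in env->cands exactly
 * when all of its block-local operands have been scheduled. Roughly:
 *
 *     ready(irn) <=> for every op in ins_or_deps(irn):
 *         get_nodes_block(op) != env->block || is_already_scheduled(env, op)
 */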
/**
 * Tries to make all users of a node ready.
 * A user can only be made ready if all of its operands
 * have already been scheduled; this is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
    const ir_edge_t *edge;

    /* users reached via ordinary data-flow edges */
    foreach_out_edge(irn, edge) {
        ir_node *user = get_edge_src_irn(edge);

        make_ready(env, irn, user);
    }

    /* users reached via dependency edges */
    foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
        ir_node *user = get_edge_src_irn(edge);

        make_ready(env, irn, user);
    }
}
/**
 * Returns the number of not yet scheduled users.
 */
static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].num_not_sched_user;
}
/**
 * Sets the number of not yet scheduled users.
 */
static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].num_not_sched_user = num;
}
/**
 * Adds @p num to the number of not yet scheduled users and returns the result.
 */
static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].num_not_sched_user += num;
    return env->sched_info[idx].num_not_sched_user;
}
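/* Note that @p num may be negative: update_sched_liveness() below passes -1
 * to count a value's remaining users down as they get scheduled. */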
/**
 * Returns the number of users of a node that have a datab mode.
 */
static int get_num_successors(ir_node *irn) {
    int sum = 0;
    const ir_edge_t *edge;

    if (get_irn_mode(irn) == mode_T) {
        /* for mode_T nodes: count the users of all Projs */
        foreach_out_edge(irn, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            ir_mode *mode = get_irn_mode(proj);

            if (mode == mode_T)
                sum += get_num_successors(proj);
            else if (mode_is_datab(mode))
                sum += get_irn_n_edges(proj);
        }
    }
    else {
        /* do not count keep-alive edges */
        foreach_out_edge(irn, edge) {
            if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
                sum++;
        }
    }

    return sum;
}
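/* Example: a Load yields a mode_T tuple with a memory Proj (mode_M) and a
 * result Proj. Only the users of the result Proj are counted here, since
 * mode_M is not a datab mode, and keep-alive edges from End never count. */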
/**
 * Adds irn to @p live, updates all inputs that this user is scheduled
 * and counts all of its not yet scheduled users.
 */
static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
    int i;

    for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
        ir_node *in = get_irn_in_or_dep(irn, i);

        /* if in is a proj: update the predecessor */
        if (is_Proj(in))
            in = get_Proj_pred(in);

        /* if in is still in the live set: reduce the number of users by one */
        if (nodeset_find(env->live, in)) {
            if (add_irn_not_sched_user(env, in, -1) <= 0)
                nodeset_remove(env->live, in);
        }
    }

    /*
     * get_num_successors() returns the number of all users. This includes
     * users in different blocks as well. As each block is scheduled separately,
     * the liveness info of those users will not be updated, so these users
     * keep up the register pressure, as is desired.
     */
    i = get_num_successors(irn);
    if (i > 0) {
        set_irn_not_sched_user(env, irn, i);
        nodeset_insert(env->live, irn);
    }
}
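/* The env->live set thus approximates register pressure while scheduling:
 * a value is live from the moment it is scheduled until its last user inside
 * the block is scheduled; values with out-of-block users stay live until the
 * block end. The selector gets this set passed on every select() call. */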
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
    int res = -1;

    /* nodes without users do not appear in the schedule */
    if (get_irn_n_edges(irn) < 1)
        return 0;

    /* ask the selector first; a negative answer means: no opinion */
    if (sel->to_appear_in_schedule)
        res = sel->to_appear_in_schedule(block_env, irn);

    return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
}
/**
 * Appends an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
    /* If the node consumes/produces data, it is appended to the schedule
     * list, otherwise it is not put into the list. */
    if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
        update_sched_liveness(env, irn);
        sched_add_before(env->block, irn);

        DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
    }

    /* notify the selector about the finally selected node. */
    if (env->selector->node_selected)
        env->selector->node_selected(env->selector_block_env, irn);

    /* Insert the node in the set of all already scheduled nodes. */
    mark_already_scheduled(env, irn);

    /* Remove the node from the ready set */
    if (nodeset_find(env->cands, irn))
        nodeset_remove(env->cands, irn);

    return irn;
}
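/* Nodes that never enter the schedule list (Projs, for instance) still run
 * through add_to_sched() so that the selector gets notified and the
 * bookkeeping in sched_info and cands stays consistent. */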
/**
 * Adds the Proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the Projs right behind the irn,
 * no other node can create a new lifetime between the tuple-mode irn and
 * one of its Projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node defining multiple
 * results.
 * @param env The block scheduling environment.
 * @param irn The tuple-mode irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
    const ir_edge_t *edge;

    assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

    /* non-proj nodes can have dependency edges to tuple nodes. */
    foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
        ir_node *out = get_edge_src_irn(edge);
        make_ready(env, irn, out);
    }

    /* schedule the normal projs */
    foreach_out_edge(irn, edge) {
        ir_node *out = get_edge_src_irn(edge);

        assert(is_Proj(out) && "successor of a modeT node must be a proj");

        if (get_irn_mode(out) == mode_T)
            add_tuple_projs(env, out);
        else {
            add_to_sched(env, out);
            make_users_ready(env, out);
        }
    }
}
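/* Typical shape handled here: a Call produces a mode_T tuple whose memory
 * and result Projs get pinned right behind it; a nested mode_T Proj (such
 * as a Call's result tuple) is handled by the recursive call above. */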
/**
 * Performs list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
    sched_env_t *env                      = env_ptr;
    const list_sched_selector_t *selector = env->selector;
    ir_node *start_node                   = get_irg_start(get_irn_irg(block));

    block_sched_env_t be;
    const ir_edge_t *edge;
    ir_node *irn;
    int j, m;

    /* Initialize the block's list head that will hold the schedule. */
    sched_init_block(block);

    /* Initialize the block scheduling environment */
    be.sched_info = env->sched_info;
    be.block      = block;
    be.cands      = new_nodeset(get_irn_n_edges(block));
    be.live       = new_nodeset(get_irn_n_edges(block));
    be.selector   = selector;
    be.sched_env  = env;
    be.selector_block_env = NULL; /* may be set by the selector's init_block() below */
    FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");

    DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));

    if (selector->init_block)
        be.selector_block_env = selector->init_block(env->selector_env, block);
    /* Add all nodes that are already ready to the set. */
    foreach_out_edge(block, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        /* Skip the End node because of the keep-alive edges. */
        if (get_irn_opcode(irn) == iro_End)
            continue;

        /* Skip nodes without users. */
        if (get_irn_n_edges(irn) == 0)
            continue;

        if (is_Phi(irn)) {
            /*
             * Phi functions are scheduled immediately, since they only
             * transfer data flow from the predecessors to this block.
             */
            add_to_sched(&be, irn);
            make_users_ready(&be, irn);
        }
        else if (irn == start_node) {
            /* The Start node is scheduled as the first node. */
            add_to_sched(&be, irn);
            add_tuple_projs(&be, irn);
        }
        else {
            /* Other nodes must have all operands in other blocks to be made
             * ready. */
            int ready = 1;

            /* Check if the operands of the node are not local to this block. */
            for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
                ir_node *operand = get_irn_in_or_dep(irn, j);

                if (get_nodes_block(operand) == block) {
                    ready = 0;
                    break;
                }
                else {
                    /* live-in values increase register pressure */
                    update_sched_liveness(&be, operand);
                }
            }

            /* Make the node ready if all operands live in a foreign block. */
            if (ready) {
                DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
                make_ready(&be, NULL, irn);
            }
        }
    }
    /* Iterate over all remaining nodes. */
    while (nodeset_count(be.cands) > 0) {
        /* collect statistics about the number of ready nodes */
        be_do_stat_sched_ready(block, be.cands);

        /* Keeps must be scheduled immediately. */
        foreach_nodeset(be.cands, irn) {
            if (be_is_Keep(irn) || be_is_CopyKeep(irn) || get_irn_mode(irn) == mode_M) {
                nodeset_break(be.cands);
                break;
            }
        }

        /* no Keep found: let the selector pick the next node */
        if (! irn)
            irn = be.selector->select(be.selector_block_env, be.cands, be.live);

        DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));

        /* Add the node to the schedule. */
        add_to_sched(&be, irn);

        if (get_irn_mode(irn) == mode_T)
            add_tuple_projs(&be, irn);
        else
            make_users_ready(&be, irn);

        /* remove the scheduled node from the ready list. */
        if (nodeset_find(be.cands, irn))
            nodeset_remove(be.cands, irn);
    }
    if (selector->finish_block)
        selector->finish_block(be.selector_block_env);

    del_nodeset(be.cands);
    del_nodeset(be.live);
}
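/* Per-block flow in short: Phis, the Start node and nodes whose operands
 * all live in other blocks are made ready up front; the loop above then
 * repeatedly schedules a Keep if one is ready, otherwise whatever the
 * selector picks, pins tuple Projs, and readies the users of the pick. */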
/* List schedule a graph. */
void list_sched(const be_irg_t *birg, be_options_t *be_opts)
{
    const arch_env_t *arch_env = birg->main_env->arch_env;
    ir_graph         *irg      = birg->irg;

    int num_nodes;
    sched_env_t env;
    mris_env_t *mris = NULL;
    list_sched_selector_t sel;
    /* Select a scheduler based on backend options */
    switch (list_sched_options.select) {
        case BE_SCHED_SELECT_TRIVIAL:
            memcpy(&sel, trivial_selector, sizeof(sel));
            break;
        case BE_SCHED_SELECT_RANDOM:
            memcpy(&sel, random_selector, sizeof(sel));
            break;
        case BE_SCHED_SELECT_REGPRESS:
            memcpy(&sel, reg_pressure_selector, sizeof(sel));
            break;
        case BE_SCHED_SELECT_MUCHNIK:
            memcpy(&sel, muchnik_selector, sizeof(sel));
            break;
        case BE_SCHED_SELECT_HEUR:
            memcpy(&sel, heuristic_selector, sizeof(sel));
            break;
        case BE_SCHED_SELECT_HMUCHNIK:
        default:
            /* hmuchnik falls back to the trivial selector */
            memcpy(&sel, trivial_selector, sizeof(sel));
    }
    /* Make sure the out edges are computed. */
    edges_deactivate(birg->irg);
    edges_activate(birg->irg);

    /* Run the selected schedule preparation, if any. */
    switch (list_sched_options.prep) {
        case BE_SCHED_PREP_MRIS:
            mris = be_sched_mris_preprocess(birg);
            break;
        case BE_SCHED_PREP_RSS:
            rss_schedule_preparation(birg);
            break;
        default:
            break;
    }
    num_nodes = get_irg_last_idx(irg);

    /* initialize environment for list scheduler */
    memset(&env, 0, sizeof(env));
    env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
    env.arch_env   = arch_env;
    env.irg        = irg;
    env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

    memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
    if (env.selector->init_graph)
        env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

    /* Schedule each single block. */
    irg_block_walk_graph(irg, list_sched_block, NULL, &env);

    if (env.selector->finish_graph)
        env.selector->finish_graph(env.selector_env);

    if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
        be_sched_mris_free(mris);

    DEL_ARR_F(env.sched_info);
}
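/* Typical call site (sketch only; the real driver code lives elsewhere in
 * the backend):
 *
 *     be_irg_t *birg = ...;        // graph plus backend main environment
 *     list_sched(birg, be_opts);   // schedules every block of birg->irg
 */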
#ifdef WITH_LIBCORE
/**
 * Register list scheduler options.
 */
void be_init_listsched(void) {
    lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");

    lc_opt_add_table(sched_grp, list_sched_option_table);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
#endif /* WITH_LIBCORE */