2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
25 #include "iredges_t.h"
30 #include "irprintf_t.h"
36 #include "besched_t.h"
39 #include "belistsched.h"
40 #include "beschedmris.h"
41 #include "beschedrss.h"
46 #include <libcore/lc_opts.h>
47 #include <libcore/lc_opts_enum.h>
48 #endif /* WITH_LIBCORE */
/* Node selector strategies; stored in list_sched_options.select. */
51 BE_SCHED_SELECT_TRIVIAL = 0,   /* take any ready node (trivial_selector) */
52 BE_SCHED_SELECT_REGPRESS = 1,  /* minimize register pressure (reg_pressure_selector) */
53 BE_SCHED_SELECT_MUCHNIK = 2,   /* muchnik_selector */
54 BE_SCHED_SELECT_HEUR = 3,      /* Mueller heuristic (heuristic_selector) -- the default */
55 BE_SCHED_SELECT_HMUCHNIK = 4,  /* NOTE: list_sched() currently maps this to trivial_selector */
56 BE_SCHED_SELECT_RANDOM = 5     /* random_selector */
/* Schedule-preparation passes; stored in list_sched_options.prep. */
60 BE_SCHED_PREP_NONE = 0,   /* no preparation pass */
61 BE_SCHED_PREP_MRIS = 2,   /* MRIS preprocessing (be_sched_mris_preprocess) */
/** Option record of the list scheduler, filled from the be.listsched options. */
65 typedef struct _list_sched_options_t {
66 int select; /**< the node selector */
67 int prep; /**< schedule preparation */
68 } list_sched_options_t;
/* Default configuration: Mueller heuristic selector, no preparation pass. */
70 static list_sched_options_t list_sched_options = {
71 BE_SCHED_SELECT_HEUR, /* mueller heuristic selector */
72 BE_SCHED_PREP_NONE, /* no scheduling preparation */
76 /* schedule selector options. */
/* String values accepted by the "select" option, mapped to BE_SCHED_SELECT_*. */
77 static const lc_opt_enum_int_items_t sched_select_items[] = {
78 { "trivial", BE_SCHED_SELECT_TRIVIAL },
79 { "random", BE_SCHED_SELECT_RANDOM },
80 { "regpress", BE_SCHED_SELECT_REGPRESS },
81 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
82 { "heur", BE_SCHED_SELECT_HEUR },
83 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
87 /* schedule preparation options. */
/* String values accepted by the "prep" option, mapped to BE_SCHED_PREP_*. */
88 static const lc_opt_enum_int_items_t sched_prep_items[] = {
89 { "none", BE_SCHED_PREP_NONE },
90 { "mris", BE_SCHED_PREP_MRIS },
91 { "rss", BE_SCHED_PREP_RSS },
/* Binds the "select" enum option to list_sched_options.select. */
95 static lc_opt_enum_int_var_t sched_select_var = {
96 &list_sched_options.select, sched_select_items
/* Binds the "prep" enum option to list_sched_options.prep. */
99 static lc_opt_enum_int_var_t sched_prep_var = {
100 &list_sched_options.prep, sched_prep_items
/* Option table registered under the be.listsched group in be_init_listsched(). */
103 static const lc_opt_table_entry_t list_sched_option_table[] = {
104 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
105 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
108 #endif /* WITH_LIBCORE */
111 * All scheduling info needed per node.
/* One record per ir_node, indexed by get_irn_idx(); see sched_env_t::sched_info. */
113 typedef struct _sched_irn_t {
114 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
115 unsigned already_sched : 1; /**< Set if this node is already scheduled */
119 * Scheduling environment for the whole graph.
121 typedef struct _sched_env_t {
/* Flexible array with one sched_irn_t per node index; allocated and
 * zero-initialized in list_sched(). */
122 sched_irn_t *sched_info; /**< scheduling info per node */
123 const list_sched_selector_t *selector; /**< The node selector. */
124 const arch_env_t *arch_env; /**< The architecture environment. */
125 const ir_graph *irg; /**< The graph to schedule. */
126 void *selector_env; /**< A pointer to give to the selector. */
130 * Environment for a block scheduler.
132 typedef struct _block_sched_env_t {
133 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
134 nodeset *cands; /**< the set of candidates */
135 ir_node *block; /**< the current block */
136 sched_env_t *sched_env; /**< the scheduler environment */
137 nodeset *live; /**< simple liveness during scheduling */
138 const list_sched_selector_t *selector; /**< the node selector, copied from sched_env_t */
139 void *selector_block_env; /**< per-block state returned by selector->init_block() */
140 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< debug module handle (debug builds only) */
144 * Returns non-zero if the node is already scheduled
146 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
148 int idx = get_irn_idx(n);
150 assert(idx < ARR_LEN(env->sched_info));
/* Flag is set by mark_already_scheduled() when the node enters the schedule. */
151 return env->sched_info[idx].already_sched;
155 * Mark a node as already scheduled
157 static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
159 int idx = get_irn_idx(n);
161 assert(idx < ARR_LEN(env->sched_info));
/* Queried by is_already_scheduled() during readiness checks in make_ready(). */
162 env->sched_info[idx].already_sched = 1;
166 * Try to put a node in the ready set.
167 * @param env The block scheduler environment.
168 * @param pred The previous scheduled node.
169 * @param irn The node to make ready.
170 * @return 1, if the node could be made ready, 0 else.
172 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
176 /* Blocks cannot be scheduled. */
177 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
181 * Check, if the given ir node is in a different block as the
182 * currently scheduled one. If that is so, don't make the node ready.
184 if (env->block != get_nodes_block(irn))
/* A node becomes ready only when every operand (regular in or dependency
 * edge) that lives inside this block has already been scheduled. */
187 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
188 ir_node *op = get_irn_in_or_dep(irn, i);
190 /* if irn is an End we have keep-alives and op might be a block, skip that */
192 assert(get_irn_op(irn) == op_End);
196 /* If the operand is local to the scheduled block and not yet
197 * scheduled, this nodes cannot be made ready, so exit. */
198 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
/* All block-local operands are scheduled: irn becomes a candidate. */
202 nodeset_insert(env->cands, irn);
204 /* Notify selector about the ready node. */
205 if (env->selector->node_ready)
206 env->selector->node_ready(env->selector_block_env, irn, pred);
208 DB((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
214 * Try, to make all users of a node ready.
215 * In fact, a usage node can only be made ready if all of its operands
216 * have already been scheduled. This is checked by make_ready().
217 * @param env The block schedule environment.
218 * @param irn The node, which usages (successors) are to be made ready.
220 static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
222 const ir_edge_t *edge;
/* First try the users reached via regular out edges ... */
224 foreach_out_edge(irn, edge) {
225 ir_node *user = get_edge_src_irn(edge);
227 make_ready(env, irn, user);
/* ... then the users attached via explicit dependency edges. */
230 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
231 ir_node *user = get_edge_src_irn(edge);
233 make_ready(env, irn, user);
238 * Returns the number of not yet scheduled users of node n.
240 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
241 int idx = get_irn_idx(n);
243 assert(idx < ARR_LEN(env->sched_info));
244 return env->sched_info[idx].num_not_sched_user;
248 * Sets the number of not yet scheduled users of node n to num.
250 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
251 int idx = get_irn_idx(n);
253 assert(idx < ARR_LEN(env->sched_info));
254 env->sched_info[idx].num_not_sched_user = num;
258 * Add @p num to the number of not yet scheduled users and returns the result.
260 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
261 int idx = get_irn_idx(n);
263 assert(idx < ARR_LEN(env->sched_info));
/* num may be negative; update_sched_liveness() passes -1 when a user is scheduled. */
264 env->sched_info[idx].num_not_sched_user += num;
265 return env->sched_info[idx].num_not_sched_user;
269 * Returns the number of users of a node having mode datab.
/* Counts through Proj chains of mode_T nodes recursively; users that are
 * End nodes (keep-alive edges) are not counted. */
271 static int get_num_successors(ir_node *irn) {
273 const ir_edge_t *edge;
275 if (get_irn_mode(irn) == mode_T) {
276 /* for mode_T nodes: count the users of all Projs */
277 foreach_out_edge(irn, edge) {
278 ir_node *proj = get_edge_src_irn(edge);
279 ir_mode *mode = get_irn_mode(proj);
/* nested tuples are flattened recursively */
282 sum += get_num_successors(proj);
283 else if (mode_is_datab(mode))
284 sum += get_irn_n_edges(proj);
288 /* do not count keep-alive edges */
289 foreach_out_edge(irn, edge) {
290 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
299 * Adds irn to @p live, updates all inputs that this user is scheduled
300 * and counts all of its non-scheduled users.
302 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
/* Walk all inputs (regular ins plus dependency edges) of irn. */
309 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
310 ir_node *in = get_irn_in_or_dep(irn, i);
312 /* if in is a proj: update predecessor */
314 in = get_Proj_pred(in);
316 /* if in is still in the live set: reduce number of users by one */
317 if (nodeset_find(env->live, in)) {
318 if (add_irn_not_sched_user(env, in, -1) <= 0)
/* last user has been scheduled: the value dies here */
319 nodeset_remove(env->live, in);
324 get_num_successors returns the number of all users. This includes
325 users in different blocks as well. As each block is scheduled separately
326 the liveness info of those users will not be updated and so these
327 users will keep up the register pressure as it is desired.
329 i = get_num_successors(irn);
331 set_irn_not_sched_user(env, irn, i);
332 nodeset_insert(env->live, irn);
/**
 * Tell whether irn has to be emitted into the schedule list.
 * The selector may decide via its to_appear_in_schedule callback; a
 * negative answer falls back to the generic to_appear_in_schedule()
 * check plus backend Keep / CopyKeep / RegParams nodes, which always appear.
 */
336 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
340 if(sel->to_appear_in_schedule)
341 res = sel->to_appear_in_schedule(block_env, irn);
343 return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn));
347 * Append an instruction to a schedule.
348 * @param env The block scheduling environment.
349 * @param irn The node to add to the schedule.
350 * @return The given node.
352 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
354 /* If the node consumes/produces data, it is appended to the schedule
355 * list, otherwise, it is not put into the list */
356 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
357 update_sched_liveness(env, irn);
/* NOTE(review): inserting before the block's list head presumably appends
 * at the end of the block's schedule -- confirm against besched semantics. */
358 sched_add_before(env->block, irn);
360 DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
363 /* notify the selector about the finally selected node. */
364 if (env->selector->node_selected)
365 env->selector->node_selected(env->selector_block_env, irn);
367 /* Insert the node in the set of all already scheduled nodes. */
368 mark_already_scheduled(env, irn);
370 /* Remove the node from the ready set */
371 if(nodeset_find(env->cands, irn))
372 nodeset_remove(env->cands, irn);
378 * Add the proj nodes of a tuple-mode irn to the schedule immediately
379 * after the tuple-moded irn. By pinning the projs after the irn, no
380 * other nodes can create a new lifetime between the tuple-moded irn and
381 * one of its projs. This should render a realistic image of a
382 * tuple-moded irn, which in fact models a node which defines multiple
385 * @param irn The tuple-moded irn.
387 static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
389 const ir_edge_t *edge;
391 assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
397 /* non-proj nodes can have dependency edges to tuple nodes. */
398 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
399 ir_node *out = get_edge_src_irn(edge);
400 make_ready(env, irn, out);
403 /* schedule the normal projs */
404 foreach_out_edge(irn, edge) {
405 ir_node *out = get_edge_src_irn(edge);
407 assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* nested tuples: recurse so their Projs get pinned as well */
409 if (get_irn_mode(out) == mode_T)
410 add_tuple_projs(env, out);
412 add_to_sched(env, out);
413 make_users_ready(env, out);
419 * Perform list scheduling on a block.
421 * Note, that the caller must compute a linked list of nodes in the block
422 * using the link field before calling this function.
424 * Also the outs must have been computed.
426 * @param block The block node.
427 * @param env Scheduling environment.
429 static void list_sched_block(ir_node *block, void *env_ptr)
431 sched_env_t *env = env_ptr;
432 const list_sched_selector_t *selector = env->selector;
433 ir_node *start_node = get_irg_start(get_irn_irg(block));
435 block_sched_env_t be;
436 const ir_edge_t *edge;
440 /* Initialize the block's list head that will hold the schedule. */
441 sched_init_block(block);
443 /* Initialize the block scheduling environment */
444 be.sched_info = env->sched_info;
/* Candidate/liveness set sizes are seeded with the block's out-edge count. */
446 be.cands = new_nodeset(get_irn_n_edges(block));
447 be.live = new_nodeset(get_irn_n_edges(block));
448 be.selector = selector;
450 FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
452 DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
454 if (selector->init_block)
455 be.selector_block_env = selector->init_block(env->selector_env, block);
457 /* Phase 1: seed the candidate set with all initially ready nodes. */
458 foreach_out_edge(block, edge) {
459 ir_node *irn = get_edge_src_irn(edge);
461 /* Skip the end node because of keepalive edges. */
462 if (get_irn_opcode(irn) == iro_End)
465 if (get_irn_n_edges(irn) == 0)
470 Phi functions are scheduled immediately, since they only
471 transfer data flow from the predecessors to this block.
473 add_to_sched(&be, irn);
474 make_users_ready(&be, irn);
476 else if (irn == start_node) {
477 /* The start block will be scheduled as the first node */
478 add_to_sched(&be, irn);
479 add_tuple_projs(&be, irn);
482 /* Other nodes must have all operands in other blocks to be made
486 /* Check, if the operands of a node are not local to this block */
487 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
488 ir_node *operand = get_irn_in_or_dep(irn, j);
490 if (get_nodes_block(operand) == block) {
495 /* live in values increase register pressure */
496 update_sched_liveness(&be, operand);
500 /* Make the node ready, if all operands live in a foreign block */
502 DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
503 make_ready(&be, NULL, irn);
508 /* Phase 2: repeatedly pick a candidate until none remain. */
509 while (nodeset_count(be.cands) > 0) {
510 /* collect statistics about amount of ready nodes */
511 be_do_stat_sched_ready(block, be.cands);
513 /* Keeps must be scheduled immediately */
514 foreach_nodeset(be.cands, irn) {
515 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || get_irn_mode(irn) == mode_M) {
516 nodeset_break(be.cands);
/* Keep-like/memory nodes take precedence; otherwise ask the selector. */
522 /* Keeps must be immediately scheduled */
523 irn = be.selector->select(be.selector_block_env, be.cands, be.live);
526 DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));
528 /* Add the node to the schedule. */
529 add_to_sched(&be, irn);
531 if (get_irn_mode(irn) == mode_T)
532 add_tuple_projs(&be, irn);
534 make_users_ready(&be, irn);
536 /* remove the scheduled node from the ready list. */
537 if (nodeset_find(be.cands, irn))
538 nodeset_remove(be.cands, irn);
541 if (selector->finish_block)
542 selector->finish_block(be.selector_block_env);
/* Release the per-block sets. */
544 del_nodeset(be.cands);
545 del_nodeset(be.live);
548 /* List schedule a graph. */
549 void list_sched(const be_irg_t *birg, be_options_t *be_opts)
551 const arch_env_t *arch_env = birg->main_env->arch_env;
552 ir_graph *irg = birg->irg;
556 mris_env_t *mris = NULL;
557 list_sched_selector_t sel;
559 /* Select a scheduler based on backend options */
560 switch (list_sched_options.select) {
561 case BE_SCHED_SELECT_TRIVIAL:
562 memcpy(&sel, trivial_selector, sizeof(sel));
564 case BE_SCHED_SELECT_RANDOM:
565 memcpy(&sel, random_selector, sizeof(sel));
567 case BE_SCHED_SELECT_REGPRESS:
568 memcpy(&sel, reg_pressure_selector, sizeof(sel));
570 case BE_SCHED_SELECT_MUCHNIK:
571 memcpy(&sel, muchnik_selector, sizeof(sel));
573 case BE_SCHED_SELECT_HEUR:
574 memcpy(&sel, heuristic_selector, sizeof(sel));
576 case BE_SCHED_SELECT_HMUCHNIK:
/* NOTE(review): hmuchnik currently falls back to the trivial selector. */
578 memcpy(&sel, trivial_selector, sizeof(sel));
581 /* Assure, that the out edges are computed */
/* Deactivate/activate forces a fresh recomputation of the out edges. */
582 edges_deactivate(birg->irg);
583 edges_activate(birg->irg);
/* Optional preprocessing pass before the actual scheduling. */
585 switch (list_sched_options.prep) {
586 case BE_SCHED_PREP_MRIS:
587 mris = be_sched_mris_preprocess(birg);
589 case BE_SCHED_PREP_RSS:
590 rss_schedule_preparation(birg);
596 num_nodes = get_irg_last_idx(irg);
598 /* initialize environment for list scheduler */
599 memset(&env, 0, sizeof(env));
/* The ISA may override or wrap the selector chosen above. */
600 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
601 env.arch_env = arch_env;
/* One sched_irn_t per node index, zero-initialized. */
603 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
605 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
607 if (env.selector->init_graph)
608 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
610 /* Schedule each single block. */
611 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
613 if (env.selector->finish_graph)
614 env.selector->finish_graph(env.selector_env);
/* Tear down MRIS state if the MRIS preparation pass was run. */
616 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
617 be_sched_mris_free(mris);
619 DEL_ARR_F(env.sched_info);
624 * Register list scheduler options.
/* Registers the "prep" and "select" options under the be.listsched group. */
626 void be_init_listsched(void) {
627 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
628 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
630 lc_opt_add_table(sched_grp, list_sched_option_table);
633 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
634 #endif /* WITH_LIBCORE */