2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
25 #include "iredges_t.h"
30 #include "irprintf_t.h"
35 #include "besched_t.h"
38 #include "belistsched.h"
39 #include "beschedmris.h"
40 #include "beschedrss.h"
45 #include <libcore/lc_opts.h>
46 #include <libcore/lc_opts_enum.h>
47 #endif /* WITH_LIBCORE */
50 BE_SCHED_SELECT_TRIVIAL = 0,
51 BE_SCHED_SELECT_REGPRESS = 1,
52 BE_SCHED_SELECT_MUCHNIK = 2,
53 BE_SCHED_SELECT_HEUR = 3,
54 BE_SCHED_SELECT_HMUCHNIK = 4,
55 BE_SCHED_SELECT_RANDOM = 5
59 BE_SCHED_PREP_NONE = 0,
60 BE_SCHED_PREP_MRIS = 2,
/** Aggregates the list scheduler's command line options (filled from the tables below). */
64 typedef struct _list_sched_options_t {
65 int select; /**< the node selector */
66 int prep; /**< schedule preparation */
67 } list_sched_options_t;
/* Option defaults: Mueller-heuristic node selector, no preparation phase. */
69 static list_sched_options_t list_sched_options = {
70 BE_SCHED_SELECT_HEUR, /* mueller heuristic selector */
71 BE_SCHED_PREP_NONE, /* no scheduling preparation */
75 /* schedule selector options. */
/* Maps the "select" option strings to the BE_SCHED_SELECT_* values. */
76 static const lc_opt_enum_int_items_t sched_select_items[] = {
77 { "trivial", BE_SCHED_SELECT_TRIVIAL },
78 { "random", BE_SCHED_SELECT_RANDOM },
79 { "regpress", BE_SCHED_SELECT_REGPRESS },
80 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
81 { "heur", BE_SCHED_SELECT_HEUR },
82 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
86 /* schedule preparation options. */
/* Maps the "prep" option strings to the BE_SCHED_PREP_* values. */
87 static const lc_opt_enum_int_items_t sched_prep_items[] = {
88 { "none", BE_SCHED_PREP_NONE },
89 { "mris", BE_SCHED_PREP_MRIS },
90 { "rss", BE_SCHED_PREP_RSS },
/* Bind the enum item tables above to the fields of list_sched_options. */
94 static lc_opt_enum_int_var_t sched_select_var = {
95 &list_sched_options.select, sched_select_items
98 static lc_opt_enum_int_var_t sched_prep_var = {
99 &list_sched_options.prep, sched_prep_items
/* Option table registered with libcore in list_sched_register_options(). */
102 static const lc_opt_table_entry_t list_sched_option_table[] = {
103 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
104 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
107 #endif /* WITH_LIBCORE */
110 * All scheduling info needed per node.
112 typedef struct _sched_irn_t {
/* Entries live in sched_env_t::sched_info, indexed by the node's graph-wide index. */
113 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
114 unsigned already_sched : 1; /**< Set if this node is already scheduled */
118 * Scheduling environment for the whole graph.
120 typedef struct _sched_env_t {
121 sched_irn_t *sched_info; /**< scheduling info per node */
122 const list_sched_selector_t *selector; /**< The node selector. */
123 const arch_env_t *arch_env; /**< The architecture environment. */
124 const ir_graph *irg; /**< The graph to schedule. */
/* selector_env is produced by selector->init_graph() and passed back on every callback. */
125 void *selector_env; /**< A pointer to give to the selector. */
129 * Environment for a block scheduler.
131 typedef struct _block_sched_env_t {
132 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
133 nodeset *cands; /**< the set of candidates */
134 ir_node *block; /**< the current block */
135 sched_env_t *sched_env; /**< the scheduler environment */
136 nodeset *live; /**< simple liveness during scheduling */
137 const list_sched_selector_t *selector; /**< the node selector (same object as sched_env->selector) */
138 void *selector_block_env; /**< per-block state returned by selector->init_block() */
139 DEBUG_ONLY(firm_dbg_module_t *dbg;)
143 * Returns non-zero if the node is already scheduled
145 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
147 int idx = get_irn_idx(n);
/* sched_info is a flexible array sized by the graph's last index; see list_sched(). */
149 assert(idx < ARR_LEN(env->sched_info));
150 return env->sched_info[idx].already_sched;
154 * Mark a node as already scheduled
156 static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
158 int idx = get_irn_idx(n);
/* The flag lives in the per-node scheduling info, indexed by graph index. */
160 assert(idx < ARR_LEN(env->sched_info));
161 env->sched_info[idx].already_sched = 1;
165 * Try to put a node in the ready set.
166 * @param env The block scheduler environment.
167 * @param pred The previous scheduled node.
168 * @param irn The node to make ready.
169 * @return 1, if the node could be made ready, 0 else.
171 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
175 /* Blocks cannot be scheduled. */
176 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
180 * Check, if the given ir node is in a different block than the
181 * currently scheduled one. If that is so, don't make the node ready.
183 if (env->block != get_nodes_block(irn))
/* Walk regular inputs and dependency inputs alike. */
186 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
187 ir_node *op = get_irn_in_or_dep(irn, i);
189 /* if irn is an End we have keep-alives and op might be a block, skip that */
191 assert(get_irn_op(irn) == op_End);
195 /* If the operand is local to the scheduled block and not yet
196 * scheduled, this node cannot be made ready, so exit. */
197 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
/* All local operands scheduled: irn becomes a selection candidate. */
201 nodeset_insert(env->cands, irn);
203 /* Notify selector about the ready node. */
204 if (env->selector->node_ready)
205 env->selector->node_ready(env->selector_block_env, irn, pred);
207 DB((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
213 * Try to make all users of a node ready.
214 * In fact, a usage node can only be made ready, if all its operands
215 * have already been scheduled. This is checked by make_ready().
216 * @param env The block schedule environment.
217 * @param irn The node, which usages (successors) are to be made ready.
219 static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
221 const ir_edge_t *edge;
/* Regular data users first ... */
223 foreach_out_edge(irn, edge) {
224 ir_node *user = get_edge_src_irn(edge);
226 make_ready(env, irn, user);
/* ... then users reached only through dependency edges. */
229 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
230 ir_node *user = get_edge_src_irn(edge);
232 make_ready(env, irn, user);
237 * Returns the number of not yet scheduled users.
239 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
240 int idx = get_irn_idx(n);
242 assert(idx < ARR_LEN(env->sched_info));
243 return env->sched_info[idx].num_not_sched_user;
247 * Sets the number of not yet scheduled users.
249 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
250 int idx = get_irn_idx(n);
252 assert(idx < ARR_LEN(env->sched_info));
253 env->sched_info[idx].num_not_sched_user = num;
257 * Add @p num to the number of not yet scheduled users and returns the result.
259 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
260 int idx = get_irn_idx(n);
/* num may be negative; update_sched_liveness() uses -1 to decrement. */
262 assert(idx < ARR_LEN(env->sched_info));
263 env->sched_info[idx].num_not_sched_user += num;
264 return env->sched_info[idx].num_not_sched_user;
268 * Returns the number of users of a node having mode datab.
270 static int get_num_successors(ir_node *irn) {
272 const ir_edge_t *edge;
274 if (get_irn_mode(irn) == mode_T) {
275 /* for mode_T nodes: count the users of all Projs */
276 foreach_out_edge(irn, edge) {
277 ir_node *proj = get_edge_src_irn(edge);
278 ir_mode *mode = get_irn_mode(proj);
/* nested tuple Projs are counted recursively */
281 sum += get_num_successors(proj);
282 else if (mode_is_datab(mode))
283 sum += get_irn_n_edges(proj);
287 /* do not count keep-alive edges */
288 foreach_out_edge(irn, edge) {
289 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
298 * Adds irn to @p live, updates all inputs that this user is scheduled
299 * and counts all of its non scheduled users.
301 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
/* First: each input loses one unscheduled user now that irn is scheduled. */
308 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
309 ir_node *in = get_irn_in_or_dep(irn, i);
311 /* if in is a proj: update predecessor */
313 in = get_Proj_pred(in);
315 /* if in is still in the live set: reduce number of users by one */
316 if (nodeset_find(env->live, in)) {
317 if (add_irn_not_sched_user(env, in, -1) <= 0)
318 nodeset_remove(env->live, in);
/*
323 get_num_successors returns the number of all users. This includes
324 users in different blocks as well. As each block is scheduled separately
325 the liveness info of those users will not be updated and so these
326 users will keep up the register pressure as it is desired.
*/
328 i = get_num_successors(irn);
/* Second: irn itself becomes live with all of its users still unscheduled. */
330 set_irn_not_sched_user(env, irn, i);
331 nodeset_insert(env->live, irn);
/**
 * Decide whether a node must appear in the schedule list.
 * Asks the selector's to_appear_in_schedule() hook first; a negative
 * answer falls back to the generic test (plus Keep/CopyKeep/RegParams).
 */
335 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
339 if(sel->to_appear_in_schedule)
340 res = sel->to_appear_in_schedule(block_env, irn);
342 return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn));
346 * Append an instruction to a schedule.
347 * @param env The block scheduling environment.
348 * @param irn The node to add to the schedule.
349 * @return The given node.
351 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
353 /* If the node consumes/produces data, it is appended to the schedule
354 * list, otherwise, it is not put into the list */
355 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
356 sched_info_t *info = get_irn_sched_info(irn);
357 INIT_LIST_HEAD(&info->list);
359 update_sched_liveness(env, irn);
/* NOTE(review): inserting before the block appears to append at the end of
 * the block's schedule list — confirm against besched_t.h semantics. */
360 sched_add_before(env->block, irn);
362 DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
365 /* notify the selector about the finally selected node. */
366 if (env->selector->node_selected)
367 env->selector->node_selected(env->selector_block_env, irn);
369 /* Insert the node in the set of all already scheduled nodes. */
370 mark_already_scheduled(env, irn);
372 /* Remove the node from the ready set */
373 if(nodeset_find(env->cands, irn))
374 nodeset_remove(env->cands, irn);
380 * Add the proj nodes of a tuple-mode irn to the schedule immediately
381 * after the tuple-moded irn. By pinning the projs after the irn, no
382 * other nodes can create a new lifetime between the tuple-moded irn and
383 * one of its projs. This should render a realistic image of a
384 * tuple-moded irn, which in fact models a node which defines multiple
387 * @param irn The tuple-moded irn.
389 static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
391 const ir_edge_t *edge;
393 assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
399 /* non-proj nodes can have dependency edges to tuple nodes. */
400 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
401 ir_node *out = get_edge_src_irn(edge);
402 make_ready(env, irn, out);
405 /* schedule the normal projs */
406 foreach_out_edge(irn, edge) {
407 ir_node *out = get_edge_src_irn(edge);
409 assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* A Proj may itself be mode_T (nested tuple); recurse before scheduling it. */
411 if (get_irn_mode(out) == mode_T)
412 add_tuple_projs(env, out);
414 add_to_sched(env, out);
415 make_users_ready(env, out);
421 * Perform list scheduling on a block.
423 * Note, that the caller must compute a linked list of nodes in the block
424 * using the link field before calling this function.
426 * Also the outs must have been computed.
428 * @param block The block node.
429 * @param env Scheduling environment.
431 static void list_sched_block(ir_node *block, void *env_ptr)
433 sched_env_t *env = env_ptr;
434 const list_sched_selector_t *selector = env->selector;
435 ir_node *start_node = get_irg_start(get_irn_irg(block));
436 sched_info_t *info = get_irn_sched_info(block);
438 block_sched_env_t be;
439 const ir_edge_t *edge;
443 /* Initialize the block's list head that will hold the schedule. */
444 INIT_LIST_HEAD(&info->list);
446 /* Initialize the block scheduling environment */
447 be.sched_info = env->sched_info;
449 be.cands = new_nodeset(get_irn_n_edges(block));
450 be.live = new_nodeset(get_irn_n_edges(block));
451 be.selector = selector;
453 FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
455 DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
457 if (selector->init_block)
458 be.selector_block_env = selector->init_block(env->selector_env, block);
460 /* Then one can add all nodes that are ready to the set. */
461 foreach_out_edge(block, edge) {
462 ir_node *irn = get_edge_src_irn(edge);
464 /* Skip the end node because of keepalive edges. */
465 if (get_irn_opcode(irn) == iro_End)
468 if (get_irn_n_edges(irn) == 0)
473 Phi functions are scheduled immediately, since they only
474 transfer data flow from the predecessors to this block.
476 add_to_sched(&be, irn);
477 make_users_ready(&be, irn);
479 else if (irn == start_node) {
480 /* The start block will be scheduled as the first node */
481 add_to_sched(&be, irn);
482 add_tuple_projs(&be, irn);
485 /* Other nodes must have all operands in other blocks to be made
489 /* Check, if the operands of a node are not local to this block */
490 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
491 ir_node *operand = get_irn_in_or_dep(irn, j);
493 if (get_nodes_block(operand) == block) {
498 /* live in values increase register pressure */
499 update_sched_liveness(&be, operand);
503 /* Make the node ready, if all operands live in a foreign block */
505 DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
506 make_ready(&be, NULL, irn);
511 /* Iterate over all remaining nodes */
512 while (nodeset_count(be.cands) > 0) {
513 /* collect statistics about amount of ready nodes */
514 be_do_stat_sched_ready(block, be.cands);
516 /* Keeps must be scheduled immediately */
517 foreach_nodeset(be.cands, irn) {
518 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || get_irn_mode(irn) == mode_M) {
519 nodeset_break(be.cands);
525 /* no schedulable Keep found above: let the selector pick the next node */
526 irn = be.selector->select(be.selector_block_env, be.cands, be.live);
529 DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));
531 /* Add the node to the schedule. */
532 add_to_sched(&be, irn);
534 if (get_irn_mode(irn) == mode_T)
535 add_tuple_projs(&be, irn);
537 make_users_ready(&be, irn);
539 /* remove the scheduled node from the ready list. */
540 if (nodeset_find(be.cands, irn))
541 nodeset_remove(be.cands, irn);
544 if (selector->finish_block)
545 selector->finish_block(be.selector_block_env);
547 del_nodeset(be.cands);
548 del_nodeset(be.live);
551 /* List schedule a graph. */
552 void list_sched(const be_irg_t *birg, be_options_t *be_opts)
554 const arch_env_t *arch_env = birg->main_env->arch_env;
555 ir_graph *irg = birg->irg;
559 mris_env_t *mris = NULL;
560 list_sched_selector_t sel;
562 /* Select a scheduler based on backend options */
563 switch (list_sched_options.select) {
564 case BE_SCHED_SELECT_TRIVIAL:
565 memcpy(&sel, trivial_selector, sizeof(sel));
567 case BE_SCHED_SELECT_RANDOM:
568 memcpy(&sel, random_selector, sizeof(sel));
570 case BE_SCHED_SELECT_REGPRESS:
571 memcpy(&sel, reg_pressure_selector, sizeof(sel));
573 case BE_SCHED_SELECT_MUCHNIK:
574 memcpy(&sel, muchnik_selector, sizeof(sel));
576 case BE_SCHED_SELECT_HEUR:
577 memcpy(&sel, heuristic_selector, sizeof(sel));
579 case BE_SCHED_SELECT_HMUCHNIK:
/* NOTE(review): hmuchnik currently maps to the trivial selector — likely shared
 * with a default case on an elided line; confirm intent. */
581 memcpy(&sel, trivial_selector, sizeof(sel));
584 /* Assure, that the out edges are computed */
585 edges_deactivate(birg->irg);
586 edges_activate(birg->irg);
/* Optional scheduling preparation phase (MRIS or RSS). */
588 switch (list_sched_options.prep) {
589 case BE_SCHED_PREP_MRIS:
590 mris = be_sched_mris_preprocess(birg);
592 case BE_SCHED_PREP_RSS:
593 rss_schedule_preparation(birg);
/* sched_info is indexed by node index, so size it by the graph's highest index. */
599 num_nodes = get_irg_last_idx(irg);
601 /* initialize environment for list scheduler */
602 memset(&env, 0, sizeof(env));
603 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
604 env.arch_env = arch_env;
606 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
608 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
610 if (env.selector->init_graph)
611 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
613 /* Schedule each single block. */
614 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
616 if (env.selector->finish_graph)
617 env.selector->finish_graph(env.selector_env);
619 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
620 be_sched_mris_free(mris);
622 DEL_ARR_F(env.sched_info);
627 * Register list scheduler options.
629 void list_sched_register_options(lc_opt_entry_t *grp) {
/* run_once presumably guards against double registration — TODO confirm
 * (the guard check itself is not visible here). */
630 static int run_once = 0;
631 lc_opt_entry_t *sched_grp;
635 sched_grp = lc_opt_get_grp(grp, "listsched");
637 lc_opt_add_table(sched_grp, list_sched_option_table);
638 rss_register_options(sched_grp);
641 #endif /* WITH_LIBCORE */