2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Primitive list scheduling with different node selectors.
23 * @author Sebastian Hack
43 #include "iredges_t.h"
48 #include "irprintf_t.h"
54 #include "besched_t.h"
57 #include "belistsched.h"
58 #include "beschedmris.h"
59 #include "beschedrss.h"
/* Debug module handle for this scheduler; only present in debug builds. */
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
/* Backend nodes that must always appear in a schedule. */
66 #define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
/* Node-selector strategies.
 * NOTE(review): the enclosing enum header/footer lines are not visible
 * in this excerpt. */
69 BE_SCHED_SELECT_TRIVIAL = 0,
70 BE_SCHED_SELECT_REGPRESS = 1,
71 BE_SCHED_SELECT_MUCHNIK = 2,
72 BE_SCHED_SELECT_HEUR = 3,
73 BE_SCHED_SELECT_HMUCHNIK = 4,
74 BE_SCHED_SELECT_RANDOM = 5
/* Schedule-preparation strategies.
 * NOTE(review): the declaration of BE_SCHED_PREP_RSS (referenced below)
 * is on a line not visible here — presumably value 1; confirm. */
78 BE_SCHED_PREP_NONE = 0,
79 BE_SCHED_PREP_MRIS = 2,
/* Option record filled from the command line via the table below. */
83 typedef struct _list_sched_options_t {
84 int select; /**< the node selector */
85 int prep; /**< schedule preparation */
86 } list_sched_options_t;
/* Defaults: heuristic selector, no preparation pass. */
88 static list_sched_options_t list_sched_options = {
89 BE_SCHED_SELECT_HEUR, /* mueller heuristic selector */
90 BE_SCHED_PREP_NONE, /* no scheduling preparation */
95 #include <libcore/lc_opts.h>
96 #include <libcore/lc_opts_enum.h>
98 /* schedule selector options. */
99 static const lc_opt_enum_int_items_t sched_select_items[] = {
100 { "trivial", BE_SCHED_SELECT_TRIVIAL },
101 { "random", BE_SCHED_SELECT_RANDOM },
102 { "regpress", BE_SCHED_SELECT_REGPRESS },
103 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
104 { "heur", BE_SCHED_SELECT_HEUR },
105 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
109 /* schedule preparation options. */
110 static const lc_opt_enum_int_items_t sched_prep_items[] = {
111 { "none", BE_SCHED_PREP_NONE },
112 { "mris", BE_SCHED_PREP_MRIS },
113 { "rss", BE_SCHED_PREP_RSS },
/* Glue records binding the enum tables to the option fields above. */
117 static lc_opt_enum_int_var_t sched_select_var = {
118 &list_sched_options.select, sched_select_items
121 static lc_opt_enum_int_var_t sched_prep_var = {
122 &list_sched_options.prep, sched_prep_items
/* Option table registered with libcore in be_init_listsched(). */
125 static const lc_opt_table_entry_t list_sched_option_table[] = {
126 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
127 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
130 #endif /* WITH_LIBCORE */
133 * All scheduling info needed per node.
135 typedef struct _sched_irn_t {
136 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
137 unsigned already_sched : 1; /**< Set if this node is already scheduled */
141 * Scheduling environment for the whole graph.
143 typedef struct _sched_env_t {
144 sched_irn_t *sched_info; /**< scheduling info per node */
145 const list_sched_selector_t *selector; /**< The node selector. */
146 const arch_env_t *arch_env; /**< The architecture environment. */
147 const ir_graph *irg; /**< The graph to schedule. */
148 void *selector_env; /**< A pointer to give to the selector. */
152 * Environment for a block scheduler.
154 typedef struct _block_sched_env_t {
155 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
156 ir_nodeset_t cands; /**< the set of candidates */
157 ir_node *block; /**< the current block */
158 sched_env_t *sched_env; /**< the scheduler environment */
159 ir_nodeset_t live; /**< simple liveness during scheduling */
160 const list_sched_selector_t *selector; /**< selector vtable, taken from the graph environment */
161 void *selector_block_env; /**< per-block state returned by selector->init_block() */
165 * Returns non-zero if a node must be placed in the schedule.
/* NOTE(review): the opening brace, the declaration of 'res' (presumably
 * initialized to a negative "no opinion" value) and the bodies of the
 * early returns are not visible in this excerpt — confirm against the
 * full source. */
167 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
171 /* if there are no uses, don't schedule */
172 if (get_irn_n_edges(irn) < 1)
175 /* else ask the scheduler */
176 if (sel->to_appear_in_schedule)
177 res = sel->to_appear_in_schedule(block_env, irn);
/* Negative selector answer means "no opinion": fall back to the generic
 * test plus the backend-node macro, excluding Unknown nodes. */
179 return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
183 * Returns non-zero if the node is already scheduled
185 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
187 int idx = get_irn_idx(n);
189 assert(idx < ARR_LEN(env->sched_info));
190 return env->sched_info[idx].already_sched;
194 * Mark a node as already scheduled
196 static INLINE void set_already_scheduled(block_sched_env_t *env, ir_node *n)
198 int idx = get_irn_idx(n);
200 assert(idx < ARR_LEN(env->sched_info));
201 env->sched_info[idx].already_sched = 1;
/* Forward declaration: make_ready() and add_to_sched() call each other. */
204 static void add_to_sched(block_sched_env_t *env, ir_node *irn);
207 * Try to put a node in the ready set.
208 * @param env The block scheduler environment.
209 * @param pred The previous scheduled node.
210 * @param irn The node to make ready.
211 * @return 1, if the node could be made ready, 0 else.
/* NOTE(review): this excerpt omits the opening brace, the loop-variable
 * declarations and the early-return bodies of this function. */
213 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
217 /* Blocks cannot be scheduled. */
218 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
222 * Check, if the given ir node is in a different block as the
223 * currently scheduled one. If that is so, don't make the node ready.
225 if (env->block != get_nodes_block(irn))
/* Every operand (regular inputs plus dependency edges) that lives in
 * this block must already be scheduled before irn can become ready. */
228 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
229 ir_node *op = get_irn_in_or_dep(irn, i);
231 /* if irn is an End we have keep-alives and op might be a block, skip that */
233 assert(get_irn_op(irn) == op_End);
237 /* If the operand is local to the scheduled block and not yet
238 * scheduled, this nodes cannot be made ready, so exit. */
239 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
/* Nodes that need not appear in the schedule are emitted immediately;
 * everything else becomes a candidate for the selector. */
243 if (! must_appear_in_schedule(env->selector, env, irn)) {
244 add_to_sched(env, irn);
245 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
247 ir_nodeset_insert(&env->cands, irn);
249 /* Notify selector about the ready node. */
250 if (env->selector->node_ready)
251 env->selector->node_ready(env->selector_block_env, irn, pred);
253 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
260 * Try, to make all users of a node ready.
261 * In fact, a usage node can only be made ready, if all its operands
262 * have already been scheduled yet. This is checked by make_ready().
263 * @param env The block schedule environment.
264 * @param irn The node, which usages (successors) are to be made ready.
266 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
268 const ir_edge_t *edge;
270 /* make all data users ready */
271 foreach_out_edge(irn, edge) {
272 ir_node *user = get_edge_src_irn(edge);
/* NOTE(review): a filter between fetching the user and the make_ready()
 * call is elided in this excerpt — confirm against the full source. */
275 make_ready(env, irn, user);
278 /* and the dependent nodes as well */
279 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
280 ir_node *user = get_edge_src_irn(edge);
283 make_ready(env, irn, user);
288 * Returns the number of not yet schedules users.
290 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
291 int idx = get_irn_idx(n);
293 assert(idx < ARR_LEN(env->sched_info));
294 return env->sched_info[idx].num_not_sched_user;
298 * Sets the number of not yet schedules users.
300 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
301 int idx = get_irn_idx(n);
303 assert(idx < ARR_LEN(env->sched_info));
304 env->sched_info[idx].num_not_sched_user = num;
308 * Add @p num to the number of not yet schedules users and returns the result.
310 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
311 int idx = get_irn_idx(n);
313 assert(idx < ARR_LEN(env->sched_info));
314 env->sched_info[idx].num_not_sched_user += num;
315 return env->sched_info[idx].num_not_sched_user;
319 * Returns the number of users of a node having mode datab.
/* NOTE(review): the declaration/initialization of the accumulator 'sum',
 * the else-branch header and the return statement are elided in this
 * excerpt. */
321 static int get_num_successors(ir_node *irn) {
323 const ir_edge_t *edge;
/* mode_T nodes carry no data themselves: count through their Projs,
 * recursing for nested tuples. */
325 if (get_irn_mode(irn) == mode_T) {
326 /* for mode_T nodes: count the users of all Projs */
327 foreach_out_edge(irn, edge) {
328 ir_node *proj = get_edge_src_irn(edge);
329 ir_mode *mode = get_irn_mode(proj);
331 if (mode == mode_T) {
332 sum += get_num_successors(proj);
333 } else if (mode_is_datab(mode)) {
334 sum += get_irn_n_edges(proj);
339 /* do not count keep-alive edges */
340 foreach_out_edge(irn, edge) {
341 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
350 * Adds irn to @p live, updates all inputs that this user is scheduled
351 * and counts all of its non scheduled users.
353 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
/* Walk all inputs and dependency inputs of the node being scheduled. */
360 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
361 ir_node *in = get_irn_in_or_dep(irn, i);
363 /* if in is a proj: update predecessor */
366 /* if in is still in the live set: reduce number of users by one */
367 if (ir_nodeset_contains(&env->live, in)) {
368 if (add_irn_not_sched_user(env, in, -1) <= 0)
369 ir_nodeset_remove(&env->live, in);
374 get_num_successors returns the number of all users. This includes
375 users in different blocks as well. As the each block is scheduled separately
376 the liveness info of those users will not be updated and so these
377 users will keep up the register pressure as it is desired.
/* The node itself now becomes live with all of its users unscheduled. */
379 i = get_num_successors(irn);
381 set_irn_not_sched_user(env, irn, i);
382 ir_nodeset_insert(&env->live, irn);
387 * Append an instruction to a schedule.
388 * @param env The block scheduling environment.
389 * @param irn The node to add to the schedule.
390 * @return The given node.
392 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
394 /* If the node consumes/produces data, it is appended to the schedule
395 * list, otherwise, it is not put into the list */
396 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
397 update_sched_liveness(env, irn);
398 sched_add_before(env->block, irn);
400 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
402 /* Remove the node from the ready set */
403 ir_nodeset_remove(&env->cands, irn);
406 /* notify the selector about the finally selected node. */
407 if (env->selector->node_selected)
408 env->selector->node_selected(env->selector_block_env, irn);
410 /* Insert the node in the set of all available scheduled nodes. */
411 set_already_scheduled(env, irn);
/* Scheduling irn may unblock its users: try to make them ready. */
413 make_users_ready(env, irn);
416 #ifdef SCHEDULE_PROJS
418 * Add the proj nodes of a tuple-mode irn to the schedule immediately
419 * after the tuple-moded irn. By pinning the projs after the irn, no
420 * other nodes can create a new lifetime between the tuple-moded irn and
421 * one of its projs. This should render a realistic image of a
422 * tuple-moded irn, which in fact models a node which defines multiple
425 * @param irn The tuple-moded irn.
427 static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
429 const ir_edge_t *edge;
431 assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
437 /* non-proj nodes can have dependency edges to tuple nodes. */
438 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
439 ir_node *out = get_edge_src_irn(edge);
440 make_ready(env, irn, out);
443 /* schedule the normal projs */
444 foreach_out_edge(irn, edge) {
445 ir_node *out = get_edge_src_irn(edge);
447 assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* Nested tuples are handled recursively; plain Projs are pinned
 * directly behind the producing node. */
449 if (get_irn_mode(out) == mode_T) {
450 add_tuple_projs(env, out);
452 add_to_sched(env, out);
459 * Perform list scheduling on a block.
461 * Note, that the caller must compute a linked list of nodes in the block
462 * using the link field before calling this function.
464 * Also the outs must have been computed.
466 * @param block The block node.
467 * @param env Scheduling environment.
/* NOTE(review): several control-flow lines of this function (braces,
 * 'continue's, the Phi test, break statements) are not visible in this
 * excerpt; the comments below annotate only the visible logic. */
469 static void list_sched_block(ir_node *block, void *env_ptr)
471 sched_env_t *env = env_ptr;
472 const list_sched_selector_t *selector = env->selector;
473 ir_node *start_node = get_irg_start(get_irn_irg(block));
475 block_sched_env_t be;
476 const ir_edge_t *edge;
480 /* Initialize the block's list head that will hold the schedule. */
481 sched_init_block(block);
483 /* Initialize the block scheduling environment */
484 be.sched_info = env->sched_info;
486 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
487 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
488 be.selector = selector;
491 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
/* Give the selector a chance to set up per-block state. */
493 if (selector->init_block)
494 be.selector_block_env = selector->init_block(env->selector_env, block);
496 /* Then one can add all nodes are ready to the set. */
497 foreach_out_edge(block, edge) {
498 ir_node *irn = get_edge_src_irn(edge);
500 /* Skip the end node because of keepalive edges. */
501 if (get_irn_opcode(irn) == iro_End)
504 if (get_irn_n_edges(irn) == 0)
509 Phi functions are scheduled immediately, since they only
510 transfer data flow from the predecessors to this block.
512 add_to_sched(&be, irn);
514 else if (irn == start_node) {
515 /* The start block will be scheduled as the first node */
516 add_to_sched(&be, irn);
517 #ifdef SCHEDULE_PROJS
518 add_tuple_projs(&be, irn);
522 /* Other nodes must have all operands in other blocks to be made
526 /* Check, if the operands of a node are not local to this block */
527 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
528 ir_node *operand = get_irn_in_or_dep(irn, j);
530 if (get_nodes_block(operand) == block) {
534 /* live in values increase register pressure */
535 ir_nodeset_insert(&be.live, operand);
539 /* Make the node ready, if all operands live in a foreign block */
541 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
542 make_ready(&be, NULL, irn);
547 /* Iterate over all remaining nodes */
548 while (ir_nodeset_size(&be.cands) > 0) {
549 ir_nodeset_iterator_t iter;
550 /* collect statistics about amount of ready nodes */
551 be_do_stat_sched_ready(block, &be.cands);
553 /* Keeps must be scheduled immediately */
554 foreach_ir_nodeset(&be.cands, irn, iter) {
555 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
/* No forced node found: ask the selector to pick the next node. */
561 /* Keeps must be immediately scheduled */
562 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
565 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
567 /* Add the node to the schedule. */
568 add_to_sched(&be, irn);
570 #ifdef SCHEDULE_PROJS
571 if (get_irn_mode(irn) == mode_T)
572 add_tuple_projs(&be, irn);
575 /* remove the scheduled node from the ready list. */
576 ir_nodeset_remove(&be.cands, irn);
/* Let the selector tear down its per-block state, then free the sets. */
579 if (selector->finish_block)
580 selector->finish_block(be.selector_block_env);
582 ir_nodeset_destroy(&be.cands);
583 ir_nodeset_destroy(&be.live);
586 /* List schedule a graph. */
/* NOTE(review): the opening brace, the declarations of 'env'/'num_nodes'
 * and the 'break' statements between switch cases are not visible in
 * this excerpt — confirm against the full source before relying on the
 * fall-through structure shown here. */
587 void list_sched(be_irg_t *birg, be_options_t *be_opts)
589 const arch_env_t *arch_env = birg->main_env->arch_env;
590 ir_graph *irg = birg->irg;
594 mris_env_t *mris = NULL;
595 list_sched_selector_t sel;
599 /* Select a scheduler based on backend options */
600 switch (list_sched_options.select) {
601 case BE_SCHED_SELECT_TRIVIAL:
602 memcpy(&sel, trivial_selector, sizeof(sel));
604 case BE_SCHED_SELECT_RANDOM:
605 memcpy(&sel, random_selector, sizeof(sel));
607 case BE_SCHED_SELECT_REGPRESS:
608 memcpy(&sel, reg_pressure_selector, sizeof(sel));
610 case BE_SCHED_SELECT_MUCHNIK:
611 memcpy(&sel, muchnik_selector, sizeof(sel));
613 case BE_SCHED_SELECT_HEUR:
614 memcpy(&sel, heuristic_selector, sizeof(sel));
/* HMUCHNIK currently falls back to the trivial selector. */
616 case BE_SCHED_SELECT_HMUCHNIK:
618 memcpy(&sel, trivial_selector, sizeof(sel));
622 /* Matze: This is very slow, we should avoid it to improve backend speed,
623 * we just have to make sure that we have no dangling out-edges at this
627 /* Assure, that we have no dangling out-edges to deleted stuff */
628 edges_deactivate(birg->irg);
629 edges_activate(birg->irg);
/* Run the requested preparation pass (MRIS or RSS), if any. */
632 switch (list_sched_options.prep) {
633 case BE_SCHED_PREP_MRIS:
634 mris = be_sched_mris_preprocess(birg);
636 case BE_SCHED_PREP_RSS:
637 rss_schedule_preparation(birg);
643 num_nodes = get_irg_last_idx(irg);
645 /* initialize environment for list scheduler */
646 memset(&env, 0, sizeof(env));
647 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
648 env.arch_env = arch_env;
650 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
652 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
654 if (env.selector->init_graph)
655 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
657 /* Schedule each single block. */
658 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
660 if (env.selector->finish_graph)
661 env.selector->finish_graph(env.selector_env);
/* MRIS preprocessing allocates state that must be released here. */
663 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
664 be_sched_mris_free(mris);
666 DEL_ARR_F(env.sched_info);
669 /* List schedule a block. */
/* Same setup as list_sched() but schedules only the given block and
 * performs no preparation pass.
 * NOTE(review): declarations of 'env'/'num_nodes', the opening brace and
 * the switch 'break's are elided in this excerpt. */
670 void list_sched_single_block(const be_irg_t *birg, ir_node *block,
671 be_options_t *be_opts)
673 const arch_env_t *arch_env = birg->main_env->arch_env;
674 ir_graph *irg = birg->irg;
678 list_sched_selector_t sel;
682 /* Select a scheduler based on backend options */
683 switch (list_sched_options.select) {
684 case BE_SCHED_SELECT_TRIVIAL:
685 memcpy(&sel, trivial_selector, sizeof(sel));
687 case BE_SCHED_SELECT_RANDOM:
688 memcpy(&sel, random_selector, sizeof(sel));
690 case BE_SCHED_SELECT_REGPRESS:
691 memcpy(&sel, reg_pressure_selector, sizeof(sel));
693 case BE_SCHED_SELECT_MUCHNIK:
694 memcpy(&sel, muchnik_selector, sizeof(sel));
696 case BE_SCHED_SELECT_HEUR:
697 memcpy(&sel, heuristic_selector, sizeof(sel));
/* HMUCHNIK currently falls back to the trivial selector. */
699 case BE_SCHED_SELECT_HMUCHNIK:
701 memcpy(&sel, trivial_selector, sizeof(sel));
704 /* Assure, that the out edges are computed */
705 edges_deactivate(birg->irg);
706 edges_activate(birg->irg);
708 num_nodes = get_irg_last_idx(irg);
710 /* initialize environment for list scheduler */
711 memset(&env, 0, sizeof(env));
712 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
713 env.arch_env = arch_env;
715 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
717 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
719 if (env.selector->init_graph)
720 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
722 /* Schedule block. */
723 list_sched_block(block, &env);
725 if (env.selector->finish_graph)
726 env.selector->finish_graph(env.selector_env);
728 DEL_ARR_F(env.sched_info);
732 * Register list scheduler options.
/* Module constructor: hooks the option table into the "be.listsched"
 * option group and registers the debug module. */
734 void be_init_listsched(void) {
736 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
737 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
739 lc_opt_add_table(sched_grp, list_sched_option_table);
741 FIRM_DBG_REGISTER(dbg, "firm.be.sched");
744 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);