/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   Primitive list scheduling with different node selectors.
 * @author  Sebastian Hack
 */
#include "iredges_t.h"
#include "irprintf_t.h"

#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
#include "beschedrss.h"

#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
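/**
 * Backend nodes (Keep, CopyKeep, RegParams) that must always appear in a
 * schedule; used as a fallback test in must_appear_in_schedule() below.
 */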
#define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
/* the available node selectors */
enum {
	BE_SCHED_SELECT_TRIVIAL  = 0,
	BE_SCHED_SELECT_REGPRESS = 1,
	BE_SCHED_SELECT_MUCHNIK  = 2,
	BE_SCHED_SELECT_HEUR     = 3,
	BE_SCHED_SELECT_HMUCHNIK = 4,
	BE_SCHED_SELECT_RANDOM   = 5
};
/* the available schedule preparations */
enum {
	BE_SCHED_PREP_NONE = 0,
	BE_SCHED_PREP_MRIS = 2,
	BE_SCHED_PREP_RSS
};
typedef struct _list_sched_options_t {
	int select; /**< the node selector */
	int prep;   /**< schedule preparation */
} list_sched_options_t;
static list_sched_options_t list_sched_options = {
	BE_SCHED_SELECT_HEUR, /* Mueller heuristic selector */
	BE_SCHED_PREP_NONE,   /* no scheduling preparation */
};
/* schedule selector options. */
static const lc_opt_enum_int_items_t sched_select_items[] = {
	{ "trivial",  BE_SCHED_SELECT_TRIVIAL },
	{ "random",   BE_SCHED_SELECT_RANDOM },
	{ "regpress", BE_SCHED_SELECT_REGPRESS },
	{ "muchnik",  BE_SCHED_SELECT_MUCHNIK },
	{ "heur",     BE_SCHED_SELECT_HEUR },
	{ "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
	{ NULL,       0 }
};
/* schedule preparation options. */
static const lc_opt_enum_int_items_t sched_prep_items[] = {
	{ "none", BE_SCHED_PREP_NONE },
	{ "mris", BE_SCHED_PREP_MRIS },
	{ "rss",  BE_SCHED_PREP_RSS },
	{ NULL,   0 }
};
static lc_opt_enum_int_var_t sched_select_var = {
	&list_sched_options.select, sched_select_items
};

static lc_opt_enum_int_var_t sched_prep_var = {
	&list_sched_options.prep, sched_prep_items
};
static const lc_opt_table_entry_t list_sched_option_table[] = {
	LC_OPT_ENT_ENUM_PTR("prep",   "schedule preparation", &sched_prep_var),
	LC_OPT_ENT_ENUM_PTR("select", "node selector",        &sched_select_var),
	LC_OPT_LAST
};
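/* Usage sketch: the table is registered under the "be" and "listsched"
 * option groups in be_init_listsched() below, so the options are
 * addressable as (the exact front-end syntax may differ):
 *   be.listsched.select = trivial|random|regpress|muchnik|heur|hmuchnik
 *   be.listsched.prep   = none|mris|rss
 */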
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled */
} sched_irn_t;
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
	sched_irn_t *sched_info;               /**< scheduling info per node */
	const list_sched_selector_t *selector; /**< The node selector. */
	const arch_env_t *arch_env;            /**< The architecture environment. */
	const ir_graph *irg;                   /**< The graph to schedule. */
	void *selector_env;                    /**< A pointer to give to the selector. */
} sched_env_t;
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
	sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
	ir_nodeset_t cands;      /**< the set of candidates */
	ir_node *block;          /**< the current block */
	sched_env_t *sched_env;  /**< the scheduler environment */
	ir_nodeset_t live;       /**< simple liveness during scheduling */
	const list_sched_selector_t *selector; /**< the node selector */
	void *selector_block_env;              /**< the selector's per-block state */
} block_sched_env_t;
/**
 * Returns non-zero if the node is already scheduled.
 */
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].already_sched;
}
/**
 * Marks a node as already scheduled.
 */
static INLINE void set_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].already_sched = 1;
}
/**
 * Try to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1 if the node could be made ready, 0 otherwise.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
	int i, n;

	/* Blocks cannot be scheduled. */
	if (is_Block(irn) || get_irn_n_edges(irn) == 0)
		return 0;

	/*
	 * Check if the given ir node is in a different block than the
	 * currently scheduled one. If that is so, don't make the node ready.
	 */
	if (env->block != get_nodes_block(irn))
		return 0;

	for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
		ir_node *op = get_irn_in_or_dep(irn, i);

		/* if irn is an End we have keep-alives and op might be a block, skip that */
		if (is_Block(op)) {
			assert(get_irn_op(irn) == op_End);
			continue;
		}

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
			return 0;
	}

	ir_nodeset_insert(&env->cands, irn);

	/* Notify the selector about the ready node. */
	if (env->selector->node_ready)
		env->selector->node_ready(env->selector_block_env, irn, pred);

	DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));

	return 1;
}
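/* Example: for an Add computing a = b + c inside the current block, the Add
 * only becomes a candidate once both b and c are scheduled; operands that
 * live in other blocks never delay readiness. */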
/**
 * Returns non-zero if a node must be placed in the schedule.
 */
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
	int res = -1;

	/* if there are no uses, don't schedule */
	if (get_irn_n_edges(irn) < 1)
		return 0;

	/* else ask the scheduler */
	if (sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
}
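/* Decision order above: nodes without users are never scheduled; a
 * non-negative verdict of the selector wins; otherwise the generic
 * to_appear_in_schedule() test applies, with BE_SCHED_NODE() nodes forced
 * into the schedule and Unknown nodes always kept out. */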
static void make_users_ready(block_sched_env_t *env, ir_node *irn);

static void make_user_ready(block_sched_env_t *env, ir_node *pred, ir_node *user) {
	if (! is_Phi(user)) {
		if (! must_appear_in_schedule(env->selector, env, user)) {
			/* notify the selector about the finally selected node. */
			if (env->selector->node_selected)
				env->selector->node_selected(env->selector_block_env, user);

			/* Insert the node in the set of all available scheduled nodes. */
			set_already_scheduled(env, user);

			make_users_ready(env, user);
		} else {
			if (! ir_nodeset_contains(&env->cands, user)) {
				/* work-around: this should NEVER be true, else we have a cycle in the basic block.
				   for now it's needed to compile bzip2.c */
				if (sched_is_scheduled(user)) {
					//assert(!"make an already scheduled user ready");
				}
				else
					make_ready(env, pred, user);
			}
		}
	}
}
/**
 * Try to make all users of a node ready.
 * In fact, a user can only be made ready if all of its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	/* make all data users ready */
	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);

		if (get_block(user) == env->block)
			make_user_ready(env, irn, user);
	}

	/* and the dependent nodes as well */
	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *user = get_edge_src_irn(edge);

		if (get_block(user) == env->block)
			make_user_ready(env, irn, user);
	}
}
/**
 * Returns the number of not yet scheduled users.
 */
static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_not_sched_user;
}
/**
 * Sets the number of not yet scheduled users.
 */
static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_not_sched_user = num;
}
/**
 * Adds @p num to the number of not yet scheduled users and returns the result.
 */
static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_not_sched_user += num;
	return env->sched_info[idx].num_not_sched_user;
}
/**
 * Returns the number of users of a node that have a datab mode.
 */
static int get_num_successors(ir_node *irn) {
	int sum = 0;
	const ir_edge_t *edge;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);

			if (mode == mode_T) {
				sum += get_num_successors(proj);
			} else if (mode_is_datab(mode)) {
				sum += get_irn_n_edges(proj);
			}
		}
	} else {
		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
				sum++;
		}
	}

	return sum;
}
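/* Example: a Call yields a mode_T tuple whose result Proj is itself a
 * mode_T tuple; the recursion above descends through such nested Projs and
 * counts only the out edges of the final datab-mode Projs. */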
/**
 * Adds irn to @p live, tells each of its inputs that one more of their
 * users has been scheduled, and counts irn's not yet scheduled users.
 */
static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
	int i;

	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node *in = get_irn_in_or_dep(irn, i);

		/* if in is a Proj: update the predecessor */
		in = skip_Proj(in);

		/* if in is still in the live set: reduce number of users by one */
		if (ir_nodeset_contains(&env->live, in)) {
			if (add_irn_not_sched_user(env, in, -1) <= 0)
				ir_nodeset_remove(&env->live, in);
		}
	}

	/*
	 * get_num_successors returns the number of all users. This includes
	 * users in different blocks as well. As each block is scheduled separately,
	 * the liveness info of those users will not be updated, so these
	 * users keep up the register pressure, as desired.
	 */
	i = get_num_successors(irn);
	if (i > 0) {
		set_irn_not_sched_user(env, irn, i);
		ir_nodeset_insert(&env->live, irn);
	}
}
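/* Bookkeeping example: a node with three unscheduled users enters the live
 * set with a counter of 3; every time one of these users is scheduled,
 * add_irn_not_sched_user(env, node, -1) decrements it, and the node leaves
 * the live set once the counter drops to 0. */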
/**
 * Appends an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return    The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise it is not put into the list. */
	if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		update_sched_liveness(env, irn);
		sched_add_before(env->block, irn);

		DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
	}

	/* notify the selector about the finally selected node. */
	if (env->selector->node_selected)
		env->selector->node_selected(env->selector_block_env, irn);

	/* Insert the node in the set of all available scheduled nodes. */
	set_already_scheduled(env, irn);

	/* Remove the node from the ready set. */
	ir_nodeset_remove(&env->cands, irn);

	return irn;
}
#ifdef SCHEDULE_PROJS
/**
 * Adds the Proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the Projs after the irn, no
 * other nodes can create a new lifetime between the tuple-mode irn and
 * one of its Projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node defining multiple values.
 *
 * @param env The block scheduling environment.
 * @param irn The tuple-mode irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	/* non-Proj nodes can have dependency edges to tuple nodes. */
	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *out = get_edge_src_irn(edge);
		make_ready(env, irn, out);
	}

	/* schedule the normal Projs */
	foreach_out_edge(irn, edge) {
		ir_node *out = get_edge_src_irn(edge);

		assert(is_Proj(out) && "successor of a mode_T node must be a Proj");

		if (get_irn_mode(out) == mode_T) {
			add_tuple_projs(env, out);
		} else {
			add_to_sched(env, out);
			make_users_ready(env, out);
		}
	}
}
#endif /* SCHEDULE_PROJS */
/**
 * Performs list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the out edges must have been computed.
 *
 * @param block   The block node.
 * @param env_ptr Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
	sched_env_t *env                      = env_ptr;
	const list_sched_selector_t *selector = env->selector;
	ir_node *start_node                   = get_irg_start(get_irn_irg(block));

	block_sched_env_t be;
	const ir_edge_t *edge;
	ir_node *irn;
	int j, m;

	/* Initialize the block's list head that will hold the schedule. */
	sched_init_block(block);

	/* Initialize the block scheduling environment */
	be.sched_info = env->sched_info;
	be.block      = block;
	ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
	ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
	be.selector   = selector;
	be.sched_env  = env;
	be.selector_block_env = NULL;

	DBG((dbg, LEVEL_1, "scheduling %+F\n", block));

	if (selector->init_block)
		be.selector_block_env = selector->init_block(env->selector_env, block);

	/* Then all nodes that are ready can be added to the candidate set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the End node because of keep-alive edges. */
		if (get_irn_opcode(irn) == iro_End)
			continue;

		if (get_irn_n_edges(irn) == 0)
			continue;

		if (is_Phi(irn)) {
			/*
			 * Phi functions are scheduled immediately, since they only
			 * transfer data flow from the predecessors to this block.
			 */
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);
		}
		else if (irn == start_node) {
			/* The Start node is scheduled as the first node. */
			add_to_sched(&be, irn);
#ifdef SCHEDULE_PROJS
			add_tuple_projs(&be, irn);
#endif
			make_users_ready(&be, irn);
		}
		else {
			/* Other nodes must have all operands in other blocks to be made
			 * ready. */
			int ready = 1;

			/* Check whether the operands of the node are local to this block. */
			for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
				ir_node *operand = get_irn_in_or_dep(irn, j);

				if (get_nodes_block(operand) == block) {
					ready = 0;
					break;
				}

				/* live-in values increase register pressure */
				ir_nodeset_insert(&be.live, operand);
			}

			/* Make the node ready if all operands live in a foreign block. */
			if (ready) {
				DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, NULL, irn);
			}
		}
	}

	/* Iterate over all remaining nodes */
	while (ir_nodeset_size(&be.cands) > 0) {
		ir_nodeset_iterator_t iter;

		/* collect statistics about the amount of ready nodes */
		be_do_stat_sched_ready(block, &be.cands);

		/* Keeps must be scheduled immediately. */
		foreach_ir_nodeset(&be.cands, irn, iter) {
			if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
				break;
			}
		}

		/* irn is NULL if the iteration above finished without a break */
		if (irn == NULL) {
			/* No Keep found: ask the selector to pick the next node. */
			irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
		}

		DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

#ifdef SCHEDULE_PROJS
		if (get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);
#endif
		make_users_ready(&be, irn);

		/* remove the scheduled node from the ready list. */
		ir_nodeset_remove(&be.cands, irn);
	}

	if (selector->finish_block)
		selector->finish_block(be.selector_block_env);

	ir_nodeset_destroy(&be.cands);
	ir_nodeset_destroy(&be.live);
}
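/*
 * For orientation, a minimal selector sketch (hypothetical, not part of
 * this file; the callback signature follows the select() call above, the
 * full interface lives in belistsched.h). It simply returns the first
 * candidate of the ready set, roughly what the trivial selector is
 * expected to do:
 *
 *   static ir_node *pick_first(void *block_env, ir_nodeset_t *cands,
 *                              ir_nodeset_t *live)
 *   {
 *       ir_nodeset_iterator_t iter;
 *       ir_node              *irn;
 *       (void)block_env;
 *       (void)live;
 *       foreach_ir_nodeset(cands, irn, iter)
 *           return irn;
 *       return NULL;
 *   }
 */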
/* List schedule a graph. */
void list_sched(be_irg_t *birg, be_options_t *be_opts)
{
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;

	int num_nodes;
	sched_env_t env;
	mris_env_t *mris = NULL;
	list_sched_selector_t sel;

	/* Select a scheduler based on backend options */
	switch (list_sched_options.select) {
		case BE_SCHED_SELECT_TRIVIAL:
			memcpy(&sel, trivial_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_RANDOM:
			memcpy(&sel, random_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_REGPRESS:
			memcpy(&sel, reg_pressure_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_MUCHNIK:
			memcpy(&sel, muchnik_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HEUR:
			memcpy(&sel, heuristic_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HMUCHNIK:
		default:
			memcpy(&sel, trivial_selector, sizeof(sel));
	}
	/* Matze: This is very slow, we should avoid it to improve backend speed,
	 * we just have to make sure that we have no dangling out-edges at this
	 * point... */

	/* Assure that we have no dangling out-edges to deleted stuff. */
	edges_deactivate(birg->irg);
	edges_activate(birg->irg);
	switch (list_sched_options.prep) {
		case BE_SCHED_PREP_MRIS:
			mris = be_sched_mris_preprocess(birg);
			break;
		case BE_SCHED_PREP_RSS:
			rss_schedule_preparation(birg);
			break;
		default:
			break;
	}
	num_nodes = get_irg_last_idx(irg);

	/* initialize the environment for the list scheduler */
	memset(&env, 0, sizeof(env));
	env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
	env.arch_env   = arch_env;
	env.irg        = irg;
	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
		be_sched_mris_free(mris);

	DEL_ARR_F(env.sched_info);
}
/* List schedule a block. */
void list_sched_single_block(const be_irg_t *birg, ir_node *block,
                             be_options_t *be_opts)
{
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;

	int num_nodes;
	sched_env_t env;
	list_sched_selector_t sel;

	/* Select a scheduler based on backend options */
	switch (list_sched_options.select) {
		case BE_SCHED_SELECT_TRIVIAL:
			memcpy(&sel, trivial_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_RANDOM:
			memcpy(&sel, random_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_REGPRESS:
			memcpy(&sel, reg_pressure_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_MUCHNIK:
			memcpy(&sel, muchnik_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HEUR:
			memcpy(&sel, heuristic_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HMUCHNIK:
		default:
			memcpy(&sel, trivial_selector, sizeof(sel));
	}
	/* Assure that the out edges are computed. */
	edges_deactivate(birg->irg);
	edges_activate(birg->irg);

	num_nodes = get_irg_last_idx(irg);

	/* initialize the environment for the list scheduler */
	memset(&env, 0, sizeof(env));
	env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
	env.arch_env   = arch_env;
	env.irg        = irg;
	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule the single block. */
	list_sched_block(block, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	DEL_ARR_F(env.sched_info);
}
/**
 * Register list scheduler options.
 */
void be_init_listsched(void) {
	lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");

	lc_opt_add_table(sched_grp, list_sched_option_table);

	FIRM_DBG_REGISTER(dbg, "firm.be.sched");
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
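/* Debugging hint (a sketch, assuming the usual firm debug facility from
 * debug.h): the DB()/DBG() output of this file can be enabled by setting a
 * mask on the module registered above, e.g.
 *   firm_dbg_set_mask(firm_dbg_register("firm.be.sched"), 3);
 * where bit 0 corresponds to LEVEL_1 and bit 1 to LEVEL_2. */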