2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE.
22 * @brief Primitive list scheduling with different node selectors.
23 * @author Sebastian Hack
43 #include "iredges_t.h"
48 #include "irprintf_t.h"
54 #include "besched_t.h"
57 #include "belistsched.h"
58 #include "beschedmris.h"
59 #include "beschedrss.h"
64 #include <libcore/lc_opts.h>
65 #include <libcore/lc_opts_enum.h>
67 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
69 #define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
72 BE_SCHED_SELECT_TRIVIAL = 0,
73 BE_SCHED_SELECT_REGPRESS = 1,
74 BE_SCHED_SELECT_MUCHNIK = 2,
75 BE_SCHED_SELECT_HEUR = 3,
76 BE_SCHED_SELECT_HMUCHNIK = 4,
77 BE_SCHED_SELECT_RANDOM = 5
81 BE_SCHED_PREP_NONE = 0,
82 BE_SCHED_PREP_MRIS = 2,
/** Holds the list-scheduler command line options. */
typedef struct _list_sched_options_t {
	int select;  /**< the node selector (one of BE_SCHED_SELECT_*) */
	int prep;    /**< schedule preparation (one of BE_SCHED_PREP_*) */
} list_sched_options_t;
91 static list_sched_options_t list_sched_options = {
92 BE_SCHED_SELECT_HEUR, /* mueller heuristic selector */
93 BE_SCHED_PREP_NONE, /* no scheduling preparation */
96 /* schedule selector options. */
97 static const lc_opt_enum_int_items_t sched_select_items[] = {
98 { "trivial", BE_SCHED_SELECT_TRIVIAL },
99 { "random", BE_SCHED_SELECT_RANDOM },
100 { "regpress", BE_SCHED_SELECT_REGPRESS },
101 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
102 { "heur", BE_SCHED_SELECT_HEUR },
103 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
107 /* schedule preparation options. */
108 static const lc_opt_enum_int_items_t sched_prep_items[] = {
109 { "none", BE_SCHED_PREP_NONE },
110 { "mris", BE_SCHED_PREP_MRIS },
111 { "rss", BE_SCHED_PREP_RSS },
115 static lc_opt_enum_int_var_t sched_select_var = {
116 &list_sched_options.select, sched_select_items
119 static lc_opt_enum_int_var_t sched_prep_var = {
120 &list_sched_options.prep, sched_prep_items
123 static const lc_opt_table_entry_t list_sched_option_table[] = {
124 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
125 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled */
} sched_irn_t;
138 * Scheduling environment for the whole graph.
140 typedef struct _sched_env_t {
141 sched_irn_t *sched_info; /**< scheduling info per node */
142 const list_sched_selector_t *selector; /**< The node selector. */
143 const arch_env_t *arch_env; /**< The architecture environment. */
144 const ir_graph *irg; /**< The graph to schedule. */
145 void *selector_env; /**< A pointer to give to the selector. */
149 * Environment for a block scheduler.
151 typedef struct _block_sched_env_t {
152 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
153 ir_nodeset_t cands; /**< the set of candidates */
154 ir_node *block; /**< the current block */
155 sched_env_t *sched_env; /**< the scheduler environment */
156 ir_nodeset_t live; /**< simple liveness during scheduling */
157 const list_sched_selector_t *selector;
158 void *selector_block_env;
162 * Returns non-zero if a node must be placed in the schedule.
164 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
168 /* if there are no uses, don't schedule */
169 if (get_irn_n_edges(irn) < 1)
172 /* else ask the scheduler */
173 if (sel->to_appear_in_schedule)
174 res = sel->to_appear_in_schedule(block_env, irn);
176 return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
180 * Returns non-zero if the node is already scheduled
182 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
184 int idx = get_irn_idx(n);
186 assert(idx < ARR_LEN(env->sched_info));
187 return env->sched_info[idx].already_sched;
191 * Mark a node as already scheduled
193 static INLINE void set_already_scheduled(block_sched_env_t *env, ir_node *n)
195 int idx = get_irn_idx(n);
197 assert(idx < ARR_LEN(env->sched_info));
198 env->sched_info[idx].already_sched = 1;
201 static void add_to_sched(block_sched_env_t *env, ir_node *irn);
204 * Try to put a node in the ready set.
205 * @param env The block scheduler environment.
206 * @param pred The previous scheduled node.
207 * @param irn The node to make ready.
208 * @return 1, if the node could be made ready, 0 else.
210 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
214 /* Blocks cannot be scheduled. */
215 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
219 * Check, if the given ir node is in a different block as the
220 * currently scheduled one. If that is so, don't make the node ready.
222 if (env->block != get_nodes_block(irn))
225 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
226 ir_node *op = get_irn_in_or_dep(irn, i);
228 /* if irn is an End we have keep-alives and op might be a block, skip that */
230 assert(get_irn_op(irn) == op_End);
234 /* If the operand is local to the scheduled block and not yet
235 * scheduled, this nodes cannot be made ready, so exit. */
236 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
240 if (! must_appear_in_schedule(env->selector, env, irn)) {
241 add_to_sched(env, irn);
242 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
244 ir_nodeset_insert(&env->cands, irn);
246 /* Notify selector about the ready node. */
247 if (env->selector->node_ready)
248 env->selector->node_ready(env->selector_block_env, irn, pred);
250 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
257 * Try, to make all users of a node ready.
258 * In fact, a usage node can only be made ready, if all its operands
259 * have already been scheduled yet. This is checked by make_ready().
260 * @param env The block schedule environment.
261 * @param irn The node, which usages (successors) are to be made ready.
263 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
265 const ir_edge_t *edge;
267 /* make all data users ready */
268 foreach_out_edge(irn, edge) {
269 ir_node *user = get_edge_src_irn(edge);
272 make_ready(env, irn, user);
275 /* and the dependent nodes as well */
276 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
277 ir_node *user = get_edge_src_irn(edge);
280 make_ready(env, irn, user);
285 * Returns the number of not yet schedules users.
287 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
288 int idx = get_irn_idx(n);
290 assert(idx < ARR_LEN(env->sched_info));
291 return env->sched_info[idx].num_not_sched_user;
295 * Sets the number of not yet schedules users.
297 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
298 int idx = get_irn_idx(n);
300 assert(idx < ARR_LEN(env->sched_info));
301 env->sched_info[idx].num_not_sched_user = num;
305 * Add @p num to the number of not yet schedules users and returns the result.
307 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
308 int idx = get_irn_idx(n);
310 assert(idx < ARR_LEN(env->sched_info));
311 env->sched_info[idx].num_not_sched_user += num;
312 return env->sched_info[idx].num_not_sched_user;
316 * Returns the number of users of a node having mode datab.
318 static int get_num_successors(ir_node *irn) {
320 const ir_edge_t *edge;
322 if (get_irn_mode(irn) == mode_T) {
323 /* for mode_T nodes: count the users of all Projs */
324 foreach_out_edge(irn, edge) {
325 ir_node *proj = get_edge_src_irn(edge);
326 ir_mode *mode = get_irn_mode(proj);
328 if (mode == mode_T) {
329 sum += get_num_successors(proj);
330 } else if (mode_is_datab(mode)) {
331 sum += get_irn_n_edges(proj);
336 /* do not count keep-alive edges */
337 foreach_out_edge(irn, edge) {
338 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
347 * Adds irn to @p live, updates all inputs that this user is scheduled
348 * and counts all of its non scheduled users.
350 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
357 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
358 ir_node *in = get_irn_in_or_dep(irn, i);
360 /* if in is a proj: update predecessor */
363 /* if in is still in the live set: reduce number of users by one */
364 if (ir_nodeset_contains(&env->live, in)) {
365 if (add_irn_not_sched_user(env, in, -1) <= 0)
366 ir_nodeset_remove(&env->live, in);
371 get_num_successors returns the number of all users. This includes
372 users in different blocks as well. As the each block is scheduled separately
373 the liveness info of those users will not be updated and so these
374 users will keep up the register pressure as it is desired.
376 i = get_num_successors(irn);
378 set_irn_not_sched_user(env, irn, i);
379 ir_nodeset_insert(&env->live, irn);
384 * Append an instruction to a schedule.
385 * @param env The block scheduling environment.
386 * @param irn The node to add to the schedule.
387 * @return The given node.
389 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
391 /* If the node consumes/produces data, it is appended to the schedule
392 * list, otherwise, it is not put into the list */
393 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
394 update_sched_liveness(env, irn);
395 sched_add_before(env->block, irn);
397 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
399 /* Remove the node from the ready set */
400 ir_nodeset_remove(&env->cands, irn);
403 /* notify the selector about the finally selected node. */
404 if (env->selector->node_selected)
405 env->selector->node_selected(env->selector_block_env, irn);
407 /* Insert the node in the set of all available scheduled nodes. */
408 set_already_scheduled(env, irn);
410 make_users_ready(env, irn);
#ifdef SCHEDULE_PROJS
/**
 * Add the proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-moded irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-moded irn and
 * one of its projs. This should render a realistic image of a
 * tuple-moded irn, which in fact models a node which defines multiple
 * values.
 *
 * @param env The block scheduling environment.
 * @param irn The tuple-moded irn.
 *
 * NOTE(review): the else branch of the mode_T check and the closing #endif
 * were lost in this dump and restored — verify against repository history.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	/* non-proj nodes can have dependency edges to tuple nodes. */
	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *out = get_edge_src_irn(edge);
		make_ready(env, irn, out);
	}

	/* schedule the normal projs */
	foreach_out_edge(irn, edge) {
		ir_node *out = get_edge_src_irn(edge);

		assert(is_Proj(out) && "successor of a modeT node must be a proj");

		if (get_irn_mode(out) == mode_T) {
			/* nested tuples: recurse */
			add_tuple_projs(env, out);
		} else {
			add_to_sched(env, out);
		}
	}
}
#endif /* SCHEDULE_PROJS */
456 * Perform list scheduling on a block.
458 * Note, that the caller must compute a linked list of nodes in the block
459 * using the link field before calling this function.
461 * Also the outs must have been computed.
463 * @param block The block node.
464 * @param env Scheduling environment.
466 static void list_sched_block(ir_node *block, void *env_ptr)
468 sched_env_t *env = env_ptr;
469 const list_sched_selector_t *selector = env->selector;
470 ir_node *start_node = get_irg_start(get_irn_irg(block));
472 block_sched_env_t be;
473 const ir_edge_t *edge;
477 /* Initialize the block's list head that will hold the schedule. */
478 sched_init_block(block);
480 /* Initialize the block scheduling environment */
481 be.sched_info = env->sched_info;
483 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
484 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
485 be.selector = selector;
488 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
490 if (selector->init_block)
491 be.selector_block_env = selector->init_block(env->selector_env, block);
493 /* Then one can add all nodes are ready to the set. */
494 foreach_out_edge(block, edge) {
495 ir_node *irn = get_edge_src_irn(edge);
497 /* Skip the end node because of keepalive edges. */
498 if (get_irn_opcode(irn) == iro_End)
501 if (get_irn_n_edges(irn) == 0)
506 Phi functions are scheduled immediately, since they only
507 transfer data flow from the predecessors to this block.
509 add_to_sched(&be, irn);
511 else if (irn == start_node) {
512 /* The start block will be scheduled as the first node */
513 add_to_sched(&be, irn);
514 #ifdef SCHEDULE_PROJS
515 add_tuple_projs(&be, irn);
519 /* Other nodes must have all operands in other blocks to be made
523 /* Check, if the operands of a node are not local to this block */
524 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
525 ir_node *operand = get_irn_in_or_dep(irn, j);
527 if (get_nodes_block(operand) == block) {
531 /* live in values increase register pressure */
532 ir_nodeset_insert(&be.live, operand);
536 /* Make the node ready, if all operands live in a foreign block */
538 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
539 make_ready(&be, NULL, irn);
544 /* Iterate over all remaining nodes */
545 while (ir_nodeset_size(&be.cands) > 0) {
546 ir_nodeset_iterator_t iter;
547 /* collect statistics about amount of ready nodes */
548 be_do_stat_sched_ready(block, &be.cands);
550 /* Keeps must be scheduled immediately */
551 foreach_ir_nodeset(&be.cands, irn, iter) {
552 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
558 /* Keeps must be immediately scheduled */
559 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
562 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
564 /* Add the node to the schedule. */
565 add_to_sched(&be, irn);
567 #ifdef SCHEDULE_PROJS
568 if (get_irn_mode(irn) == mode_T)
569 add_tuple_projs(&be, irn);
572 /* remove the scheduled node from the ready list. */
573 ir_nodeset_remove(&be.cands, irn);
576 if (selector->finish_block)
577 selector->finish_block(be.selector_block_env);
579 ir_nodeset_destroy(&be.cands);
580 ir_nodeset_destroy(&be.live);
583 /* List schedule a graph. */
584 void list_sched(be_irg_t *birg, be_options_t *be_opts)
586 const arch_env_t *arch_env = birg->main_env->arch_env;
587 ir_graph *irg = birg->irg;
591 mris_env_t *mris = NULL;
592 list_sched_selector_t sel;
596 /* Select a scheduler based on backend options */
597 switch (list_sched_options.select) {
598 case BE_SCHED_SELECT_TRIVIAL:
599 memcpy(&sel, trivial_selector, sizeof(sel));
601 case BE_SCHED_SELECT_RANDOM:
602 memcpy(&sel, random_selector, sizeof(sel));
604 case BE_SCHED_SELECT_REGPRESS:
605 memcpy(&sel, reg_pressure_selector, sizeof(sel));
607 case BE_SCHED_SELECT_MUCHNIK:
608 memcpy(&sel, muchnik_selector, sizeof(sel));
610 case BE_SCHED_SELECT_HEUR:
611 memcpy(&sel, heuristic_selector, sizeof(sel));
613 case BE_SCHED_SELECT_HMUCHNIK:
615 memcpy(&sel, trivial_selector, sizeof(sel));
619 /* Matze: This is very slow, we should avoid it to improve backend speed,
620 * we just have to make sure that we have no dangling out-edges at this
624 /* Assure, that we have no dangling out-edges to deleted stuff */
625 edges_deactivate(birg->irg);
626 edges_activate(birg->irg);
629 switch (list_sched_options.prep) {
630 case BE_SCHED_PREP_MRIS:
631 mris = be_sched_mris_preprocess(birg);
633 case BE_SCHED_PREP_RSS:
634 rss_schedule_preparation(birg);
640 num_nodes = get_irg_last_idx(irg);
642 /* initialize environment for list scheduler */
643 memset(&env, 0, sizeof(env));
644 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
645 env.arch_env = arch_env;
647 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
649 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
651 if (env.selector->init_graph)
652 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
654 /* Schedule each single block. */
655 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
657 if (env.selector->finish_graph)
658 env.selector->finish_graph(env.selector_env);
660 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
661 be_sched_mris_free(mris);
663 DEL_ARR_F(env.sched_info);
666 /* List schedule a block. */
667 void list_sched_single_block(const be_irg_t *birg, ir_node *block,
668 be_options_t *be_opts)
670 const arch_env_t *arch_env = birg->main_env->arch_env;
671 ir_graph *irg = birg->irg;
675 list_sched_selector_t sel;
679 /* Select a scheduler based on backend options */
680 switch (list_sched_options.select) {
681 case BE_SCHED_SELECT_TRIVIAL:
682 memcpy(&sel, trivial_selector, sizeof(sel));
684 case BE_SCHED_SELECT_RANDOM:
685 memcpy(&sel, random_selector, sizeof(sel));
687 case BE_SCHED_SELECT_REGPRESS:
688 memcpy(&sel, reg_pressure_selector, sizeof(sel));
690 case BE_SCHED_SELECT_MUCHNIK:
691 memcpy(&sel, muchnik_selector, sizeof(sel));
693 case BE_SCHED_SELECT_HEUR:
694 memcpy(&sel, heuristic_selector, sizeof(sel));
696 case BE_SCHED_SELECT_HMUCHNIK:
698 memcpy(&sel, trivial_selector, sizeof(sel));
701 /* Assure, that the out edges are computed */
702 edges_deactivate(birg->irg);
703 edges_activate(birg->irg);
705 num_nodes = get_irg_last_idx(irg);
707 /* initialize environment for list scheduler */
708 memset(&env, 0, sizeof(env));
709 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
710 env.arch_env = arch_env;
712 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
714 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
716 if (env.selector->init_graph)
717 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
719 /* Schedule block. */
720 list_sched_block(block, &env);
722 if (env.selector->finish_graph)
723 env.selector->finish_graph(env.selector_env);
725 DEL_ARR_F(env.sched_info);
729 * Register list scheduler options.
731 void be_init_listsched(void) {
732 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
733 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
735 lc_opt_add_table(sched_grp, list_sched_option_table);
737 FIRM_DBG_REGISTER(dbg, "firm.be.sched");
740 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);