/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Primitive list scheduling with different node selectors.
 * @author  Sebastian Hack
 */
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>

#include "iredges_t.h"
#include "irprintf_t.h"

#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
#include "beschedrss.h"
67 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
69 #define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
/** All available node selectors. */
enum {
	BE_SCHED_SELECT_TRIVIAL,  /**< trivial selector: take the first ready node */
	BE_SCHED_SELECT_REGPRESS, /**< register-pressure-aware selector */
	BE_SCHED_SELECT_MUCHNIK,  /**< Muchnik selector */
	BE_SCHED_SELECT_HEUR,     /**< Mueller heuristic selector; NOTE(review): this
	                               enumerator's line was lost in the extract but it
	                               is referenced below — restored here */
	BE_SCHED_SELECT_HMUCHNIK, /**< "hard" Muchnik selector */
	BE_SCHED_SELECT_RANDOM,   /**< random selector */
	BE_SCHED_SELECT_NORMAL    /**< "normal" selector */
};
/** All available schedule preparation algorithms. */
enum {
	BE_SCHED_PREP_NONE = 0, /**< no scheduling preparation */
	BE_SCHED_PREP_MRIS = 2, /**< MRIS preparation */
	BE_SCHED_PREP_RSS  = 3  /**< RSS preparation; NOTE(review): enumerator restored —
	                             its line was lost in the extract but it is referenced
	                             below; value 3 inferred from the surrounding sequence,
	                             TODO confirm against upstream */
};
/** Command-line configurable options of the list scheduler. */
typedef struct _list_sched_options_t {
	int select; /**< the node selector (one of BE_SCHED_SELECT_*) */
	int prep;   /**< schedule preparation (one of BE_SCHED_PREP_*) */
} list_sched_options_t;
92 static list_sched_options_t list_sched_options = {
93 BE_SCHED_SELECT_HEUR, /* mueller heuristic selector */
94 BE_SCHED_PREP_NONE, /* no scheduling preparation */
97 /* schedule selector options. */
98 static const lc_opt_enum_int_items_t sched_select_items[] = {
99 { "trivial", BE_SCHED_SELECT_TRIVIAL },
100 { "random", BE_SCHED_SELECT_RANDOM },
101 { "regpress", BE_SCHED_SELECT_REGPRESS },
102 { "normal", BE_SCHED_SELECT_NORMAL },
103 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
104 { "heur", BE_SCHED_SELECT_HEUR },
105 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
109 /* schedule preparation options. */
110 static const lc_opt_enum_int_items_t sched_prep_items[] = {
111 { "none", BE_SCHED_PREP_NONE },
112 { "mris", BE_SCHED_PREP_MRIS },
113 { "rss", BE_SCHED_PREP_RSS },
117 static lc_opt_enum_int_var_t sched_select_var = {
118 &list_sched_options.select, sched_select_items
121 static lc_opt_enum_int_var_t sched_prep_var = {
122 &list_sched_options.prep, sched_prep_items
125 static const lc_opt_table_entry_t list_sched_option_table[] = {
126 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
127 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled */
} sched_irn_t;
140 * Scheduling environment for the whole graph.
142 typedef struct _sched_env_t {
143 sched_irn_t *sched_info; /**< scheduling info per node */
144 const list_sched_selector_t *selector; /**< The node selector. */
145 const arch_env_t *arch_env; /**< The architecture environment. */
146 const ir_graph *irg; /**< The graph to schedule. */
147 void *selector_env; /**< A pointer to give to the selector. */
151 * Environment for a block scheduler.
153 typedef struct _block_sched_env_t {
154 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
155 ir_nodeset_t cands; /**< the set of candidates */
156 ir_node *block; /**< the current block */
157 sched_env_t *sched_env; /**< the scheduler environment */
158 ir_nodeset_t live; /**< simple liveness during scheduling */
159 const list_sched_selector_t *selector;
160 void *selector_block_env;
164 * Returns non-zero if a node must be placed in the schedule.
166 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
170 /* if there are no uses, don't schedule */
171 if (get_irn_n_edges(irn) < 1)
174 /* else ask the scheduler */
175 if (sel->to_appear_in_schedule)
176 res = sel->to_appear_in_schedule(block_env, irn);
178 return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
182 * Returns non-zero if the node is already scheduled
184 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
186 int idx = get_irn_idx(n);
188 assert(idx < ARR_LEN(env->sched_info));
189 return env->sched_info[idx].already_sched;
193 * Mark a node as already scheduled
195 static INLINE void set_already_scheduled(block_sched_env_t *env, ir_node *n)
197 int idx = get_irn_idx(n);
199 assert(idx < ARR_LEN(env->sched_info));
200 env->sched_info[idx].already_sched = 1;
203 static void add_to_sched(block_sched_env_t *env, ir_node *irn);
206 * Try to put a node in the ready set.
207 * @param env The block scheduler environment.
208 * @param pred The previous scheduled node.
209 * @param irn The node to make ready.
210 * @return 1, if the node could be made ready, 0 else.
212 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
216 /* Blocks cannot be scheduled. */
217 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
221 * Check, if the given ir node is in a different block as the
222 * currently scheduled one. If that is so, don't make the node ready.
224 if (env->block != get_nodes_block(irn))
227 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
228 ir_node *op = get_irn_in_or_dep(irn, i);
230 /* if irn is an End we have keep-alives and op might be a block, skip that */
232 assert(get_irn_op(irn) == op_End);
236 /* If the operand is local to the scheduled block and not yet
237 * scheduled, this nodes cannot be made ready, so exit. */
238 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
242 if (! must_appear_in_schedule(env->selector, env, irn)) {
243 add_to_sched(env, irn);
244 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
246 ir_nodeset_insert(&env->cands, irn);
248 /* Notify selector about the ready node. */
249 if (env->selector->node_ready)
250 env->selector->node_ready(env->selector_block_env, irn, pred);
252 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
259 * Try, to make all users of a node ready.
260 * In fact, a usage node can only be made ready, if all its operands
261 * have already been scheduled yet. This is checked by make_ready().
262 * @param env The block schedule environment.
263 * @param irn The node, which usages (successors) are to be made ready.
265 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
267 const ir_edge_t *edge;
269 /* make all data users ready */
270 foreach_out_edge(irn, edge) {
271 ir_node *user = get_edge_src_irn(edge);
274 make_ready(env, irn, user);
277 /* and the dependent nodes as well */
278 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
279 ir_node *user = get_edge_src_irn(edge);
282 make_ready(env, irn, user);
287 * Returns the number of not yet schedules users.
289 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
290 int idx = get_irn_idx(n);
292 assert(idx < ARR_LEN(env->sched_info));
293 return env->sched_info[idx].num_not_sched_user;
297 * Sets the number of not yet schedules users.
299 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
300 int idx = get_irn_idx(n);
302 assert(idx < ARR_LEN(env->sched_info));
303 env->sched_info[idx].num_not_sched_user = num;
307 * Add @p num to the number of not yet schedules users and returns the result.
309 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
310 int idx = get_irn_idx(n);
312 assert(idx < ARR_LEN(env->sched_info));
313 env->sched_info[idx].num_not_sched_user += num;
314 return env->sched_info[idx].num_not_sched_user;
318 * Returns the number of users of a node having mode datab.
320 static int get_num_successors(ir_node *irn) {
322 const ir_edge_t *edge;
324 if (get_irn_mode(irn) == mode_T) {
325 /* for mode_T nodes: count the users of all Projs */
326 foreach_out_edge(irn, edge) {
327 ir_node *proj = get_edge_src_irn(edge);
328 ir_mode *mode = get_irn_mode(proj);
330 if (mode == mode_T) {
331 sum += get_num_successors(proj);
332 } else if (mode_is_datab(mode)) {
333 sum += get_irn_n_edges(proj);
338 /* do not count keep-alive edges */
339 foreach_out_edge(irn, edge) {
340 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
349 * Adds irn to @p live, updates all inputs that this user is scheduled
350 * and counts all of its non scheduled users.
352 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
359 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
360 ir_node *in = get_irn_in_or_dep(irn, i);
362 /* if in is a proj: update predecessor */
365 /* if in is still in the live set: reduce number of users by one */
366 if (ir_nodeset_contains(&env->live, in)) {
367 if (add_irn_not_sched_user(env, in, -1) <= 0)
368 ir_nodeset_remove(&env->live, in);
373 get_num_successors returns the number of all users. This includes
374 users in different blocks as well. As the each block is scheduled separately
375 the liveness info of those users will not be updated and so these
376 users will keep up the register pressure as it is desired.
378 i = get_num_successors(irn);
380 set_irn_not_sched_user(env, irn, i);
381 ir_nodeset_insert(&env->live, irn);
386 * Append an instruction to a schedule.
387 * @param env The block scheduling environment.
388 * @param irn The node to add to the schedule.
389 * @return The given node.
391 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
393 /* If the node consumes/produces data, it is appended to the schedule
394 * list, otherwise, it is not put into the list */
395 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
396 update_sched_liveness(env, irn);
397 sched_add_before(env->block, irn);
399 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
401 /* Remove the node from the ready set */
402 ir_nodeset_remove(&env->cands, irn);
405 /* notify the selector about the finally selected node. */
406 if (env->selector->node_selected)
407 env->selector->node_selected(env->selector_block_env, irn);
409 /* Insert the node in the set of all available scheduled nodes. */
410 set_already_scheduled(env, irn);
412 make_users_ready(env, irn);
416 * Perform list scheduling on a block.
418 * Note, that the caller must compute a linked list of nodes in the block
419 * using the link field before calling this function.
421 * Also the outs must have been computed.
423 * @param block The block node.
424 * @param env Scheduling environment.
426 static void list_sched_block(ir_node *block, void *env_ptr)
428 sched_env_t *env = env_ptr;
429 const list_sched_selector_t *selector = env->selector;
430 ir_node *start_node = get_irg_start(get_irn_irg(block));
432 block_sched_env_t be;
433 const ir_edge_t *edge;
437 /* Initialize the block's list head that will hold the schedule. */
438 sched_init_block(block);
440 /* Initialize the block scheduling environment */
441 be.sched_info = env->sched_info;
443 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
444 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
445 be.selector = selector;
448 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
450 if (selector->init_block)
451 be.selector_block_env = selector->init_block(env->selector_env, block);
453 /* Then one can add all nodes are ready to the set. */
454 foreach_out_edge(block, edge) {
455 ir_node *irn = get_edge_src_irn(edge);
456 ir_opcode code = get_irn_opcode(irn);
459 if (code == iro_End) {
460 /* Skip the end node because of keep-alive edges. */
462 } else if (code == iro_Block) {
463 /* A Block-Block edge. This should be the MacroBlock
464 * edge, ignore it. */
465 assert(get_Block_MacroBlock(irn) == block && "Block-Block edge found");
469 users = get_irn_n_edges(irn);
472 else if (users == 1) { /* ignore nodes that are only hold by the anchor */
473 const ir_edge_t *edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
474 ir_node *user = get_edge_src_irn(edge);
481 Phi functions are scheduled immediately, since they only
482 transfer data flow from the predecessors to this block.
484 add_to_sched(&be, irn);
486 else if (irn == start_node) {
487 /* The start block will be scheduled as the first node */
488 add_to_sched(&be, irn);
489 #ifdef SCHEDULE_PROJS
490 add_tuple_projs(&be, irn);
494 /* Other nodes must have all operands in other blocks to be made
498 /* Check, if the operands of a node are not local to this block */
499 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
500 ir_node *operand = get_irn_in_or_dep(irn, j);
502 if (get_nodes_block(operand) == block) {
506 /* live in values increase register pressure */
507 ir_nodeset_insert(&be.live, operand);
511 /* Make the node ready, if all operands live in a foreign block */
513 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
514 make_ready(&be, NULL, irn);
519 /* Iterate over all remaining nodes */
520 while (ir_nodeset_size(&be.cands) > 0) {
521 ir_nodeset_iterator_t iter;
522 /* collect statistics about amount of ready nodes */
523 be_do_stat_sched_ready(block, &be.cands);
525 /* Keeps must be scheduled immediately */
526 foreach_ir_nodeset(&be.cands, irn, iter) {
527 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
533 /* Keeps must be immediately scheduled */
534 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
537 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
539 /* Add the node to the schedule. */
540 add_to_sched(&be, irn);
542 /* remove the scheduled node from the ready list. */
543 ir_nodeset_remove(&be.cands, irn);
546 if (selector->finish_block)
547 selector->finish_block(be.selector_block_env);
549 ir_nodeset_destroy(&be.cands);
550 ir_nodeset_destroy(&be.live);
553 /* List schedule a graph. */
554 void list_sched(be_irg_t *birg, be_options_t *be_opts)
556 const arch_env_t *arch_env = birg->main_env->arch_env;
557 ir_graph *irg = birg->irg;
561 mris_env_t *mris = NULL;
562 list_sched_selector_t sel;
566 /* Select a scheduler based on backend options */
567 switch (list_sched_options.select) {
568 case BE_SCHED_SELECT_TRIVIAL: sel = trivial_selector; break;
569 case BE_SCHED_SELECT_RANDOM: sel = random_selector; break;
570 case BE_SCHED_SELECT_REGPRESS: sel = reg_pressure_selector; break;
571 case BE_SCHED_SELECT_MUCHNIK: sel = muchnik_selector; break;
572 case BE_SCHED_SELECT_HEUR: sel = heuristic_selector; break;
573 case BE_SCHED_SELECT_NORMAL: sel = normal_selector; break;
575 case BE_SCHED_SELECT_HMUCHNIK: sel = trivial_selector; break;
579 /* Matze: This is very slow, we should avoid it to improve backend speed,
580 * we just have to make sure that we have no dangling out-edges at this
584 /* Assure, that we have no dangling out-edges to deleted stuff */
585 edges_deactivate(birg->irg);
586 edges_activate(birg->irg);
589 switch (list_sched_options.prep) {
590 case BE_SCHED_PREP_MRIS:
591 mris = be_sched_mris_preprocess(birg);
593 case BE_SCHED_PREP_RSS:
594 rss_schedule_preparation(birg);
600 num_nodes = get_irg_last_idx(irg);
602 /* initialize environment for list scheduler */
603 memset(&env, 0, sizeof(env));
604 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
605 env.arch_env = arch_env;
607 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
609 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
611 if (env.selector->init_graph)
612 env.selector_env = env.selector->init_graph(env.selector, birg);
614 /* Schedule each single block. */
615 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
617 if (env.selector->finish_graph)
618 env.selector->finish_graph(env.selector_env);
620 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
621 be_sched_mris_free(mris);
623 DEL_ARR_F(env.sched_info);
626 /* List schedule a block. */
627 void list_sched_single_block(const be_irg_t *birg, ir_node *block,
628 be_options_t *be_opts)
630 const arch_env_t *arch_env = birg->main_env->arch_env;
631 ir_graph *irg = birg->irg;
635 list_sched_selector_t sel;
639 /* Select a scheduler based on backend options */
640 switch (list_sched_options.select) {
641 case BE_SCHED_SELECT_TRIVIAL: sel = trivial_selector; break;
642 case BE_SCHED_SELECT_RANDOM: sel = random_selector; break;
643 case BE_SCHED_SELECT_REGPRESS: sel = reg_pressure_selector; break;
644 case BE_SCHED_SELECT_MUCHNIK: sel = muchnik_selector; break;
645 case BE_SCHED_SELECT_HEUR: sel = heuristic_selector; break;
646 case BE_SCHED_SELECT_NORMAL: sel = normal_selector; break;
648 case BE_SCHED_SELECT_HMUCHNIK: sel = trivial_selector; break;
651 /* Assure, that the out edges are computed */
652 edges_deactivate(birg->irg);
653 edges_activate(birg->irg);
655 num_nodes = get_irg_last_idx(irg);
657 /* initialize environment for list scheduler */
658 memset(&env, 0, sizeof(env));
659 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
660 env.arch_env = arch_env;
662 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
664 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
666 if (env.selector->init_graph)
667 env.selector_env = env.selector->init_graph(env.selector, birg);
669 /* Schedule block. */
670 list_sched_block(block, &env);
672 if (env.selector->finish_graph)
673 env.selector->finish_graph(env.selector_env);
675 DEL_ARR_F(env.sched_info);
679 * Register list scheduler options.
681 void be_init_listsched(void) {
682 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
683 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
685 lc_opt_add_table(sched_grp, list_sched_option_table);
687 FIRM_DBG_REGISTER(dbg, "firm.be.sched");
690 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);