/*
 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Primitive list scheduling with different node selectors.
 * @author  Sebastian Hack
 */
#include "iredges_t.h"
#include "irprintf_t.h"

#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
#include "beschedrss.h"

#include <libcore/lc_opts.h>
#include <libcore/lc_opts_enum.h>
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
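/**
 * Backend-created nodes (Keep, CopyKeep, RegParams) that have to show up in
 * the schedule even if a selector would not pick them on its own; used as
 * part of the default test in must_appear_in_schedule() below.
 */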
#define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
enum {
	BE_SCHED_SELECT_TRIVIAL  = 0,
	BE_SCHED_SELECT_REGPRESS = 1,
	BE_SCHED_SELECT_MUCHNIK  = 2,
	BE_SCHED_SELECT_HEUR     = 3,
	BE_SCHED_SELECT_HMUCHNIK = 4,
	BE_SCHED_SELECT_RANDOM   = 5
};
enum {
	BE_SCHED_PREP_NONE = 0,
	BE_SCHED_PREP_MRIS = 2,
	BE_SCHED_PREP_RSS  = 3
};
typedef struct _list_sched_options_t {
	int select;  /**< the node selector */
	int prep;    /**< schedule preparation */
} list_sched_options_t;
static list_sched_options_t list_sched_options = {
	BE_SCHED_SELECT_HEUR, /* Mueller heuristic selector */
	BE_SCHED_PREP_NONE,   /* no scheduling preparation */
};
/* schedule selector options. */
static const lc_opt_enum_int_items_t sched_select_items[] = {
	{ "trivial",  BE_SCHED_SELECT_TRIVIAL },
	{ "random",   BE_SCHED_SELECT_RANDOM },
	{ "regpress", BE_SCHED_SELECT_REGPRESS },
	{ "muchnik",  BE_SCHED_SELECT_MUCHNIK },
	{ "heur",     BE_SCHED_SELECT_HEUR },
	{ "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
	{ NULL,       0 }
};
/* schedule preparation options. */
static const lc_opt_enum_int_items_t sched_prep_items[] = {
	{ "none", BE_SCHED_PREP_NONE },
	{ "mris", BE_SCHED_PREP_MRIS },
	{ "rss",  BE_SCHED_PREP_RSS },
	{ NULL,   0 }
};
static lc_opt_enum_int_var_t sched_select_var = {
	&list_sched_options.select, sched_select_items
};
static lc_opt_enum_int_var_t sched_prep_var = {
	&list_sched_options.prep, sched_prep_items
};
static const lc_opt_table_entry_t list_sched_option_table[] = {
	LC_OPT_ENT_ENUM_PTR("prep",   "schedule preparation", &sched_prep_var),
	LC_OPT_ENT_ENUM_PTR("select", "node selector",        &sched_select_var),
	LC_OPT_LAST
};
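/*
 * Both entries are registered under the "listsched" group of the backend
 * options in be_init_listsched() below, i.e. as be.listsched.select and
 * be.listsched.prep (the exact command-line spelling depends on the option
 * front end).
 */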
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled */
} sched_irn_t;
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
	sched_irn_t *sched_info;               /**< scheduling info per node */
	const list_sched_selector_t *selector; /**< The node selector. */
	const arch_env_t *arch_env;            /**< The architecture environment. */
	const ir_graph *irg;                   /**< The graph to schedule. */
	void *selector_env;                    /**< A pointer to give to the selector. */
} sched_env_t;
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
	sched_irn_t *sched_info;               /**< scheduling info per node, copied from the global scheduler object */
	ir_nodeset_t cands;                    /**< the set of candidates */
	ir_node *block;                        /**< the current block */
	sched_env_t *sched_env;                /**< the scheduler environment */
	ir_nodeset_t live;                     /**< simple liveness during scheduling */
	const list_sched_selector_t *selector; /**< the node selector */
	void *selector_block_env;              /**< the per-block environment of the selector */
} block_sched_env_t;
/**
 * Returns non-zero if the node is already scheduled.
 */
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].already_sched;
}
/**
 * Mark a node as already scheduled.
 */
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].already_sched = 1;
}
/**
 * Try to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1 if the node could be made ready, 0 otherwise.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
	int i, n;

	/* Blocks cannot be scheduled. */
	if (is_Block(irn) || get_irn_n_edges(irn) == 0)
		return 0;

	/*
	 * Check if the given ir node is in a different block than the
	 * currently scheduled one. If so, don't make the node ready.
	 */
	if (env->block != get_nodes_block(irn))
		return 0;

	for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
		ir_node *op = get_irn_in_or_dep(irn, i);

		/* if irn is an End we have keep-alives and op might be a block, skip that */
		if (is_Block(op)) {
			assert(get_irn_op(irn) == op_End);
			continue;
		}

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
			return 0;
	}

	ir_nodeset_insert(&env->cands, irn);

	/* Notify the selector about the ready node. */
	if (env->selector->node_ready)
		env->selector->node_ready(env->selector_block_env, irn, pred);

	DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
	return 1;
}
/**
 * Try to make all users of a node ready.
 * In fact, a user can only be made ready if all of its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);

		make_ready(env, irn, user);
	}

	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *user = get_edge_src_irn(edge);

		make_ready(env, irn, user);
	}
}
/**
 * Returns the number of not yet scheduled users.
 */
static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_not_sched_user;
}
/**
 * Sets the number of not yet scheduled users.
 */
static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_not_sched_user = num;
}
/**
 * Adds @p num to the number of not yet scheduled users and returns the result.
 */
static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_not_sched_user += num;
	return env->sched_info[idx].num_not_sched_user;
}
/**
 * Returns the number of users of a node having mode datab.
 */
static int get_num_successors(ir_node *irn) {
	int sum = 0;
	const ir_edge_t *edge;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);
			if (mode == mode_T)
				sum += get_num_successors(proj);
			else if (mode_is_datab(mode))
				sum += get_irn_n_edges(proj);
		}
	}
	else {
		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
				sum++;
		}
	}

	return sum;
}
/**
 * Adds irn to @p live, notifies all of its inputs that one of their users
 * has been scheduled, and counts all of irn's not yet scheduled users.
 */
static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
	int i;

	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node *in = get_irn_in_or_dep(irn, i);

		/* if in is a proj: update the predecessor instead */
		if (is_Proj(in))
			in = get_Proj_pred(in);

		/* if in is still in the live set: reduce its number of users by one */
		if (ir_nodeset_contains(&env->live, in)) {
			if (add_irn_not_sched_user(env, in, -1) <= 0)
				ir_nodeset_remove(&env->live, in);
		}
	}

	/*
	 * get_num_successors returns the number of all users. This includes
	 * users in other blocks as well. As each block is scheduled separately,
	 * the liveness info of those users will not be updated, so these
	 * users keep up the register pressure as desired.
	 */
	i = get_num_successors(irn);
	set_irn_not_sched_user(env, irn, i);
	ir_nodeset_insert(&env->live, irn);
}
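/**
 * Checks whether a node must show up in the final schedule. A selector may
 * decide this itself via its to_appear_in_schedule() callback; a negative
 * result there means "no opinion", in which case the generic
 * to_appear_in_schedule() test (together with BE_SCHED_NODE(), and never for
 * Unknown nodes) decides.
 */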
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
	int res = -1;

	/* nodes without any users need not be scheduled */
	if (get_irn_n_edges(irn) < 1)
		return 0;

	/* ask the selector first */
	if (sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
}
/**
 * Appends an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return    The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise it is not put into the list. */
	if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		update_sched_liveness(env, irn);
		sched_add_before(env->block, irn);

		DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
	}

	/* notify the selector about the finally selected node. */
	if (env->selector->node_selected)
		env->selector->node_selected(env->selector_block_env, irn);

	/* Insert the node into the set of all already scheduled nodes. */
	mark_already_scheduled(env, irn);

	/* Remove the node from the ready set. */
	ir_nodeset_remove(&env->cands, irn);

	return irn;
}
/**
 * Add the proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-moded irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-moded irn and
 * one of its projs. This should render a realistic image of a
 * tuple-moded irn, which in fact models a node which defines multiple
 * values.
 *
 * @param env The block scheduling environment.
 * @param irn The tuple-moded irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	/* non-proj nodes can have dependency edges to tuple nodes. */
	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *out = get_edge_src_irn(edge);
		make_ready(env, irn, out);
	}

	/* schedule the normal projs */
	foreach_out_edge(irn, edge) {
		ir_node *out = get_edge_src_irn(edge);

		assert(is_Proj(out) && "successor of a mode_T node must be a Proj");

		if (get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);
		else {
			add_to_sched(env, out);
			make_users_ready(env, out);
		}
	}
}
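/*
 * Note on the selector protocol as driven by list_sched_block() below:
 * init_block() is called once per block, node_ready() whenever a node enters
 * the candidate set, select() to pick the next node from the candidates,
 * node_selected() after a node has been scheduled, and finish_block() when
 * the block is done. All of these callbacks except select() are optional.
 */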
/**
 * Perform list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
	sched_env_t *env                      = env_ptr;
	const list_sched_selector_t *selector = env->selector;
	ir_node *start_node                   = get_irg_start(get_irn_irg(block));

	block_sched_env_t be;
	const ir_edge_t *edge;
	ir_node *irn;
	int j, m;

	/* Initialize the block's list head that will hold the schedule. */
	sched_init_block(block);

	/* Initialize the block scheduling environment */
	be.sched_info = env->sched_info;
	be.block      = block;
	ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
	ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
	be.selector  = selector;
	be.sched_env = env;

	DBG((dbg, LEVEL_1, "scheduling %+F\n", block));

	if (selector->init_block)
		be.selector_block_env = selector->init_block(env->selector_env, block);
	/* Then one can add all nodes that are ready to the set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the End node because of keep-alive edges. */
		if (get_irn_opcode(irn) == iro_End)
			continue;
		if (get_irn_n_edges(irn) == 0)
			continue;

		if (is_Phi(irn)) {
			/* Phi functions are scheduled immediately, since they only
			 * transfer data flow from the predecessors to this block. */
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);
		}
		else if (irn == start_node) {
			/* The Start node will be scheduled as the first node. */
			add_to_sched(&be, irn);
			add_tuple_projs(&be, irn);
		}
		else {
			/* Other nodes must have all operands in other blocks to be made
			 * ready. */
			int ready = 1;

			/* Check if the operands of the node are not local to this block. */
			for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
				ir_node *operand = get_irn_in_or_dep(irn, j);

				if (get_nodes_block(operand) == block) {
					ready = 0;
					break;
				}
				else {
					/* live-in values increase register pressure */
					update_sched_liveness(&be, operand);
				}
			}

			/* Make the node ready if all operands live in a foreign block. */
			if (ready) {
				DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, NULL, irn);
			}
		}
	}
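	/*
	 * Main list scheduling loop: as long as the candidate set is not empty,
	 * pick the next node, append it to the schedule, and make its users
	 * ready. Keep, CopyKeep and Sync candidates are scheduled directly
	 * instead of being handed to the selector.
	 */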
	/* Iterate over all remaining nodes */
	while (ir_nodeset_size(&be.cands) > 0) {
		ir_nodeset_iterator_t iter;

		/* collect statistics about the amount of ready nodes */
		be_do_stat_sched_ready(block, &be.cands);

		/* Keeps must be scheduled immediately */
		foreach_ir_nodeset(&be.cands, irn, iter) {
			if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
				break;
			}
		}

		if (! irn) {
			/* no Keep or CopyKeep found, let the selector pick the next node */
			irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
		}

		DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		if (get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);

		make_users_ready(&be, irn);

		/* remove the scheduled node from the ready list. */
		ir_nodeset_remove(&be.cands, irn);
	}
	if (selector->finish_block)
		selector->finish_block(be.selector_block_env);

	ir_nodeset_destroy(&be.cands);
	ir_nodeset_destroy(&be.live);
}
/* List schedule a graph. */
void list_sched(const be_irg_t *birg, be_options_t *be_opts)
{
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;
	int num_nodes;
	sched_env_t env;
	mris_env_t *mris = NULL;
	list_sched_selector_t sel;

	/* Select a scheduler based on backend options */
	switch (list_sched_options.select) {
		case BE_SCHED_SELECT_TRIVIAL:
			memcpy(&sel, trivial_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_RANDOM:
			memcpy(&sel, random_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_REGPRESS:
			memcpy(&sel, reg_pressure_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_MUCHNIK:
			memcpy(&sel, muchnik_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HEUR:
			memcpy(&sel, heuristic_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HMUCHNIK:
		default:
			memcpy(&sel, trivial_selector, sizeof(sel));
	}

	/* Matze: This is very slow, we should avoid it to improve backend speed,
	 * we just have to make sure that we have no dangling out-edges at this
	 * point...
	 */

	/* Assure that we have no dangling out-edges to deleted stuff */
	edges_deactivate(birg->irg);
	edges_activate(birg->irg);

	switch (list_sched_options.prep) {
		case BE_SCHED_PREP_MRIS:
			mris = be_sched_mris_preprocess(birg);
			break;
		case BE_SCHED_PREP_RSS:
			rss_schedule_preparation(birg);
			break;
		default:
			break;
	}

	num_nodes = get_irg_last_idx(irg);

	/* initialize environment for list scheduler */
	memset(&env, 0, sizeof(env));
	env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
	env.arch_env   = arch_env;
	env.irg        = irg;
	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
		be_sched_mris_free(mris);

	DEL_ARR_F(env.sched_info);
}
/* List schedule a block. */
void list_sched_single_block(const be_irg_t *birg, ir_node *block, be_options_t *be_opts)
{
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;
	int num_nodes;
	sched_env_t env;
	list_sched_selector_t sel;

	/* Select a scheduler based on backend options */
	switch (list_sched_options.select) {
		case BE_SCHED_SELECT_TRIVIAL:
			memcpy(&sel, trivial_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_RANDOM:
			memcpy(&sel, random_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_REGPRESS:
			memcpy(&sel, reg_pressure_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_MUCHNIK:
			memcpy(&sel, muchnik_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HEUR:
			memcpy(&sel, heuristic_selector, sizeof(sel));
			break;
		case BE_SCHED_SELECT_HMUCHNIK:
		default:
			memcpy(&sel, trivial_selector, sizeof(sel));
	}

	/* Assure that the out edges are computed */
	edges_deactivate(birg->irg);
	edges_activate(birg->irg);

	num_nodes = get_irg_last_idx(irg);

	/* initialize environment for list scheduler */
	memset(&env, 0, sizeof(env));
	env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
	env.arch_env   = arch_env;
	env.irg        = irg;
	env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule the block. */
	list_sched_block(block, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	DEL_ARR_F(env.sched_info);
}
/**
 * Register list scheduler options.
 */
void be_init_listsched(void) {
	lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");

	lc_opt_add_table(sched_grp, list_sched_option_table);

	FIRM_DBG_REGISTER(dbg, "firm.be.sched");
}
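/* Run be_init_listsched() automatically when the backend modules are
 * initialized, so the options above and the debug module are always set up. */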
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);