2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Primitive list scheduling with different node selectors.
23 * @author Sebastian Hack
41 #include "iredges_t.h"
46 #include "irprintf_t.h"
55 #include "belistsched.h"
56 #include "beschedmris.h"
57 #include "beschedrss.h"
63 #include "lc_opts_enum.h"
/* Debug module handle for this file; compiled in only for debug builds. */
65 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
/* Node-selector choices for the list scheduler.
 * NOTE(review): the enclosing enum header/footer and the
 * BE_SCHED_SELECT_HEUR enumerator (referenced by the option table and the
 * switch statements below) are not visible in this excerpt -- lines appear
 * to be missing from the file. */
68 BE_SCHED_SELECT_TRIVIAL,
69 BE_SCHED_SELECT_REGPRESS,
70 BE_SCHED_SELECT_MUCHNIK,
72 BE_SCHED_SELECT_HMUCHNIK,
73 BE_SCHED_SELECT_RANDOM,
74 BE_SCHED_SELECT_NORMAL,
/* Schedule-preparation choices.
 * NOTE(review): BE_SCHED_PREP_RSS (used below) is not visible here. */
78 BE_SCHED_PREP_NONE = 0,
79 BE_SCHED_PREP_MRIS = 2,
/* Command-line configurable options of the list scheduler. */
83 typedef struct list_sched_options_t {
84 int select; /**< the node selector */
85 int prep; /**< schedule preparation */
86 } list_sched_options_t;
/* Defaults: "normal" (Mueller) selector, no preparation pass. */
88 static list_sched_options_t list_sched_options = {
89 BE_SCHED_SELECT_NORMAL, /* mueller heuristic selector */
90 BE_SCHED_PREP_NONE, /* no scheduling preparation */
93 /* schedule selector options. */
/* Maps the user-visible option strings to the selector enum values. */
94 static const lc_opt_enum_int_items_t sched_select_items[] = {
95 { "trivial", BE_SCHED_SELECT_TRIVIAL },
96 { "random", BE_SCHED_SELECT_RANDOM },
97 { "regpress", BE_SCHED_SELECT_REGPRESS },
98 { "normal", BE_SCHED_SELECT_NORMAL },
99 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
100 { "heur", BE_SCHED_SELECT_HEUR },
101 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
105 /* schedule preparation options. */
106 static const lc_opt_enum_int_items_t sched_prep_items[] = {
107 { "none", BE_SCHED_PREP_NONE },
108 { "mris", BE_SCHED_PREP_MRIS },
109 { "rss", BE_SCHED_PREP_RSS },
/* Binds the "select" option table to list_sched_options.select. */
113 static lc_opt_enum_int_var_t sched_select_var = {
114 &list_sched_options.select, sched_select_items
/* Binds the "prep" option table to list_sched_options.prep. */
117 static lc_opt_enum_int_var_t sched_prep_var = {
118 &list_sched_options.prep, sched_prep_items
/* Option table registered under be.listsched in be_init_listsched(). */
121 static const lc_opt_table_entry_t list_sched_option_table[] = {
122 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
123 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
/**
128 * All scheduling info needed per node.
 */
130 typedef struct sched_irn_t {
131 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
132 unsigned already_sched : 1; /**< Set if this node is already scheduled */
/**
136 * Scheduling environment for the whole graph.
 */
138 typedef struct sched_env_t {
139 sched_irn_t *sched_info; /**< scheduling info per node */
140 const list_sched_selector_t *selector; /**< The node selector. */
141 void *selector_env; /**< A pointer to give to the selector. */
/**
145 * Environment for a block scheduler.
 * NOTE(review): the closing braces/typedef names of these structs are not
 * visible in this excerpt.
 */
147 typedef struct block_sched_env_t {
148 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
149 ir_nodeset_t cands; /**< the set of candidates */
150 ir_node *block; /**< the current block */
151 sched_env_t *sched_env; /**< the scheduler environment */
152 ir_nodeset_t live; /**< simple liveness during scheduling */
153 const list_sched_selector_t *selector; /**< selector shortcut, same as sched_env->selector */
154 void *selector_block_env; /**< per-block environment returned by selector->init_block */
/**
158 * Returns non-zero if the node is already scheduled.
 * Looks up the node's bit in env->sched_info, indexed by the node's irn index.
 */
160 static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
162 int idx = get_irn_idx(n);
164 assert(idx < ARR_LEN(env->sched_info));
165 return env->sched_info[idx].already_sched;
/**
169 * Mark a node as already scheduled.
 * Counterpart to is_already_scheduled(); sets the per-node flag.
 */
171 static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
173 int idx = get_irn_idx(n);
175 assert(idx < ARR_LEN(env->sched_info));
176 env->sched_info[idx].already_sched = 1;
/* Forward declaration: make_ready() and add_to_sched() are mutually recursive. */
179 static void add_to_sched(block_sched_env_t *env, ir_node *irn);
/**
182 * Try to put a node in the ready set.
183 * @param env The block scheduler environment.
184 * @param pred The previous scheduled node.
185 * @param irn The node to make ready.
186 * @return 1, if the node could be made ready, 0 else.
 * NOTE(review): the loop-variable declarations, the early "return 0"
 * statements and some closing braces are not visible in this excerpt.
 */
188 static inline int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
192 /* Blocks cannot be scheduled. */
193 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
/*
197 * Check if the given ir node is in a different block than the
198 * currently scheduled one. If that is so, don't make the node ready.
 */
200 if (env->block != get_nodes_block(irn))
/* A node is ready only when all of its block-local operands are scheduled. */
203 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
204 ir_node *op = get_irn_in_or_dep(irn, i);
206 /* if irn is an End we have keep-alives and op might be a block, skip that */
212 /* If the operand is local to the scheduled block and not yet
213 * scheduled, this node cannot be made ready, so exit. */
214 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
/* Nodes that never appear in a schedule are emitted immediately instead of
 * being put into the candidate set. */
218 if (! to_appear_in_schedule(irn)) {
219 add_to_sched(env, irn);
220 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
222 ir_nodeset_insert(&env->cands, irn);
224 /* Notify selector about the ready node. */
225 if (env->selector->node_ready)
226 env->selector->node_ready(env->selector_block_env, irn, pred);
228 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
/**
235 * Try to make all users of a node ready.
236 * In fact, a usage node can only be made ready if all its operands
237 * have already been scheduled. This is checked by make_ready().
238 * @param env The block schedule environment.
239 * @param irn The node whose usages (successors) are to be made ready.
 */
241 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
243 const ir_edge_t *edge;
245 /* make all data users ready */
246 foreach_out_edge(irn, edge) {
247 ir_node *user = get_edge_src_irn(edge);
250 make_ready(env, irn, user);
253 /* and the dependent nodes as well */
254 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
255 ir_node *user = get_edge_src_irn(edge);
258 make_ready(env, irn, user);
/**
263 * Returns the number of not yet scheduled users of a node.
 */
265 static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n)
267 int idx = get_irn_idx(n);
269 assert(idx < ARR_LEN(env->sched_info));
270 return env->sched_info[idx].num_not_sched_user;
/**
274 * Sets the number of not yet scheduled users of a node.
 */
276 static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
278 int idx = get_irn_idx(n);
280 assert(idx < ARR_LEN(env->sched_info));
281 env->sched_info[idx].num_not_sched_user = num;
/**
285 * Add @p num to the number of not yet scheduled users and return the result.
 * @p num may be negative (used with -1 to decrement in update_sched_liveness()).
 */
287 static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
289 int idx = get_irn_idx(n);
291 assert(idx < ARR_LEN(env->sched_info));
292 env->sched_info[idx].num_not_sched_user += num;
293 return env->sched_info[idx].num_not_sched_user;
/**
297 * Returns the number of users of a node having mode datab.
 * For mode_T nodes the Projs are followed recursively; keep-alive edges
 * to the End node are not counted.
 * NOTE(review): the declaration/initialization of the accumulator and the
 * return statement are not visible in this excerpt.
 */
299 static int get_num_successors(ir_node *irn)
302 const ir_edge_t *edge;
304 if (get_irn_mode(irn) == mode_T) {
305 /* for mode_T nodes: count the users of all Projs */
306 foreach_out_edge(irn, edge) {
307 ir_node *proj = get_edge_src_irn(edge);
308 ir_mode *mode = get_irn_mode(proj);
/* Nested tuples: recurse; data/memory Projs: count their out edges. */
310 if (mode == mode_T) {
311 sum += get_num_successors(proj);
312 } else if (mode_is_datab(mode)) {
313 sum += get_irn_n_edges(proj);
318 /* do not count keep-alive edges */
319 foreach_out_edge(irn, edge) {
320 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
/**
329 * Adds irn to @p live, updates all inputs that this user is scheduled
330 * and counts all of its non scheduled users.
 * Maintains the simple liveness set used for register-pressure estimation.
 */
332 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn)
/* Walk all inputs (including dependency edges) of the scheduled node. */
340 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
341 ir_node *in = get_irn_in_or_dep(irn, i);
343 /* if in is a proj: update predecessor */
346 /* if in is still in the live set: reduce number of users by one */
347 if (ir_nodeset_contains(&env->live, in)) {
348 if (add_irn_not_sched_user(env, in, -1) <= 0)
349 ir_nodeset_remove(&env->live, in);
/*
354 get_num_successors returns the number of all users. This includes
355 users in different blocks as well. As each block is scheduled separately,
356 the liveness info of those users will not be updated and so these
357 users will keep up the register pressure as it is desired.
 */
359 i = get_num_successors(irn);
361 set_irn_not_sched_user(env, irn, i);
362 ir_nodeset_insert(&env->live, irn);
/**
367 * Append an instruction to a schedule.
368 * @param env The block scheduling environment.
369 * @param irn The node to add to the schedule.
370 * @return The given node.
 */
372 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
374 /* If the node consumes/produces data, it is appended to the schedule
375 * list, otherwise, it is not put into the list */
376 if (to_appear_in_schedule(irn)) {
377 update_sched_liveness(env, irn);
378 sched_add_before(env->block, irn);
380 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
382 /* Remove the node from the ready set */
383 ir_nodeset_remove(&env->cands, irn);
386 /* notify the selector about the finally selected node. */
387 if (env->selector->node_selected)
388 env->selector->node_selected(env->selector_block_env, irn);
390 /* Insert the node in the set of all available scheduled nodes. */
391 set_already_scheduled(env, irn);
/* Scheduling this node may unblock its users; try to make them ready. */
393 make_users_ready(env, irn);
/**
397 * Perform list scheduling on a block.
 *
399 * Note, that the caller must compute a linked list of nodes in the block
400 * using the link field before calling this function.
 *
402 * Also the outs must have been computed.
 *
404 * @param block The block node.
405 * @param env Scheduling environment.
 * NOTE(review): several declarations (irn, users, j, m), control-flow lines
 * and closing braces are not visible in this excerpt.
 */
407 static void list_sched_block(ir_node *block, void *env_ptr)
409 sched_env_t *env = env_ptr;
410 const list_sched_selector_t *selector = env->selector;
412 block_sched_env_t be;
413 const ir_edge_t *edge;
417 /* Initialize the block's list head that will hold the schedule. */
418 sched_init_block(block);
420 /* Initialize the block scheduling environment */
421 be.sched_info = env->sched_info;
/* Size both sets by the block's out-edge count as a capacity estimate. */
423 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
424 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
425 be.selector = selector;
428 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
430 if (selector->init_block)
431 be.selector_block_env = selector->init_block(env->selector_env, block);
433 /* Then one can add all nodes that are ready to the set. */
434 foreach_out_edge(block, edge) {
435 ir_node *irn = get_edge_src_irn(edge);
436 ir_opcode code = get_irn_opcode(irn);
439 if (code == iro_End) {
440 /* Skip the end node because of keep-alive edges. */
444 users = get_irn_n_edges(irn);
447 else if (users == 1) { /* ignore nodes that are only held by the anchor */
448 const ir_edge_t *edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
449 ir_node *user = get_edge_src_irn(edge);
/*
456 Phi functions are scheduled immediately, since they only
457 transfer data flow from the predecessors to this block.
 */
459 add_to_sched(&be, irn);
460 } else if (be_is_Start(irn)) {
461 /* The start block will be scheduled as the first node */
462 add_to_sched(&be, irn);
464 /* Other nodes must have all operands in other blocks to be made
 * ready. */
468 /* Check, if the operands of a node are not local to this block */
469 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
470 ir_node *operand = get_irn_in_or_dep(irn, j);
472 if (get_nodes_block(operand) == block) {
476 /* live in values increase register pressure */
477 ir_nodeset_insert(&be.live, operand);
481 /* Make the node ready, if all operands live in a foreign block */
483 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
484 make_ready(&be, NULL, irn);
489 /* Iterate over all remaining nodes */
490 while (ir_nodeset_size(&be.cands) > 0) {
491 ir_nodeset_iterator_t iter;
493 /* Keeps must be scheduled immediately */
494 foreach_ir_nodeset(&be.cands, irn, iter) {
495 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
/* Otherwise ask the configured selector to pick the next node. */
501 /* Keeps must be immediately scheduled */
502 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
505 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
507 /* Add the node to the schedule. */
508 add_to_sched(&be, irn);
510 /* remove the scheduled node from the ready list. */
511 ir_nodeset_remove(&be.cands, irn);
/* Give the selector a chance to clean up its per-block state. */
514 if (selector->finish_block)
515 selector->finish_block(be.selector_block_env);
517 ir_nodeset_destroy(&be.cands);
518 ir_nodeset_destroy(&be.live);
521 /* List schedule a graph.
 * Chooses the node selector and an optional preparation pass from the
 * backend options, then schedules every block via list_sched_block().
 * NOTE(review): local declarations (env, num_nodes), break statements and
 * some closing braces are not visible in this excerpt. */
522 void list_sched(ir_graph *irg)
526 mris_env_t *mris = NULL;
527 const list_sched_selector_t *selector;
529 /* Select a scheduler based on backend options */
530 switch (list_sched_options.select) {
531 case BE_SCHED_SELECT_TRIVIAL: selector = &trivial_selector; break;
532 case BE_SCHED_SELECT_RANDOM: selector = &random_selector; break;
/* FIX(review): was the mojibake "®_pressure_selector" -- the "&reg"
 * prefix of "&reg_pressure_selector" had been decoded as an HTML entity. */
533 case BE_SCHED_SELECT_REGPRESS: selector = &reg_pressure_selector; break;
534 case BE_SCHED_SELECT_MUCHNIK: selector = &muchnik_selector; break;
535 case BE_SCHED_SELECT_HEUR: selector = &heuristic_selector; break;
536 case BE_SCHED_SELECT_NORMAL: selector = &normal_selector; break;
538 case BE_SCHED_SELECT_HMUCHNIK: selector = &heuristic_selector; break;
542 /* Matze: This is very slow, we should avoid it to improve backend speed,
543 * we just have to make sure that we have no dangling out-edges at this
 * point. */
547 /* Assure, that we have no dangling out-edges to deleted stuff */
548 edges_deactivate(irg);
/* Run the optional schedule-preparation pass. */
552 switch (list_sched_options.prep) {
553 case BE_SCHED_PREP_MRIS:
554 mris = be_sched_mris_preprocess(irg);
556 case BE_SCHED_PREP_RSS:
557 rss_schedule_preparation(irg);
563 num_nodes = get_irg_last_idx(irg);
565 /* initialize environment for list scheduler */
566 memset(&env, 0, sizeof(env));
567 env.selector = selector;
568 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
570 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
572 if (env.selector->init_graph)
573 env.selector_env = env.selector->init_graph(env.selector, irg);
575 /* Schedule each single block. */
576 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
578 if (env.selector->finish_graph)
579 env.selector->finish_graph(env.selector_env);
/* The MRIS preparation allocates state that must be freed afterwards. */
581 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
582 be_sched_mris_free(mris);
584 DEL_ARR_F(env.sched_info);
587 /* List schedule a block.
 * Same setup as list_sched(), but schedules only the given block.
 * NOTE(review): local declarations (env, num_nodes) and some closing
 * braces are not visible in this excerpt. */
588 void list_sched_single_block(ir_graph *irg, ir_node *block)
592 const list_sched_selector_t *selector;
594 /* Select a scheduler based on backend options */
595 switch (list_sched_options.select) {
596 case BE_SCHED_SELECT_TRIVIAL: selector = &trivial_selector; break;
597 case BE_SCHED_SELECT_RANDOM: selector = &random_selector; break;
/* FIX(review): was the mojibake "®_pressure_selector" -- the "&reg"
 * prefix of "&reg_pressure_selector" had been decoded as an HTML entity. */
598 case BE_SCHED_SELECT_REGPRESS: selector = &reg_pressure_selector; break;
599 case BE_SCHED_SELECT_MUCHNIK: selector = &muchnik_selector; break;
600 case BE_SCHED_SELECT_HEUR: selector = &heuristic_selector; break;
601 case BE_SCHED_SELECT_NORMAL: selector = &normal_selector; break;
/* NOTE(review): list_sched() maps HMUCHNIK to heuristic_selector, this
 * function maps it to trivial_selector -- confirm whether intentional. */
603 case BE_SCHED_SELECT_HMUCHNIK: selector = &trivial_selector; break;
606 /* Assure, that the out edges are computed */
607 edges_deactivate(irg);
610 num_nodes = get_irg_last_idx(irg);
612 /* initialize environment for list scheduler */
613 memset(&env, 0, sizeof(env));
614 env.selector = selector;
615 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
617 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
619 if (env.selector->init_graph)
620 env.selector_env = env.selector->init_graph(env.selector, irg);
622 /* Schedule block. */
623 list_sched_block(block, &env);
625 if (env.selector->finish_graph)
626 env.selector->finish_graph(env.selector_env);
628 DEL_ARR_F(env.sched_info);
/* Register this module's constructor with the backend. */
631 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
/* Module constructor: registers the be.listsched option group and the
 * firm.be.sched debug module. */
632 void be_init_listsched(void)
634 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
635 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
637 lc_opt_add_table(sched_grp, list_sched_option_table);
639 FIRM_DBG_REGISTER(dbg, "firm.be.sched");