2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Primitive list scheduling with different node selectors.
23 * @author Sebastian Hack
41 #include "iredges_t.h"
46 #include "irprintf_t.h"
52 #include "besched_t.h"
55 #include "belistsched.h"
56 #include "beschedmris.h"
57 #include "beschedrss.h"
63 #include "lc_opts_enum.h"
/** Debug module handle; registered as "firm.be.sched" in be_init_listsched() (debug builds only). */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);

/** Non-zero if @p irn is a backend node (Keep, CopyKeep, RegParams) that must always appear in the schedule. */
#define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn))
/** Available node selectors for the list scheduler (see sched_select_items[] below). */
enum {
	BE_SCHED_SELECT_TRIVIAL,   /**< take the first ready node */
	BE_SCHED_SELECT_REGPRESS,  /**< register pressure aware selector */
	BE_SCHED_SELECT_MUCHNIK,   /**< Muchnik selector */
	BE_SCHED_SELECT_HEUR,      /**< heuristic selector (NOTE(review): reconstructed — referenced by
	                                sched_select_items[] and the select switches below) */
	BE_SCHED_SELECT_HMUCHNIK,  /**< hybrid Muchnik selector */
	BE_SCHED_SELECT_RANDOM,    /**< random selector */
	BE_SCHED_SELECT_NORMAL     /**< "normal" (Mueller) selector, the default */
};
/** Available schedule-preparation passes (see sched_prep_items[] below). */
enum {
	BE_SCHED_PREP_NONE = 0,  /**< no preparation */
	BE_SCHED_PREP_MRIS = 2,  /**< minimal register increase scheduling preparation (beschedmris) */
	BE_SCHED_PREP_RSS  = 3   /**< register pressure sensitive scheduling preparation (beschedrss)
	                              NOTE(review): value reconstructed; symbol is referenced by
	                              sched_prep_items[] and the prep switch in list_sched() */
};
/** Command-line configurable options of the list scheduler. */
typedef struct _list_sched_options_t {
	int select;                        /**< the node selector */
	int prep;                          /**< schedule preparation */
} list_sched_options_t;
90 static list_sched_options_t list_sched_options = {
91 BE_SCHED_SELECT_NORMAL, /* mueller heuristic selector */
92 BE_SCHED_PREP_NONE, /* no scheduling preparation */
95 /* schedule selector options. */
96 static const lc_opt_enum_int_items_t sched_select_items[] = {
97 { "trivial", BE_SCHED_SELECT_TRIVIAL },
98 { "random", BE_SCHED_SELECT_RANDOM },
99 { "regpress", BE_SCHED_SELECT_REGPRESS },
100 { "normal", BE_SCHED_SELECT_NORMAL },
101 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
102 { "heur", BE_SCHED_SELECT_HEUR },
103 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
107 /* schedule preparation options. */
108 static const lc_opt_enum_int_items_t sched_prep_items[] = {
109 { "none", BE_SCHED_PREP_NONE },
110 { "mris", BE_SCHED_PREP_MRIS },
111 { "rss", BE_SCHED_PREP_RSS },
115 static lc_opt_enum_int_var_t sched_select_var = {
116 &list_sched_options.select, sched_select_items
119 static lc_opt_enum_int_var_t sched_prep_var = {
120 &list_sched_options.prep, sched_prep_items
123 static const lc_opt_table_entry_t list_sched_option_table[] = {
124 LC_OPT_ENT_ENUM_PTR("prep", "schedule preparation", &sched_prep_var),
125 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled */
} sched_irn_t;
138 * Scheduling environment for the whole graph.
140 typedef struct _sched_env_t {
141 sched_irn_t *sched_info; /**< scheduling info per node */
142 const list_sched_selector_t *selector; /**< The node selector. */
143 void *selector_env; /**< A pointer to give to the selector. */
147 * Environment for a block scheduler.
149 typedef struct _block_sched_env_t {
150 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
151 ir_nodeset_t cands; /**< the set of candidates */
152 ir_node *block; /**< the current block */
153 sched_env_t *sched_env; /**< the scheduler environment */
154 ir_nodeset_t live; /**< simple liveness during scheduling */
155 const list_sched_selector_t *selector;
156 void *selector_block_env;
160 * Returns non-zero if a node must be placed in the schedule.
162 static inline int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
166 /* if there are no uses, don't schedule */
167 if (get_irn_n_edges(irn) < 1)
170 /* else ask the scheduler */
171 if (sel->to_appear_in_schedule)
172 res = sel->to_appear_in_schedule(block_env, irn);
174 return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
178 * Returns non-zero if the node is already scheduled
180 static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
182 int idx = get_irn_idx(n);
184 assert(idx < ARR_LEN(env->sched_info));
185 return env->sched_info[idx].already_sched;
189 * Mark a node as already scheduled
191 static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
193 int idx = get_irn_idx(n);
195 assert(idx < ARR_LEN(env->sched_info));
196 env->sched_info[idx].already_sched = 1;
/* Forward declaration: make_ready() and add_to_sched() are mutually recursive. */
static void add_to_sched(block_sched_env_t *env, ir_node *irn);
202 * Try to put a node in the ready set.
203 * @param env The block scheduler environment.
204 * @param pred The previous scheduled node.
205 * @param irn The node to make ready.
206 * @return 1, if the node could be made ready, 0 else.
208 static inline int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
212 /* Blocks cannot be scheduled. */
213 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
217 * Check, if the given ir node is in a different block as the
218 * currently scheduled one. If that is so, don't make the node ready.
220 if (env->block != get_nodes_block(irn))
223 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
224 ir_node *op = get_irn_in_or_dep(irn, i);
226 /* if irn is an End we have keep-alives and op might be a block, skip that */
232 /* If the operand is local to the scheduled block and not yet
233 * scheduled, this nodes cannot be made ready, so exit. */
234 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
238 if (! must_appear_in_schedule(env->selector, env, irn)) {
239 add_to_sched(env, irn);
240 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
242 ir_nodeset_insert(&env->cands, irn);
244 /* Notify selector about the ready node. */
245 if (env->selector->node_ready)
246 env->selector->node_ready(env->selector_block_env, irn, pred);
248 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
255 * Try, to make all users of a node ready.
256 * In fact, a usage node can only be made ready, if all its operands
257 * have already been scheduled yet. This is checked by make_ready().
258 * @param env The block schedule environment.
259 * @param irn The node, which usages (successors) are to be made ready.
261 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
263 const ir_edge_t *edge;
265 /* make all data users ready */
266 foreach_out_edge(irn, edge) {
267 ir_node *user = get_edge_src_irn(edge);
270 make_ready(env, irn, user);
273 /* and the dependent nodes as well */
274 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
275 ir_node *user = get_edge_src_irn(edge);
278 make_ready(env, irn, user);
283 * Returns the number of not yet schedules users.
285 static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
286 int idx = get_irn_idx(n);
288 assert(idx < ARR_LEN(env->sched_info));
289 return env->sched_info[idx].num_not_sched_user;
293 * Sets the number of not yet schedules users.
295 static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
296 int idx = get_irn_idx(n);
298 assert(idx < ARR_LEN(env->sched_info));
299 env->sched_info[idx].num_not_sched_user = num;
303 * Add @p num to the number of not yet schedules users and returns the result.
305 static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
306 int idx = get_irn_idx(n);
308 assert(idx < ARR_LEN(env->sched_info));
309 env->sched_info[idx].num_not_sched_user += num;
310 return env->sched_info[idx].num_not_sched_user;
314 * Returns the number of users of a node having mode datab.
316 static int get_num_successors(ir_node *irn) {
318 const ir_edge_t *edge;
320 if (get_irn_mode(irn) == mode_T) {
321 /* for mode_T nodes: count the users of all Projs */
322 foreach_out_edge(irn, edge) {
323 ir_node *proj = get_edge_src_irn(edge);
324 ir_mode *mode = get_irn_mode(proj);
326 if (mode == mode_T) {
327 sum += get_num_successors(proj);
328 } else if (mode_is_datab(mode)) {
329 sum += get_irn_n_edges(proj);
334 /* do not count keep-alive edges */
335 foreach_out_edge(irn, edge) {
336 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
345 * Adds irn to @p live, updates all inputs that this user is scheduled
346 * and counts all of its non scheduled users.
348 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
355 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
356 ir_node *in = get_irn_in_or_dep(irn, i);
358 /* if in is a proj: update predecessor */
361 /* if in is still in the live set: reduce number of users by one */
362 if (ir_nodeset_contains(&env->live, in)) {
363 if (add_irn_not_sched_user(env, in, -1) <= 0)
364 ir_nodeset_remove(&env->live, in);
369 get_num_successors returns the number of all users. This includes
370 users in different blocks as well. As the each block is scheduled separately
371 the liveness info of those users will not be updated and so these
372 users will keep up the register pressure as it is desired.
374 i = get_num_successors(irn);
376 set_irn_not_sched_user(env, irn, i);
377 ir_nodeset_insert(&env->live, irn);
382 * Append an instruction to a schedule.
383 * @param env The block scheduling environment.
384 * @param irn The node to add to the schedule.
385 * @return The given node.
387 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
389 /* If the node consumes/produces data, it is appended to the schedule
390 * list, otherwise, it is not put into the list */
391 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
392 update_sched_liveness(env, irn);
393 sched_add_before(env->block, irn);
395 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
397 /* Remove the node from the ready set */
398 ir_nodeset_remove(&env->cands, irn);
401 /* notify the selector about the finally selected node. */
402 if (env->selector->node_selected)
403 env->selector->node_selected(env->selector_block_env, irn);
405 /* Insert the node in the set of all available scheduled nodes. */
406 set_already_scheduled(env, irn);
408 make_users_ready(env, irn);
412 * Perform list scheduling on a block.
414 * Note, that the caller must compute a linked list of nodes in the block
415 * using the link field before calling this function.
417 * Also the outs must have been computed.
419 * @param block The block node.
420 * @param env Scheduling environment.
422 static void list_sched_block(ir_node *block, void *env_ptr)
424 sched_env_t *env = env_ptr;
425 const list_sched_selector_t *selector = env->selector;
426 ir_node *start_node = get_irg_start(get_irn_irg(block));
428 block_sched_env_t be;
429 const ir_edge_t *edge;
433 /* Initialize the block's list head that will hold the schedule. */
434 sched_init_block(block);
436 /* Initialize the block scheduling environment */
437 be.sched_info = env->sched_info;
439 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
440 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
441 be.selector = selector;
444 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
446 if (selector->init_block)
447 be.selector_block_env = selector->init_block(env->selector_env, block);
449 /* Then one can add all nodes are ready to the set. */
450 foreach_out_edge(block, edge) {
451 ir_node *irn = get_edge_src_irn(edge);
452 ir_opcode code = get_irn_opcode(irn);
455 if (code == iro_End) {
456 /* Skip the end node because of keep-alive edges. */
458 } else if (code == iro_Block) {
459 /* A Block-Block edge. This should be the MacroBlock
460 * edge, ignore it. */
461 assert(get_Block_MacroBlock(irn) == block && "Block-Block edge found");
465 users = get_irn_n_edges(irn);
468 else if (users == 1) { /* ignore nodes that are only hold by the anchor */
469 const ir_edge_t *edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
470 ir_node *user = get_edge_src_irn(edge);
477 Phi functions are scheduled immediately, since they only
478 transfer data flow from the predecessors to this block.
480 add_to_sched(&be, irn);
482 else if (irn == start_node) {
483 /* The start block will be scheduled as the first node */
484 add_to_sched(&be, irn);
487 /* Other nodes must have all operands in other blocks to be made
491 /* Check, if the operands of a node are not local to this block */
492 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
493 ir_node *operand = get_irn_in_or_dep(irn, j);
495 if (get_nodes_block(operand) == block) {
499 /* live in values increase register pressure */
500 ir_nodeset_insert(&be.live, operand);
504 /* Make the node ready, if all operands live in a foreign block */
506 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
507 make_ready(&be, NULL, irn);
512 /* Iterate over all remaining nodes */
513 while (ir_nodeset_size(&be.cands) > 0) {
514 ir_nodeset_iterator_t iter;
516 /* Keeps must be scheduled immediately */
517 foreach_ir_nodeset(&be.cands, irn, iter) {
518 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
524 /* Keeps must be immediately scheduled */
525 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
528 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
530 /* Add the node to the schedule. */
531 add_to_sched(&be, irn);
533 /* remove the scheduled node from the ready list. */
534 ir_nodeset_remove(&be.cands, irn);
537 if (selector->finish_block)
538 selector->finish_block(be.selector_block_env);
540 ir_nodeset_destroy(&be.cands);
541 ir_nodeset_destroy(&be.live);
544 /* List schedule a graph. */
545 void list_sched(be_irg_t *birg, be_options_t *be_opts)
547 ir_graph *irg = birg->irg;
551 mris_env_t *mris = NULL;
552 list_sched_selector_t sel;
556 /* Select a scheduler based on backend options */
557 switch (list_sched_options.select) {
558 case BE_SCHED_SELECT_TRIVIAL: sel = trivial_selector; break;
559 case BE_SCHED_SELECT_RANDOM: sel = random_selector; break;
560 case BE_SCHED_SELECT_REGPRESS: sel = reg_pressure_selector; break;
561 case BE_SCHED_SELECT_MUCHNIK: sel = muchnik_selector; break;
562 case BE_SCHED_SELECT_HEUR: sel = heuristic_selector; break;
563 case BE_SCHED_SELECT_NORMAL: sel = normal_selector; break;
565 case BE_SCHED_SELECT_HMUCHNIK: sel = heuristic_selector; break;
569 /* Matze: This is very slow, we should avoid it to improve backend speed,
570 * we just have to make sure that we have no dangling out-edges at this
574 /* Assure, that we have no dangling out-edges to deleted stuff */
575 edges_deactivate(irg);
579 switch (list_sched_options.prep) {
580 case BE_SCHED_PREP_MRIS:
581 mris = be_sched_mris_preprocess(birg);
583 case BE_SCHED_PREP_RSS:
584 rss_schedule_preparation(birg);
590 num_nodes = get_irg_last_idx(irg);
592 /* initialize environment for list scheduler */
593 memset(&env, 0, sizeof(env));
594 env.selector = arch_env_get_list_sched_selector(birg->main_env->arch_env, &sel);
595 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
597 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
599 if (env.selector->init_graph)
600 env.selector_env = env.selector->init_graph(env.selector, birg);
602 /* Schedule each single block. */
603 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
605 if (env.selector->finish_graph)
606 env.selector->finish_graph(env.selector_env);
608 if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
609 be_sched_mris_free(mris);
611 DEL_ARR_F(env.sched_info);
614 /* List schedule a block. */
615 void list_sched_single_block(const be_irg_t *birg, ir_node *block,
616 be_options_t *be_opts)
618 ir_graph *irg = birg->irg;
622 list_sched_selector_t sel;
626 /* Select a scheduler based on backend options */
627 switch (list_sched_options.select) {
628 case BE_SCHED_SELECT_TRIVIAL: sel = trivial_selector; break;
629 case BE_SCHED_SELECT_RANDOM: sel = random_selector; break;
630 case BE_SCHED_SELECT_REGPRESS: sel = reg_pressure_selector; break;
631 case BE_SCHED_SELECT_MUCHNIK: sel = muchnik_selector; break;
632 case BE_SCHED_SELECT_HEUR: sel = heuristic_selector; break;
633 case BE_SCHED_SELECT_NORMAL: sel = normal_selector; break;
635 case BE_SCHED_SELECT_HMUCHNIK: sel = trivial_selector; break;
638 /* Assure, that the out edges are computed */
639 edges_deactivate(irg);
642 num_nodes = get_irg_last_idx(irg);
644 /* initialize environment for list scheduler */
645 memset(&env, 0, sizeof(env));
646 env.selector = arch_env_get_list_sched_selector(birg->main_env->arch_env, &sel);
647 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
649 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
651 if (env.selector->init_graph)
652 env.selector_env = env.selector->init_graph(env.selector, birg);
654 /* Schedule block. */
655 list_sched_block(block, &env);
657 if (env.selector->finish_graph)
658 env.selector->finish_graph(env.selector_env);
660 DEL_ARR_F(env.sched_info);
664 * Register list scheduler options.
666 void be_init_listsched(void) {
667 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
668 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
670 lc_opt_add_table(sched_grp, list_sched_option_table);
672 FIRM_DBG_REGISTER(dbg, "firm.be.sched");
675 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);