/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Primitive list scheduling with different node selectors.
 * @author  Sebastian Hack
 */
#include <string.h>

#include "iredges_t.h"
#include "irprintf_t.h"

#include "belistsched.h"

#include "lc_opts_enum.h"
63 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
66 BE_SCHED_SELECT_TRIVIAL,
67 BE_SCHED_SELECT_REGPRESS,
68 BE_SCHED_SELECT_MUCHNIK,
70 BE_SCHED_SELECT_HMUCHNIK,
71 BE_SCHED_SELECT_RANDOM,
72 BE_SCHED_SELECT_NORMAL,
76 BE_SCHED_PREP_NONE = 0,
77 BE_SCHED_PREP_MRIS = 2,
81 typedef struct list_sched_options_t {
82 int select; /**< the node selector */
83 } list_sched_options_t;
85 static list_sched_options_t list_sched_options = {
86 BE_SCHED_SELECT_NORMAL, /* mueller heuristic selector */
89 /* schedule selector options. */
90 static const lc_opt_enum_int_items_t sched_select_items[] = {
91 { "trivial", BE_SCHED_SELECT_TRIVIAL },
92 { "random", BE_SCHED_SELECT_RANDOM },
93 { "regpress", BE_SCHED_SELECT_REGPRESS },
94 { "normal", BE_SCHED_SELECT_NORMAL },
95 { "muchnik", BE_SCHED_SELECT_MUCHNIK },
96 { "heur", BE_SCHED_SELECT_HEUR },
97 { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
101 static lc_opt_enum_int_var_t sched_select_var = {
102 &list_sched_options.select, sched_select_items
105 static const lc_opt_table_entry_t list_sched_option_table[] = {
106 LC_OPT_ENT_ENUM_PTR("select", "node selector", &sched_select_var),
111 * All scheduling info needed per node.
113 typedef struct sched_irn_t {
114 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
115 unsigned already_sched : 1; /**< Set if this node is already scheduled */
119 * Scheduling environment for the whole graph.
121 typedef struct sched_env_t {
122 sched_irn_t *sched_info; /**< scheduling info per node */
123 const list_sched_selector_t *selector; /**< The node selector. */
124 void *selector_env; /**< A pointer to give to the selector. */
128 * Environment for a block scheduler.
130 typedef struct block_sched_env_t {
131 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
132 ir_nodeset_t cands; /**< the set of candidates */
133 ir_node *block; /**< the current block */
134 sched_env_t *sched_env; /**< the scheduler environment */
135 ir_nodeset_t live; /**< simple liveness during scheduling */
136 const list_sched_selector_t *selector;
137 void *selector_block_env;
141 * Returns non-zero if the node is already scheduled
143 static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
145 unsigned const idx = get_irn_idx(n);
147 assert(idx < ARR_LEN(env->sched_info));
148 return env->sched_info[idx].already_sched;
152 * Mark a node as already scheduled
154 static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
156 unsigned const idx = get_irn_idx(n);
158 assert(idx < ARR_LEN(env->sched_info));
159 env->sched_info[idx].already_sched = 1;
162 static void add_to_sched(block_sched_env_t *env, ir_node *irn);
165 * Try to put a node in the ready set.
166 * @param env The block scheduler environment.
167 * @param pred The previous scheduled node.
168 * @param irn The node to make ready.
169 * @return 1, if the node could be made ready, 0 else.
171 static inline int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
175 /* Blocks cannot be scheduled. */
176 if (is_Block(irn) || get_irn_n_edges(irn) == 0)
180 * Check, if the given ir node is in a different block as the
181 * currently scheduled one. If that is so, don't make the node ready.
183 if (env->block != get_nodes_block(irn))
186 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
187 ir_node *op = get_irn_in_or_dep(irn, i);
189 /* if irn is an End we have keep-alives and op might be a block, skip that */
195 /* If the operand is local to the scheduled block and not yet
196 * scheduled, this nodes cannot be made ready, so exit. */
197 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
201 if (! to_appear_in_schedule(irn)) {
202 add_to_sched(env, irn);
203 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
205 ir_nodeset_insert(&env->cands, irn);
207 /* Notify selector about the ready node. */
208 if (env->selector->node_ready)
209 env->selector->node_ready(env->selector_block_env, irn, pred);
211 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
218 * Try, to make all users of a node ready.
219 * In fact, a usage node can only be made ready, if all its operands
220 * have already been scheduled yet. This is checked by make_ready().
221 * @param env The block schedule environment.
222 * @param irn The node, which usages (successors) are to be made ready.
224 static void make_users_ready(block_sched_env_t *env, ir_node *irn)
226 const ir_edge_t *edge;
228 /* make all data users ready */
229 foreach_out_edge(irn, edge) {
230 ir_node *user = get_edge_src_irn(edge);
233 make_ready(env, irn, user);
236 /* and the dependent nodes as well */
237 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
238 ir_node *user = get_edge_src_irn(edge);
241 make_ready(env, irn, user);
246 * Returns the number of not yet schedules users.
248 static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n)
250 unsigned const idx = get_irn_idx(n);
252 assert(idx < ARR_LEN(env->sched_info));
253 return env->sched_info[idx].num_not_sched_user;
257 * Sets the number of not yet schedules users.
259 static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
261 unsigned const idx = get_irn_idx(n);
263 assert(idx < ARR_LEN(env->sched_info));
264 env->sched_info[idx].num_not_sched_user = num;
268 * Add @p num to the number of not yet schedules users and returns the result.
270 static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
272 unsigned const idx = get_irn_idx(n);
274 assert(idx < ARR_LEN(env->sched_info));
275 env->sched_info[idx].num_not_sched_user += num;
276 return env->sched_info[idx].num_not_sched_user;
280 * Returns the number of users of a node having mode datab.
282 static int get_num_successors(ir_node *irn)
285 const ir_edge_t *edge;
287 if (get_irn_mode(irn) == mode_T) {
288 /* for mode_T nodes: count the users of all Projs */
289 foreach_out_edge(irn, edge) {
290 ir_node *proj = get_edge_src_irn(edge);
291 ir_mode *mode = get_irn_mode(proj);
293 if (mode == mode_T) {
294 sum += get_num_successors(proj);
295 } else if (mode_is_datab(mode)) {
296 sum += get_irn_n_edges(proj);
301 /* do not count keep-alive edges */
302 foreach_out_edge(irn, edge) {
303 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
312 * Adds irn to @p live, updates all inputs that this user is scheduled
313 * and counts all of its non scheduled users.
315 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn)
323 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
324 ir_node *in = get_irn_in_or_dep(irn, i);
326 /* if in is a proj: update predecessor */
329 /* if in is still in the live set: reduce number of users by one */
330 if (ir_nodeset_contains(&env->live, in)) {
331 if (add_irn_not_sched_user(env, in, -1) <= 0)
332 ir_nodeset_remove(&env->live, in);
337 get_num_successors returns the number of all users. This includes
338 users in different blocks as well. As the each block is scheduled separately
339 the liveness info of those users will not be updated and so these
340 users will keep up the register pressure as it is desired.
342 i = get_num_successors(irn);
344 set_irn_not_sched_user(env, irn, i);
345 ir_nodeset_insert(&env->live, irn);
350 * Append an instruction to a schedule.
351 * @param env The block scheduling environment.
352 * @param irn The node to add to the schedule.
353 * @return The given node.
355 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
357 /* If the node consumes/produces data, it is appended to the schedule
358 * list, otherwise, it is not put into the list */
359 if (to_appear_in_schedule(irn)) {
360 update_sched_liveness(env, irn);
361 sched_add_before(env->block, irn);
363 DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
365 /* Remove the node from the ready set */
366 ir_nodeset_remove(&env->cands, irn);
369 /* notify the selector about the finally selected node. */
370 if (env->selector->node_selected)
371 env->selector->node_selected(env->selector_block_env, irn);
373 /* Insert the node in the set of all available scheduled nodes. */
374 set_already_scheduled(env, irn);
376 make_users_ready(env, irn);
380 * Perform list scheduling on a block.
382 * Note, that the caller must compute a linked list of nodes in the block
383 * using the link field before calling this function.
385 * Also the outs must have been computed.
387 * @param block The block node.
388 * @param env Scheduling environment.
390 static void list_sched_block(ir_node *block, void *env_ptr)
392 sched_env_t *env = (sched_env_t*)env_ptr;
393 const list_sched_selector_t *selector = env->selector;
395 block_sched_env_t be;
396 const ir_edge_t *edge;
400 /* Initialize the block's list head that will hold the schedule. */
401 sched_init_block(block);
403 /* Initialize the block scheduling environment */
404 be.sched_info = env->sched_info;
406 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
407 ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
408 be.selector = selector;
411 DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
413 if (selector->init_block)
414 be.selector_block_env = selector->init_block(env->selector_env, block);
416 /* Then one can add all nodes are ready to the set. */
417 foreach_out_edge(block, edge) {
418 ir_node *irn = get_edge_src_irn(edge);
419 unsigned code = get_irn_opcode(irn);
422 if (code == iro_End) {
423 /* Skip the end node because of keep-alive edges. */
427 users = get_irn_n_edges(irn);
430 else if (users == 1) { /* ignore nodes that are only hold by the anchor */
431 const ir_edge_t *edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
432 ir_node *user = get_edge_src_irn(edge);
439 Phi functions are scheduled immediately, since they only
440 transfer data flow from the predecessors to this block.
442 add_to_sched(&be, irn);
443 } else if (be_is_Start(irn)) {
444 /* The start block will be scheduled as the first node */
445 add_to_sched(&be, irn);
447 /* Other nodes must have all operands in other blocks to be made
451 /* Check, if the operands of a node are not local to this block */
452 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
453 ir_node *operand = get_irn_in_or_dep(irn, j);
455 if (get_nodes_block(operand) == block) {
459 /* live in values increase register pressure */
460 ir_nodeset_insert(&be.live, operand);
464 /* Make the node ready, if all operands live in a foreign block */
466 DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
467 make_ready(&be, NULL, irn);
472 /* Iterate over all remaining nodes */
473 while (ir_nodeset_size(&be.cands) > 0) {
474 ir_nodeset_iterator_t iter;
476 /* Keeps must be scheduled immediately */
477 foreach_ir_nodeset(&be.cands, irn, iter) {
478 if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
484 /* Keeps must be immediately scheduled */
485 irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
488 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
490 /* Add the node to the schedule. */
491 add_to_sched(&be, irn);
493 /* remove the scheduled node from the ready list. */
494 ir_nodeset_remove(&be.cands, irn);
497 if (selector->finish_block)
498 selector->finish_block(be.selector_block_env);
500 ir_nodeset_destroy(&be.cands);
501 ir_nodeset_destroy(&be.live);
504 /* List schedule a graph. */
505 void list_sched(ir_graph *irg)
509 const list_sched_selector_t *selector;
511 /* Select a scheduler based on backend options */
512 switch (list_sched_options.select) {
513 case BE_SCHED_SELECT_TRIVIAL: selector = &trivial_selector; break;
514 case BE_SCHED_SELECT_RANDOM: selector = &random_selector; break;
515 case BE_SCHED_SELECT_REGPRESS: selector = ®_pressure_selector; break;
516 case BE_SCHED_SELECT_MUCHNIK: selector = &muchnik_selector; break;
517 case BE_SCHED_SELECT_HEUR: selector = &heuristic_selector; break;
518 case BE_SCHED_SELECT_NORMAL: selector = &normal_selector; break;
520 case BE_SCHED_SELECT_HMUCHNIK: selector = &heuristic_selector; break;
524 /* Matze: This is very slow, we should avoid it to improve backend speed,
525 * we just have to make sure that we have no dangling out-edges at this
529 /* Assure, that we have no dangling out-edges to deleted stuff */
530 edges_deactivate(irg);
534 num_nodes = get_irg_last_idx(irg);
536 /* initialize environment for list scheduler */
537 memset(&env, 0, sizeof(env));
538 env.selector = selector;
539 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
541 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
543 if (env.selector->init_graph)
544 env.selector_env = env.selector->init_graph(env.selector, irg);
546 /* Schedule each single block. */
547 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
549 if (env.selector->finish_graph)
550 env.selector->finish_graph(env.selector_env);
552 DEL_ARR_F(env.sched_info);
555 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
556 void be_init_listsched(void)
558 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
559 lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
561 lc_opt_add_table(sched_grp, list_sched_option_table);
563 FIRM_DBG_REGISTER(dbg, "firm.be.sched");