/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Primitive list scheduling with different node selectors.
 * @author  Sebastian Hack
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include "iredges_t.h"
#include "irprintf_t.h"
#include "belistsched.h"
#include "lc_opts_enum.h"
64 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
67 * Scheduling environment for the whole graph.
69 typedef struct sched_env_t {
70 unsigned *scheduled; /**< bitset of already scheduled nodes */
71 const list_sched_selector_t *selector; /**< The node selector. */
72 void *selector_env; /**< A pointer to give to the selector. */
76 * Environment for a block scheduler.
78 typedef struct block_sched_env_t {
79 unsigned *scheduled; /**< scheduling info per node, copied from the
80 global scheduler object */
81 ir_nodeset_t cands; /**< the set of candidates */
82 ir_node *block; /**< the current block */
83 sched_env_t *sched_env; /**< the scheduler environment */
84 const list_sched_selector_t *selector;
85 void *selector_block_env;
89 * Returns non-zero if the node is already scheduled
91 static bool is_already_scheduled(block_sched_env_t *env, ir_node *n)
93 unsigned idx = get_irn_idx(n);
94 return rbitset_is_set(env->scheduled, idx);
98 * Mark a node as already scheduled
100 static void set_already_scheduled(block_sched_env_t *env, ir_node *n)
102 unsigned idx = get_irn_idx(n);
103 rbitset_set(env->scheduled, idx);
106 static void selected(block_sched_env_t *env, ir_node *irn);
107 static void add_to_sched(block_sched_env_t *env, ir_node *irn);
110 * Put a node in the ready set, or make it available immediately if it doesn't
111 * need to be scheduled
113 static void node_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
116 || (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled)) {
118 DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
119 } else if (be_is_Keep(irn) || be_is_CopyKeep(irn)) {
120 /* Keeps must be scheduled immediately */
121 add_to_sched(env, irn);
123 ir_nodeset_insert(&env->cands, irn);
125 /* Notify selector about the ready node. */
126 if (env->selector->node_ready)
127 env->selector->node_ready(env->selector_block_env, irn, pred);
129 DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
134 * Try to put a node in the ready set.
135 * @param env The block scheduler environment.
136 * @param pred The previous scheduled node.
137 * @param irn The node to make ready.
138 * @return 1, if the node could be made ready, 0 else.
140 static void try_make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
144 /* we schedule one block at a time, so no need to consider users in other
146 if (is_Block(irn) || get_nodes_block(irn) != env->block)
148 if (is_Phi(irn) || is_End(irn))
150 /* check if all operands are already available */
151 n = get_irn_ins_or_deps(irn);
152 for (i = 0; i < n; ++i) {
153 ir_node *op = get_irn_in_or_dep(irn, i);
155 /* If the operand is local to the scheduled block and not yet
156 * scheduled, this nodes cannot be made ready, so exit. */
157 if (get_nodes_block(op) == env->block && !is_already_scheduled(env, op))
161 node_ready(env, pred, irn);
164 static void selected(block_sched_env_t *env, ir_node *node)
166 const ir_edge_t *edge;
168 /* notify the selector about the finally selected node. */
169 if (env->selector->node_selected)
170 env->selector->node_selected(env->selector_block_env, node);
172 /* Insert the node in the set of all available scheduled nodes. */
173 set_already_scheduled(env, node);
175 /* check users, they might be ready now */
176 foreach_out_edge(node, edge) {
177 ir_node *user = get_edge_src_irn(edge);
178 try_make_ready(env, node, user);
180 foreach_out_edge_kind(node, edge, EDGE_KIND_DEP) {
181 ir_node *user = get_edge_src_irn(edge);
182 try_make_ready(env, node, user);
187 * Append an instruction to a schedule.
188 * @param env The block scheduling environment.
189 * @param irn The node to add to the schedule.
190 * @return The given node.
192 static void add_to_sched(block_sched_env_t *env, ir_node *irn)
194 assert(! (arch_irn_get_flags(irn) & arch_irn_flags_not_scheduled));
196 sched_add_before(env->block, irn);
198 DB((dbg, LEVEL_2, "\tschedule %+F\n", irn));
200 /* Remove the node from the ready set */
201 ir_nodeset_remove(&env->cands, irn);
207 * Perform list scheduling on a block.
209 * Note, that the caller must compute a linked list of nodes in the block
210 * using the link field before calling this function.
212 * Also the outs must have been computed.
214 * @param block The block node.
215 * @param env Scheduling environment.
217 static void list_sched_block(ir_node *block, void *env_ptr)
219 sched_env_t *env = (sched_env_t*)env_ptr;
220 const list_sched_selector_t *selector = env->selector;
222 block_sched_env_t be;
223 const ir_edge_t *edge;
225 /* Initialize the block's list head that will hold the schedule. */
226 sched_init_block(block);
228 /* Initialize the block scheduling environment */
229 be.scheduled = env->scheduled;
231 ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
232 be.selector = selector;
235 DB((dbg, LEVEL_1, "scheduling %+F\n", block));
237 if (selector->init_block)
238 be.selector_block_env = selector->init_block(env->selector_env, block);
240 /* Then one can add all nodes are ready to the set. */
241 foreach_out_edge(block, edge) {
242 ir_node *irn = get_edge_src_irn(edge);
245 /* Phi functions are scheduled immediately, since they only
246 * transfer data flow from the predecessors to this block. */
247 add_to_sched(&be, irn);
248 } else if (be_is_Start(irn)) {
249 /* The start block will be scheduled as the first node */
250 add_to_sched(&be, irn);
252 try_make_ready(&be, NULL, irn);
256 /* Iterate over all remaining nodes */
257 while (ir_nodeset_size(&be.cands) > 0) {
258 ir_node *irn = be.selector->select(be.selector_block_env, &be.cands);
259 DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
261 /* remove the scheduled node from the ready list. */
262 ir_nodeset_remove(&be.cands, irn);
263 /* Add the node to the schedule. */
264 add_to_sched(&be, irn);
267 if (selector->finish_block)
268 selector->finish_block(be.selector_block_env);
270 ir_nodeset_destroy(&be.cands);
273 /* List schedule a graph. */
274 void be_list_sched_graph(ir_graph *irg, const list_sched_selector_t *selector)
279 /* Matze: This is very slow, we should avoid it to improve backend speed,
280 * we just have to make sure that we have no dangling out-edges at this
283 edges_deactivate(irg);
286 num_nodes = get_irg_last_idx(irg);
288 /* initialize environment for list scheduler */
289 memset(&env, 0, sizeof(env));
290 env.selector = selector;
291 env.scheduled = rbitset_malloc(num_nodes);
293 if (selector->init_graph != NULL)
294 env.selector_env = selector->init_graph(irg);
296 /* Schedule each single block. */
297 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
299 if (selector->finish_graph != NULL)
300 selector->finish_graph(env.selector_env);
305 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
306 void be_init_listsched(void)
308 FIRM_DBG_REGISTER(dbg, "firm.be.sched");