2 * Scheduling algorithms.
* Only a simple list scheduling algorithm is implemented here.
5 * @author Sebastian Hack
20 #include "iredges_t.h"
25 #include "irprintf_t.h"
28 #include "besched_t.h"
31 #include "belistsched.h"
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
const ir_graph *irg; /**< The graph to schedule. */
const list_sched_selector_t *selector; /**< The node selector. */
void *selector_env; /**< A pointer to give to the selector. */
/**
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 * NOTE(review): no writer of curr_bl is visible in this chunk — confirm it
 * is set to the block currently being scheduled before any qsort(3)
 * comparator that reads it runs.
 */
static ir_node *curr_bl = NULL;
/**
 * qsort(3) comparator ordering ready nodes by their liveness at the end
 * of the current block (nodes live at the block end sort later).
 * NOTE(review): `env` is declared below but no initialization is visible
 * in this chunk; presumably it should refer to the global scheduling
 * state (see curr_bl above) — confirm it is assigned before the
 * dereference, otherwise this reads an uninitialized pointer.
 */
static int cmp_usage(const void *a, const void *b)
struct trivial_sched_env *env;
/* Difference of the two liveness flags gives the sort order. */
res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
/*
 * One of them is live at the end of the block.
 * Then, that one shall be scheduled after the other.
 */
/**
 * A trivial node selector: effectively picks the first node of the
 * ready set (see pset_first/pset_break below).
 * @param env        The graph selector environment (unused here).
 * @param block_env  The per-block selector environment (unused here).
 * @param sched_head Head of the schedule list built so far.
 * @param curr_time  The current scheduling time step.
 * @param ready_set  The set of nodes that are currently ready.
 * @return The selected node.
 */
static ir_node *trivial_select(void *env, void *block_env,
const struct list_head *sched_head,
int curr_time, pset *ready_set)
int i, n = pset_count(ready_set);
/* NOTE(review): alloca() of n pointers — unbounded stack use if a block
 * ever has a very large ready set; fine for typical block sizes. */
ir_node **ready = alloca(n * sizeof(ready[0]));
for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))
/* Take the first element of the ready set and abort the iteration. */
ir_node *res = pset_first(ready_set);
pset_break(ready_set);
/** Callback table of the trivial selector (initializer body not visible
 * in this chunk — presumably wires up trivial_select; confirm). */
static const list_sched_selector_t trivial_selector_struct = {
/** Exported handle to the trivial selector. */
const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
/* Forward declaration of the per-block worker used by list_sched(). */
static void list_sched_block(ir_node *block, void *env_ptr);
/**
 * Perform list scheduling on a whole graph: set up the graph-wide
 * environment, walk all blocks with list_sched_block(), then let the
 * selector finish up.
 * @param irg      The graph to schedule.
 * @param selector The node selector (its init/finish callbacks may be NULL).
 */
void list_sched(ir_graph *irg, const list_sched_selector_t *selector)
memset(&env, 0, sizeof(env));
env.selector = selector;
/* init_graph is optional; without it the selector gets a NULL env. */
env.selector_env = selector->init_graph ? selector->init_graph(irg) : NULL;
/* Assure, that the out edges are computed */
/* Schedule each single block. */
irg_block_walk_graph(irg, list_sched_block, NULL, &env);
/* finish_graph is optional as well. */
if(selector->finish_graph)
selector->finish_graph(env.selector_env, irg);
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
pset *already_scheduled; /**< Set of nodes already scheduled in this block. */
firm_dbg_module_t *dbg; /**< Debug module (registered as "firm.be.sched"). */
/**
 * Try to put a node in the ready set.
 * A node can only become ready if it lives in the block currently being
 * scheduled and all of its block-local operands are already scheduled.
 * @param env The block scheduler environment.
 * @param irn The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
/* Blocks cannot be scheduled. */
/*
 * Check, if the given ir node is in a different block than the
 * currently scheduled one. If that is so, don't make the node ready.
 */
if(env->block != get_nodes_block(irn))
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
/* If the operand is local to the scheduled block and not yet
 * scheduled, this node cannot be made ready, so exit. */
if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)
DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
pset_insert_ptr(env->ready_set, irn);
/**
 * Check, if a node is ready in a block schedule.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was ready, 0 if not.
 */
#define is_ready(env,irn) \
(pset_find_ptr((env)->ready_set, irn) != NULL)
/**
 * Check, if a node has already been scheduled.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was already scheduled, 0 if not.
 */
#define is_scheduled(env,irn) \
(pset_find_ptr((env)->already_scheduled, irn) != NULL)
/**
 * Try to make all users of a node ready.
 * In fact, a user can only be made ready if all of its operands
 * have already been scheduled; this is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
const ir_edge_t *edge;
foreach_out_edge(irn, edge) {
/* NOTE(review): direct field access; elsewhere in this file
 * get_edge_src_irn(edge) is used — consider the accessor here too. */
ir_node *user = edge->src;
make_ready(env, user);
/**
 * Compare two nodes using pointer equality.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 */
static int node_cmp_func(const void *p1, const void *p2)
/**
 * Append an instruction to a schedule.
 * Marks the node as scheduled and removes it from the ready set.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
/* If the node consumes/produces data, it is appended to the schedule
 * list, otherwise, it is not put into the list */
if(to_appear_in_schedule(irn)) {
sched_info_t *info = get_irn_sched_info(irn);
INIT_LIST_HEAD(&info->list);
sched_add_before(env->block, irn);
DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
/* Insert the node in the set of all already scheduled nodes. */
pset_insert_ptr(env->already_scheduled, irn);
/* Remove the node from the ready set */
/* NOTE(review): find-then-remove is a double lookup; if pset_remove_ptr
 * tolerates absent keys, the guard can be dropped — verify the API. */
if(pset_find_ptr(env->ready_set, irn))
pset_remove_ptr(env->ready_set, irn);
/**
 * Add the Proj nodes of a mode_T irn to the schedule immediately
 * after the mode_T irn itself. By pinning the Projs right after the irn,
 * no other node can create a new lifetime between the mode_T irn and
 * one of its Projs. This renders a realistic image of a mode_T irn,
 * which in fact models a node defining multiple values.
 *
 * Recurses for Projs that are themselves of mode_T (nested tuples).
 *
 * @param env The block scheduling environment.
 * @param irn The mode_T irn whose Projs are to be scheduled.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
const ir_edge_t *edge;
assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
foreach_out_edge(irn, edge) {
ir_node *out = edge->src;
assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* Nested tuple: schedule its Projs first as well. */
if(get_irn_mode(out) == mode_T)
add_tuple_projs(env, out);
add_to_sched(env, out);
make_users_ready(env, out);
/**
 * Perform list scheduling on a block.
 *
 * Note, that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
void *block_env = NULL;
sched_env_t *env = env_ptr;
block_sched_env_t be;
const list_sched_selector_t *selector = env->selector;
const ir_edge_t *edge;
sched_info_t *info = get_irn_sched_info(block);
/* Initialize the block's list head that will hold the schedule. */
INIT_LIST_HEAD(&info->list);
/* Initialize the block scheduling environment */
be.dbg = firm_dbg_register("firm.be.sched");
/* Size hint: the number of out edges of the block. */
be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
/* Debug output for this module is disabled here (mask 0). */
firm_dbg_set_mask(be.dbg, 0);
/* init_block is an optional selector callback. */
if(selector->init_block)
block_env = selector->init_block(env->selector_env, block);
DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
/* Then one can add all nodes that are ready to the set. */
foreach_out_edge(block, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Skip the end node because of keepalive edges. */
if(get_irn_opcode(irn) == iro_End)
/* Phi functions are scheduled immediately, since they only transfer
 * data flow from the predecessors to this block. */
add_to_sched(&be, irn);
make_users_ready(&be, irn);
/* Other nodes must have all operands in other blocks to be made
 * ready */
/* Check, if the operands of a node are not local to this block */
for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
ir_node *operand = get_irn_n(irn, j);
if(get_nodes_block(operand) == block) {
/* Make the node ready, if all operands live in a foreign block */
DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
make_ready(&be, irn);
/* Increase the time, if some phi functions have been scheduled */
be.curr_time += phi_seen;
/* Main loop: drain the ready set, one node per iteration. */
while(pset_count(be.ready_set) > 0) {
// DBG((be.dbg, LEVEL_2, "\tready set: %*n\n", pset_iterator, be.ready_set));
/* select a node to be scheduled and check if it was ready */
irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);
DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
/* Add the node to the schedule. */
add_to_sched(&be, irn);
/* mode_T nodes: pin their Projs directly after them. */
if(get_irn_mode(irn) == mode_T)
add_tuple_projs(&be, irn);
make_users_ready(&be, irn);
/* Increase the time step. */
/* remove the scheduled node from the ready list. */
if(pset_find_ptr(be.ready_set, irn))
pset_remove_ptr(be.ready_set, irn);
/* Let the selector clean up its per-block state. */
if(selector->finish_block)
selector->finish_block(env->selector_env, block_env, block);
del_pset(be.ready_set);
del_pset(be.already_scheduled);