/**
 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is here.
 *
 * @author Sebastian Hack
 */
20 #include "iredges_t.h"
25 #include "irprintf_t.h"
28 #include "besched_t.h"
31 #include "belistsched.h"
32 #include "bearch_firm.h"
/**
 * Scheduling environment for the whole graph.
 * Carries the per-graph state shared by all block schedulers.
 */
typedef struct _sched_env_t {
	const ir_graph *irg;                   /**< The graph to schedule. */
	const list_sched_selector_t *selector; /**< The node selector. */
	void *selector_env;                    /**< A pointer to give to the selector. */
/*
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 * NOTE(review): cmp_usage() below reads env->curr_bl instead of this
 * global — confirm which of the two is actually wired up.
 */
static ir_node *curr_bl = NULL;
/**
 * qsort(3) comparator for two ready nodes.
 * Orders nodes by whether they are live at the end of the current block,
 * so that a node live at the block end sorts after one that is not
 * (keeping its value alive as briefly as possible).
 * @param a First node (passed as const void*).
 * @param b Second node (passed as const void*).
 * @return Difference of the two liveness flags.
 */
static int cmp_usage(const void *a, const void *b)
	struct trivial_sched_env *env;

	/* is_live_end() yields 0/1, so the difference gives the ordering. */
	res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);

	/*
	 * One of them is live at the end of the block.
	 * Then, that one shall be scheduled after the other.
	 */
/**
 * The trivial selector: simply picks the first node of the ready set.
 * @param env        Graph-level selector environment (unused here).
 * @param block_env  Block-level selector environment (unused here).
 * @param sched_head Head of the schedule list built so far.
 * @param curr_time  Current scheduling time step.
 * @param ready_set  Set of nodes that are ready to be scheduled.
 * @return The selected node.
 */
static ir_node *trivial_select(void *env, void *block_env,
		const struct list_head *sched_head,
		int curr_time, pset *ready_set)
	int i, n = pset_count(ready_set);

	/* Stack-allocated snapshot of the ready set; sized by its count. */
	ir_node **ready = alloca(n * sizeof(ready[0]));

	for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))

	res = pset_first(ready_set);
	pset_break(ready_set); /* abort the pset iteration before returning */
/** The vtable instance implementing the trivial selector. */
static const list_sched_selector_t trivial_selector_struct = {
/** Exported handle to the trivial selector. */
const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
/* Forward declaration: per-block worker used by list_sched(). */
static void list_sched_block(ir_node *block, void *env_ptr);

/**
 * Perform list scheduling on a whole graph: initialize the selector,
 * schedule every block, then let the selector finish up.
 * @param irg      The graph to schedule.
 * @param selector The node selector to use.
 */
void list_sched(ir_graph *irg, const list_sched_selector_t *selector)
	memset(&env, 0, sizeof(env));
	env.selector = selector;
	/* Per-graph selector state is optional; init_graph may be NULL. */
	env.selector_env = selector->init_graph ? selector->init_graph(irg) : NULL;

	/* Assure, that the out edges are computed */

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if(selector->finish_graph)
		selector->finish_graph(env.selector_env, irg);
/**
 * Environment for a block scheduler.
 * Holds the per-block state while one block is being scheduled.
 */
typedef struct _block_sched_env_t {
	pset *already_scheduled;  /**< Set of all nodes already scheduled in this block. */
	firm_dbg_module_t *dbg;   /**< Debug module handle for scheduler output. */
/**
 * Try to put a node in the ready set.
 * A node becomes ready only when all of its block-local operands have
 * already been scheduled.
 * @param env The block scheduler environment.
 * @param irn The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
	/* Blocks cannot be scheduled. */

	/*
	 * Check, if the given ir node is in a different block as the
	 * currently scheduled one. If that is so, don't make the node ready.
	 */
	if(env->block != get_nodes_block(irn))

	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)

	DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
	pset_insert_ptr(env->ready_set, irn);
/**
 * Check, if a node is ready in a block schedule.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was ready, 0 if not.
 */
#define is_ready(env,irn) \
	(pset_find_ptr((env)->ready_set, (irn)) != NULL)
/**
 * Check, if a node has already been scheduled.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was already scheduled, 0 if not.
 */
#define is_scheduled(env,irn) \
	(pset_find_ptr((env)->already_scheduled, (irn)) != NULL)
/**
 * Try, to make all users of a node ready.
 * In fact, a usage node can only be made ready, if all its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node, whose usages (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		/* NOTE(review): list_sched_block() uses get_edge_src_irn(edge)
		 * for the same purpose — verify direct edge->src access is
		 * equivalent/intended. */
		ir_node *user = edge->src;

		make_ready(env, user);
/**
 * Compare two nodes using pointer equality.
 * Used as the pset comparison function for the ready/scheduled sets.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 */
static int node_cmp_func(const void *p1, const void *p2)
/**
 * Append an instruction to a schedule.
 * Also records the node as scheduled and drops it from the ready set.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise, it is not put into the list */
	if(to_appear_in_schedule(irn)) {
		sched_info_t *info = get_irn_sched_info(irn);
		INIT_LIST_HEAD(&info->list);

		/* Inserting before the block head appends to the block's schedule. */
		sched_add_before(env->block, irn);

		DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));

	/* Insert the node in the set of all already scheduled nodes. */
	pset_insert_ptr(env->already_scheduled, irn);

	/* Remove the node from the ready set */
	if(pset_find_ptr(env->ready_set, irn))
		pset_remove_ptr(env->ready_set, irn);
/**
 * Add the proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-mode irn and
 * one of its projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node which defines multiple
 * results.
 *
 * (NOTE(review): the previous doc listed list/time/obst/ready_set/
 * already_scheduled parameters, which no longer match the signature;
 * that state now lives in env.)
 *
 * @param env The block scheduling environment.
 * @param irn The tuple-mode irn whose Projs are to be scheduled.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	foreach_out_edge(irn, edge) {
		ir_node *out = edge->src;

		assert(is_Proj(out) && "successor of a modeT node must be a proj");

		/* Recurse first so nested tuples get their Projs pinned too. */
		if(get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);

		add_to_sched(env, out);
		make_users_ready(env, out);
/**
 * Perform list scheduling on a block.
 *
 * Note, that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
	void *block_env = NULL;
	sched_env_t *env = env_ptr;
	block_sched_env_t be;
	const list_sched_selector_t *selector = env->selector;
	const ir_edge_t *edge;

	sched_info_t *info = get_irn_sched_info(block);

	/* Initialize the block's list head that will hold the schedule. */
	INIT_LIST_HEAD(&info->list);

	/* Initialize the block scheduling environment */
	be.dbg = firm_dbg_register("firm.be.sched");
	/* Size both psets by the block's out-edge count as a capacity hint. */
	be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
	be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));

	firm_dbg_set_mask(be.dbg, 0); /* debug output off by default */

	if(selector->init_block)
		block_env = selector->init_block(env->selector_env, block);

	DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));

	/* Then one can add all nodes that are ready to the set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the end node because of keepalive edges. */
		if(get_irn_opcode(irn) == iro_End)

		/* Phi functions are scheduled immediately, since they only transfer
		 * data flow from the predecessors to this block. */
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);

		/* Other nodes must have all operands in other blocks to be made
		 * ready */

			/* Check, if the operands of a node are not local to this block */
			for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
				ir_node *operand = get_irn_n(irn, j);

				if(get_nodes_block(operand) == block) {

			/* Make the node ready, if all operands live in a foreign block */
				DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, irn);

	/* Increase the time, if some phi functions have been scheduled */
	be.curr_time += phi_seen;

	/* Main loop: drain the ready set, one node per iteration. */
	while(pset_count(be.ready_set) > 0) {
		// DBG((be.dbg, LEVEL_2, "\tready set: %*n\n", pset_iterator, be.ready_set));

		/* select a node to be scheduled and check if it was ready */
		irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);

		DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		/* Tuple nodes get their Projs pinned directly behind them. */
		if(get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);

		make_users_ready(&be, irn);

		/* Increase the time step. */

		/* remove the scheduled node from the ready list. */
		if(pset_find_ptr(be.ready_set, irn))
			pset_remove_ptr(be.ready_set, irn);

	if(selector->finish_block)
		selector->finish_block(env->selector_env, block_env, block);

	del_pset(be.ready_set);
	del_pset(be.already_scheduled);
/**
 * Graph walker callback: move a node into the block of its single user
 * and insert it into the schedule before that user's position.
 * Asserts (and prints a diagnostic) when the node has more than one out
 * edge. NOTE(review): parts of this function are not visible in this
 * excerpt (e.g. how `user` is derived from the edge) — verify before
 * relying on these details.
 * @param irn The visited node.
 * @param env Walker environment (unused).
 */
static void imm_scheduler(ir_node *irn, void *env) {
	ir_node *user, *user_block, *before, *tgt_block;

	if (1 != get_irn_n_edges(irn)) {
		printf("Out edges: %d\n", get_irn_n_edges(irn));
		assert(1 == get_irn_n_edges(irn));

	e = get_irn_out_edge_first(irn);
	user_block = get_nodes_block(user);

	/* Determined from the user's control-flow predecessor at the edge position. */
	before = get_Block_cfgpred_block(user_block, e->pos);

	tgt_block = user_block;

	set_nodes_block(irn, tgt_block);
	sched_add_before(before, irn);
/**
 * Run imm_scheduler() over every node of the graph, pinning each
 * single-user node next to its user.
 * @param irg The graph to process.
 */
void be_sched_imm(ir_graph *irg) {
	irg_walk_graph(irg, imm_scheduler, NULL, NULL);