2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
20 #include "iredges_t.h"
25 #include "irprintf_t.h"
28 #include "besched_t.h"
31 #include "belistsched.h"
32 #include "bearch_firm.h"
/**
 * Scheduling environment for the whole graph.
 * NOTE(review): lines are elided in this excerpt; the struct's closing
 * "} sched_env_t;" is not visible here.
 */
typedef struct _sched_env_t {
	const ir_graph *irg;                   /**< The graph to schedule. */
	const list_sched_selector_t *selector; /**< The node selector. */
	void *selector_env;                    /**< A pointer to give to the selector. */
/*
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 * NOTE(review): the code that sets/reads this global is not visible in
 * this excerpt — presumably the qsort comparator below; confirm.
 */
static ir_node *curr_bl = NULL;
/**
 * qsort(3) comparator for ready nodes, ordering them by whether they are
 * live at the end of the current block.
 * NOTE(review): several lines are elided in this excerpt (opening brace,
 * initialization of env/res, return statement).
 */
static int cmp_usage(const void *a, const void *b)
	struct trivial_sched_env *env;
	/* Difference of the live-at-block-end flags decides the order. */
	res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
	/*
	 * One of them is live at the end of the block.
	 * Then, that one shall be scheduled after the other.
	 */
/**
 * The trivial selector: pick a node from the ready set.
 * NOTE(review): lines are elided in this excerpt (declarations of irn/res,
 * the loop body filling 'ready', and the return statement).
 */
static ir_node *trivial_select(void *env, void *block_env,
		const struct list_head *sched_head,
		int curr_time, pset *ready_set)
	int i, n = pset_count(ready_set);
	/* NOTE(review): alloca() of n pointers — stack-overflow risk for very
	 * large ready sets; VLAs/heap would be safer. */
	ir_node **ready = alloca(n * sizeof(ready[0]));

	/* Snapshot the ready set into the local array. */
	for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))

	/* Take the first ready node and stop the pset iteration. */
	res = pset_first(ready_set);
	pset_break(ready_set);
/** Vtable of the trivial selector (initializer elided in this excerpt). */
static const list_sched_selector_t trivial_selector_struct = {

/** Exported handle to the trivial selector. */
const list_sched_selector_t *trivial_selector = &trivial_selector_struct;

/* Forward declaration: per-block worker used by list_sched(). */
static void list_sched_block(ir_node *block, void *env_ptr);
/**
 * List-schedule a whole graph: set up the selector, schedule each block,
 * then let the selector finish up.
 * @param irg      The graph to schedule.
 * @param selector The node selector to use.
 * NOTE(review): the declaration of 'env' and the call assuring out edges
 * are elided in this excerpt.
 */
void list_sched(ir_graph *irg, const list_sched_selector_t *selector)
	memset(&env, 0, sizeof(env));
	env.selector = selector;
	/* init_graph is optional in the selector vtable. */
	env.selector_env = selector->init_graph ? selector->init_graph(irg) : NULL;

	/* Assure, that the out edges are computed */

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	/* finish_graph is optional as well. */
	if(selector->finish_graph)
		selector->finish_graph(env.selector_env, irg);
/**
 * Environment for a block scheduler.
 * NOTE(review): several fields (block, curr_time, ready_set, …) are
 * elided in this excerpt.
 */
typedef struct _block_sched_env_t {
	pset *already_scheduled;  /**< Set of nodes already scheduled in this block. */
	firm_dbg_module_t *dbg;   /**< Debug module handle used by the DBG() macros. */
/**
 * Try to put a node in the ready set.
 * A node is ready when all of its operands that live in the current
 * block have already been scheduled.
 * @param env The block scheduler environment.
 * @param irn The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 * NOTE(review): the early-return/exit lines and the final return are
 * elided in this excerpt.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
	/* Blocks cannot be scheduled. */

	/*
	 * Check, if the given ir node is in a different block than the
	 * currently scheduled one. If that is so, don't make the node ready.
	 */
	if(env->block != get_nodes_block(irn))

	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)

	DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
	pset_insert_ptr(env->ready_set, irn);
/**
 * Query whether a node currently sits in the block's ready set.
 * @param env The block schedule environment.
 * @param irn The node to look up.
 * @return 1 if the node is ready, 0 otherwise.
 */
#define is_ready(env,irn) \
	(pset_find_ptr((env)->ready_set, irn) ? 1 : 0)
/**
 * Query whether a node has already been scheduled in this block.
 * @param env The block schedule environment.
 * @param irn The node to look up.
 * @return 1 if the node has been scheduled, 0 otherwise.
 */
#define is_scheduled(env,irn) \
	(pset_find_ptr((env)->already_scheduled, irn) ? 1 : 0)
/**
 * Try to make all users of a node ready.
 * In fact, a usage node can only be made ready if all its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose usages (successors) are to be made ready.
 * NOTE(review): closing braces are elided in this excerpt.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
	const ir_edge_t *edge;

	/* Walk all out edges: each edge source is a user of irn. */
	foreach_out_edge(irn, edge) {
		ir_node *user = edge->src;
		make_ready(env, user);
/**
 * Compare two nodes using pointer equality.
 * Used as the pset hash-set comparison function.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 * NOTE(review): the function body is elided in this excerpt.
 */
static int node_cmp_func(const void *p1, const void *p2)
/**
 * Append an instruction to a schedule.
 * Also records the node as scheduled and drops it from the ready set.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 * NOTE(review): closing braces and the return statement are elided in
 * this excerpt.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise, it is not put into the list */
	if(to_appear_in_schedule(irn)) {
		sched_info_t *info = get_irn_sched_info(irn);
		INIT_LIST_HEAD(&info->list);

		/* Unknown nodes must never make it into a schedule. */
		assert(get_irn_opcode(irn) != iro_Unknown && "'Unknown' in schedule!");
		sched_add_before(env->block, irn);

		DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));

	/* Insert the node in the set of all already scheduled nodes. */
	pset_insert_ptr(env->already_scheduled, irn);

	/* Remove the node from the ready set */
	if(pset_find_ptr(env->ready_set, irn))
		pset_remove_ptr(env->ready_set, irn);
/**
 * Add the proj nodes of a mode_T (tuple) irn to the schedule immediately
 * after the tuple irn itself. By pinning the projs right after the irn,
 * no other node can create a new lifetime between the tuple irn and one
 * of its projs. This renders a realistic image of a tuple irn, which in
 * fact models a node defining multiple values.
 *
 * @param env The block scheduling environment.
 * @param irn The mode_T irn whose projs shall be scheduled.
 * NOTE(review): closing braces are elided in this excerpt.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	foreach_out_edge(irn, edge) {
		ir_node *out = edge->src;

		assert(is_Proj(out) && "successor of a modeT node must be a proj");

		/* Nested tuples: recurse so inner projs are pinned first. */
		if(get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);

		add_to_sched(env, out);
		make_users_ready(env, out);
/**
 * Perform list scheduling on a block.
 *
 * Note, that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env Scheduling environment.
 * NOTE(review): many lines (declarations of irn/j/m/phi_seen, else
 * branches, continue statements, closing braces) are elided in this
 * excerpt.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
	void *block_env = NULL;
	sched_env_t *env = env_ptr;
	block_sched_env_t be;
	const list_sched_selector_t *selector = env->selector;
	const ir_edge_t *edge;

	sched_info_t *info = get_irn_sched_info(block);

	/* Initialize the block's list head that will hold the schedule. */
	INIT_LIST_HEAD(&info->list);

	/* Initialize the block scheduling environment */
	be.dbg = firm_dbg_register("firm.be.sched");
	be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
	be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));

	/* Debug output disabled by default. */
	firm_dbg_set_mask(be.dbg, 0);

	/* init_block is optional in the selector vtable. */
	if(selector->init_block)
		block_env = selector->init_block(env->selector_env, block);

	DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));

	/* Then one can add all nodes that are ready to the set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the end node because of keepalive edges. */
		if(get_irn_opcode(irn) == iro_End)

		/* Phi functions are scheduled immediately, since they only transfer
		 * data flow from the predecessors to this block. */
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);

		/* Other nodes must have all operands in other blocks to be made
		 */

			/* Check, if the operands of a node are not local to this block */
			for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
				ir_node *operand = get_irn_n(irn, j);

				if(get_nodes_block(operand) == block) {

			/* Make the node ready, if all operands live in a foreign block */
				DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, irn);

	/* Increase the time, if some phi functions have been scheduled */
	be.curr_time += phi_seen;

	/* Main loop: pick ready nodes until none are left. */
	while(pset_count(be.ready_set) > 0) {
		// DBG((be.dbg, LEVEL_2, "\tready set: %*n\n", pset_iterator, be.ready_set));

		/* select a node to be scheduled and check if it was ready */
		irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);

		DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		/* Tuple nodes get their projs pinned right behind them. */
		if(get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);

		make_users_ready(&be, irn);

		/* Increase the time step. */

		/* remove the scheduled node from the ready list. */
		if(pset_find_ptr(be.ready_set, irn))
			pset_remove_ptr(be.ready_set, irn);

	/* finish_block is optional in the selector vtable. */
	if(selector->finish_block)
		selector->finish_block(env->selector_env, block_env, block);

	/* Release the per-block sets. */
	del_pset(be.ready_set);
	del_pset(be.already_scheduled);
/**
 * Graph-walker callback: move an "immediate" node into the block of its
 * (single?) user and schedule it right before a suitable node there.
 * NOTE(review): the declaration of 'e', the assignment of 'user', and the
 * surrounding conditionals are elided in this excerpt — the exact
 * placement logic cannot be confirmed from this view.
 */
static void imm_scheduler(ir_node *irn, void *env) {
	ir_node *user, *user_block, *before, *tgt_block;

	// assert(1 == get_irn_n_edges(irn)); why is this wrong ?

	e = get_irn_out_edge_first(irn);
	user_block = get_nodes_block(user);

	/* For a Phi user, the value must be available at the end of the
	 * corresponding predecessor block (looked up via the edge position) —
	 * presumably; confirm against the elided branch. */
	before = get_Block_cfgpred_block(user_block, e->pos);

	tgt_block = user_block;

	/* Move the node and pin it in the schedule. */
	set_nodes_block(irn, tgt_block);
	sched_add_before(before, irn);
/**
 * Walk the whole graph and place immediate nodes via imm_scheduler().
 * @param irg The graph to process.
 * NOTE(review): the closing brace falls outside this excerpt.
 */
void be_sched_imm(ir_graph *irg) {
	irg_walk_graph(irg, imm_scheduler, NULL, NULL);