2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
20 #include "iredges_t.h"
25 #include "irprintf_t.h"
28 #include "besched_t.h"
31 #include "belistsched.h"
32 #include "firm/bearch_firm.h"
/**
 * Scheduling environment for the whole graph.
 * Handed to the block walker and to the selector callbacks.
 */
typedef struct _sched_env_t {
const ir_graph *irg; /**< The graph to schedule. */
const list_sched_selector_t *selector; /**< The node selector. */
void *selector_env; /**< A pointer to give to the selector. */
/*
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 * NOTE(review): this makes the comparison callback non-reentrant.
 */
static ir_node *curr_bl = NULL;
/**
 * qsort(3) comparison callback ordering two ready nodes by their
 * liveness at the end of the current block.
 * @param a The first node (qsort passes void pointers).
 * @param b The second node.
 * @return A value <0, 0 or >0 imposing the desired ordering.
 */
static int cmp_usage(const void *a, const void *b)
struct trivial_sched_env *env;
/* Difference of the two liveness flags: nonzero iff exactly one of
 * the nodes is live at the end of the block. */
res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
/*
 * One of them is live at the end of the block.
 * Then, that one shall be scheduled after the other.
 */
/**
 * The trivial selector: simply picks the first node of the ready set.
 * @param env        The graph-wide selector environment (unused here).
 * @param block_env  The per-block selector environment.
 * @param sched_head Head of the schedule list built so far.
 * @param curr_time  The current scheduling time step.
 * @param ready_set  The set of nodes currently ready to be scheduled.
 * @return The selected node.
 */
static ir_node *trivial_select(void *env, void *block_env,
const struct list_head *sched_head,
int curr_time, pset *ready_set)
int i, n = pset_count(ready_set);
/* NOTE(review): alloca() of n pointers — unbounded stack usage for
 * very large ready sets; fine for typical block sizes. */
ir_node **ready = alloca(n * sizeof(ready[0]));
/* Copy the ready set into the array. */
for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))
/* Take the first element of the ready set as the result. */
res = pset_first(ready_set);
pset_break(ready_set); /* Abort the pset iteration before returning. */
/**
 * The trivial selector instance, exported through the pointer below.
 */
static const list_sched_selector_t trivial_selector_struct = {
const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
static void list_sched_block(ir_node *block, void *env_ptr);

/**
 * Perform list scheduling on a whole graph: set up the graph-wide
 * environment, obtain the selector from the ISA, schedule every block
 * and let the selector tear down its state afterwards.
 * @param isa The backend ISA providing the node selector.
 * @param irg The graph to schedule.
 */
void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
const list_sched_selector_t *selector;
memset(&env, 0, sizeof(env));
selector = env.selector = isa->impl->get_list_sched_selector(isa);
/* Let the selector set up graph-wide state, if it has any. */
env.selector_env = selector->init_graph ? selector->init_graph(isa, irg) : NULL;
/* Assure, that the out edges are computed */
/* Schedule each single block. */
irg_block_walk_graph(irg, list_sched_block, NULL, &env);
/* Give the selector a chance to free its graph-wide state. */
if(selector->finish_graph)
selector->finish_graph(env.selector_env, irg);
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
pset *already_scheduled; /**< Set of nodes already put into the schedule. */
firm_dbg_module_t *dbg; /**< Debug module for scheduler messages. */
/**
 * Try to put a node in the ready set.
 * A node can only become ready if it lives in the current block and
 * all of its block-local operands have already been scheduled.
 * @param env The block scheduler environment.
 * @param irn The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
/* Blocks cannot be scheduled. */
/*
 * Check, if the given ir node is in a different block than the
 * currently scheduled one. If that is so, don't make the node ready.
 */
if(env->block != get_nodes_block(irn))
for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
ir_node *op = get_irn_n(irn, i);
/* If the operand is local to the scheduled block and not yet
 * scheduled, this node cannot be made ready, so exit. */
if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)
DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
pset_insert_ptr(env->ready_set, irn);
/**
 * Check, if a node is ready in a block schedule.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was ready, 0 if not.
 */
#define is_ready(env,irn) \
(pset_find_ptr((env)->ready_set, irn) != NULL)
/**
 * Check, if a node has already been scheduled.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was already scheduled, 0 if not.
 */
#define is_scheduled(env,irn) \
(pset_find_ptr((env)->already_scheduled, irn) != NULL)
/**
 * Try to make all users of a node ready.
 * In fact, a usage node can only be made ready, if all its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node, which usages (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
const ir_edge_t *edge;
/* Walk all out edges and offer each user to make_ready(). */
foreach_out_edge(irn, edge) {
ir_node *user = edge->src;
make_ready(env, user);
/**
 * Compare two nodes using pointer equality (pset comparison callback).
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 */
static int node_cmp_func(const void *p1, const void *p2)
/**
 * Append an instruction to a schedule.
 * Also records the node as scheduled and removes it from the ready set.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
/* If the node consumes/produces data, it is appended to the schedule
 * list, otherwise, it is not put into the list */
if(to_appear_in_schedule(irn)) {
sched_info_t *info = get_irn_sched_info(irn);
INIT_LIST_HEAD(&info->list);
sched_add_before(env->block, irn);
DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
/* Insert the node in the set of all already scheduled nodes. */
pset_insert_ptr(env->already_scheduled, irn);
/* Remove the node from the ready set */
if(pset_find_ptr(env->ready_set, irn))
pset_remove_ptr(env->ready_set, irn);
/**
 * Add the Proj nodes of a mode_T irn to the schedule immediately
 * after the mode_T irn itself. By pinning the Projs after the irn, no
 * other nodes can create a new lifetime between the mode_T irn and
 * one of its Projs. This should render a realistic image of a
 * mode_T irn, which in fact models a node which defines multiple
 * values. Recurses for Projs that are themselves of mode_T.
 * @param env The block scheduling environment.
 * @param irn The mode_T irn whose Projs are to be scheduled.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
const ir_edge_t *edge;
assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
foreach_out_edge(irn, edge) {
ir_node *out = edge->src;
assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* Handle nested tuples first, so their Projs are pinned too. */
if(get_irn_mode(out) == mode_T)
add_tuple_projs(env, out);
add_to_sched(env, out);
make_users_ready(env, out);
/**
 * Perform list scheduling on a block.
 *
 * Note, that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
void *block_env = NULL;
sched_env_t *env = env_ptr;
block_sched_env_t be;
const list_sched_selector_t *selector = env->selector;
const ir_edge_t *edge;
sched_info_t *info = get_irn_sched_info(block);
/* Initialize the block's list head that will hold the schedule. */
INIT_LIST_HEAD(&info->list);
/* Initialize the block scheduling environment */
be.dbg = firm_dbg_register("firm.be.sched");
/* Both psets are sized by the block's out-edge count as an estimate. */
be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
firm_dbg_set_mask(be.dbg, 0);
/* Let the selector set up per-block state, if it wants any. */
if(selector->init_block)
block_env = selector->init_block(env->selector_env, block);
DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
/* Then one can add all nodes that are ready to the set. */
foreach_out_edge(block, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Skip the end node because of keepalive edges. */
if(get_irn_opcode(irn) == iro_End)
/* Phi functions are scheduled immediately, since they only transfer
 * data flow from the predecessors to this block. */
add_to_sched(&be, irn);
make_users_ready(&be, irn);
/* Other nodes must have all operands in other blocks to be made
/* Check, if the operands of a node are not local to this block */
for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
ir_node *operand = get_irn_n(irn, j);
if(get_nodes_block(operand) == block) {
/* Make the node ready, if all operands live in a foreign block */
DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
make_ready(&be, irn);
/* Increase the time, if some phi functions have been scheduled */
be.curr_time += phi_seen;
/* Main loop: keep picking nodes until the ready set drains. */
while(pset_count(be.ready_set) > 0) {
// DBG((be.dbg, LEVEL_2, "\tready set: %*n\n", pset_iterator, be.ready_set));
/* select a node to be scheduled and check if it was ready */
irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);
DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
/* Add the node to the schedule. */
add_to_sched(&be, irn);
/* Pin the Projs of a mode_T node right behind it. */
if(get_irn_mode(irn) == mode_T)
add_tuple_projs(&be, irn);
make_users_ready(&be, irn);
/* Increase the time step. */
/* remove the scheduled node from the ready list. */
if(pset_find_ptr(be.ready_set, irn))
pset_remove_ptr(be.ready_set, irn);
/* Let the selector tear down its per-block state. */
if(selector->finish_block)
selector->finish_block(env->selector_env, block_env, block);
del_pset(be.ready_set);
del_pset(be.already_scheduled);