2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
25 #include "iredges_t.h"
30 #include "irprintf_t.h"
35 #include "besched_t.h"
38 #include "belistsched.h"
39 #include "beschedmris.h"
44 * All scheduling info needed per node, kept in a flat array indexed by node index.
46 typedef struct _sched_irn_t {
47 unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
48 unsigned already_sched : 1; /**< Set if this node is already scheduled */
52 * Scheduling environment for the whole graph.
54 typedef struct _sched_env_t {
55 sched_irn_t *sched_info; /**< scheduling info per node, indexed by node index */
56 const list_sched_selector_t *selector; /**< The node selector. */
57 const arch_env_t *arch_env; /**< The architecture environment. */
58 const ir_graph *irg; /**< The graph to schedule. */
59 void *selector_env; /**< Selector private state, returned by selector->init_graph(). */
63 * Environment for a block scheduler.
65 typedef struct _block_sched_env_t {
66 sched_irn_t *sched_info; /**< scheduling info per node, copied from the global scheduler object */
67 nodeset *cands; /**< the set of candidates (ready nodes) */
68 ir_node *block; /**< the current block */
69 sched_env_t *sched_env; /**< the (graph-wide) scheduler environment */
70 nodeset *live; /**< simple liveness during scheduling, used to track register pressure */
71 const list_sched_selector_t *selector; /**< the node selector used for this block */
72 void *selector_block_env; /**< selector private per-block state, from selector->init_block() */
73 DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< debug module, only present in debug builds */
77 * Returns non-zero if the node @p n has already been scheduled.
79 static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
81 int idx = get_irn_idx(n);
83 assert(idx < ARR_LEN(env->sched_info));
/* already_sched is a 1-bit flag set by mark_already_scheduled() */
84 return env->sched_info[idx].already_sched;
88 * Mark the node @p n as already scheduled.
90 static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
92 int idx = get_irn_idx(n);
94 assert(idx < ARR_LEN(env->sched_info));
95 env->sched_info[idx].already_sched = 1;
99 * Try to put a node in the ready set.
100 * @param env The block scheduler environment.
101 * @param pred The previously scheduled node.
102 * @param irn The node to make ready.
103 * @return 1, if the node could be made ready, 0 else.
105 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
109 /* Blocks cannot be scheduled. */
114 * Check if the given ir node is in a different block than the
115 * currently scheduled one. If that is so, don't make the node ready.
117 if (env->block != get_nodes_block(irn))
/* a node is ready only when all its operands (incl. dependency edges)
 * from the current block have been scheduled */
120 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
121 ir_node *op = get_irn_in_or_dep(irn, i);
123 /* if irn is an End we have keep-alives and op might be a block, skip that */
125 assert(get_irn_op(irn) == op_End);
129 /* If the operand is local to the scheduled block and not yet
130 * scheduled, this node cannot be made ready, so exit. */
131 if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
/* all local operands scheduled: add the node to the candidate set */
135 nodeset_insert(env->cands, irn);
137 /* Notify selector about the ready node. */
138 if (env->selector->node_ready)
139 env->selector->node_ready(env->selector_block_env, irn, pred);
141 DB((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
147 * Try to make all users of a node ready.
148 * In fact, a user node can only be made ready if all its operands
149 * have already been scheduled. This is checked by make_ready().
150 * @param env The block schedule environment.
151 * @param irn The node whose users (successors) are to be made ready.
153 static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
155 const ir_edge_t *edge;
/* first try the users reached via normal out edges ... */
157 foreach_out_edge(irn, edge) {
158 ir_node *user = get_edge_src_irn(edge);
160 make_ready(env, irn, user);
/* ... then those reached via dependency edges */
163 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
164 ir_node *user = get_edge_src_irn(edge);
166 make_ready(env, irn, user);
171 * Returns the number of not yet scheduled users of node @p n.
173 static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
174 int idx = get_irn_idx(n);
176 assert(idx < ARR_LEN(env->sched_info));
177 return env->sched_info[idx].num_not_sched_user;
181 * Sets the number of not yet scheduled users of node @p n to @p num.
183 static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
184 int idx = get_irn_idx(n);
186 assert(idx < ARR_LEN(env->sched_info));
187 env->sched_info[idx].num_not_sched_user = num;
191 * Add @p num to the number of not yet scheduled users of node @p n and return the result.
 * @p num may be negative (used to decrement the counter when a user gets scheduled).
193 static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
194 int idx = get_irn_idx(n);
196 assert(idx < ARR_LEN(env->sched_info));
197 env->sched_info[idx].num_not_sched_user += num;
198 return env->sched_info[idx].num_not_sched_user;
202 * Returns the number of users of a node having mode datab.
204 static int get_num_successors(ir_node *irn) {
206 const ir_edge_t *edge;
208 if (get_irn_mode(irn) == mode_T) {
209 /* for mode_T nodes: count the users of all Projs */
210 foreach_out_edge(irn, edge) {
211 ir_node *proj = get_edge_src_irn(edge);
212 ir_mode *mode = get_irn_mode(proj);
/* nested tuple: recurse into the Proj's own Projs */
215 sum += get_num_successors(proj);
216 else if (mode_is_datab(mode))
217 sum += get_irn_n_edges(proj);
/* non-tuple node: count users directly */
221 /* do not count keep-alive edges */
222 foreach_out_edge(irn, edge) {
223 if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
232 * Adds irn to @p live, records at all of its inputs that this user got scheduled
233 * and counts all of its not yet scheduled users.
235 static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
/* decrement the pending-user count of every operand; operands whose
 * last user gets scheduled die here and leave the live set */
242 for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
243 ir_node *in = get_irn_in_or_dep(irn, i);
245 /* if in is a proj: update its predecessor (the real value producer) */
247 in = get_Proj_pred(in);
249 /* if in is still in the live set: reduce number of users by one */
250 if (nodeset_find(env->live, in)) {
251 if (add_irn_not_sched_user(env, in, -1) <= 0)
252 nodeset_remove(env->live, in);
257 get_num_successors returns the number of all users. This includes
258 users in different blocks as well. As each block is scheduled separately
259 the liveness info of those users will not be updated and so these
260 users will keep up the register pressure as it is desired.
262 i = get_num_successors(irn);
264 set_irn_not_sched_user(env, irn, i);
265 nodeset_insert(env->live, irn);
/* Returns non-zero if @p irn must be kept in the schedule list.
 * The selector is asked first; a negative answer means "no opinion"
 * and the generic predicate plus the backend Keep/CopyKeep/RegParams
 * special cases decide. */
269 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
273 if(sel->to_appear_in_schedule)
274 res = sel->to_appear_in_schedule(block_env, irn);
276 return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_RegParams(irn));
280 * Append an instruction to a schedule.
281 * @param env The block scheduling environment.
282 * @param irn The node to add to the schedule.
283 * @return The given node.
285 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
287 /* If the node consumes/produces data, it is appended to the schedule
288 * list, otherwise, it is not put into the list */
289 if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
290 sched_info_t *info = get_irn_sched_info(irn);
291 INIT_LIST_HEAD(&info->list);
/* update liveness/pressure bookkeeping before linking the node in */
293 update_sched_liveness(env, irn);
294 sched_add_before(env->block, irn);
296 DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
299 /* notify the selector about the finally selected node. */
300 if (env->selector->node_selected)
301 env->selector->node_selected(env->selector_block_env, irn);
303 /* Insert the node in the set of all already scheduled nodes. */
304 mark_already_scheduled(env, irn);
306 /* Remove the node from the ready set */
307 if(nodeset_find(env->cands, irn))
308 nodeset_remove(env->cands, irn);
314 * Add the proj nodes of a tuple-mode irn to the schedule immediately
315 * after the tuple-moded irn. By pinning the projs after the irn, no
316 * other nodes can create a new lifetime between the tuple-moded irn and
317 * one of its projs. This should render a realistic image of a
318 * tuple-moded irn, which in fact models a node which defines multiple
321 * @param irn The tuple-moded irn.
323 static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
325 const ir_edge_t *edge;
327 assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
333 /* non-proj nodes can have dependency edges to tuple nodes. */
334 foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
335 ir_node *out = get_edge_src_irn(edge);
336 make_ready(env, irn, out);
339 /* schedule the normal projs */
340 foreach_out_edge(irn, edge) {
341 ir_node *out = get_edge_src_irn(edge);
343 assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* a Proj may itself be a tuple (nested mode_T): recurse to pin its Projs too */
345 if (get_irn_mode(out) == mode_T)
346 add_tuple_projs(env, out);
348 add_to_sched(env, out);
349 make_users_ready(env, out);
355 * Perform list scheduling on a block.
357 * Note, that the caller must compute a linked list of nodes in the block
358 * using the link field before calling this function.
360 * Also the outs must have been computed.
362 * @param block The block node.
363 * @param env Scheduling environment.
365 static void list_sched_block(ir_node *block, void *env_ptr)
367 sched_env_t *env = env_ptr;
368 const list_sched_selector_t *selector = env->selector;
369 ir_node *start_node = get_irg_start(get_irn_irg(block));
370 sched_info_t *info = get_irn_sched_info(block);
372 block_sched_env_t be;
373 const ir_edge_t *edge;
377 /* Initialize the block's list head that will hold the schedule. */
378 INIT_LIST_HEAD(&info->list);
380 /* Initialize the block scheduling environment */
381 be.sched_info = env->sched_info;
/* size the sets by the block's edge count as a capacity heuristic */
383 be.cands = new_nodeset(get_irn_n_edges(block));
384 be.live = new_nodeset(get_irn_n_edges(block));
385 be.selector = selector;
387 FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
389 DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
391 if (selector->init_block)
392 be.selector_block_env = selector->init_block(env->selector_env, block);
394 /* Then one can add all nodes that are ready to the set. */
395 foreach_out_edge(block, edge) {
396 ir_node *irn = get_edge_src_irn(edge);
398 /* Skip the end node because of keepalive edges. */
399 if (get_irn_opcode(irn) == iro_End)
404 Phi functions are scheduled immediately, since they only
405 transfer data flow from the predecessors to this block.
407 add_to_sched(&be, irn);
408 make_users_ready(&be, irn);
410 else if (irn == start_node) {
411 /* The start block will be scheduled as the first node */
412 add_to_sched(&be, irn);
413 add_tuple_projs(&be, irn);
416 /* Other nodes must have all operands in other blocks to be made
420 /* Check, if the operands of a node are not local to this block */
421 for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
422 ir_node *operand = get_irn_in_or_dep(irn, j);
424 if (get_nodes_block(operand) == block) {
429 /* live in values increase register pressure */
430 update_sched_liveness(&be, operand);
434 /* Make the node ready, if all operands live in a foreign block */
436 DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
437 make_ready(&be, NULL, irn);
442 /* Iterate over all remaining nodes */
443 while (nodeset_count(be.cands) > 0) {
444 /* collect statistics about amount of ready nodes */
445 be_do_stat_sched_ready(block, be.cands);
447 /* Keeps must be scheduled immediately */
448 foreach_nodeset(be.cands, irn) {
449 if (be_is_Keep(irn) || be_is_CopyKeep(irn)) {
450 nodeset_break(be.cands);
456 /* otherwise ask the selector to pick the next node from the candidates */
457 irn = be.selector->select(be.selector_block_env, be.cands, be.live);
460 DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));
462 /* Add the node to the schedule. */
463 add_to_sched(&be, irn);
465 if (get_irn_mode(irn) == mode_T)
466 add_tuple_projs(&be, irn);
/* scheduling irn may make its users ready */
468 make_users_ready(&be, irn);
470 /* remove the scheduled node from the ready list. */
471 if (nodeset_find(be.cands, irn))
472 nodeset_remove(be.cands, irn);
/* let the selector tear down its per-block state, then free the sets */
475 if (selector->finish_block)
476 selector->finish_block(be.selector_block_env);
478 del_nodeset(be.cands);
479 del_nodeset(be.live);
482 /* List schedule a graph. */
483 void list_sched(const be_irg_t *birg, be_options_t *be_opts)
485 const arch_env_t *arch_env = birg->main_env->arch_env;
486 ir_graph *irg = birg->irg;
490 mris_env_t *mris = NULL;
491 list_sched_selector_t sel;
493 /* Select a scheduler based on backend options */
494 switch (be_opts->sched_select) {
495 case BE_SCHED_SELECT_TRIVIAL:
496 memcpy(&sel, trivial_selector, sizeof(sel));
498 case BE_SCHED_SELECT_REGPRESS:
499 memcpy(&sel, reg_pressure_selector, sizeof(sel));
501 case BE_SCHED_SELECT_MUCHNIK:
502 memcpy(&sel, muchnik_selector, sizeof(sel));
504 case BE_SCHED_SELECT_HEUR:
505 memcpy(&sel, heuristic_selector, sizeof(sel));
507 case BE_SCHED_SELECT_HMUCHNIK:
/* fallback: use the trivial selector */
509 memcpy(&sel, trivial_selector, sizeof(sel));
512 /* Assure, that the out edges are computed */
/* optional MRIS preprocessing (minimum register instruction sequencing) */
516 mris = be_sched_mris_preprocess(birg);
518 num_nodes = get_irg_last_idx(irg);
520 /* initialize environment for list scheduler */
521 memset(&env, 0, sizeof(env));
/* the ISA may wrap/override the chosen selector */
522 env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa, &sel);
523 env.arch_env = arch_env;
/* one sched_irn_t slot per node index, zero-initialized */
525 env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
527 memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
529 if (env.selector->init_graph)
530 env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
532 /* Schedule each single block. */
533 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
535 if (env.selector->finish_graph)
536 env.selector->finish_graph(env.selector_env);
/* release MRIS data and the per-node info array */
539 be_sched_mris_free(mris);
541 DEL_ARR_F(env.sched_info);