2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
21 #include "iredges_t.h"
26 #include "irprintf_t.h"
29 #include "besched_t.h"
32 #include "belistsched.h"
/* Textbook min/max macros: both arguments are evaluated twice, so
 * callers must only pass side-effect-free expressions. */
#define MAX(x,y) ((x) > (y) ? (x) : (y))
#define MIN(x,y) ((x) < (y) ? (x) : (y))
39 * Scheduling environment for the whole graph.
41 typedef struct _sched_env_t {
42 const ir_graph *irg; /**< The graph to schedule. */
43 const list_sched_selector_t *selector; /**< The node selector. */
44 void *selector_env; /**< A pointer to give to the selector. */
49 * Ugly global variable for the compare function
50 * since qsort(3) does not pass an extra pointer.
52 static ir_node *curr_bl = NULL;
54 static int cmp_usage(const void *a, const void *b)
56 struct trivial_sched_env *env;
61 res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
64 * One of them is live at the end of the block.
65 * Then, that one shall be scheduled at after the other
75 static ir_node *trivial_select(void *block_env, pset *ready_set)
79 res = pset_first(ready_set);
80 pset_break(ready_set);
84 static int default_to_appear_in_schedule(void *env, const ir_node *irn)
86 return to_appear_in_schedule(irn);
89 static const list_sched_selector_t trivial_selector_struct = {
93 default_to_appear_in_schedule,
98 const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
100 typedef struct _usage_stats_t {
102 struct _usage_stats_t *next;
104 int uses_in_block; /**< Number of uses inside the current block. */
105 int already_consumed; /**< Number of insns using this value already
112 pset *already_scheduled;
113 const list_sched_selector_t *vtab;
114 } reg_pressure_selector_env_t;
116 static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
118 usage_stats_t *us = get_irn_link(irn);
121 us = obstack_alloc(&env->obst, sizeof(us[0]));
123 us->already_consumed = 0;
124 us->max_hops = INT_MAX;
125 us->next = env->root;
127 set_irn_link(irn, us);
133 static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
135 usage_stats_t *us = get_irn_link(irn);
136 assert(us && "This node must have usage stats");
140 static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visited_nr)
148 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
149 ir_node *operand = get_irn_n(irn, i);
151 if(get_irn_visited(operand) < visited_nr) {
154 set_irn_visited(operand, visited_nr);
155 tmp = max_hops_walker(operand, tgt, depth + 1, visited_nr);
164 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
166 ir_node *bl = get_nodes_block(irn);
167 ir_graph *irg = get_irn_irg(bl);
170 const ir_edge_t *edge;
172 foreach_out_edge(irn, edge) {
173 ir_node *user = get_edge_src_irn(edge);
175 if(get_nodes_block(user) == bl && !pset_find_ptr(env->already_scheduled, user)) {
176 unsigned visited_nr = get_irg_visited(irg) + 1;
179 set_irg_visited(irg, visited_nr);
180 max_hops = max_hops_walker(user, irn, 0, visited_nr);
181 res = MAX(res, max_hops);
188 static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_isa_t *isa, ir_graph *irg)
190 irg_walk_graph(irg, firm_clear_link, NULL, NULL);
191 return (void *) vtab;
194 static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
197 reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
199 obstack_init(&env->obst);
200 env->already_scheduled = pset_new_ptr(32);
202 env->vtab = graph_env;
205 * Collect usage statistics.
207 sched_foreach(bl, irn) {
208 if(env->vtab->to_appear_in_schedule(env, irn)) {
211 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
212 ir_node *op = get_irn_n(irn, i);
213 if(env->vtab->to_appear_in_schedule(env, irn)) {
214 usage_stats_t *us = get_or_set_usage_stats(env, irn);
215 if(is_live_end(bl, op))
216 us->uses_in_block = 99999;
227 static void reg_pressure_block_free(void *block_env)
229 reg_pressure_selector_env_t *env = block_env;
232 for(us = env->root; us; us = us->next)
233 set_irn_link(us->irn, NULL);
235 obstack_free(&env->obst, NULL);
236 del_pset(env->already_scheduled);
240 static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
245 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
246 ir_node *op = get_irn_n(irn, i);
248 if(env->vtab->to_appear_in_schedule(env, op))
249 sum += compute_max_hops(env, op);
255 ir_node *reg_pressure_select(void *block_env, pset *ready_set)
257 reg_pressure_selector_env_t *env = block_env;
258 ir_node *irn, *res = NULL;
259 int curr_cost = INT_MAX;
261 assert(pset_count(ready_set) > 0);
263 for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
264 int costs = reg_pr_costs(env, irn);
265 if(costs <= curr_cost) {
271 pset_insert_ptr(env->already_scheduled, res);
275 static const list_sched_selector_t reg_pressure_selector_struct = {
276 reg_pressure_graph_init,
277 reg_pressure_block_init,
279 default_to_appear_in_schedule,
280 reg_pressure_block_free,
284 const list_sched_selector_t *reg_pressure_selector = ®_pressure_selector_struct;
286 static void list_sched_block(ir_node *block, void *env_ptr);
288 void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
291 const list_sched_selector_t *selector;
293 memset(&env, 0, sizeof(env));
294 selector = env.selector = isa->impl->get_list_sched_selector(isa);
295 env.selector_env = selector->init_graph ? selector->init_graph(selector, isa, irg) : NULL;
298 /* Assure, that the out edges are computed */
301 /* Schedule each single block. */
302 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
304 if(selector->finish_graph)
305 selector->finish_graph(env.selector_env);
310 * Environment for a block scheduler.
312 typedef struct _block_sched_env_t {
315 pset *already_scheduled;
317 firm_dbg_module_t *dbg;
318 const list_sched_selector_t *selector;
319 void *selector_block_env;
323 * Try to put a node in the ready set.
324 * @param env The block scheduler environment.
325 * @param irn The node to make ready.
326 * @return 1, if the node could be made ready, 0 else.
328 static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
332 /* Blocks cannot be scheduled. */
337 * Check, if the given ir node is in a different block as the
338 * currently scheduled one. If that is so, don't make the node ready.
340 if(env->block != get_nodes_block(irn))
343 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
344 ir_node *op = get_irn_n(irn, i);
346 /* If the operand is local to the scheduled block and not yet
347 * scheduled, this nodes cannot be made ready, so exit. */
348 if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)
352 DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
353 pset_insert_ptr(env->ready_set, irn);
/**
 * Check, if a node is ready in a block schedule.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was ready, 0 if not.
 */
#define is_ready(env,irn) \
	(pset_find_ptr((env)->ready_set, irn) != NULL)
/**
 * Check, if a node has already been scheduled.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was already scheduled, 0 if not.
 */
#define is_scheduled(env,irn) \
	(pset_find_ptr((env)->already_scheduled, irn) != NULL)
377 * Try, to make all users of a node ready.
378 * In fact, a usage node can only be made ready, if all its operands
379 * have already been scheduled yet. This is checked my make_ready().
380 * @param env The block schedule environment.
381 * @param irn The node, which usages (successors) are to be made ready.
383 static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
385 const ir_edge_t *edge;
387 foreach_out_edge(irn, edge) {
388 ir_node *user = edge->src;
390 make_ready(env, user);
/**
 * Compare two nodes using pointer equality (pset comparison callback).
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical, non-zero otherwise.
 */
static int node_cmp_func(const void *p1, const void *p2)
{
	return p1 != p2;
}
406 * Append an instruction to a schedule.
407 * @param env The block scheduleing environment.
408 * @param irn The node to add to the schedule.
409 * @return The given node.
411 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
413 /* If the node consumes/produces data, it is appended to the schedule
414 * list, otherwise, it is not put into the list */
415 if(to_appear_in_schedule(irn)) {
416 sched_info_t *info = get_irn_sched_info(irn);
417 INIT_LIST_HEAD(&info->list);
419 sched_add_before(env->block, irn);
421 DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
424 /* Insert the node in the set of all already scheduled nodes. */
425 pset_insert_ptr(env->already_scheduled, irn);
427 /* Remove the node from the ready set */
428 if(pset_find_ptr(env->ready_set, irn))
429 pset_remove_ptr(env->ready_set, irn);
436 * Add the proj nodes of a tuple-mode irn to the schedule immediately
437 * after the tuple-moded irn. By pinning the projs after the irn, no
438 * other nodes can create a new lifetime between the tuple-moded irn and
439 * one of its projs. This should render a realistic image of a
440 * tuple-moded irn, which in fact models a node which defines multiple
443 * @param irn The tuple-moded irn.
444 * @param list The schedule list to append all the projs.
445 * @param time The time step to which the irn and all its projs are
447 * @param obst The obstack the scheduling data structures shall be
449 * @param ready_set The ready set of the list scheduler.
450 * @param already_scheduled A set containing all nodes already
453 static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
455 const ir_edge_t *edge;
457 assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
459 foreach_out_edge(irn, edge) {
460 ir_node *out = edge->src;
462 assert(is_Proj(out) && "successor of a modeT node must be a proj");
464 if(get_irn_mode(out) == mode_T)
465 add_tuple_projs(env, out);
467 add_to_sched(env, out);
468 make_users_ready(env, out);
474 * Perform list scheduling on a block.
476 * Note, that the caller must compute a linked list of nodes in the block
477 * using the link field before calling this function.
479 * Also the outs must have been computed.
481 * @param block The block node.
482 * @param env Scheduling environment.
484 static void list_sched_block(ir_node *block, void *env_ptr)
486 sched_env_t *env = env_ptr;
487 block_sched_env_t be;
488 const list_sched_selector_t *selector = env->selector;
489 const ir_edge_t *edge;
491 ir_node *start_node = get_irg_start(get_irn_irg(block));
492 ir_node *final_jmp = NULL;
495 sched_info_t *info = get_irn_sched_info(block);
497 /* Initialize the block's list head that will hold the schedule. */
498 INIT_LIST_HEAD(&info->list);
500 /* Initialize the block scheduling environment */
501 be.dbg = firm_dbg_register("firm.be.sched");
504 be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
505 be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
506 be.selector = selector;
508 firm_dbg_set_mask(be.dbg, 0);
510 if(selector->init_block)
511 be.selector_block_env = selector->init_block(env->selector_env, block);
513 DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
515 /* Then one can add all nodes are ready to the set. */
516 foreach_out_edge(block, edge) {
517 ir_node *irn = get_edge_src_irn(edge);
519 /* Skip the end node because of keepalive edges. */
520 if(get_irn_opcode(irn) == iro_End)
523 /* Phi functions are scheduled immediately, since they only transfer
524 * data flow from the predecessors to this block. */
526 add_to_sched(&be, irn);
527 make_users_ready(&be, irn);
531 else if(irn == start_node) {
532 add_to_sched(&be, irn);
533 add_tuple_projs(&be, irn);
536 else if(get_irn_opcode(irn) == iro_Jmp) {
540 /* Other nodes must have all operands in other blocks to be made
545 /* Check, if the operands of a node are not local to this block */
546 for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
547 ir_node *operand = get_irn_n(irn, j);
549 if(get_nodes_block(operand) == block) {
555 /* Make the node ready, if all operands live in a foreign block */
557 DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
558 make_ready(&be, irn);
563 /* Increase the time, if some phi functions have been scheduled */
564 be.curr_time += phi_seen;
566 while(pset_count(be.ready_set) > 0) {
567 /* select a node to be scheduled and check if it was ready */
568 irn = selector->select(be.selector_block_env, be.ready_set);
570 DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
572 /* Add the node to the schedule. */
573 add_to_sched(&be, irn);
575 if(get_irn_mode(irn) == mode_T)
576 add_tuple_projs(&be, irn);
578 make_users_ready(&be, irn);
580 /* Increase the time step. */
583 /* remove the scheduled node from the ready list. */
584 if(pset_find_ptr(be.ready_set, irn))
585 pset_remove_ptr(be.ready_set, irn);
588 if(selector->finish_block)
589 selector->finish_block(be.selector_block_env);
592 add_to_sched(&be, final_jmp);
594 del_pset(be.ready_set);
595 del_pset(be.already_scheduled);