/**
 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is here.
 *
 * @author Sebastian Hack
 */
#include <stdlib.h>
#include <limits.h>
#include <assert.h>

#include "iredges_t.h"
#include "irprintf_t.h"
#include "besched_t.h"
#include "belistsched.h"

#define MAX(x,y) ((x) > (y) ? (x) : (y))
#define MIN(x,y) ((x) < (y) ? (x) : (y))
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
	const list_sched_selector_t *selector;   /**< The node selector. */
	const arch_env_t *arch_env;              /**< The architecture environment. */
	const ir_graph *irg;                     /**< The graph to schedule. */
	void *selector_env;                      /**< A pointer to give to the selector. */
} sched_env_t;
/**
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
 */
static ir_node *curr_bl = NULL;
static int cmp_usage(const void *a, const void *b)
{
	int res = is_live_end(curr_bl, a) - is_live_end(curr_bl, b);

	/*
	 * If one of them is live at the end of the block,
	 * that one shall be scheduled after the other.
	 */
	return res;
}
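/*
 * Usage sketch (illustrative; cmp_usage is not called in this file): to
 * order an array of nodes of one block by liveness, set the global block
 * and sort. `nodes` and `n_nodes` are hypothetical here.
 *
 *     curr_bl = block;
 *     qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_usage);
 *
 * The global curr_bl above exists only because qsort(3) cannot pass an
 * extra context pointer to the comparison function.
 */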
/**
 * The trivial selector:
 * Just assure that branches are executed last, otherwise select
 * the first ready node.
 */
static ir_node *trivial_select(void *block_env, nodeset *ready_set)
{
	const arch_env_t *arch_env = block_env;
	ir_node *irn;

	/* assure that branches are executed last */
	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
			nodeset_break(ready_set);
			return irn;
		}
	}

	/* only branches are ready: take the first one */
	irn = nodeset_first(ready_set);
	nodeset_break(ready_set);
	return irn;
}
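/*
 * Example (illustrative): with ready = { Add, Shl, Jmp }, the loop above
 * returns Add or Shl and skips the Jmp; only when branches are the sole
 * ready nodes does the fallback below pick one of them.
 */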
static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	return (void *) arch_env;
}

static void *trivial_init_block(void *graph_env, ir_node *bl)
{
	return graph_env;
}
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
	int res = 0;

	if(sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
}
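/*
 * Hook sketch (hypothetical): a backend that must keep its spill code in
 * the schedule could plug in something like
 *
 *     static int my_to_appear_in_schedule(void *block_env, const ir_node *irn)
 *     {
 *         return be_is_Spill(irn) || be_is_Reload(irn);
 *     }
 *
 * via the to_appear_in_schedule field of its list_sched_selector_t.
 */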
static const list_sched_selector_t trivial_selector_struct = {
	trivial_init_graph,
	trivial_init_block,
	trivial_select,
	/* remaining callbacks unused by the trivial selector */
};

const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
typedef struct _usage_stats_t {
	ir_node *irn;                 /**< The node these stats belong to. */
	struct _usage_stats_t *next;  /**< Next entry in the per-block list. */
	int max_hops;                 /**< Maximum distance to a user. */
	int uses_in_block;            /**< Number of uses inside the current block. */
	int already_consumed;         /**< Number of insns using this value already
	                                   scheduled. */
} usage_stats_t;
typedef struct {
	const list_sched_selector_t *vtab;
	const arch_env_t *arch_env;
} reg_pressure_main_env_t;
typedef struct {
	struct obstack obst;                      /**< Obstack holding the usage stats. */
	const reg_pressure_main_env_t *main_env;
	usage_stats_t *root;                      /**< List of all usage stats of this block. */
	nodeset *already_scheduled;
} reg_pressure_selector_env_t;
static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);
	if(!us) {
		us = obstack_alloc(&env->obst, sizeof(us[0]));
		us->irn = irn;
		us->already_consumed = 0;
		us->max_hops = INT_MAX;
		us->next = env->root;
		env->root = us;
		set_irn_link(irn, us);
	}
	return us;
}
static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);
	assert(us && "This node must have usage stats");
	return us;
}
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
	ir_node *bl = get_nodes_block(irn);
	/*
	 * If the reached node is not in the desired block,
	 * return the value appropriate for that situation.
	 */
	if(bl != curr_bl)
		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
	/*
	 * If the node is in the current block but not
	 * yet scheduled, we keep on searching from that node.
	 */
	if(!nodeset_find(env->already_scheduled, irn)) {
		int i, n, res = 0;
		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *operand = get_irn_n(irn, i);
			if(get_irn_visited(operand) < visited_nr) {
				int tmp;
				set_irn_visited(operand, visited_nr);
				tmp = max_hops_walker(env, operand, curr_bl, depth + 1, visited_nr);
				res = MAX(res, tmp);
			}
		}
		return res;
	}
	/*
	 * If the node is in the current block and scheduled, return
	 * the depth, which indicates the number of steps to the
	 * region of scheduled nodes.
	 */
	return depth;
}
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
	ir_node *bl = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);
	int res = 0;

	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);
		unsigned visited_nr = get_irg_visited(irg) + 1;
		int max_hops;

		set_irg_visited(irg, visited_nr);
		max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
		res = MAX(res, max_hops);
	}

	return res;
}
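/*
 * Intuition (informal): the walk starts at each user of irn and descends
 * through still-unscheduled nodes of the same block; it yields the depth
 * at the first already-scheduled node, 0 once it reaches a block that
 * dominates the current one, and INT_MAX for blocks that do not. A small
 * maximum means the consumers of the value are close to being
 * schedulable, which tends to shorten live ranges.
 */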
static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));

	main_env->arch_env = arch_env;
	main_env->vtab = vtab;
	irg_walk_graph(irg, firm_clear_link, NULL, NULL);

	return main_env;
}
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
	ir_node *irn;
	reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));

	obstack_init(&env->obst);
	env->already_scheduled = new_nodeset(32);
	env->root = NULL;
	env->main_env = graph_env;

	/* Collect usage statistics for all operands in this block. */
	sched_foreach(bl, irn) {
		if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
			int i, n;
			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);
				if(must_appear_in_schedule(env->main_env->vtab, env, op)) {
					usage_stats_t *us = get_or_set_usage_stats(env, op);
					/* A value live at the block end counts as heavily used. */
					if(is_live_end(bl, op))
						us->uses_in_block = 99999;
					else
						us->uses_in_block++;
				}
			}
		}
	}
	return env;
}
static void reg_pressure_block_free(void *block_env)
{
	reg_pressure_selector_env_t *env = block_env;
	usage_stats_t *us;

	for(us = env->root; us; us = us->next)
		set_irn_link(us->irn, NULL);

	obstack_free(&env->obst, NULL);
	del_nodeset(env->already_scheduled);
	free(env);
}
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int res = 0;

	if(get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;

		/* A tuple node: sum up the hops of all its results (Projs). */
		foreach_out_edge(irn, edge)
			res += get_result_hops_sum(env, get_edge_src_irn(edge));
	}
	else if(mode_is_data(get_irn_mode(irn)))
		res = compute_max_hops(env, irn);

	return res;
}
static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int i, n;
	int sum = 0;

	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if(must_appear_in_schedule(env->main_env->vtab, env, op))
			sum += compute_max_hops(env, op);
	}

	sum += get_result_hops_sum(env, irn);

	return sum;
}
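/*
 * Worked example (illustrative): for a candidate with two schedulable
 * operands at 3 and 5 hops and a single data result at 2 hops, the cost
 * is 3 + 5 + 2 = 10. A rival whose operands die immediately (0 hops
 * each) and whose result is consumed right away costs close to 0 and is
 * preferred by reg_pressure_select below.
 */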
static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
{
	reg_pressure_selector_env_t *env = block_env;
	ir_node *irn, *res = NULL;
	int curr_cost = INT_MAX;

	assert(nodeset_count(ready_set) > 0);

	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		/*
		 * Ignore branch instructions for the time being.
		 * They should only be scheduled if there is nothing else.
		 */
		if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
			int costs = reg_pr_costs(env, irn);
			if (costs <= curr_cost) {
				res = irn;
				curr_cost = costs;
			}
		}
	}

	/* There was no result, so we only saw branches: take the first one. */
	if (!res) {
		res = nodeset_first(ready_set);
		nodeset_break(ready_set);
	}

	assert(res && "There must be a node scheduled.");
	nodeset_insert(env->already_scheduled, res);
	return res;
}
static const list_sched_selector_t reg_pressure_selector_struct = {
	reg_pressure_graph_init,
	reg_pressure_block_init,
	reg_pressure_select,
	NULL,
	reg_pressure_block_free,
	/* remaining callbacks unused */
};
const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
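/*
 * Selection sketch (hypothetical backend code): an isa implementation
 * hands one of the selectors defined here to the scheduler through its
 * get_list_sched_selector() hook, e.g.
 *
 *     static const list_sched_selector_t *my_get_list_sched_selector(const void *self)
 *     {
 *         return reg_pressure_selector;   /* or trivial_selector *\/
 *     }
 */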
static void list_sched_block(ir_node *block, void *env_ptr);
void list_sched(const arch_env_t *arch_env, ir_graph *irg)
{
	sched_env_t env;

	memset(&env, 0, sizeof(env));
	env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
	env.arch_env = arch_env;
	env.irg = irg;

	if(env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Assure that the out edges are computed. */
	edges_assure(irg);

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if(env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);
}
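/*
 * Usage sketch (illustrative): a backend driver would schedule every
 * graph of the program like
 *
 *     int i;
 *     for(i = 0; i < get_irp_n_irgs(); ++i)
 *         list_sched(arch_env, get_irp_irg(i));
 *
 * assuming arch_env has been set up by the backend beforehand.
 */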
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
	int curr_time;                         /**< Current virtual time step. */
	nodeset *ready_set;                    /**< Nodes ready for scheduling. */
	nodeset *already_scheduled;            /**< Nodes already scheduled. */
	ir_node *block;                        /**< The block currently scheduled. */
	const list_sched_selector_t *selector;
	void *selector_block_env;
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
} block_sched_env_t;
/**
 * Try to put a node in the ready set.
 * @param env The block scheduler environment.
 * @param irn The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
{
	int i, n;

	/* Blocks cannot be scheduled. */
	if(is_Block(irn))
		return 0;

	/*
	 * Check if the given ir node is in a different block than the
	 * currently scheduled one. If so, don't make the node ready.
	 */
	if(env->block != get_nodes_block(irn))
		return 0;

	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);
		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if(!nodeset_find(env->already_scheduled, op) && get_nodes_block(op) == env->block)
			return 0;
	}

	DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
	nodeset_insert(env->ready_set, irn);
	return 1;
}
/**
 * Check if a node is ready in a block schedule.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was ready, 0 if not.
 */
#define is_ready(env,irn) \
	(nodeset_find((env)->ready_set, irn) != NULL)
/**
 * Check if a node has already been scheduled.
 * @param env The block schedule environment.
 * @param irn The node to check for.
 * @return 1 if the node was already scheduled, 0 if not.
 */
#define is_scheduled(env,irn) \
	(nodeset_find((env)->already_scheduled, irn) != NULL)
/**
 * Try to make all users of a node ready.
 * In fact, a user can only be made ready if all of its operands
 * have already been scheduled; this is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = edge->src;
		/* Phis are scheduled up front, see list_sched_block(). */
		if(!is_Phi(user))
			make_ready(env, user);
	}
}
/**
 * Compare two nodes using pointer equality.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 */
static int node_cmp_func(const void *p1, const void *p2)
{
	return p1 != p2;
}
/**
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise it is not put into the list. */
	if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		sched_info_t *info = get_irn_sched_info(irn);
		INIT_LIST_HEAD(&info->list);
		sched_add_before(env->block, irn);

		DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
	}

	/* Insert the node in the set of all already scheduled nodes. */
	nodeset_insert(env->already_scheduled, irn);

	/* Remove the node from the ready set */
	if(nodeset_find(env->ready_set, irn))
		nodeset_remove(env->ready_set, irn);

	return irn;
}
/**
 * Add the proj nodes of a tuple-moded irn to the schedule immediately
 * after the tuple-moded irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-moded irn and
 * one of its projs. This should render a realistic image of a
 * tuple-moded irn, which in fact models a node which defines multiple
 * values.
 *
 * @param env The block scheduling environment.
 * @param irn The tuple-moded irn whose projs are to be scheduled.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	foreach_out_edge(irn, edge) {
		ir_node *out = edge->src;

		assert(is_Proj(out) && "successor of a modeT node must be a proj");

		if(get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);
		else {
			add_to_sched(env, out);
			make_users_ready(env, out);
		}
	}
}
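/*
 * Example (illustrative): a Load has mode_T and produces Proj(M) and
 * Proj(res). Scheduling yields the sequence
 *
 *     ...; Load; Proj M; Proj res; ...
 *
 * so no other instruction can slip in between the Load and its results.
 */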
static ir_node *select_node(block_sched_env_t *be)
{
	ir_node *irn;

	for (irn = nodeset_first(be->ready_set); irn; irn = nodeset_next(be->ready_set)) {
		if (be_is_Keep(irn)) {
			/* Keep nodes get the highest priority. */
			nodeset_break(be->ready_set);
			return irn;
		}
	}

	return be->selector->select(be->selector_block_env, be->ready_set);
}
/**
 * Perform list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
	sched_env_t *env = env_ptr;
	const list_sched_selector_t *selector = env->selector;
	ir_node *start_node = get_irg_start(get_irn_irg(block));
	int phi_seen = 0;
	sched_info_t *info = get_irn_sched_info(block);

	block_sched_env_t be;
	const ir_edge_t *edge;
	ir_node *irn;
	int j, m;

	/* Initialize the block's list head that will hold the schedule. */
	INIT_LIST_HEAD(&info->list);

	/* Initialize the block scheduling environment */
	be.block = block;
	be.curr_time = 0;
	be.ready_set = new_nodeset(get_irn_n_edges(block));
	be.already_scheduled = new_nodeset(get_irn_n_edges(block));
	be.selector = selector;
	be.selector_block_env = NULL;
	FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");

	if(selector->init_block)
		be.selector_block_env = selector->init_block(env->selector_env, block);

	DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));

	/* Then one can add all nodes that are ready to the set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the end node because of keepalive edges. */
		if(get_irn_opcode(irn) == iro_End)
			continue;

		/* Phi functions are scheduled immediately, since they only transfer
		 * data flow from the predecessors to this block. */
		if(is_Phi(irn)) {
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);
			phi_seen = 1;
		}

		/* The Start node will be scheduled as the first node. */
		else if(irn == start_node) {
			add_to_sched(&be, irn);
			add_tuple_projs(&be, irn);
		}

		/* Other nodes must have all operands in other blocks to be made
		 * ready */
		else {
			int ready = 1;

			/* Check, if the operands of a node are not local to this block */
			for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
				ir_node *operand = get_irn_n(irn, j);

				if(get_nodes_block(operand) == block) {
					ready = 0;
					break;
				}
			}

			/* Make the node ready, if all operands live in a foreign block */
			if(ready) {
				DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, irn);
			}
		}
	}

	/* Increase the time, if some phi functions have been scheduled */
	be.curr_time += phi_seen;

	while (nodeset_count(be.ready_set) > 0) {
		/* select a node to be scheduled and check if it was ready */
		irn = select_node(&be);

		DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		if(get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);

		make_users_ready(&be, irn);

		/* Increase the time step. */
		be.curr_time += 1;

		/* remove the scheduled node from the ready list. */
		if (nodeset_find(be.ready_set, irn))
			nodeset_remove(be.ready_set, irn);
	}

	if(selector->finish_block)
		selector->finish_block(be.selector_block_env);

	del_nodeset(be.ready_set);
	del_nodeset(be.already_scheduled);
}