2 * Scheduling algorithms.
3 * Just a simple list scheduling algorithm is here.
5 * @author Sebastian Hack
21 #include "iredges_t.h"
26 #include "irprintf_t.h"
29 #include "besched_t.h"
32 #include "belistsched.h"
/* Classic min/max macros.
 * NOTE(review): each argument is evaluated twice -- do not pass expressions
 * with side effects (e.g. MAX(i++, j)). */
35 #define MAX(x,y) ((x) > (y) ? (x) : (y))
36 #define MIN(x,y) ((x) < (y) ? (x) : (y))
39 * Scheduling environment for the whole graph.
41 typedef struct _sched_env_t {
42 const ir_graph *irg; /**< The graph to schedule. */
43 const list_sched_selector_t *selector; /**< The node selector. */
44 void *selector_env; /**< A pointer to give to the selector. */
/* NOTE(review): the closing "} sched_env_t;" is elided from this listing. */
49 * Ugly global variable for the compare function
50 * since qsort(3) does not pass an extra pointer.
/* NOTE(review): this global makes the comparator non-reentrant/non-thread-safe;
 * acceptable only because qsort(3) has no context parameter, as stated above. */
52 static ir_node *curr_bl = NULL;
/* qsort(3) comparator over two ready nodes: orders them by whether they are
 * live at the end of the current block, so that live-out values are placed
 * later (see comment below). Body is partially elided in this listing. */
54 static int cmp_usage(const void *a, const void *b)
56 struct trivial_sched_env *env;
61 res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
64 * One of them is live at the end of the block.
65 * Then, that one shall be scheduled at after the other
/* Trivial selector: effectively returns the first node of the ready set.
 * NOTE(review): the alloca'd `ready` array appears to be filled but not used
 * before the pset_first/pset_break below -- possibly dead code; several lines
 * are elided in this listing, confirm against the full source. alloca with an
 * unbounded n also risks stack overflow for very large ready sets. */
75 static ir_node *trivial_select(void *env, void *block_env,
76 const struct list_head *sched_head,
77 int curr_time, pset *ready_set)
82 int i, n = pset_count(ready_set);
84 ir_node **ready = alloca(n * sizeof(ready[0]));
86 for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))
90 res = pset_first(ready_set);
91 pset_break(ready_set);
/* Selector vtable and public handle for the trivial scheduler
 * (initializer fields and closing brace elided in this listing). */
95 static const list_sched_selector_t trivial_selector_struct = {
103 const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
/* Per-value usage statistics, chained into a per-block list via `next`
 * (several fields elided in this listing). */
105 typedef struct _usage_stats_t {
107 struct _usage_stats_t *next;
109 int uses_in_block; /**< Number of uses inside the current block. */
110 int already_consumed; /**< Number of insns using this value already
/* Block-local environment of the register-pressure selector
 * (leading fields elided in this listing). */
117 pset *already_scheduled;
118 } reg_pressure_selector_env_t;
/* Return the usage_stats_t attached to irn's link field, allocating a fresh
 * one on env->obst and pushing it onto env's list if none exists yet. */
120 static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
122 usage_stats_t *us = get_irn_link(irn);
125 us = obstack_alloc(&env->obst, sizeof(us[0]));
/* NOTE(review): initialization of uses_in_block (and the us->irn back pointer
 * used in reg_pressure_block_free) is elided from this listing -- confirm the
 * full source sets them before use. */
127 us->already_consumed = 0;
128 us->max_hops = INT_MAX;
129 us->next = env->root;
131 set_irn_link(irn, us);
/* Fetch previously recorded usage stats from the node's link field;
 * asserts that the stats were set up by a prior get_or_set_usage_stats(). */
137 static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
139 usage_stats_t *us = get_irn_link(irn);
140 assert(us && "This node must have usage stats");
/* Recursive DFS over the operands of irn searching for tgt, using the
 * graph's visited counter to avoid revisiting nodes. Returns a hop count;
 * the base case and result combination are elided in this listing. */
144 static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visited_nr)
152 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
153 ir_node *operand = get_irn_n(irn, i);
155 if(get_irn_visited(operand) < visited_nr) {
158 set_irn_visited(operand, visited_nr);
159 tmp = max_hops_walker(operand, tgt, depth + 1, visited_nr);
/* For every not-yet-scheduled user of irn inside irn's own block, walk back
 * from the user to irn and take the maximum hop distance over all users.
 * Bumps the irg visited counter once per user so each walk starts fresh. */
168 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
170 ir_node *bl = get_nodes_block(irn);
171 ir_graph *irg = get_irn_irg(bl);
174 const ir_edge_t *edge;
176 foreach_out_edge(irn, edge) {
177 ir_node *user = get_edge_src_irn(edge);
179 if(get_nodes_block(user) == bl && !pset_find_ptr(env->already_scheduled, user)) {
180 unsigned visited_nr = get_irg_visited(irg) + 1;
183 set_irg_visited(irg, visited_nr);
184 max_hops = max_hops_walker(user, irn, 0, visited_nr);
185 res = MAX(res, max_hops);
/* Graph-init hook of the register pressure selector: clear every node's link
 * field so it can later carry a usage_stats_t pointer.
 * Return statement is elided in this listing. */
192 static void *reg_pressure_graph_init(const arch_isa_t *isa, ir_graph *irg)
194 irg_walk_graph(irg, firm_clear_link, NULL, NULL);
/* Block-init hook: allocate the per-block selector environment and collect
 * usage statistics for every schedulable node in the block. */
198 static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
201 reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
203 obstack_init(&env->obst);
205 env->already_scheduled = pset_new_ptr(32);
208 * Collect usage statistics.
210 sched_foreach(bl, irn) {
211 if(to_appear_in_schedule(irn)) {
214 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
215 ir_node *op = get_irn_n(irn, i);
216 if(to_appear_in_schedule(op)) {
/* NOTE(review): stats are attached to the *user* irn here although the
 * liveness test below inspects the operand op -- this may have been meant to
 * be get_or_set_usage_stats(env, op); confirm against the algorithm. */
217 usage_stats_t *us = get_or_set_usage_stats(env, irn);
218 if(is_live_end(bl, op))
/* 99999 acts as an "effectively infinite" use count for live-out values. */
219 us->uses_in_block = 99999;
/* Block-free hook: clear the node links set up during block init and release
 * the per-block obstack and scheduled set.
 * NOTE(review): env itself was xmalloc'ed in reg_pressure_block_init but no
 * free(env) is visible here; lines are elided in this listing -- check the
 * full source for a memory leak. */
230 static void reg_pressure_block_free(void *graph_env, void *block_env, ir_node *bl)
232 reg_pressure_selector_env_t *env = block_env;
235 for(us = env->root; us; us = us->next)
236 set_irn_link(us->irn, NULL);
238 obstack_free(&env->obst, NULL);
239 del_pset(env->already_scheduled);
/* Cost of scheduling irn now: the sum of the max-hop distances of all of its
 * schedulable operands. Smaller cost means the operands' last uses are
 * nearer, i.e. scheduling irn tends to shorten live ranges. */
243 static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
248 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
249 ir_node *op = get_irn_n(irn, i);
251 if(to_appear_in_schedule(op))
252 sum += compute_max_hops(env, op);
/* Register-pressure selector: scan the ready set and pick the node with the
 * smallest reg_pr_costs(); record it as already scheduled.
 * NOTE(review): `costs <= curr_cost` lets later-enumerated nodes win ties;
 * since pset iteration order is unspecified, tie-breaking is arbitrary. */
258 ir_node *reg_pressure_select(void *graph_env, void *block_env,
259 const struct list_head *sched_head,
260 int curr_time, pset *ready_set)
262 reg_pressure_selector_env_t *env = block_env;
263 ir_node *irn, *res = NULL;
264 int curr_cost = INT_MAX;
266 assert(pset_count(ready_set) > 0);
268 for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
269 int costs = reg_pr_costs(env, irn);
270 if(costs <= curr_cost) {
276 pset_insert_ptr(env->already_scheduled, res);
/* Selector vtable for the register pressure scheduler (some initializer
 * fields and the closing brace are elided in this listing). */
280 static const list_sched_selector_t reg_pressure_selector_struct = {
281 reg_pressure_graph_init,
282 reg_pressure_block_init,
284 reg_pressure_block_free,
/* Public handle to the register pressure selector.
 * Fixed: "®" was mojibake (HTML entity &reg;) of the address-of prefix
 * "&reg" -- the original line did not compile. */
288 const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
290 static void list_sched_block(ir_node *block, void *env_ptr);
/* Main entry point: list-schedule every block of irg using the selector
 * supplied by the ISA. Runs the selector's optional graph init/finish hooks
 * around a per-block walk. Some lines are elided in this listing. */
292 void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
295 const list_sched_selector_t *selector;
297 memset(&env, 0, sizeof(env));
298 selector = env.selector = isa->impl->get_list_sched_selector(isa);
299 env.selector_env = selector->init_graph ? selector->init_graph(isa, irg) : NULL;
302 /* Assure, that the out edges are computed */
305 /* Schedule each single block. */
306 irg_block_walk_graph(irg, list_sched_block, NULL, &env);
308 if(selector->finish_graph)
309 selector->finish_graph(env.selector_env, irg);
314 * Environment for a block scheduler.
316 typedef struct _block_sched_env_t {
/* Set of nodes already placed into the schedule of the current block. */
319 pset *already_scheduled;
/* Debug module handle used by the DBG() trace macros. */
321 firm_dbg_module_t *dbg;
325 * Try to put a node in the ready set.
326 * @param env The block scheduler environment.
327 * @param irn The node to make ready.
328 * @return 1, if the node could be made ready, 0 else.
330 static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
334 /* Blocks cannot be scheduled. */
339 * Check, if the given ir node is in a different block as the
340 * currently scheduled one. If that is so, don't make the node ready.
342 if(env->block != get_nodes_block(irn))
345 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
346 ir_node *op = get_irn_n(irn, i);
348 /* If the operand is local to the scheduled block and not yet
349 * scheduled, this node cannot be made ready, so exit. */
350 if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)
/* All operands are either scheduled or foreign: the node becomes ready. */
354 DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
355 pset_insert_ptr(env->ready_set, irn);
361 * Check, if a node is ready in a block schedule.
362 * @param env The block schedule environment.
363 * @param irn The node to check for.
364 * @return 1 if the node was ready, 0 if not.
366 #define is_ready(env,irn) \
367 (pset_find_ptr((env)->ready_set, (irn)) != NULL)
370 * Check, if a node has already been scheduled.
371 * @param env The block schedule environment.
372 * @param irn The node to check for.
373 * @return 1 if the node was already scheduled, 0 if not.
375 #define is_scheduled(env,irn) \
376 (pset_find_ptr((env)->already_scheduled, (irn)) != NULL)
379 * Try, to make all users of a node ready.
380 * In fact, a usage node can only be made ready, if all its operands
381 * have already been scheduled yet. This is checked by make_ready().
382 * @param env The block schedule environment.
383 * @param irn The node, which usages (successors) are to be made ready.
385 static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
387 const ir_edge_t *edge;
389 foreach_out_edge(irn, edge) {
/* NOTE(review): reads edge->src directly while other sites use the
 * get_edge_src_irn(edge) accessor -- consider using it here for consistency. */
390 ir_node *user = edge->src;
392 make_ready(env, user);
397 * Compare two nodes using pointer equality.
398 * @param p1 Node one.
399 * @param p2 Node two.
400 * @return 0 if they are identical.
/* Body (pointer comparison) is elided in this listing. */
402 static int node_cmp_func(const void *p1, const void *p2)
408 * Append an instruction to a schedule.
409 * @param env The block scheduling environment.
410 * @param irn The node to add to the schedule.
411 * @return The given node.
413 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
415 /* If the node consumes/produces data, it is appended to the schedule
416 * list, otherwise, it is not put into the list */
417 if(to_appear_in_schedule(irn)) {
418 sched_info_t *info = get_irn_sched_info(irn);
419 INIT_LIST_HEAD(&info->list);
421 sched_add_before(env->block, irn);
423 DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
426 /* Insert the node in the set of all already scheduled nodes. */
427 pset_insert_ptr(env->already_scheduled, irn);
429 /* Remove the node from the ready set */
430 if(pset_find_ptr(env->ready_set, irn))
431 pset_remove_ptr(env->ready_set, irn);
438 * Add the proj nodes of a tuple-mode irn to the schedule immediately
439 * after the tuple-moded irn. By pinning the projs after the irn, no
440 * other nodes can create a new lifetime between the tuple-moded irn and
441 * one of its projs. This should render a realistic image of a
442 * tuple-moded irn, which in fact models a node which defines multiple
445 * @param irn The tuple-moded irn.
446 * @param list The schedule list to append all the projs.
447 * @param time The time step to which the irn and all its projs are
449 * @param obst The obstack the scheduling data structures shall be
451 * @param ready_set The ready set of the list scheduler.
452 * @param already_scheduled A set containing all nodes already
/* NOTE(review): the @param entries above (list, time, obst, ready_set,
 * already_scheduled) are stale -- the function now takes only (env, irn),
 * with those members folded into the block_sched_env_t. */
455 static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
457 const ir_edge_t *edge;
459 assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
461 foreach_out_edge(irn, edge) {
462 ir_node *out = edge->src;
464 assert(is_Proj(out) && "successor of a modeT node must be a proj");
/* Recurse for nested tuples: a mode_T proj carries its own projs. */
466 if(get_irn_mode(out) == mode_T)
467 add_tuple_projs(env, out);
469 add_to_sched(env, out);
470 make_users_ready(env, out);
476 * Perform list scheduling on a block.
478 * Note, that the caller must compute a linked list of nodes in the block
479 * using the link field before calling this function.
481 * Also the outs must have been computed.
483 * @param block The block node.
484 * @param env Scheduling environment.
486 static void list_sched_block(ir_node *block, void *env_ptr)
488 void *block_env = NULL;
489 sched_env_t *env = env_ptr;
490 block_sched_env_t be;
491 const list_sched_selector_t *selector = env->selector;
492 const ir_edge_t *edge;
496 sched_info_t *info = get_irn_sched_info(block);
498 /* Initialize the block's list head that will hold the schedule. */
499 INIT_LIST_HEAD(&info->list);
501 /* Initialize the block scheduling environment */
502 be.dbg = firm_dbg_register("firm.be.sched");
/* Size the psets by the block's out-edge count as a capacity estimate. */
505 be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
506 be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
/* Mask 0 silences the DBG() trace output below. */
508 firm_dbg_set_mask(be.dbg, 0);
510 if(selector->init_block)
511 block_env = selector->init_block(env->selector_env, block);
513 DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
515 /* Then one can add all nodes are ready to the set. */
516 foreach_out_edge(block, edge) {
517 ir_node *irn = get_edge_src_irn(edge);
519 /* Skip the end node because of keepalive edges. */
520 if(get_irn_opcode(irn) == iro_End)
523 /* Phi functions are scheduled immediately, since they only transfer
524 * data flow from the predecessors to this block. */
526 add_to_sched(&be, irn);
527 make_users_ready(&be, irn);
531 /* Other nodes must have all operands in other blocks to be made
536 /* Check, if the operands of a node are not local to this block */
537 for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
538 ir_node *operand = get_irn_n(irn, j);
540 if(get_nodes_block(operand) == block) {
546 /* Make the node ready, if all operands live in a foreign block */
548 DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
549 make_ready(&be, irn);
554 /* Increase the time, if some phi functions have been scheduled */
555 be.curr_time += phi_seen;
/* Main loop: repeatedly ask the selector for a node until nothing is ready. */
557 while(pset_count(be.ready_set) > 0) {
558 /* select a node to be scheduled and check if it was ready */
559 irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);
561 DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
563 /* Add the node to the schedule. */
564 add_to_sched(&be, irn);
566 if(get_irn_mode(irn) == mode_T)
567 add_tuple_projs(&be, irn);
569 make_users_ready(&be, irn);
571 /* Increase the time step. */
574 /* remove the scheduled node from the ready list. */
/* NOTE(review): add_to_sched() already removes irn from the ready set, so
 * this check-and-remove appears redundant -- confirm in the full source. */
575 if(pset_find_ptr(be.ready_set, irn))
576 pset_remove_ptr(be.ready_set, irn);
579 if(selector->finish_block)
580 selector->finish_block(env->selector_env, block_env, block);
582 del_pset(be.ready_set);
583 del_pset(be.already_scheduled);