 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is here.
 * @author Sebastian Hack

#include "iredges_t.h"
#include "irprintf_t.h"
#include "besched_t.h"
#include "belistsched.h"

#define MAX(x,y) ((x) > (y) ? (x) : (y))
#define MIN(x,y) ((x) < (y) ? (x) : (y))
 * All scheduling info needed per node.
typedef struct _sched_irn_t {
    sched_timestep_t delay;      /**< The delay for this node if already calculated, else 0. */
    sched_timestep_t etime;      /**< The earliest time of this node. */
    unsigned already_sched : 1;  /**< Set if this node is already scheduled */
    unsigned is_root : 1;        /**< is a root node of a block */
 * Scheduling environment for the whole graph.
typedef struct _sched_env_t {
    sched_irn_t *sched_info;                /**< scheduling info per node */
    const list_sched_selector_t *selector;  /**< The node selector. */
    const arch_env_t *arch_env;             /**< The architecture environment. */
    const ir_graph *irg;                    /**< The graph to schedule. */
    void *selector_env;                     /**< A pointer to give to the selector. */
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
static ir_node *curr_bl = NULL;

static int cmp_usage(const void *a, const void *b)
    /* Use the global block; qsort(3) gives us no way to pass an environment. */
    res = is_live_end(curr_bl, a) - is_live_end(curr_bl, b);
     * One of them is live at the end of the block.
     * Then, that one shall be scheduled after the other.
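
/*
 * Illustrative sketch (not compiled in): how cmp_usage() is meant to be
 * used together with the global above. The array `nodes` and its length
 * `n_nodes` are hypothetical names.
 */
#if 0
    curr_bl = block;  /* publish the block, since qsort(3) passes no context */
    qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_usage);
#endif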
 * The trivial selector:
 * Just assure that branches are executed last, otherwise select
 * the first node ready.
static ir_node *trivial_select(void *block_env, nodeset *ready_set)
    const arch_env_t *arch_env = block_env;

    /* assure that branches and constants are executed last */
    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);

        if (irn_class != arch_irn_class_branch && (const_last ? (irn_class != arch_irn_class_const) : 1)) {
            nodeset_break(ready_set);

    /* assure that constants are executed before branches */
    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
            nodeset_break(ready_set);

    /* at last: schedule branches */
    irn = nodeset_first(ready_set);
    nodeset_break(ready_set);
static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
    return (void *) arch_env;

static void *trivial_init_block(void *graph_env, ir_node *bl)

static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
    if(sel->to_appear_in_schedule)
        res = sel->to_appear_in_schedule(block_env, irn);

    return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
static const list_sched_selector_t trivial_selector_struct = {
    NULL, /* to_appear_in_schedule */
    NULL, /* finish_block */
    NULL  /* finish_graph */

const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
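
/*
 * Sketch of how a backend could expose this selector through the
 * get_list_sched_selector() hook that list_sched() queries at the end of
 * this file. The isa parameter type is backend specific; void is used
 * here as a placeholder.
 */
#if 0
static const list_sched_selector_t *example_get_list_sched_selector(const void *isa)
{
    (void) isa;
    return trivial_selector;
}
#endif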
typedef struct _usage_stats_t {
    struct _usage_stats_t *next;
    int uses_in_block;    /**< Number of uses inside the current block. */
    int already_consumed; /**< Number of insns using this value already scheduled. */

    const list_sched_selector_t *vtab;
    const arch_env_t *arch_env;
} reg_pressure_main_env_t;
    const reg_pressure_main_env_t *main_env;
    nodeset *already_scheduled;
} reg_pressure_selector_env_t;
static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
    usage_stats_t *us = get_irn_link(irn);

        us = obstack_alloc(&env->obst, sizeof(us[0]));
        us->already_consumed = 0;
        us->max_hops         = INT_MAX;
        us->next             = env->root;

        set_irn_link(irn, us);

static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
    usage_stats_t *us = get_irn_link(irn);
    assert(us && "This node must have usage stats");
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
    ir_node *bl = get_nodes_block(irn);
    /*
     * If the reached node is not in the desired block,
     * return the value passed for this situation.
     */
    if(bl != curr_bl)
        return block_dominates(bl, curr_bl) ? 0 : INT_MAX;

    /*
     * If the node is in the current block but not
     * yet scheduled, we keep on searching from that node.
     */
    if(!nodeset_find(env->already_scheduled, irn)) {
        for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
            ir_node *operand = get_irn_n(irn, i);

            if(get_irn_visited(operand) < visited_nr) {
                set_irn_visited(operand, visited_nr);
                tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);

    /*
     * If the node is in the current block and scheduled, return
     * the depth which indicates the number of steps to the
     * region of scheduled nodes.
     */
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
    ir_node *bl   = get_nodes_block(irn);
    ir_graph *irg = get_irn_irg(bl);

    const ir_edge_t *edge;

    foreach_out_edge(irn, edge) {
        ir_node *user       = get_edge_src_irn(edge);
        unsigned visited_nr = get_irg_visited(irg) + 1;

        set_irg_visited(irg, visited_nr);
        max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
        res      = MAX(res, max_hops);
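
/*
 * Note on the visited-number idiom used above (a sketch, not part of the
 * scheduler): each walk claims a fresh number, so a node whose own number
 * is smaller has not been seen by the current walk and no global reset of
 * visited flags is needed between walks.
 */
#if 0
    unsigned nr = get_irg_visited(irg) + 1;
    set_irg_visited(irg, nr);       /* claim a fresh walk number   */
    if (get_irn_visited(n) < nr) {  /* n not yet seen in this walk */
        set_irn_visited(n, nr);
        /* ... visit n ... */
    }
#endif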
static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
    reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));

    main_env->arch_env = arch_env;
    main_env->vtab     = vtab;
    irg_walk_graph(irg, firm_clear_link, NULL, NULL);
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
    reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));

    obstack_init(&env->obst);
    env->already_scheduled = new_nodeset(32);
    env->main_env          = graph_env;

    /*
     * Collect usage statistics.
     */
    sched_foreach(bl, irn) {
        if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
            for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
                ir_node *op = get_irn_n(irn, i);
                /* the statistics belong to the operand, not to irn itself */
                if(must_appear_in_schedule(env->main_env->vtab, env, op)) {
                    usage_stats_t *us = get_or_set_usage_stats(env, op);
                    if(is_live_end(bl, op))
                        us->uses_in_block = 99999;
static void reg_pressure_block_free(void *block_env)
    reg_pressure_selector_env_t *env = block_env;

    for(us = env->root; us; us = us->next)
        set_irn_link(us->irn, NULL);

    obstack_free(&env->obst, NULL);
    del_nodeset(env->already_scheduled);
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
    if(get_irn_mode(irn) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(irn, edge)
            res += get_result_hops_sum(env, get_edge_src_irn(edge));

    else if(mode_is_data(get_irn_mode(irn)))
        res = compute_max_hops(env, irn);
static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
    for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
        ir_node *op = get_irn_n(irn, i);

        if(must_appear_in_schedule(env->main_env->vtab, env, op))
            sum += compute_max_hops(env, op);

    sum += get_result_hops_sum(env, irn);
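
/*
 * Worked example (values invented for illustration): a candidate whose two
 * operands are 1 and 2 hops away from the already scheduled region and
 * whose result is 1 hop away gets reg_pr_costs() = 1 + 2 + 1 = 4. The
 * selector below prefers small sums, i.e. nodes close to the scheduled
 * region, which tends to keep live ranges short.
 */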
static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
    reg_pressure_selector_env_t *env = block_env;
    ir_node *irn, *res = NULL;
    int curr_cost = INT_MAX;

    assert(nodeset_count(ready_set) > 0);

    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        /*
         * Ignore branch instructions for the time being.
         * They should only be scheduled if there is nothing else.
         */
        if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
            int costs = reg_pr_costs(env, irn);
            if (costs <= curr_cost) {

    /* There was no result, so we only saw branches. */
        res = nodeset_first(ready_set);
        nodeset_break(ready_set);

    assert(res && "There must be a node scheduled.");

    nodeset_insert(env->already_scheduled, res);
 * Environment for a block scheduler.
typedef struct _block_sched_env_t {
    sched_irn_t *sched_info;    /**< scheduling info per node, copied from the global scheduler object */
    sched_timestep_t curr_time; /**< current time of the scheduler */
    nodeset *cands;             /**< the set of candidates */
    ir_node *block;             /**< the current block */
    sched_env_t *sched_env;     /**< the scheduler environment */
    const list_sched_selector_t *selector;
    void *selector_block_env;
    DEBUG_ONLY(firm_dbg_module_t *dbg;)
 * Returns non-zero if the node is already scheduled.
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].already_sched;

 * Mark a node as already scheduled.
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].already_sched = 1;

 * Returns non-zero if the node is a root node.
static INLINE unsigned is_root_node(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].is_root;

 * Mark a node as root node.
static INLINE void mark_root_node(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].is_root = 1;
 * Get the current delay.
static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].delay;

 * Set the current delay.
static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].delay = delay;

 * Get the current etime.
static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].etime;

 * Set the current etime.
static void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].etime = etime;
 * Returns the exec-time for node n.
static sched_timestep_t exectime(sched_env_t *env, ir_node *n) {
    if (be_is_Keep(n) || is_Proj(n))
    if (env->selector->exectime)
        return env->selector->exectime(env->selector_env, n);

 * Calculates the latency between two ops.
static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
    /* a Keep hides a root */
    if (be_is_Keep(curr))
        return exectime(env, pred);

    /* Projs are executed immediately */

    /* a predecessor Proj must be skipped */
        pred = get_Proj_pred(pred);

    if (env->selector->latency)
        return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
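
/*
 * Sketch of a latency callback a target could plug into its
 * list_sched_selector_t. The cost model is invented for illustration;
 * real backends derive these numbers from their pipeline description.
 */
#if 0
static sched_timestep_t example_latency(void *env, ir_node *pred, int pred_cycle,
                                        ir_node *curr, int curr_cycle)
{
    (void) env; (void) pred_cycle; (void) curr; (void) curr_cycle;
    /* pretend loads need 3 cycles to reach a consumer, everything else 1 */
    return get_irn_opcode(pred) == iro_Load ? 3 : 1;
}
#endif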
 * Try to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
    sched_timestep_t etime_p, etime;

    /* Blocks cannot be scheduled. */

    /*
     * Check if the given ir node is in a different block than the
     * currently scheduled one. If so, don't make the node ready.
     */
    if (env->block != get_nodes_block(irn))

    for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
        ir_node *op = get_irn_n(irn, i);

        /* if irn is an End we have keep-alives and op might be a block, skip that */
            assert(get_irn_op(irn) == op_End);

        /* If the operand is local to the scheduled block and not yet
         * scheduled, this node cannot be made ready, so exit. */
        if (!is_already_scheduled(env, op) && get_nodes_block(op) == env->block)

    nodeset_insert(env->cands, irn);

    /* calculate the etime of this node */
    etime = env->curr_time;
        etime_p = get_irn_etime(env, pred);
        etime  += latency(env->sched_env, pred, 1, irn, 0);

        etime = etime_p > etime ? etime_p : etime;

    set_irn_etime(env, irn, etime);

    DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));
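
/*
 * Worked example for the etime computation above (numbers invented): with
 * curr_time = 10, an already scheduled pred with etime_p = 8 and
 * latency(pred, irn) = 2, the node becomes ready at
 * etime = MAX(etime_p, curr_time + latency) = MAX(8, 10 + 2) = 12.
 */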
 * Try to make all users of a node ready.
 * In fact, a user can only be made ready if all of its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
    const ir_edge_t *edge;

    foreach_out_edge(irn, edge) {
        ir_node *user = edge->src;

        make_ready(env, irn, user);
 * Compare two nodes using pointer equality.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
static int node_cmp_func(const void *p1, const void *p2)
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
    /* If the node consumes/produces data, it is appended to the schedule
     * list, otherwise, it is not put into the list */
    if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
        sched_info_t *info = get_irn_sched_info(irn);
        INIT_LIST_HEAD(&info->list);
        sched_add_before(env->block, irn);

        DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));

    /* Insert the node in the set of all already scheduled nodes. */
    mark_already_scheduled(env, irn);

    /* Remove the node from the ready set */
    if(nodeset_find(env->cands, irn))
        nodeset_remove(env->cands, irn);
 * Add the Proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the Projs after the irn, no
 * other nodes can create a new lifetime between the tuple-mode irn and
 * one of its Projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node that defines multiple values.

 * @param irn The tuple-mode irn.
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
    const ir_edge_t *edge;

    assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

    foreach_out_edge(irn, edge) {
        ir_node *out = edge->src;

        assert(is_Proj(out) && "successor of a mode_T node must be a Proj");

        if (get_irn_mode(out) == mode_T)
            add_tuple_projs(env, out);

        add_to_sched(env, out);
        make_users_ready(env, out);
 * Execute the heuristic function,
static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
    for (irn = nodeset_first(ns); irn; irn = nodeset_next(ns)) {
        if (be_is_Keep(irn)) {

    return be->selector->select(be->selector_block_env, ns);
 * Returns non-zero if root is a root node in the given block.
static int is_root(ir_node *root, ir_node *block) {
    const ir_edge_t *edge;

    foreach_out_edge(root, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (get_nodes_block(succ) == block)

/* we need a special mark */
 * Descend into a DAG and create a pre-order list.
static void descent(ir_node *root, ir_node *block, ir_node **list) {
    if (! is_Phi(root)) {
        /* Phi nodes always leave the block */
        for (i = get_irn_arity(root) - 1; i >= 0; --i) {
            ir_node *pred = get_irn_n(root, i);

            /* Blocks may happen as predecessors of End nodes */

            /* already seen nodes are not marked */
            if (get_irn_link(pred) != MARK)

            /* don't leave our block */
            if (get_nodes_block(pred) != block)

            descent(pred, block, list);

    set_irn_link(root, *list);
 * Perform list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
static void list_sched_block(ir_node *block, void *env_ptr)
    sched_env_t *env                      = env_ptr;
    const list_sched_selector_t *selector = env->selector;
    ir_node *start_node                   = get_irg_start(get_irn_irg(block));
    sched_info_t *info                    = get_irn_sched_info(block);

    block_sched_env_t be;
    const ir_edge_t *edge;

    ir_node *root = NULL, *preord = NULL;

    /* Initialize the block's list head that will hold the schedule. */
    INIT_LIST_HEAD(&info->list);

    /* Initialize the block scheduling environment */
    be.sched_info = env->sched_info;
    be.cands      = new_nodeset(get_irn_n_edges(block));
    be.selector   = selector;

    FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
    /* firm_dbg_set_mask(be.dbg, SET_LEVEL_3); */

    if (selector->init_block)
        be.selector_block_env = selector->init_block(env->selector_env, block);

    DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
    /* First step: Find the root set. */
    foreach_out_edge(block, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (is_root(succ, block)) {
            mark_root_node(&be, succ);
            set_irn_link(succ, root);

            set_irn_link(succ, MARK);

    /* Second step: calculate the pre-order list. */
    for (curr = root; curr; curr = irn) {
        irn = get_irn_link(curr);
        descent(curr, block, &preord);
    /* Third step: calculate the Delay. Note that our
     * list is now in pre-order, starting at the root. */
    for (curr = root; curr; curr = get_irn_link(curr)) {
        if (arch_irn_classify(env->arch_env, curr) == arch_irn_class_branch) {
            /* assure that branches can be executed last */

        if (is_root_node(&be, curr))
            d = exectime(env, curr);

            foreach_out_edge(curr, edge) {
                ir_node *n = get_edge_src_irn(edge);

                if (get_nodes_block(n) == block) {
                    ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);

        set_irn_delay(&be, curr, d);
        DB((be.dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

        /* set the etime of all nodes to 0 */
        set_irn_etime(&be, curr, 0);
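
    /*
     * Worked example for the delay (numbers invented): for a chain
     * a -> b -> branch with unit latencies and exectime(branch) = 1,
     * delay(branch) = 1, delay(b) = latency(b, branch) + delay(branch) = 2
     * and delay(a) = 3. Nodes with larger delay lie on longer paths to the
     * block's roots and are preferred by the main loop below.
     */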
    /* Then one can add all nodes that are ready to the set. */
    foreach_out_edge(block, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        /* Skip the End node because of keep-alive edges. */
        if (get_irn_opcode(irn) == iro_End)

        /* Phi functions are scheduled immediately, since they only transfer
         * data flow from the predecessors to this block. */

            /* Increase the time step. */
            be.curr_time += get_irn_etime(&be, irn);
            add_to_sched(&be, irn);
            make_users_ready(&be, irn);

        else if (irn == start_node) {
            /* The Start node is scheduled as the first node of the block. */
            be.curr_time += get_irn_etime(&be, irn);

            add_to_sched(&be, irn);
            add_tuple_projs(&be, irn);

        /* Other nodes must have all operands in other blocks to be made
         * ready. */

            /* Check if the operands of a node are not local to this block. */
            for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
                ir_node *operand = get_irn_n(irn, j);

                if (get_nodes_block(operand) == block) {

            /* Make the node ready if all operands live in a foreign block. */
                DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
                make_ready(&be, NULL, irn);
    while (nodeset_count(be.cands) > 0) {
        nodeset *mcands; /**< the set of candidates with maximum delay time */
        nodeset *ecands; /**< the set of nodes in mcands whose etime <= curr_time */
        sched_timestep_t max_delay = 0;

        /* collect statistics about the amount of ready nodes */
        be_do_stat_sched_ready(block, be.cands);

        /* calculate the max delay of all candidates */
        foreach_nodeset(be.cands, irn) {
            sched_timestep_t d = get_irn_delay(&be, irn);

            max_delay = d > max_delay ? d : max_delay;

        mcands = new_nodeset(8);
        ecands = new_nodeset(8);

        /* calculate mcands and ecands */
        foreach_nodeset(be.cands, irn) {
            if (be_is_Keep(irn)) {
                nodeset_break(be.cands);

            if (get_irn_delay(&be, irn) == max_delay) {
                nodeset_insert(mcands, irn);
                if (get_irn_etime(&be, irn) <= be.curr_time)
                    nodeset_insert(ecands, irn);
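
        /*
         * Worked example (numbers invented): with candidates of delay
         * {5, 5, 3} and etimes {12, 9, 9} at curr_time = 10, max_delay is 5,
         * mcands holds the two delay-5 nodes and ecands only the one of them
         * whose etime 9 <= 10, i.e. the node that is both critical and
         * executable without stalling.
         */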
        /* Keeps must be immediately scheduled */

        DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));

        /* select a node to be scheduled and check if it was ready */
        if (nodeset_count(mcands) == 1) {
            DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
            irn = nodeset_first(mcands);

            int cnt = nodeset_count(ecands);

            arch_irn_class_t irn_class;

            irn = nodeset_first(ecands);
            irn_class = arch_irn_classify(env->arch_env, irn);

            if (irn_class == arch_irn_class_branch) {
                /* BEWARE: don't select a JUMP if others are still possible */

                DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));

            DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
            irn = select_node_heuristic(&be, ecands);

            DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
            irn = select_node_heuristic(&be, mcands);
        DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));

        /* Increase the time step. */
        be.curr_time += exectime(env, irn);

        /* Add the node to the schedule. */
        add_to_sched(&be, irn);

        if (get_irn_mode(irn) == mode_T)
            add_tuple_projs(&be, irn);

        make_users_ready(&be, irn);

        /* remove the scheduled node from the ready list. */
        if (nodeset_find(be.cands, irn))
            nodeset_remove(be.cands, irn);

    if (selector->finish_block)
        selector->finish_block(be.selector_block_env);

    del_nodeset(be.cands);
static const list_sched_selector_t reg_pressure_selector_struct = {
    reg_pressure_graph_init,
    reg_pressure_block_init,
    NULL, /* to_appear_in_schedule */
    reg_pressure_block_free,

const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
/* List schedule a graph. */
void list_sched(const arch_env_t *arch_env, ir_graph *irg)
    int num_nodes = get_irg_last_idx(irg);

    memset(&env, 0, sizeof(env));
    env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
    env.arch_env = arch_env;

    env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

    memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));

    if (env.selector->init_graph)
        env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

    /* Assure that the out edges are computed. */

    /* Schedule each single block. */
    irg_block_walk_graph(irg, list_sched_block, NULL, &env);

    if (env.selector->finish_graph)
        env.selector->finish_graph(env.selector_env);

    DEL_ARR_F(env.sched_info);
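
/*
 * Usage sketch (not part of this file): a backend driver would activate
 * the out edges and then invoke the scheduler. The surrounding function
 * and the way arch_env is obtained are hypothetical.
 */
#if 0
void example_schedule(const arch_env_t *arch_env, ir_graph *irg)
{
    edges_activate(irg);        /* list_sched() relies on out edges   */
    list_sched(arch_env, irg);  /* compute a schedule for every block */
}
#endif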