 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is here.
 * @author Sebastian Hack
#include "iredges_t.h"
#include "irprintf_t.h"
#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
 * All scheduling info needed per node.
typedef struct _sched_irn_t {
    sched_timestep_t delay;     /**< The delay for this node if already calculated, else 0. */
    sched_timestep_t etime;     /**< The earliest time of this node. */
    unsigned already_sched : 1; /**< Set if this node is already scheduled */
    unsigned is_root : 1;       /**< is a root node of a block */
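/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * records above live in a flat array indexed by the node index, as the
 * accessor functions further below show.  A lookup is simply:
 *
 *     sched_irn_t *info = &env->sched_info[get_irn_idx(irn)];
 *     info->already_sched = 1;
 */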
 * Scheduling environment for the whole graph.
typedef struct _sched_env_t {
    sched_irn_t *sched_info;               /**< scheduling info per node */
    const list_sched_selector_t *selector; /**< The node selector. */
    const arch_env_t *arch_env;            /**< The architecture environment. */
    const ir_graph *irg;                   /**< The graph to schedule. */
    void *selector_env;                    /**< A pointer to give to the selector. */
 * Ugly global variable for the compare function
 * since qsort(3) does not pass an extra pointer.
static ir_node *curr_bl = NULL;
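/*
 * Editor's sketch (not in the original): the intended use is to set the
 * global before sorting, since qsort(3) has no user-data parameter:
 *
 *     curr_bl = block;   // hypothetical call site
 *     qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_usage);
 */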
static int cmp_usage(const void *a, const void *b)
    int res;

    res = is_live_end(curr_bl, a) - is_live_end(curr_bl, b);

 * One of them is live at the end of the block.
 * Then, that one shall be scheduled after the other.
 * The trivial selector:
 * Just assure that branches are executed last, otherwise select
 * the first node ready.
static ir_node *trivial_select(void *block_env, nodeset *ready_set)
    const arch_env_t *arch_env = block_env;

    /* assure that branches and constants are executed last */
    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        if (! arch_irn_class_is(arch_env, irn, branch) && (const_last ? (! arch_irn_class_is(arch_env, irn, const)) : 1)) {
            nodeset_break(ready_set);

    /* assure that constants are executed before branches */
    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        if (! arch_irn_class_is(arch_env, irn, branch)) {
            nodeset_break(ready_set);

    /* at last: schedule branches */
    irn = nodeset_first(ready_set);
    nodeset_break(ready_set);
static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
    return (void *) arch_env;

static void *trivial_init_block(void *graph_env, ir_node *bl)

static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
    if(sel->to_appear_in_schedule)
        res = sel->to_appear_in_schedule(block_env, irn);

    return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn));
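/*
 * Editor's note (sketch, not in the original): to_appear_in_schedule() is
 * effectively tri-state; a negative result means "no opinion" and defers to
 * the generic test above.  A selector could implement it as:
 *
 *     static int my_to_appear(void *block_env, const ir_node *irn) {
 *         if (is_my_special_node(irn))   // hypothetical predicate
 *             return 1;                  // force the node into the schedule
 *         return -1;                     // fall back to the default
 *     }
 */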
static const list_sched_selector_t trivial_selector_struct = {
    NULL, /* to_appear_in_schedule */
    NULL, /* finish_block */
    NULL  /* finish_graph */

const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
typedef struct _usage_stats_t {
    struct _usage_stats_t *next;
    int uses_in_block;    /**< Number of uses inside the current block. */
    int already_consumed; /**< Number of insns using this value that are
                               already scheduled. */

    const list_sched_selector_t *vtab;
    const arch_env_t *arch_env;
} reg_pressure_main_env_t;

    const reg_pressure_main_env_t *main_env;
    nodeset *already_scheduled;
} reg_pressure_selector_env_t;
static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
    usage_stats_t *us = get_irn_link(irn);

        us = obstack_alloc(&env->obst, sizeof(us[0]));
        us->already_consumed = 0;
        us->max_hops         = INT_MAX;
        us->next             = env->root;

    set_irn_link(irn, us);

static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
    usage_stats_t *us = get_irn_link(irn);
    assert(us && "This node must have usage stats");
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
    ir_node *bl = get_nodes_block(irn);
     * If the reached node is not in the desired block,
     * return the value passed for this situation.
    if(bl != curr_bl)
        return block_dominates(bl, curr_bl) ? 0 : INT_MAX;

     * If the node is in the current block but not
     * yet scheduled, we keep on searching from that node.
    if(!nodeset_find(env->already_scheduled, irn)) {
        for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
            ir_node *operand = get_irn_n(irn, i);

            if(get_irn_visited(operand) < visited_nr) {
                set_irn_visited(operand, visited_nr);
                tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);

     * If the node is in the current block and scheduled, return
     * the depth, which indicates the number of steps to the
     * region of scheduled nodes.
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
    ir_node *bl   = get_nodes_block(irn);
    ir_graph *irg = get_irn_irg(bl);

    const ir_edge_t *edge;

    foreach_out_edge(irn, edge) {
        ir_node *user = get_edge_src_irn(edge);
        unsigned visited_nr = get_irg_visited(irg) + 1;

        set_irg_visited(irg, visited_nr);
        max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
        res = MAX(res, max_hops);
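/*
 * Editor's note (sketch, not in the original): incrementing the graph's
 * visited number before each walk is the usual Firm idiom for getting a
 * fresh "visited" marker without resetting flags on every node:
 *
 *     unsigned nr = get_irg_visited(irg) + 1;
 *     set_irg_visited(irg, nr);
 *     // a node is unvisited in this walk iff get_irn_visited(n) < nr
 */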
static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
    reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));

    main_env->arch_env = arch_env;
    main_env->vtab     = vtab;
    irg_walk_graph(irg, firm_clear_link, NULL, NULL);

static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
    reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));

    obstack_init(&env->obst);
    env->already_scheduled = new_nodeset(32);

    env->main_env = graph_env;

     * Collect usage statistics.
    sched_foreach(bl, irn) {
        if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
            for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
                ir_node *op = get_irn_n(irn, i);
                if(must_appear_in_schedule(env->main_env->vtab, env, op)) {
                    usage_stats_t *us = get_or_set_usage_stats(env, op);
#if 0 /* Liveness is not computed here! */
                    if(is_live_end(bl, op))
                        us->uses_in_block = 99999;
static void reg_pressure_block_free(void *block_env)
    reg_pressure_selector_env_t *env = block_env;

    for(us = env->root; us; us = us->next)
        set_irn_link(us->irn, NULL);

    obstack_free(&env->obst, NULL);
    del_nodeset(env->already_scheduled);
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
    if(get_irn_mode(irn) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(irn, edge)
            res += get_result_hops_sum(env, get_edge_src_irn(edge));
    }
    else if(mode_is_data(get_irn_mode(irn)))
        res = compute_max_hops(env, irn);

static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
    for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
        ir_node *op = get_irn_n(irn, i);

        if(must_appear_in_schedule(env->main_env->vtab, env, op))
            sum += compute_max_hops(env, op);

    sum += get_result_hops_sum(env, irn);
static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
    reg_pressure_selector_env_t *env = block_env;
    ir_node *irn, *res = NULL;
    int curr_cost = INT_MAX;

    assert(nodeset_count(ready_set) > 0);

    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
            Ignore branch instructions for the time being.
            They should only be scheduled if there is nothing else.
        if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
            int costs = reg_pr_costs(env, irn);
            if (costs <= curr_cost) {

        There was no result, so we only saw branches.
        res = nodeset_first(ready_set);
        nodeset_break(ready_set);

    assert(res && "There must be a node scheduled.");

    nodeset_insert(env->already_scheduled, res);
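/*
 * Editor's sketch (not in the original; the body of the cost comparison
 * above is elided): the usual completion keeps the cheapest candidate:
 *
 *     if (costs <= curr_cost) {
 *         res       = irn;
 *         curr_cost = costs;
 *     }
 */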
 * Environment for a block scheduler.
typedef struct _block_sched_env_t {
    sched_irn_t *sched_info;    /**< scheduling info per node, copied from the global scheduler object */
    sched_timestep_t curr_time; /**< current time of the scheduler */
    nodeset *cands;             /**< the set of candidates */
    ir_node *block;             /**< the current block */
    sched_env_t *sched_env;     /**< the scheduler environment */
    const list_sched_selector_t *selector;
    void *selector_block_env;
    DEBUG_ONLY(firm_dbg_module_t *dbg;)
 * Returns non-zero if the node is already scheduled
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].already_sched;

 * Mark a node as already scheduled
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].already_sched = 1;

 * Returns non-zero if the node is a root node
static INLINE unsigned is_root_node(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].is_root;
 * Mark a node as root node
static INLINE void mark_root_node(block_sched_env_t *env, ir_node *n)
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].is_root = 1;
 * Get the current delay.
static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].delay;

 * Set the current delay.
static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].delay = delay;

 * Get the current etime.
static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].etime;

 * Set the current etime.
static void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].etime = etime;
 * Returns the exec-time for node n.
static sched_timestep_t exectime(sched_env_t *env, ir_node *n) {
    if (be_is_Keep(n) || is_Proj(n))

    if (env->selector->exectime)
        return env->selector->exectime(env->selector_env, n);

 * Calculates the latency between two ops.
static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
    /* a Keep hides a root */
    if (be_is_Keep(curr))
        return exectime(env, pred);

    /* Projs are executed immediately */

    /* the predecessor's Projs must be skipped */
        pred = get_Proj_pred(pred);

    if (env->selector->latency)
        return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
 * Try to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
    sched_timestep_t etime_p, etime;

    /* Blocks cannot be scheduled. */

     * Check if the given ir node is in a different block than the
     * currently scheduled one. If that is so, don't make the node ready.
    if (env->block != get_nodes_block(irn))

    for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
        ir_node *op = get_irn_n(irn, i);

        /* if irn is an End we have keep-alives and op might be a block, skip that */
            assert(get_irn_op(irn) == op_End);

        /* If the operand is local to the scheduled block and not yet
         * scheduled, this node cannot be made ready, so exit. */
        if (!is_already_scheduled(env, op) && get_nodes_block(op) == env->block)

    nodeset_insert(env->cands, irn);

    /* calculate the etime of this node */
    etime = env->curr_time;
        etime_p = get_irn_etime(env, pred);
        etime  += latency(env->sched_env, pred, 1, irn, 0);

        etime = etime_p > etime ? etime_p : etime;

    set_irn_etime(env, irn, etime);

    DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));
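/*
 * Editor's note (worked example, not in the original): the earliest time is
 * the later of the predecessor's availability and the current time plus the
 * edge latency.  With curr_time = 4, etime(pred) = 7 and latency = 2:
 *
 *     etime = 4 + 2;           // = 6
 *     etime = 7 > 6 ? 7 : 6;   // -> 7, pred availability dominates
 */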
 * Try to make all users of a node ready.
 * In fact, a user node can only be made ready if all its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
    const ir_edge_t *edge;

    foreach_out_edge(irn, edge) {
        ir_node *user = edge->src;

            make_ready(env, irn, user);
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return The given node.
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
    /* If the node consumes/produces data, it is appended to the schedule
     * list, otherwise, it is not put into the list */
    if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
        sched_info_t *info = get_irn_sched_info(irn);
        INIT_LIST_HEAD(&info->list);
        sched_add_before(env->block, irn);

        DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));

    /* Insert the node in the set of all already scheduled nodes. */
    mark_already_scheduled(env, irn);

    /* Remove the node from the ready set */
    if(nodeset_find(env->cands, irn))
        nodeset_remove(env->cands, irn);
 * Add the Proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the Projs after the irn, no
 * other nodes can create a new lifetime between the tuple-mode irn and
 * one of its Projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node which defines multiple
 * results.

 * @param irn The tuple-mode irn.
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
    const ir_edge_t *edge;

    assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

    foreach_out_edge(irn, edge) {
        ir_node *out = edge->src;

        assert(is_Proj(out) && "successor of a mode_T node must be a Proj");

        if (get_irn_mode(out) == mode_T)
            add_tuple_projs(env, out);

        add_to_sched(env, out);
        make_users_ready(env, out);
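/*
 * Editor's example (sketch, not in the original): for a hypothetical
 * two-result instruction the schedule after pinning looks like
 *
 *     d = Div(...)       // mode_T node
 *     q = Proj(d, 0)     // pinned directly behind d
 *     r = Proj(d, 1)     // no other node may slip in between
 */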
 * Execute the heuristic function.
static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
    for (irn = nodeset_first(ns); irn; irn = nodeset_next(ns)) {
        if (be_is_Keep(irn)) {

    return be->selector->select(be->selector_block_env, ns);
 * Returns non-zero if root is a root node in the given block.
static int is_root(ir_node *root, ir_node *block) {
    const ir_edge_t *edge;

    foreach_out_edge(root, edge) {
        ir_node *succ = get_edge_src_irn(edge);

            /* Phi nodes are always in "another" block */
            if (get_nodes_block(succ) == block)

/* we need a special mark */

static firm_dbg_module_t *xxxdbg;
 * Descend into a DAG and create a pre-order list.
static void descent(ir_node *root, ir_node *block, ir_node **list) {
    if (! is_Phi(root)) {
        /* Phi nodes always leave the block */
        for (i = get_irn_arity(root) - 1; i >= 0; --i) {
            ir_node *pred = get_irn_n(root, i);

            DBG((xxxdbg, LEVEL_3, " node %+F\n", pred));

            /* Blocks may happen as predecessors of End nodes */

            /* already seen nodes are not marked */
            if (get_irn_link(pred) != MARK)

            /* don't leave our block */
            if (get_nodes_block(pred) != block)

            set_irn_link(pred, NULL);
            descent(pred, block, list);

    set_irn_link(root, *list);
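/*
 * Editor's note (sketch, not in the original): chaining nodes through their
 * link fields forms an intrusive singly linked list; prepending each node
 * reverses the pre-order.  The list is consumed later in exactly this way:
 *
 *     for (curr = preord; curr; curr = get_irn_link(curr))
 *         ...;   // visit in reversed pre-order
 */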
 * Perform list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
static void list_sched_block(ir_node *block, void *env_ptr)
    sched_env_t *env                      = env_ptr;
    const list_sched_selector_t *selector = env->selector;
    ir_node *start_node                   = get_irg_start(get_irn_irg(block));
    sched_info_t *info                    = get_irn_sched_info(block);

    block_sched_env_t be;
    const ir_edge_t *edge;

    ir_node *root = NULL, *preord = NULL;

    /* Initialize the block's list head that will hold the schedule. */
    INIT_LIST_HEAD(&info->list);

    /* Initialize the block scheduling environment */
    be.sched_info = env->sched_info;
    be.cands      = new_nodeset(get_irn_n_edges(block));
    be.selector   = selector;

    FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
    FIRM_DBG_REGISTER(xxxdbg, "firm.be.sched");

    // firm_dbg_set_mask(be.dbg, SET_LEVEL_3);

    if (selector->init_block)
        be.selector_block_env = selector->init_block(env->selector_env, block);

    DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
    /* First step: Find the root set. */
    foreach_out_edge(block, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (is_root(succ, block)) {
            mark_root_node(&be, succ);
            set_irn_link(succ, root);

            set_irn_link(succ, MARK);

    /* Second step: calculate the pre-order list. */
    for (curr = root; curr; curr = irn) {
        irn = get_irn_link(curr);
        DBG((be.dbg, LEVEL_2, " DAG root %+F\n", curr));
        descent(curr, block, &preord);
    /* Third step: calculate the Delay. Note that our
     * list is now in pre-order, starting at root. */
    for (curr = root; curr; curr = get_irn_link(curr)) {
        if (arch_irn_class_is(env->arch_env, curr, branch)) {
            /* assure that branches can be executed last */

        if (is_root_node(&be, curr))
            d = exectime(env, curr);

            foreach_out_edge(curr, edge) {
                ir_node *n = get_edge_src_irn(edge);

                if (get_nodes_block(n) == block) {
                    ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);

        set_irn_delay(&be, curr, d);
        DB((be.dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

        /* set the etime of all nodes to 0 */
        set_irn_etime(&be, curr, 0);
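/*
 * Editor's note (sketch, not in the original): the delay is the critical
 * path length towards the end of the block:
 *
 *     delay(root) = exectime(root)
 *     delay(n)    = max over users u of n in this block:
 *                       latency(n, u) + delay(u)
 */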
    /* Then one can add all nodes that are ready to the set. */
    foreach_out_edge(block, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        /* Skip the End node because of keep-alive edges. */
        if (get_irn_opcode(irn) == iro_End)

        /* Phi functions are scheduled immediately, since they only transfer
         * data flow from the predecessors to this block. */

            /* Increase the time step. */
            be.curr_time += get_irn_etime(&be, irn);
            add_to_sched(&be, irn);
            make_users_ready(&be, irn);

        else if (irn == start_node) {
            /* The Start node will be scheduled as the first node. */
            be.curr_time += get_irn_etime(&be, irn);

            add_to_sched(&be, irn);
            add_tuple_projs(&be, irn);

            /* Other nodes must have all operands in other blocks to be made
             * ready */

            /* Check if the operands of a node are not local to this block */
            for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
                ir_node *operand = get_irn_n(irn, j);

                if (get_nodes_block(operand) == block) {

            /* Make the node ready if all operands live in a foreign block */
                DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
                make_ready(&be, NULL, irn);
    while (nodeset_count(be.cands) > 0) {
        nodeset *mcands; /**< the set of candidates with maximum delay time */
        nodeset *ecands; /**< the set of nodes in mcands whose etime <= curr_time */
        sched_timestep_t max_delay = 0;

        /* collect statistics about the amount of ready nodes */
        be_do_stat_sched_ready(block, be.cands);

        /* calculate the max delay of all candidates */
        foreach_nodeset(be.cands, irn) {
            sched_timestep_t d = get_irn_delay(&be, irn);

            max_delay = d > max_delay ? d : max_delay;

        mcands = new_nodeset(8);
        ecands = new_nodeset(8);

        /* calculate mcands and ecands */
        foreach_nodeset(be.cands, irn) {
            if (be_is_Keep(irn)) {
                nodeset_break(be.cands);

            if (get_irn_delay(&be, irn) == max_delay) {
                nodeset_insert(mcands, irn);
                if (get_irn_etime(&be, irn) <= be.curr_time)
                    nodeset_insert(ecands, irn);

        /* Keeps must be immediately scheduled */

        DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));

        /* select a node to be scheduled and check if it was ready */
        if (nodeset_count(mcands) == 1) {
            DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
            irn = nodeset_first(mcands);

            int cnt = nodeset_count(ecands);

                irn = nodeset_first(ecands);
                if (arch_irn_class_is(env->arch_env, irn, branch)) {
                    /* BEWARE: don't select a JUMP if others are still possible */

                DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));

                DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
                irn = select_node_heuristic(&be, ecands);

            DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
            irn = select_node_heuristic(&be, mcands);

        DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));

        /* Increase the time step. */
        be.curr_time += exectime(env, irn);

        /* Add the node to the schedule. */
        add_to_sched(&be, irn);

        if (get_irn_mode(irn) == mode_T)
            add_tuple_projs(&be, irn);

        make_users_ready(&be, irn);

        /* remove the scheduled node from the ready list. */
        if (nodeset_find(be.cands, irn))
            nodeset_remove(be.cands, irn);
    if (selector->finish_block)
        selector->finish_block(be.selector_block_env);

    del_nodeset(be.cands);
static const list_sched_selector_t reg_pressure_selector_struct = {
    reg_pressure_graph_init,
    reg_pressure_block_init,
    NULL, /* to_appear_in_schedule */
    reg_pressure_block_free,

const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
/* List schedule a graph. */
void list_sched(const be_irg_t *birg, int enable_mris)
    const arch_env_t *arch_env = birg->main_env->arch_env;
    ir_graph *irg              = birg->irg;

    /* Assure that the out edges are computed. */

        mris = be_sched_mris_preprocess(birg);

    num_nodes = get_irg_last_idx(irg);

    memset(&env, 0, sizeof(env));
    env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
    env.arch_env   = arch_env;
    env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

    memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));

    if (env.selector->init_graph)
        env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

    /* Schedule each single block. */
    irg_block_walk_graph(irg, list_sched_block, NULL, &env);

    if (env.selector->finish_graph)
        env.selector->finish_graph(env.selector_env);

        be_sched_mris_free(mris);

    DEL_ARR_F(env.sched_info);
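/*
 * Editor's usage sketch (not in the original): a backend invokes the list
 * scheduler once per graph; the selector comes from the ISA implementation
 * via get_list_sched_selector(), as seen above:
 *
 *     list_sched(birg, 0);   // plain list scheduling
 *     list_sched(birg, 1);   // with MRIS preprocessing enabled
 */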