/**
 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is here.
 * @author Sebastian Hack
 */
#include "iredges_t.h"
#include "irprintf_t.h"
#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
    sched_timestep_t delay;      /**< The delay for this node if already calculated, else 0. */
    sched_timestep_t etime;      /**< The earliest time of this node. */
    unsigned already_sched : 1;  /**< Set if this node is already scheduled. */
    unsigned is_root : 1;        /**< Set if this node is a root node of a block. */
} sched_irn_t;
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
    sched_irn_t *sched_info;                /**< Scheduling info per node. */
    const list_sched_selector_t *selector;  /**< The node selector. */
    const arch_env_t *arch_env;             /**< The architecture environment. */
    const ir_graph *irg;                    /**< The graph to schedule. */
    void *selector_env;                     /**< A pointer to give to the selector. */
} sched_env_t;
/**
 * Ugly global variable for the compare function,
 * since qsort(3) does not pass an extra pointer.
 */
static ir_node *curr_bl = NULL;
static int cmp_usage(const void *a, const void *b)
{
    int res = is_live_end(curr_bl, a) - is_live_end(curr_bl, b);

    /*
     * If one of them is live at the end of the block,
     * that one shall be scheduled after the other.
     */
    return res;
}
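/*
 * The pattern above in isolation: qsort(3) offers no user pointer, so any
 * context the comparator needs must travel through file-scope state.
 * A self-contained sketch with hypothetical names (assuming <stdlib.h>
 * for qsort):
 */
static int pivot;   /* context the comparator needs */

static int cmp_dist_to_pivot(const void *a, const void *b)
{
    int da = *(const int *) a - pivot;
    int db = *(const int *) b - pivot;

    if (da < 0) da = -da;
    if (db < 0) db = -db;
    return da - db;
}

static void sort_by_distance(int *arr, size_t n, int p)
{
    pivot = p;   /* publish the context before sorting */
    qsort(arr, n, sizeof(arr[0]), cmp_dist_to_pivot);
}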
/**
 * The trivial selector:
 * Just assure that branches are executed last, otherwise select
 * the first ready node.
 */
static ir_node *trivial_select(void *block_env, nodeset *ready_set)
{
    const arch_env_t *arch_env = block_env;
    ir_node *irn;

    /* assure that branches and constants are executed last */
    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);

        if (irn_class != arch_irn_class_branch && (const_last ? (irn_class != arch_irn_class_const) : 1)) {
            nodeset_break(ready_set);
            return irn;
        }
    }

    /* assure that constants are executed before branches */
    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
            nodeset_break(ready_set);
            return irn;
        }
    }

    /* at last: schedule branches */
    irn = nodeset_first(ready_set);
    nodeset_break(ready_set);

    return irn;
}
static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
    return (void *) arch_env;
}
static void *trivial_init_block(void *graph_env, ir_node *bl)
{
    return graph_env;
}
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
    int res = -1;

    if (sel->to_appear_in_schedule)
        res = sel->to_appear_in_schedule(block_env, irn);

    return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn));
}
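/*
 * Sketch of a selector callback honoring the convention above
 * (hypothetical; no selector in this file installs one): a negative
 * return value means "no opinion" and defers to the default test,
 * anything else decides directly.
 */
static int example_to_appear_in_schedule(void *block_env, const ir_node *irn)
{
    (void) block_env;

    /* decide only about Projs here; return -1 for all other nodes so
     * the default test above applies to them */
    return is_Proj(irn) ? 0 : -1;
}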
static const list_sched_selector_t trivial_selector_struct = {
    trivial_init_graph,
    trivial_init_block,
    trivial_select,
    NULL, /* to_appear_in_schedule */
    NULL, /* exectime */
    NULL, /* latency */
    NULL, /* finish_block */
    NULL  /* finish_graph */
};
const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
typedef struct _usage_stats_t {
    ir_node *irn;                 /**< The node these stats belong to. */
    struct _usage_stats_t *next;  /**< The next usage stats record. */
    int max_hops;                 /**< Maximum number of hops to a user. */
    int uses_in_block;            /**< Number of uses inside the current block. */
    int already_consumed;         /**< Number of insns using this value already scheduled. */
} usage_stats_t;
typedef struct {
    const list_sched_selector_t *vtab;
    const arch_env_t *arch_env;
} reg_pressure_main_env_t;
typedef struct {
    struct obstack obst;
    const reg_pressure_main_env_t *main_env;
    usage_stats_t *root;
    nodeset *already_scheduled;
} reg_pressure_selector_env_t;
static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
    usage_stats_t *us = get_irn_link(irn);

    if (!us) {
        us = obstack_alloc(&env->obst, sizeof(us[0]));
        us->irn              = irn;
        us->already_consumed = 0;
        us->max_hops         = INT_MAX;
        us->next             = env->root;
        env->root            = us;
        set_irn_link(irn, us);
    }

    return us;
}
static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
{
    usage_stats_t *us = get_irn_link(irn);
    assert(us && "This node must have usage stats");
    return us;
}
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
    ir_node *bl = get_nodes_block(irn);

    /*
     * If the reached node is not in the block desired,
     * return the value passed for this situation.
     */
    if (bl != curr_bl)
        return block_dominates(bl, curr_bl) ? 0 : INT_MAX;

    /*
     * If the node is in the current block but not
     * yet scheduled, we keep on searching from that node.
     */
    if (!nodeset_find(env->already_scheduled, irn)) {
        int i, n;
        int res = 0;

        for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
            ir_node *operand = get_irn_n(irn, i);

            if (get_irn_visited(operand) < visited_nr) {
                int tmp;

                set_irn_visited(operand, visited_nr);
                tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
                res = MAX(res, tmp);
            }
        }

        return res;
    }

    /*
     * If the node is in the current block and scheduled, return
     * the depth which indicates the number of steps to the
     * region of scheduled nodes.
     */
    return depth;
}
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
    ir_node *bl   = get_nodes_block(irn);
    ir_graph *irg = get_irn_irg(bl);
    int res       = 0;

    const ir_edge_t *edge;

    foreach_out_edge(irn, edge) {
        ir_node *user       = get_edge_src_irn(edge);
        unsigned visited_nr = get_irg_visited(irg) + 1;
        int max_hops;

        set_irg_visited(irg, visited_nr);
        max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
        res      = MAX(res, max_hops);
    }

    return res;
}
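/*
 * Aside: the visited-number idiom used by compute_max_hops() avoids
 * clearing visited flags between walks. A self-contained sketch of the
 * same idea with hypothetical names (plain C, not part of the
 * scheduler); call start_new_walk() before the first traversal:
 */
struct marked_node {
    unsigned visited;   /* generation number of the last walk that saw this node */
};

static unsigned walk_generation;

/* Start a new walk in O(1): bump the generation instead of resetting
 * the visited field of every node. */
static void start_new_walk(void)
{
    ++walk_generation;
}

/* Returns non-zero if the node was already seen in the current walk,
 * and marks it as seen as a side effect. */
static int seen_before(struct marked_node *n)
{
    if (n->visited == walk_generation)
        return 1;
    n->visited = walk_generation;
    return 0;
}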
static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
    reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));

    main_env->arch_env = arch_env;
    main_env->vtab     = vtab;
    irg_walk_graph(irg, firm_clear_link, NULL, NULL);

    return main_env;
}
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
    ir_node *irn;
    reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));

    obstack_init(&env->obst);
    env->already_scheduled = new_nodeset(32);
    env->root              = NULL;
    env->main_env          = graph_env;

    /*
     * Collect usage statistics.
     */
    sched_foreach(bl, irn) {
        if (must_appear_in_schedule(env->main_env->vtab, env, irn)) {
            int i, n;

            for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
                ir_node *op = get_irn_n(irn, i);
                if (must_appear_in_schedule(env->main_env->vtab, env, op)) {
                    usage_stats_t *us = get_or_set_usage_stats(env, op);
                    if (is_live_end(bl, op))
                        us->uses_in_block = 99999;
                    else
                        us->uses_in_block++;
                }
            }
        }
    }

    return env;
}
static void reg_pressure_block_free(void *block_env)
{
    reg_pressure_selector_env_t *env = block_env;
    usage_stats_t *us;

    for (us = env->root; us; us = us->next)
        set_irn_link(us->irn, NULL);

    obstack_free(&env->obst, NULL);
    del_nodeset(env->already_scheduled);
    free(env);
}
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
{
    int res = 0;

    if (get_irn_mode(irn) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(irn, edge)
            res += get_result_hops_sum(env, get_edge_src_irn(edge));
    }
    else if (mode_is_data(get_irn_mode(irn)))
        res = compute_max_hops(env, irn);

    return res;
}
static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
    int i, n;
    int sum = 0;

    for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
        ir_node *op = get_irn_n(irn, i);

        if (must_appear_in_schedule(env->main_env->vtab, env, op))
            sum += compute_max_hops(env, op);
    }

    sum += get_result_hops_sum(env, irn);

    return sum;
}
static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
{
    reg_pressure_selector_env_t *env = block_env;
    ir_node *irn, *res = NULL;
    int curr_cost      = INT_MAX;

    assert(nodeset_count(ready_set) > 0);

    for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
        /*
         * Ignore branch instructions for the time being.
         * They should only be scheduled if there is nothing else.
         */
        if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
            int costs = reg_pr_costs(env, irn);
            if (costs <= curr_cost) {
                res       = irn;
                curr_cost = costs;
            }
        }
    }

    /*
     * There was no result, so we only saw a branch.
     * Take it and finish.
     */
    if (!res) {
        res = nodeset_first(ready_set);
        nodeset_break(ready_set);

        assert(res && "There must be a node scheduled.");
    }

    nodeset_insert(env->already_scheduled, res);

    return res;
}
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
    sched_irn_t *sched_info;               /**< scheduling info per node, copied from the global scheduler object */
    sched_timestep_t curr_time;            /**< current time of the scheduler */
    nodeset *cands;                        /**< the set of candidates */
    ir_node *block;                        /**< the current block */
    sched_env_t *sched_env;                /**< the scheduler environment */
    const list_sched_selector_t *selector;
    void *selector_block_env;
    DEBUG_ONLY(firm_dbg_module_t *dbg;)
} block_sched_env_t;
/**
 * Returns non-zero if the node is already scheduled.
 */
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].already_sched;
}

/**
 * Mark a node as already scheduled.
 */
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
{
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].already_sched = 1;
}

/**
 * Returns non-zero if the node is a root node.
 */
static INLINE unsigned is_root_node(block_sched_env_t *env, ir_node *n)
{
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].is_root;
}

/**
 * Mark a node as a root node.
 */
static INLINE void mark_root_node(block_sched_env_t *env, ir_node *n)
{
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].is_root = 1;
}
/**
 * Get the current delay.
 */
static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].delay;
}

/**
 * Set the current delay.
 */
static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].delay = delay;
}

/**
 * Get the current etime.
 */
static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    return env->sched_info[idx].etime;
}

/**
 * Set the current etime.
 */
static void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
    int idx = get_irn_idx(n);

    assert(idx < ARR_LEN(env->sched_info));
    env->sched_info[idx].etime = etime;
}
/**
 * Returns the exec-time for node n.
 */
static sched_timestep_t exectime(sched_env_t *env, ir_node *n) {
    if (be_is_Keep(n) || is_Proj(n))
        return 0;
    if (env->selector->exectime)
        return env->selector->exectime(env->selector_env, n);
    return 1;
}
/**
 * Calculates the latency between two ops.
 */
static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
    /* a Keep hides a root */
    if (be_is_Keep(curr))
        return exectime(env, pred);

    /* Projs are executed immediately */
    if (is_Proj(curr))
        return 0;

    /* predecessor Projs must be skipped */
    if (is_Proj(pred))
        pred = get_Proj_pred(pred);

    if (env->selector->latency)
        return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
    return 1;
}
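/*
 * Sketch of a selector-provided latency callback matching the call
 * above (hypothetical: assumes loads answer after two cycles; a real
 * backend would consult its instruction descriptions instead):
 */
static sched_timestep_t example_latency(void *selector_env, ir_node *pred,
                                        int pred_cycle, ir_node *curr, int curr_cycle)
{
    (void) selector_env;
    (void) pred_cycle;
    (void) curr;
    (void) curr_cycle;

    /* results of a Load arrive two cycles later, everything else is
     * available in the next cycle */
    return is_Load(pred) ? 2 : 1;
}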
/**
 * Try to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1 if the node could be made ready, 0 otherwise.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
    int i, n;
    sched_timestep_t etime_p, etime;

    /* Blocks cannot be scheduled. */
    if (is_Block(irn))
        return 0;

    /*
     * Check if the given ir node is in a different block than the
     * currently scheduled one. If that is so, don't make the node ready.
     */
    if (env->block != get_nodes_block(irn))
        return 0;

    for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
        ir_node *op = get_irn_n(irn, i);

        /* if irn is an End we have keep-alive edges and op might be a block, skip that */
        if (is_Block(op)) {
            assert(get_irn_op(irn) == op_End);
            continue;
        }

        /* If the operand is local to the scheduled block and not yet
         * scheduled, this node cannot be made ready, so exit. */
        if (!is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
            return 0;
    }

    nodeset_insert(env->cands, irn);

    /* calculate the etime of this node */
    etime = env->curr_time;
    if (pred) {
        etime_p = get_irn_etime(env, pred);
        etime  += latency(env->sched_env, pred, 1, irn, 0);

        etime = etime_p > etime ? etime_p : etime;
    }

    set_irn_etime(env, irn, etime);

    DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));

    return 1;
}
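/*
 * Worked example for the etime computation above (made-up numbers):
 * with curr_time = 5 and latency(pred, irn) = 2, the candidate start
 * time is 5 + 2 = 7; if the predecessor's etime is 9, the maximum wins
 * and etime(irn) = 9. The node becomes a candidate immediately, but
 * the main loop will not treat it as executable before time 9.
 */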
/**
 * Try to make all users of a node ready.
 * A user can only be made ready if all of its operands
 * have already been scheduled; this is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
    const ir_edge_t *edge;

    foreach_out_edge(irn, edge) {
        ir_node *user = get_edge_src_irn(edge);

        make_ready(env, irn, user);
    }
}
/**
 * Compare two nodes using pointer equality.
 * @param p1 Node one.
 * @param p2 Node two.
 * @return 0 if they are identical.
 */
static int node_cmp_func(const void *p1, const void *p2)
{
    return p1 != p2;
}
/**
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return    The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
    /* If the node consumes/produces data, it is appended to the schedule
     * list, otherwise it is not put into the list. */
    if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
        sched_info_t *info = get_irn_sched_info(irn);
        INIT_LIST_HEAD(&info->list);
        sched_add_before(env->block, irn);

        DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
    }

    /* Insert the node in the set of all already scheduled nodes. */
    mark_already_scheduled(env, irn);

    /* Remove the node from the ready set. */
    if (nodeset_find(env->cands, irn))
        nodeset_remove(env->cands, irn);

    return irn;
}
/**
 * Add the Proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the Projs after the irn, no
 * other node can create a new lifetime between the tuple-mode irn and
 * one of its Projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node which defines multiple
 * results.
 * @param env The block scheduling environment.
 * @param irn The tuple-mode irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
    const ir_edge_t *edge;

    assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

    foreach_out_edge(irn, edge) {
        ir_node *out = get_edge_src_irn(edge);

        assert(is_Proj(out) && "successor of a mode_T node must be a Proj");

        if (get_irn_mode(out) == mode_T)
            add_tuple_projs(env, out);
        else {
            add_to_sched(env, out);
            make_users_ready(env, out);
        }
    }
}
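/*
 * Example of the shape handled above: a Load produces a tuple and is
 * only usable through its Projs, which add_tuple_projs() pins directly
 * behind it in the schedule:
 *
 *     Load (mode_T)
 *      +-- Proj M   (memory result)
 *      +-- Proj res (loaded value)
 */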
/**
 * Execute the heuristic function.
 */
static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
{
    ir_node *irn;

    for (irn = nodeset_first(ns); irn; irn = nodeset_next(ns)) {
        if (be_is_Keep(irn)) {
            nodeset_break(ns);
            return irn;
        }
    }

    return be->selector->select(be->selector_block_env, ns);
}
/**
 * Returns non-zero if root is a root node in the block block.
 */
static int is_root(ir_node *root, ir_node *block) {
    const ir_edge_t *edge;

    foreach_out_edge(root, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (is_Block(succ))
            continue;

        /* Phi nodes are always in "another block". */
        if (is_Phi(succ))
            continue;

        if (get_nodes_block(succ) == block)
            return 0;
    }

    return 1;
}

/* we need a special mark */
static void *MARK = &MARK;
/**
 * Descend into a DAG and create a pre-order list.
 */
static void descent(ir_node *root, ir_node *block, ir_node **list) {
    int i;

    if (! is_Phi(root)) {
        /* Phi nodes always leave the block */
        for (i = get_irn_arity(root) - 1; i >= 0; --i) {
            ir_node *pred = get_irn_n(root, i);

            /* Blocks may happen as predecessors of End nodes */
            if (is_Block(pred))
                continue;

            /* already seen nodes are not marked */
            if (get_irn_link(pred) != MARK)
                continue;

            /* don't leave our block */
            if (get_nodes_block(pred) != block)
                continue;

            descent(pred, block, list);
        }
    }

    set_irn_link(root, *list);
    *list = root;
}
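/*
 * The link-field trick used by descent() in isolation: instead of a
 * separate list structure, the traversal threads a singly linked list
 * through a pointer slot of the nodes themselves. A self-contained
 * sketch with a hypothetical node type (a tree here for brevity;
 * descent() additionally skips already-seen nodes via MARK):
 */
struct dag_node {
    struct dag_node *kids[2];   /* children, NULL if absent */
    struct dag_node *link;      /* threaded through during the walk */
};

/* Visit the children first, then push the node onto the front of
 * *list; since every node is pushed after all of its descendants,
 * reading the list from the head yields each node before the nodes
 * it depends on. */
static void collect_list(struct dag_node *n, struct dag_node **list)
{
    int i;

    for (i = 0; i < 2; ++i)
        if (n->kids[i])
            collect_list(n->kids[i], list);

    n->link = *list;
    *list   = n;
}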
/**
 * Perform list scheduling on a block.
 *
 * Note that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
    sched_env_t *env                      = env_ptr;
    const list_sched_selector_t *selector = env->selector;
    ir_node *start_node                   = get_irg_start(get_irn_irg(block));
    sched_info_t *info                    = get_irn_sched_info(block);

    block_sched_env_t be;
    const ir_edge_t *edge;
    ir_node *irn;
    int j, m;

    ir_node *root = NULL, *preord = NULL;
    ir_node *curr;

    /* Initialize the block's list head that will hold the schedule. */
    INIT_LIST_HEAD(&info->list);

    /* Initialize the block scheduling environment. */
    be.sched_info = env->sched_info;
    be.block      = block;
    be.curr_time  = 0;
    be.cands      = new_nodeset(get_irn_n_edges(block));
    be.selector   = selector;
    be.sched_env  = env;
    FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");

    /* firm_dbg_set_mask(be.dbg, SET_LEVEL_3); */

    if (selector->init_block)
        be.selector_block_env = selector->init_block(env->selector_env, block);

    DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
    /* First step: Find the root set. */
    foreach_out_edge(block, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (is_root(succ, block)) {
            mark_root_node(&be, succ);
            set_irn_link(succ, root);
            root = succ;
        }
        else
            set_irn_link(succ, MARK);
    }
    /* Second step: calculate the pre-order list. */
    preord = NULL;
    for (curr = root; curr; curr = irn) {
        irn = get_irn_link(curr);
        descent(curr, block, &preord);
    }
    root = preord;
    /* Third step: calculate the delay. Note that our
     * list is now in pre-order, starting at root. */
    for (curr = root; curr; curr = get_irn_link(curr)) {
        sched_timestep_t d;

        if (arch_irn_classify(env->arch_env, curr) == arch_irn_class_branch) {
            /* assure that branches can be executed last */
            d = 0;
        }
        else if (is_root_node(&be, curr))
            d = exectime(env, curr);
        else {
            d = 0;
            foreach_out_edge(curr, edge) {
                ir_node *n = get_edge_src_irn(edge);

                if (get_nodes_block(n) == block) {
                    sched_timestep_t ld;

                    ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
                    d  = ld > d ? ld : d;
                }
            }
        }

        set_irn_delay(&be, curr, d);
        DB((be.dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

        /* set the etime of all nodes to 0 */
        set_irn_etime(&be, curr, 0);
    }
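    /*
     * Worked example (unit latencies assumed): for a chain
     * a -> b -> cmp -> branch inside this block, the walk sets
     * delay(branch) = 0 (branches are forced last), then
     * delay(cmp) = 1, delay(b) = 2 and delay(a) = 3. The main loop
     * below prefers candidates with maximal delay, so the longest
     * chain is started first.
     */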
    /* Then one can add all nodes that are ready to the set. */
    foreach_out_edge(block, edge) {
        ir_node *irn = get_edge_src_irn(edge);

        /* Skip the End node because of keep-alive edges. */
        if (get_irn_opcode(irn) == iro_End)
            continue;

        if (is_Phi(irn)) {
            /* Phi functions are scheduled immediately, since they only transfer
             * data flow from the predecessors to this block. */

            /* Increase the time step. */
            be.curr_time += get_irn_etime(&be, irn);
            add_to_sched(&be, irn);
            make_users_ready(&be, irn);
        }
        else if (irn == start_node) {
            /* The Start node will be scheduled as the first node. */
            be.curr_time += get_irn_etime(&be, irn);

            add_to_sched(&be, irn);
            add_tuple_projs(&be, irn);
        }
        else {
            /* Other nodes must have all operands in other blocks to be made
             * ready. */
            int ready = 1;

            /* Check if the operands of a node are not local to this block. */
            for (j = 0, m = get_irn_arity(irn); j < m; ++j) {
                ir_node *operand = get_irn_n(irn, j);

                if (get_nodes_block(operand) == block) {
                    ready = 0;
                    break;
                }
            }

            /* Make the node ready, if all operands live in a foreign block. */
            if (ready) {
                DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
                make_ready(&be, NULL, irn);
            }
        }
    }
    while (nodeset_count(be.cands) > 0) {
        nodeset *mcands; /**< the set of candidates with maximum delay time */
        nodeset *ecands; /**< the set of nodes in mcands whose etime <= curr_time */
        sched_timestep_t max_delay = 0;

        /* collect statistics about the amount of ready nodes */
        be_do_stat_sched_ready(block, be.cands);

        /* calculate the max delay of all candidates */
        foreach_nodeset(be.cands, irn) {
            sched_timestep_t d = get_irn_delay(&be, irn);

            max_delay = d > max_delay ? d : max_delay;
        }

        mcands = new_nodeset(8);
        ecands = new_nodeset(8);

        /* calculate mcands and ecands */
        foreach_nodeset(be.cands, irn) {
            if (be_is_Keep(irn)) {
                nodeset_break(be.cands);
                break;
            }
            if (get_irn_delay(&be, irn) == max_delay) {
                nodeset_insert(mcands, irn);
                if (get_irn_etime(&be, irn) <= be.curr_time)
                    nodeset_insert(ecands, irn);
            }
        }
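        /*
         * Example (made-up numbers): candidates {a: delay 4, etime 3},
         * {b: delay 4, etime 9} and {c: delay 1, etime 0} at
         * curr_time = 5 give max_delay = 4, mcands = {a, b} and
         * ecands = {a}: only a is both on the critical path and
         * already executable at the current time.
         */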
        if (irn) {
            /* Keeps must be immediately scheduled */
        }
        else {
            DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));

            /* select a node to be scheduled and check if it was ready */
            if (nodeset_count(mcands) == 1) {
                DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
                irn = nodeset_first(mcands);
            }
            else {
                int cnt = nodeset_count(ecands);
                if (cnt == 1) {
                    arch_irn_class_t irn_class;

                    irn = nodeset_first(ecands);
                    irn_class = arch_irn_classify(env->arch_env, irn);

                    if (irn_class == arch_irn_class_branch) {
                        /* BEWARE: don't select a JUMP if others are still possible */
                        goto force_mcands;
                    }
                    DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));
                }
                else if (cnt > 1) {
                    DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
                    irn = select_node_heuristic(&be, ecands);
                }
                else {
force_mcands:
                    DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
                    irn = select_node_heuristic(&be, mcands);
                }
            }
        }

        del_nodeset(mcands);
        del_nodeset(ecands);
        DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));

        /* Increase the time step. */
        be.curr_time += exectime(env, irn);

        /* Add the node to the schedule. */
        add_to_sched(&be, irn);

        if (get_irn_mode(irn) == mode_T)
            add_tuple_projs(&be, irn);
        else
            make_users_ready(&be, irn);

        /* remove the scheduled node from the ready list */
        if (nodeset_find(be.cands, irn))
            nodeset_remove(be.cands, irn);
    }
    if (selector->finish_block)
        selector->finish_block(be.selector_block_env);

    del_nodeset(be.cands);
}
static const list_sched_selector_t reg_pressure_selector_struct = {
    reg_pressure_graph_init,
    reg_pressure_block_init,
    reg_pressure_select,
    NULL, /* to_appear_in_schedule */
    NULL, /* exectime */
    NULL, /* latency */
    reg_pressure_block_free,
    free  /* finish_graph */
};
const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
/* List schedule a graph. */
void list_sched(const be_irg_t *birg, int enable_mris)
{
    const arch_env_t *arch_env = birg->main_env->arch_env;
    ir_graph *irg              = birg->irg;

    int num_nodes;
    sched_env_t env;
    mris_env_t *mris = NULL;

    /* Assure that the out edges are computed. */
    edges_assure(irg);

    if (enable_mris)
        mris = be_sched_mris_preprocess(birg);

    num_nodes = get_irg_last_idx(irg);

    memset(&env, 0, sizeof(env));
    env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
    env.arch_env = arch_env;
    env.irg      = irg;
    env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);

    memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));

    if (env.selector->init_graph)
        env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

    /* Schedule each single block. */
    irg_block_walk_graph(irg, list_sched_block, NULL, &env);

    if (env.selector->finish_graph)
        env.selector->finish_graph(env.selector_env);

    if (enable_mris)
        be_sched_mris_free(mris);

    DEL_ARR_F(env.sched_info);
}
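/*
 * Typical call site (sketch): once the backend has set up a be_irg_t,
 * a single call schedules every block of the graph, optionally with
 * MRIS preprocessing enabled:
 *
 *     list_sched(birg, 0);    // 0: schedule without MRIS
 *
 * Afterwards each block's sched_info list enumerates the block's nodes
 * in schedule order.
 */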