/**
 * Scheduling algorithms.
 * Just a simple list scheduling algorithm is implemented here.
 * @author Sebastian Hack
 */

#include "iredges_t.h"
#include "irprintf_t.h"
#include "besched_t.h"
#include "belistsched.h"
#include "beschedmris.h"
/**
 * All scheduling info needed per node.
 */
typedef struct _sched_irn_t {
	sched_timestep_t delay;      /**< The delay for this node if already calculated, else 0. */
	sched_timestep_t etime;      /**< The earliest time of this node. */
	unsigned num_user;           /**< The number of real users (mode datab) of this node. */
	unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node. */
	int      reg_diff;           /**< The difference num(out registers) - num(in registers). */
	int      preorder;           /**< The pre-order position. */
	unsigned critical_path_len;  /**< The weighted length of the longest critical path. */
	unsigned already_sched : 1;  /**< Set if this node is already scheduled. */
	unsigned is_root       : 1;  /**< Set if this node is a root node of a block. */
} sched_irn_t;
/**
 * Scheduling environment for the whole graph.
 */
typedef struct _sched_env_t {
	sched_irn_t *sched_info;               /**< scheduling info per node */
	const list_sched_selector_t *selector; /**< The node selector. */
	const arch_env_t *arch_env;            /**< The architecture environment. */
	const ir_graph *irg;                   /**< The graph to schedule. */
	void *selector_env;                    /**< A pointer to give to the selector. */
	int sel_strategy;                      /**< Node selection strategy (muchnik, mueller, isa) */
} sched_env_t;
/**
 * Ugly global variable for the compare function,
 * since qsort(3) does not pass an extra pointer.
 */
static ir_node *curr_bl = NULL;

static int cmp_usage(const void *a, const void *b)
{
	int res;

	res = is_live_end(curr_bl, a) - is_live_end(curr_bl, b);

	/*
	 * One of them is live at the end of the block.
	 * Then, that one shall be scheduled after the other.
	 */
	return res;
}
/**
 * The trivial selector:
 * Just assure that branches are executed last, otherwise select
 * the first node ready.
 */
static ir_node *trivial_select(void *block_env, nodeset *ready_set)
{
	const arch_env_t *arch_env = block_env;
	ir_node *irn = NULL;

	/* assure that branches and constants are executed last */
	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		if (! arch_irn_class_is(arch_env, irn, branch)) {
			nodeset_break(ready_set);
			return irn;
		}
	}

	/* at last: schedule branches */
	irn = nodeset_first(ready_set);
	nodeset_break(ready_set);

	return irn;
}
static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	return (void *) arch_env;
}

static void *trivial_init_block(void *graph_env, ir_node *bl)
{
	return graph_env;
}
static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
{
	int res = -1;

	if (sel->to_appear_in_schedule)
		res = sel->to_appear_in_schedule(block_env, irn);

	return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn));
}
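/*
 * Illustrative note (derived from the predicate above, not part of
 * the original sources): a selector hook returning a negative value
 * means "no opinion", so the generic predicate decides; returning 0
 * keeps the node out of the schedule list, a positive value forces
 * it in.
 */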
static const list_sched_selector_t trivial_selector_struct = {
	trivial_init_graph,
	trivial_init_block,
	trivial_select,
	NULL,                /* to_appear_in_schedule */
	NULL,                /* exectime */
	NULL,                /* latency */
	NULL,                /* finish_block */
	NULL                 /* finish_graph */
};

const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
typedef struct _usage_stats_t {
	ir_node *irn;
	struct _usage_stats_t *next;
	int max_hops;
	int uses_in_block;    /**< Number of uses inside the current block. */
	int already_consumed; /**< Number of insns using this value already scheduled. */
} usage_stats_t;
typedef struct {
	const list_sched_selector_t *vtab;
	const arch_env_t *arch_env;
} reg_pressure_main_env_t;

typedef struct {
	struct obstack obst;
	const reg_pressure_main_env_t *main_env;
	usage_stats_t *root;
	nodeset *already_scheduled;
} reg_pressure_selector_env_t;
static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);

	if (!us) {
		us = obstack_alloc(&env->obst, sizeof(us[0]));
		us->irn              = irn;
		us->already_consumed = 0;
		us->max_hops         = INT_MAX;
		us->next             = env->root;
		env->root            = us;
		set_irn_link(irn, us);
	}

	return us;
}
static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
{
	usage_stats_t *us = get_irn_link(irn);
	assert(us && "This node must have usage stats");
	return us;
}
static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
{
	ir_node *bl = get_nodes_block(irn);

	/*
	 * If the reached node is not in the desired block,
	 * return the value passed for this situation.
	 */
	if (bl != curr_bl)
		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;

	/*
	 * If the node is in the current block but not
	 * yet scheduled, we keep on searching from that node.
	 */
	if (!nodeset_find(env->already_scheduled, irn)) {
		int i, n;
		int res = 0;

		for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
			ir_node *operand = get_irn_in_or_dep(irn, i);

			if (get_irn_visited(operand) < visited_nr) {
				int tmp;

				set_irn_visited(operand, visited_nr);
				tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
				res = MAX(res, tmp);
			}
		}

		return res;
	}

	/*
	 * If the node is in the current block and scheduled, return
	 * the depth which indicates the number of steps to the
	 * region of scheduled nodes.
	 */
	return depth;
}
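/*
 * Worked example (values assumed, illustrative only): a walk that
 * crosses two not yet scheduled nodes before reaching the already
 * scheduled region returns a depth of 2; a node living in a
 * dominating block contributes 0, a node in any other foreign
 * block INT_MAX.
 */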
static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
{
	ir_node *bl   = get_nodes_block(irn);
	ir_graph *irg = get_irn_irg(bl);
	int res       = 0;

	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user       = get_edge_src_irn(edge);
		unsigned visited_nr = get_irg_visited(irg) + 1;
		int max_hops;

		set_irg_visited(irg, visited_nr);
		max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
		res      = MAX(res, max_hops);
	}

	return res;
}
static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));

	main_env->arch_env = arch_env;
	main_env->vtab     = vtab;
	irg_walk_graph(irg, firm_clear_link, NULL, NULL);

	return main_env;
}
static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
{
	ir_node *irn;
	reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));

	obstack_init(&env->obst);
	env->already_scheduled = new_nodeset(32);
	env->root              = NULL;
	env->main_env          = graph_env;

	/*
	 * Collect usage statistics.
	 */
	sched_foreach(bl, irn) {
		if (must_appear_in_schedule(env->main_env->vtab, env, irn)) {
			int i, n;

			for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);
				if (must_appear_in_schedule(env->main_env->vtab, env, op)) {
					usage_stats_t *us = get_or_set_usage_stats(env, op);
#if 0 /* Liveness is not computed here! */
					if (is_live_end(bl, op))
						us->uses_in_block = 99999;
					else
#endif
						us->uses_in_block++;
				}
			}
		}
	}

	return env;
}
static void reg_pressure_block_free(void *block_env)
{
	reg_pressure_selector_env_t *env = block_env;
	usage_stats_t *us;

	for (us = env->root; us; us = us->next)
		set_irn_link(us->irn, NULL);

	obstack_free(&env->obst, NULL);
	del_nodeset(env->already_scheduled);
	free(env);
}
static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int res = 0;

	if (get_irn_mode(irn) == mode_T) {
		const ir_edge_t *edge;

		foreach_out_edge(irn, edge)
			res += get_result_hops_sum(env, get_edge_src_irn(edge));
	}
	else if (mode_is_data(get_irn_mode(irn)))
		res = compute_max_hops(env, irn);

	return res;
}
static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
{
	int i, n;
	int sum = 0;

	for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if (must_appear_in_schedule(env->main_env->vtab, env, op))
			sum += compute_max_hops(env, op);
	}

	sum += get_result_hops_sum(env, irn);

	return sum;
}
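/*
 * The cost of a candidate is thus the summed hop distance of its
 * operands to the already scheduled region plus the hop sum of its
 * results' users; reg_pressure_select() below picks the candidate
 * with the smallest such sum.
 */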
static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
{
	reg_pressure_selector_env_t *env = block_env;
	ir_node *irn, *res = NULL;
	int curr_cost      = INT_MAX;

	assert(nodeset_count(ready_set) > 0);

	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		/*
		 * Ignore branch instructions for the time being.
		 * They should only be scheduled if there is nothing else.
		 */
		if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
			int costs = reg_pr_costs(env, irn);
			if (costs <= curr_cost) {
				res       = irn;
				curr_cost = costs;
			}
		}
	}

	/*
	 * There was no result, so we only saw a branch.
	 * Take it and finish.
	 */
	if (!res) {
		res = nodeset_first(ready_set);
		nodeset_break(ready_set);
	}

	assert(res && "There must be a node scheduled.");

	nodeset_insert(env->already_scheduled, res);

	return res;
}
/**
 * Environment for a block scheduler.
 */
typedef struct _block_sched_env_t {
	sched_irn_t *sched_info;    /**< scheduling info per node, copied from the global scheduler object */
	sched_timestep_t curr_time; /**< current time of the scheduler */
	nodeset *cands;             /**< the set of candidates */
	ir_node *block;             /**< the current block */
	sched_env_t *sched_env;     /**< the scheduler environment */
	nodeset *live;              /**< simple liveness during scheduling */
	const list_sched_selector_t *selector;
	void *selector_block_env;
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
} block_sched_env_t;
/**
 * Returns non-zero if the node is already scheduled.
 */
static INLINE int is_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].already_sched;
}

/**
 * Mark a node as already scheduled.
 */
static INLINE void mark_already_scheduled(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].already_sched = 1;
}

/**
 * Returns non-zero if the node is a root node.
 */
static INLINE unsigned is_root_node(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].is_root;
}

/**
 * Mark a node as root node.
 */
static INLINE void mark_root_node(block_sched_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].is_root = 1;
}
/**
 * Get the current delay.
 */
static INLINE sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].delay;
}

/**
 * Set the current delay.
 */
static INLINE void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].delay = delay;
}

/**
 * Get the current etime.
 */
static INLINE sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].etime;
}

/**
 * Set the current etime.
 */
static INLINE void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].etime = etime;
}

/**
 * Get the number of users.
 */
static INLINE unsigned get_irn_num_user(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_user;
}

/**
 * Set the number of users.
 */
static INLINE void set_irn_num_user(block_sched_env_t *env, ir_node *n, unsigned num_user) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_user = num_user;
}

/**
 * Get the register difference.
 */
static INLINE int get_irn_reg_diff(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].reg_diff;
}

/**
 * Set the register difference.
 */
static INLINE void set_irn_reg_diff(block_sched_env_t *env, ir_node *n, int reg_diff) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].reg_diff = reg_diff;
}

/**
 * Get the pre-order position.
 */
static INLINE int get_irn_preorder(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].preorder;
}

/**
 * Set the pre-order position.
 */
static INLINE void set_irn_preorder(block_sched_env_t *env, ir_node *n, int pos) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].preorder = pos;
}
/**
 * Get the length of the critical path.
 */
static INLINE unsigned get_irn_critical_path_len(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].critical_path_len;
}

/**
 * Set the length of the critical path.
 */
static INLINE void set_irn_critical_path_len(block_sched_env_t *env, ir_node *n, unsigned len) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].critical_path_len = len;
}
/**
 * Returns the execution time for node n.
 */
static sched_timestep_t exectime(sched_env_t *env, ir_node *n) {
	if (be_is_Keep(n) || is_Proj(n))
		return 0;
	if (env->selector->exectime)
		return env->selector->exectime(env->selector_env, n);
	return 1;
}
/**
 * Calculates the latency between two ops.
 */
static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
	/* a Keep hides a root */
	if (be_is_Keep(curr))
		return exectime(env, pred);

	/* Projs are executed immediately */
	if (is_Proj(curr))
		return 0;

	/* predecessor Projs must be skipped */
	if (is_Proj(pred))
		pred = get_Proj_pred(pred);

	if (env->selector->latency)
		return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
	return 1;
}
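/*
 * Worked example (values assumed, illustrative only): a Keep edge is
 * as expensive as its predecessor's execution time; for ordinary
 * nodes a selector may report e.g. 2 cycles between a load and its
 * user, and without selector hooks both exectime() and latency()
 * fall back to a single cycle.
 */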
/**
 * Try to put a node into the ready set.
 * @param env  The block scheduler environment.
 * @param pred The previously scheduled node.
 * @param irn  The node to make ready.
 * @return 1, if the node could be made ready, 0 else.
 */
static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
{
	int i, n;
	sched_timestep_t etime_p, etime;

	/* Blocks cannot be scheduled. */
	if (is_Block(irn))
		return 0;

	/*
	 * Check, if the given ir node is in a different block than the
	 * one currently scheduled. If that is so, don't make the node ready.
	 */
	if (env->block != get_nodes_block(irn))
		return 0;

	for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
		ir_node *op = get_irn_in_or_dep(irn, i);

		/* if irn is an End we have keep-alives and op might be a block, skip that */
		if (is_Block(op)) {
			assert(get_irn_op(irn) == op_End);
			continue;
		}

		/* If the operand is local to the scheduled block and not yet
		 * scheduled, this node cannot be made ready, so exit. */
		if (!is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
			return 0;
	}

	nodeset_insert(env->cands, irn);

	/* calculate the etime of this node */
	etime = env->curr_time;
	if (pred) {
		etime_p = get_irn_etime(env, pred);
		etime  += latency(env->sched_env, pred, 1, irn, 0);

		etime = etime_p > etime ? etime_p : etime;
	}

	set_irn_etime(env, irn, etime);

	DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));

	return 1;
}
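/*
 * Example (values assumed): with curr_time == 4, a predecessor etime
 * of 6 and latency(pred, irn) == 1 the candidate gets
 * etime = max(6, 4 + 1) = 6, i.e. it is not considered executable
 * before cycle 6.
 */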
/**
 * Try to make all users of a node ready.
 * In fact, a user node can only be made ready if all its operands
 * have already been scheduled. This is checked by make_ready().
 * @param env The block schedule environment.
 * @param irn The node whose users (successors) are to be made ready.
 */
static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	foreach_out_edge(irn, edge) {
		ir_node *user = get_edge_src_irn(edge);
		if (! is_Phi(user))
			make_ready(env, irn, user);
	}

	foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
		ir_node *user = get_edge_src_irn(edge);
		if (! is_Phi(user))
			make_ready(env, irn, user);
	}
}
/**
 * Returns the number of not yet scheduled users.
 */
static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_not_sched_user;
}

/**
 * Sets the number of not yet scheduled users.
 */
static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_not_sched_user = num;
}

/**
 * Adds @p num to the number of not yet scheduled users and returns the result.
 */
static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_not_sched_user += num;
	return env->sched_info[idx].num_not_sched_user;
}
/**
 * Returns the number of users of a node having mode datab.
 */
static int get_num_successors(ir_node *irn) {
	int sum = 0;
	const ir_edge_t *edge;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);

			if (mode == mode_T)
				sum += get_num_successors(proj);
			else if (mode_is_datab(mode))
				sum += get_irn_n_edges(proj);
		}
	}
	else {
		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
				sum++;
		}
	}

	return sum;
}
/**
 * Adds irn to @p live, updates all inputs that this user is scheduled
 * and counts all of its not yet scheduled users.
 */
static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
	int i;

	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
		ir_node *in = get_irn_in_or_dep(irn, i);

		/* if in is a proj: update predecessor */
		if (is_Proj(in))
			in = get_Proj_pred(in);

		/* if in is still in the live set: reduce number of users by one */
		if (nodeset_find(env->live, in)) {
			if (add_irn_not_sched_user(env, in, -1) <= 0)
				nodeset_remove(env->live, in);
		}
	}

	/*
	 * get_num_successors returns the number of all users. This includes
	 * users in different blocks as well. As each block is scheduled
	 * separately, the liveness info of those users will not be updated,
	 * and so these users will keep up the register pressure as desired.
	 */
	i = get_num_successors(irn);
	if (i > 0) {
		set_irn_not_sched_user(env, irn, i);
		nodeset_insert(env->live, irn);
	}
}
/**
 * Returns the current register pressure for the current block
 * while scheduling.
 * This is the number of already scheduled nodes having not yet
 * scheduled users.
 */
static INLINE int get_cur_reg_pressure(block_sched_env_t *env) {
	/*
	 * Nodes with all users scheduled are removed from the live set,
	 * so the number of nodes in this set represents the current
	 * register pressure in this block.
	 */
	return nodeset_count(env->live);
}
/**
 * Append an instruction to a schedule.
 * @param env The block scheduling environment.
 * @param irn The node to add to the schedule.
 * @return    The given node.
 */
static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
{
	/* If the node consumes/produces data, it is appended to the schedule
	 * list, otherwise it is not put into the list */
	if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
		sched_info_t *info = get_irn_sched_info(irn);
		INIT_LIST_HEAD(&info->list);
		info->scheduled = 1;
		update_sched_liveness(env, irn);
		sched_add_before(env->block, irn);

		DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
	}

	/* Insert the node in the set of all already scheduled nodes. */
	mark_already_scheduled(env, irn);

	/* Remove the node from the ready set */
	if (nodeset_find(env->cands, irn))
		nodeset_remove(env->cands, irn);

	return irn;
}
/**
 * Add the proj nodes of a tuple-mode irn to the schedule immediately
 * after the tuple-mode irn. By pinning the projs after the irn, no
 * other nodes can create a new lifetime between the tuple-mode irn and
 * one of its projs. This should render a realistic image of a
 * tuple-mode irn, which in fact models a node which defines multiple
 * values.
 *
 * @param irn The tuple-mode irn.
 */
static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
{
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");

	foreach_out_edge(irn, edge) {
		ir_node *out = edge->src;

		assert(is_Proj(out) && "successor of a modeT node must be a proj");

		if (get_irn_mode(out) == mode_T)
			add_tuple_projs(env, out);
		else {
			add_to_sched(env, out);
			make_users_ready(env, out);
		}
	}
}
/**
 * Returns the difference regs_output - regs_input.
 */
static int get_reg_difference(block_sched_env_t *be, ir_node *irn) {
	int num_out = 0;
	int num_in  = 0;
	int i;

	if (get_irn_mode(irn) == mode_T) {
		/* mode_T nodes: num out regs == num Projs with mode datab */
		const ir_edge_t *edge;
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			if (mode_is_datab(get_irn_mode(proj)))
				num_out++;
		}
	}
	else
		num_out = 1;

	/* num in regs: number of ins with mode datab and not ignore */
	for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; i--) {
		ir_node *in = get_irn_in_or_dep(irn, i);
		if (mode_is_datab(get_irn_mode(in)) && ! arch_irn_is(be->sched_env->arch_env, in, ignore))
			num_in++;
	}

	return num_out - num_in;
}
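/*
 * Example (illustrative): a two-operand add producing one data
 * result yields 1 - 2 = -1, i.e. scheduling it tends to free a
 * register, while a constant without data inputs yields +1 and
 * increases pressure.
 */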
/**
 * Execute the heuristic function.
 */
static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
{
	ir_node *irn, *cand = NULL;

/* prefer instructions which can be scheduled early (weight PRIO_TIME) */
/* prefer instructions with lots of successors */
#define PRIO_NUMSUCCS   8
/* prefer instructions with long critical path */
#define PRIO_LEVEL     12
/* prefer instructions coming early in preorder */
#define PRIO_PREORD     8
/* weight of current register pressure */
#define PRIO_CUR_PRESS 20
/* weight of register pressure difference */
#define PRIO_CHG_PRESS  8
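/*
 * Illustrative example of the priority computed below (values
 * assumed, not from the original sources): a candidate with critical
 * path length 3, two users, etime 1 and preorder position 0 scores
 * (3 << PRIO_LEVEL) + (2 << PRIO_NUMSUCCS) - (1 << PRIO_TIME) minus
 * its register pressure factor; the ready node with the highest
 * score wins.
 */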
	/* schedule Keeps as early as possible */
	foreach_nodeset(ns, irn) {
		if (be_is_Keep(irn)) {
			nodeset_break(ns);
			return irn;
		}
	}

	if (be->sched_env->sel_strategy & BE_SCHED_SELECT_HEUR) {
		int max_prio     = INT_MIN;
		int cur_prio     = INT_MIN;
		int cur_pressure = get_cur_reg_pressure(be);
		int reg_fact, cand_reg_fact;

		/* priority based selection, heuristic inspired by mueller diss */
		foreach_nodeset(ns, irn) {
			/* make sure that branches are scheduled last */
			if (! arch_irn_class_is(be->sched_env->arch_env, irn, branch)) {
				int rdiff = get_irn_reg_diff(be, irn);
				int sign  = rdiff < 0;
				int chg   = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;

				reg_fact = chg << cur_pressure;
				if (reg_fact < chg)
					reg_fact = INT_MAX - 2;
				reg_fact = sign ? -reg_fact : reg_fact;

				cur_prio = (get_irn_critical_path_len(be, irn) << PRIO_LEVEL)
					//- (get_irn_delay(be, irn) << PRIO_LEVEL)
					+ (get_irn_num_user(be, irn) << PRIO_NUMSUCCS)
					- (get_irn_etime(be, irn) << PRIO_TIME)
					// - ((get_irn_reg_diff(be, irn) >> PRIO_CHG_PRESS) << ((cur_pressure >> PRIO_CUR_PRESS) - 3))
					- reg_fact
					+ (get_irn_preorder(be, irn) << PRIO_PREORD); /* high preorder means early schedule */

				if (cur_prio > max_prio) {
					cand          = irn;
					max_prio      = cur_prio;
					cand_reg_fact = reg_fact;
				}

				DBG((be->dbg, LEVEL_4, "checked NODE %+F\n", irn));
				DBG((be->dbg, LEVEL_4, "\tpriority: %d\n", cur_prio));
				DBG((be->dbg, LEVEL_4, "\tpath len: %d (%d)\n", get_irn_critical_path_len(be, irn), get_irn_critical_path_len(be, irn) << PRIO_LEVEL));
				DBG((be->dbg, LEVEL_4, "\tdelay: %d (%d)\n", get_irn_delay(be, irn), get_irn_delay(be, irn) << PRIO_LEVEL));
				DBG((be->dbg, LEVEL_4, "\t#user: %d (%d)\n", get_irn_num_user(be, irn), get_irn_num_user(be, irn) << PRIO_NUMSUCCS));
				DBG((be->dbg, LEVEL_4, "\tetime: %d (%d)\n", get_irn_etime(be, irn), -(get_irn_etime(be, irn) << PRIO_TIME)));
				DBG((be->dbg, LEVEL_4, "\tpreorder: %d (%d)\n", get_irn_preorder(be, irn), get_irn_preorder(be, irn) << PRIO_PREORD));
				DBG((be->dbg, LEVEL_4, "\treg diff: %d (%d)\n", get_irn_reg_diff(be, irn), -cand_reg_fact));
				DBG((be->dbg, LEVEL_4, "\tpressure: %d\n", cur_pressure));
			}
		}

		if (cand)
			DBG((be->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
		else
			cand = nodeset_first(ns);

		return cand;
	}

	/* use backend selector */
	cand = be->selector->select(be->selector_block_env, ns);

	return cand;
}
/**
 * Returns non-zero if @p root is a root node in block @p block.
 */
static int is_root(ir_node *root, ir_node *block) {
	const ir_edge_t *edge;

	foreach_out_edge(root, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Block(succ))
			continue;
		/* Phi nodes are always in "another" block */
		if (is_Phi(succ))
			continue;
		if (get_nodes_block(succ) == block)
			return 0;
	}

	return 1;
}

/* we need a special mark */
static char _mark;
#define MARK &_mark

static firm_dbg_module_t *xxxdbg;
/**
 * Descend into a DAG and create a pre-order list.
 */
static void descent(ir_node *root, ir_node *block, ir_node **list, block_sched_env_t *env, unsigned path_len) {
	int i;

	if (! is_Phi(root)) {
		path_len += exectime(env->sched_env, root);
		if (get_irn_critical_path_len(env, root) < path_len) {
			set_irn_critical_path_len(env, root, path_len);
		}

		/* Phi nodes always leave the block */
		for (i = get_irn_ins_or_deps(root) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_in_or_dep(root, i);

			DBG((xxxdbg, LEVEL_3, " node %+F\n", pred));

			/* Blocks may happen as predecessors of End nodes */
			if (is_Block(pred))
				continue;

			/* already seen nodes are not marked */
			if (get_irn_link(pred) != MARK)
				continue;

			/* don't leave our block */
			if (get_nodes_block(pred) != block)
				continue;

			set_irn_link(pred, NULL);

			descent(pred, block, list, env, path_len);
		}
	}
	set_irn_link(root, *list);
	*list = root;
}
/**
 * Perform list scheduling on a block.
 *
 * Note, that the caller must compute a linked list of nodes in the block
 * using the link field before calling this function.
 *
 * Also the outs must have been computed.
 *
 * @param block The block node.
 * @param env   Scheduling environment.
 */
static void list_sched_block(ir_node *block, void *env_ptr)
{
	sched_env_t *env                      = env_ptr;
	const list_sched_selector_t *selector = env->selector;
	ir_node *start_node                   = get_irg_start(get_irn_irg(block));
	sched_info_t *info                    = get_irn_sched_info(block);

	block_sched_env_t be;
	const ir_edge_t *edge;
	ir_node *irn;
	ir_node *curr;
	int j, m;
	int cur_pos;

	ir_node *root = NULL, *preord = NULL;

	/* Initialize the block's list head that will hold the schedule. */
	INIT_LIST_HEAD(&info->list);

	/* Initialize the block scheduling environment */
	be.sched_info = env->sched_info;
	be.block      = block;
	be.curr_time  = 0;
	be.cands      = new_nodeset(get_irn_n_edges(block));
	be.live       = new_nodeset(get_irn_n_edges(block));
	be.selector   = selector;
	be.sched_env  = env;

	FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
	FIRM_DBG_REGISTER(xxxdbg, "firm.be.schedxxx");

	//	firm_dbg_set_mask(be.dbg, SET_LEVEL_3);

	if (selector->init_block)
		be.selector_block_env = selector->init_block(env->selector_env, block);
	DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));

	/* First step: Find the root set. */
	foreach_out_edge(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_root(succ, block)) {
			mark_root_node(&be, succ);
			set_irn_link(succ, root);
			root = succ;
		}
		else
			set_irn_link(succ, MARK);
	}

	/* Second step: calculate the pre-order list. */
	preord = NULL;
	for (curr = root; curr; curr = irn) {
		irn = get_irn_link(curr);
		DBG((be.dbg, LEVEL_2, " DAG root %+F\n", curr));
		descent(curr, block, &preord, &be, 0);
	}
	root = preord;
	/* Third step: calculate the Delay. Note that our
	 * list is now in pre-order, starting at root.
	 */
	for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
		sched_timestep_t d;

		if (arch_irn_class_is(env->arch_env, curr, branch)) {
			/* assure, that branches can be executed last */
			d = 0;
		}
		else {
			if (is_root_node(&be, curr))
				d = exectime(env, curr);
			else {
				d = 0;

				foreach_out_edge(curr, edge) {
					ir_node *n = get_edge_src_irn(edge);

					if (get_nodes_block(n) == block) {
						sched_timestep_t ld;

						ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
						d  = ld > d ? ld : d;
					}
				}

				foreach_out_edge_kind(curr, edge, EDGE_KIND_DEP) {
					ir_node *n = get_edge_src_irn(edge);

					if (get_nodes_block(n) == block) {
						sched_timestep_t ld;

						ld = latency(env, curr, 1, n, 0) + get_irn_delay(&be, n);
						d  = ld > d ? ld : d;
					}
				}
			}
		}

		set_irn_delay(&be, curr, d);
		DB((be.dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));
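		/*
		 * Illustrative example (values assumed): a root node with
		 * exectime 1 gets delay 1; a node feeding it over an edge of
		 * latency 1 gets delay = 1 + 1 = 2, so nodes farther from the
		 * block's roots carry larger delays and are preferred earlier.
		 */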
		/* set the etime of all nodes to 0 */
		set_irn_etime(&be, curr, 0);

		set_irn_preorder(&be, curr, cur_pos);
	}
	/* Then one can add all nodes that are ready to the set. */
	foreach_out_edge(block, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		/* Skip the end node because of keep-alive edges. */
		if (get_irn_opcode(irn) == iro_End)
			continue;

		if (is_Phi(irn)) {
			/* Phi functions are scheduled immediately, since they only transfer
			 * data flow from the predecessors to this block. */

			/* Increase the time step. */
			be.curr_time += get_irn_etime(&be, irn);
			add_to_sched(&be, irn);
			make_users_ready(&be, irn);
		}
		else if (irn == start_node) {
			/* The start block will be scheduled as the first node */
			be.curr_time += get_irn_etime(&be, irn);

			add_to_sched(&be, irn);
			add_tuple_projs(&be, irn);
		}
		else {
			/* Other nodes must have all operands in other blocks to be made
			 * ready */
			int ready = 1;

			/* Check, if the operands of a node are not local to this block */
			for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
				ir_node *operand = get_irn_in_or_dep(irn, j);

				if (get_nodes_block(operand) == block) {
					ready = 0;
					break;
				}
				else {
					/* live-in values increase register pressure */
					update_sched_liveness(&be, operand);
				}
			}

			/* Make the node ready, if all operands live in a foreign block */
			if (ready) {
				DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
				make_ready(&be, NULL, irn);
			}
		}

		/* calculate the number of users (needed for heuristic) */
		set_irn_num_user(&be, irn, get_num_successors(irn));

		/* calculate the register difference (needed for heuristic) */
		set_irn_reg_diff(&be, irn, get_reg_difference(&be, irn));
	}
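	/*
	 * Selection policy of the main loop below: with the Muchnik
	 * strategy, mcands holds the ready candidates of maximum delay
	 * and ecands those of them already executable at the current
	 * time; singleton sets are picked directly, everything else is
	 * decided by select_node_heuristic().
	 */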
	while (nodeset_count(be.cands) > 0) {
		nodeset *mcands;      /**< the set of candidates with maximum delay time */
		nodeset *ecands;      /**< the set of nodes in mcands whose etime <= curr_time */
		nodeset *local_cands;
		sched_timestep_t max_delay = 0;

		/* collect statistics about the amount of ready nodes */
		be_do_stat_sched_ready(block, be.cands);

		/* calculate the max delay of all candidates */
		foreach_nodeset(be.cands, irn) {
			sched_timestep_t d = get_irn_delay(&be, irn);

			max_delay = d > max_delay ? d : max_delay;
		}

		if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
			mcands = new_nodeset(8);
			ecands = new_nodeset(8);
		}
		else {
			local_cands = new_nodeset(8);
		}

		/* calculate mcands and ecands */
		foreach_nodeset(be.cands, irn) {
			if (be_is_Keep(irn)) {
				nodeset_break(be.cands);
				break;
			}
			if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
				if (get_irn_delay(&be, irn) == max_delay) {
					nodeset_insert(mcands, irn);
					if (get_irn_etime(&be, irn) <= be.curr_time)
						nodeset_insert(ecands, irn);
				}
			}
			else {
				nodeset_insert(local_cands, irn);
			}
		}
		/* Keeps must be immediately scheduled */
		if (! irn) {
			DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));

			/* select a node to be scheduled and check if it was ready */
			if (! (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK)) {
				DB((be.dbg, LEVEL_3, "\tlocal_cands = %d\n", nodeset_count(local_cands)));
				irn = select_node_heuristic(&be, local_cands);
			}
			else if (nodeset_count(mcands) == 1) {
				irn = nodeset_first(mcands);
				DB((be.dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
			}
			else {
				int cnt = nodeset_count(ecands);
				if (cnt == 1) {
					irn = nodeset_first(ecands);

					if (arch_irn_class_is(env->arch_env, irn, branch)) {
						/* BEWARE: don't select a JUMP if others are still possible */
						goto force_mcands;
					}
					DB((be.dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
				}
				else if (cnt > 1) {
					DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
					irn = select_node_heuristic(&be, ecands);
				}
				else {
force_mcands:
					DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
					irn = select_node_heuristic(&be, mcands);
				}
			}
		}

		if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
			del_nodeset(mcands);
			del_nodeset(ecands);
		}
		else {
			del_nodeset(local_cands);
		}
		DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));

		/* Increase the time step. */
		be.curr_time += exectime(env, irn);

		/* Add the node to the schedule. */
		add_to_sched(&be, irn);

		if (get_irn_mode(irn) == mode_T)
			add_tuple_projs(&be, irn);
		else
			make_users_ready(&be, irn);

		/* remove the scheduled node from the ready list. */
		if (nodeset_find(be.cands, irn))
			nodeset_remove(be.cands, irn);
	}

	if (selector->finish_block)
		selector->finish_block(be.selector_block_env);

	del_nodeset(be.cands);
	del_nodeset(be.live);
}
static const list_sched_selector_t reg_pressure_selector_struct = {
	reg_pressure_graph_init,
	reg_pressure_block_init,
	reg_pressure_select,
	NULL,                    /* to_appear_in_schedule */
	NULL,                    /* exectime */
	NULL,                    /* latency */
	reg_pressure_block_free,
	NULL                     /* finish_graph */
};

const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
/* List schedule a graph. */
void list_sched(const be_irg_t *birg, be_options_t *be_opts)
{
	const arch_env_t *arch_env = birg->main_env->arch_env;
	ir_graph *irg              = birg->irg;

	int num_nodes;
	sched_env_t env;
	mris_env_t *mris;

	/* Assure, that the out edges are computed */
	edges_assure(irg);

	mris = be_sched_mris_preprocess(birg);

	num_nodes = get_irg_last_idx(irg);

	memset(&env, 0, sizeof(env));
	env.selector     = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
	env.arch_env     = arch_env;
	env.irg          = irg;
	env.sel_strategy = be_opts->sched_select;
	env.sched_info   = NEW_ARR_F(sched_irn_t, num_nodes);

	memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));

	if (env.selector->init_graph)
		env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);

	/* Schedule each single block. */
	irg_block_walk_graph(irg, list_sched_block, NULL, &env);

	if (env.selector->finish_graph)
		env.selector->finish_graph(env.selector_env);

	be_sched_mris_free(mris);

	DEL_ARR_F(env.sched_info);
}
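/*
 * Usage sketch (illustrative, surrounding driver code assumed): a
 * backend invokes list_sched(birg, be_opts) once per graph after
 * instruction selection; the selector returned by the ISA, or one of
 * the generic ones above (trivial_selector, reg_pressure_selector),
 * then steers the per-block list scheduling.
 */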