/**
 * Implements a trace scheduler as presented in Muchnick[TM].
 * Originally implemented by Michael Beck.
 * @author Christian Wuerdig
 */
#include "iredges_t.h"

#include "besched_t.h"
#include "belistsched.h"
#include "benode_t.h"
/* we need a special mark */
static char _mark;
#define MARK &_mark
typedef struct _trace_irn {
	sched_timestep_t delay;      /**< The delay for this node if already calculated, else 0. */
	sched_timestep_t etime;      /**< The earliest time of this node. */
	unsigned num_user;           /**< The number of real users (mode datab) of this node */
	int      reg_diff;           /**< The difference num(out registers) - num(in registers) */
	int      preorder;           /**< The pre-order position */
	unsigned critical_path_len;  /**< The weighted length of the longest critical path */
	unsigned is_root : 1;        /**< is a root node of a block */
} trace_irn_t;
typedef struct _trace_env {
	trace_irn_t      *sched_info;           /**< trace scheduling information about the nodes */
	const arch_env_t *arch_env;             /**< the arch environment */
	sched_timestep_t curr_time;             /**< current time of the scheduler */
	void             *selector_env;         /**< the backend selector environment */
	const list_sched_selector_t *selector;  /**< the actual backend selector */
	be_lv_t          *liveness;             /**< The liveness for the irg */
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
} trace_env_t;
/**
 * Returns non-zero if the node is a root node.
 */
static INLINE unsigned is_root_node(trace_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].is_root;
}

/**
 * Marks a node as root node.
 */
static INLINE void mark_root_node(trace_env_t *env, ir_node *n)
{
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].is_root = 1;
}
/**
 * Get the current delay.
 */
static INLINE sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].delay;
}

/**
 * Set the current delay.
 */
static INLINE void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].delay = delay;
}
/**
 * Get the current etime.
 */
static INLINE sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].etime;
}

/**
 * Set the current etime.
 */
static INLINE void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].etime = etime;
}
/**
 * Get the number of users.
 */
static INLINE unsigned get_irn_num_user(trace_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_user;
}

/**
 * Set the number of users.
 */
static INLINE void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_user = num_user;
}
/**
 * Get the register difference.
 */
static INLINE int get_irn_reg_diff(trace_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].reg_diff;
}

/**
 * Set the register difference.
 */
static INLINE void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].reg_diff = reg_diff;
}
/**
 * Get the pre-order position.
 */
static INLINE int get_irn_preorder(trace_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].preorder;
}

/**
 * Set the pre-order position.
 */
static INLINE void set_irn_preorder(trace_env_t *env, ir_node *n, int pos) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].preorder = pos;
}
/**
 * Get the critical path length.
 */
static INLINE unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].critical_path_len;
}

/**
 * Set the critical path length.
 */
static INLINE void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len) {
	int idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].critical_path_len = len;
}
/**
 * Returns the exec-time for node n.
 */
static sched_timestep_t exectime(trace_env_t *env, ir_node *n) {
	if (be_is_Keep(n) || is_Proj(n))
		return 0;
	if (env->selector->exectime)
		return env->selector->exectime(env->selector_env, n);
	return 1;
}
/**
 * Calculates the latency between two ops.
 */
static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
	/* a Keep hides a root */
	if (be_is_Keep(curr))
		return exectime(env, pred);

	/* Projs are executed immediately */
	if (is_Proj(curr))
		return 0;

	/* predecessor Projs must be skipped */
	if (is_Proj(pred))
		pred = get_Proj_pred(pred);

	if (env->selector->latency)
		return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
	return 1;
}
/**
 * Returns the number of users of a node having mode datab.
 */
static int get_num_successors(ir_node *irn) {
	int sum = 0;
	const ir_edge_t *edge;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);

			if (mode == mode_T)
				sum += get_num_successors(proj);
			else if (mode_is_datab(mode))
				sum += get_irn_n_edges(proj);
		}
	}
	else {
		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
				sum++;
		}
	}

	return sum;
}
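/*
 * A small worked example of the counting above (illustrative, assuming a
 * typical mode_T Load): the Load has a memory Proj and a data Proj with
 * three users. Mode M is not datab, so only the data Proj contributes and
 * get_num_successors(load) yields 3.
 */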
/**
 * Returns the difference of regs_output - regs_input.
 */
static int get_reg_difference(trace_env_t *env, ir_node *irn) {
	int num_out = 0;
	int num_in  = 0;
	int i;
	ir_node *block = get_nodes_block(irn);

	if (be_is_Call(irn)) {
		/* we want calls preferred */
		return -5;
	}

	if (get_irn_mode(irn) == mode_T) {
		/* mode_T nodes: num out regs == num Projs with mode datab */
		const ir_edge_t *edge;
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			if (mode_is_datab(get_irn_mode(proj)))
				num_out++;
		}
	}
	else
		num_out = 1;

	/* num in regs: number of ins with mode datab and not ignore */
	for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
		ir_node *in = get_irn_n(irn, i);

		if (! be_is_live_end(env->liveness, block, in) &&  /* if the value lives outside of the block: do not count */
			mode_is_datab(get_irn_mode(in))            &&  /* must be a data node */
			! arch_irn_is(env->arch_env, in, ignore))      /* ignore "ignore" nodes :) */
			num_in++;
	}

	return num_out - num_in;
}
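/*
 * A worked example of reg_diff (illustrative): a two-operand Add produces
 * one datab result (num_out = 1); if both operands are datab values that
 * neither live past the block end nor are "ignore" nodes, num_in = 2 and
 * reg_diff = 1 - 2 = -1. Scheduling the Add thus lowers register pressure,
 * which heuristic_select below rewards via its reg_fact term.
 */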
/**
 * Descends into a DAG and creates a pre-order list.
 */
static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len) {
	int i;

	if (! is_Phi(root)) {
		path_len += exectime(env, root);
		if (get_irn_critical_path_len(env, root) < path_len) {
			set_irn_critical_path_len(env, root, path_len);
		}

		/* calculate number of users (needed for heuristic) */
		set_irn_num_user(env, root, get_num_successors(root));

		/* calculate register difference (needed for heuristic) */
		set_irn_reg_diff(env, root, get_reg_difference(env, root));

		/* Phi nodes always leave the block */
		for (i = get_irn_arity(root) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_n(root, i);

			DBG((env->dbg, LEVEL_3, "   node %+F\n", pred));

			/* Blocks may happen as predecessors of End nodes */
			if (is_Block(pred))
				continue;

			/* already seen nodes are not marked */
			if (get_irn_link(pred) != MARK)
				continue;

			/* don't leave our block */
			if (get_nodes_block(pred) != block)
				continue;

			set_irn_link(pred, NULL);

			descent(pred, block, list, env, path_len);
		}
	}
	set_irn_link(root, *list);
	*list = root;
}
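/*
 * Sketch of the link discipline used here (illustrative): the caller marks
 * every non-root node of the block with MARK; descent() clears the mark on
 * first visit and threads finished nodes onto *list through their link
 * field, so after all DAG roots have been visited, *list holds the block's
 * nodes in the pre-order used by the delay pass below.
 */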
/**
 * Returns non-zero if root is a root in block block.
 */
static int is_root(ir_node *root, ir_node *block) {
	const ir_edge_t *edge;

	foreach_out_edge(root, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Block(succ))
			continue;
		/* Phi nodes are always in "another" block */
		if (is_Phi(succ))
			continue;
		if (get_nodes_block(succ) == block)
			return 0;
	}
	return 1;
}
/**
 * Performs initial block calculations for trace scheduling.
 */
static void trace_preprocess_block(trace_env_t *env, ir_node *block) {
	ir_node *root = NULL, *preord = NULL;
	ir_node *curr, *irn;
	int cur_pos;
	const ir_edge_t *edge;

	/* First step: Find the root set. */
	foreach_out_edge(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_root(succ, block)) {
			mark_root_node(env, succ);
			set_irn_link(succ, root);
			root = succ;
		}
		else
			set_irn_link(succ, MARK);
	}

	/* Second step: calculate the pre-order list. */
	for (curr = root; curr; curr = irn) {
		irn = get_irn_link(curr);
		DBG((env->dbg, LEVEL_2, "   DAG root %+F\n", curr));
		descent(curr, block, &preord, env, 0);
	}
	root = preord;

	/* Third step: calculate the Delay. Note that our
	 * list is now in pre-order, starting at root.
	 */
	for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
		sched_timestep_t d;

		if (arch_irn_class_is(env->arch_env, curr, branch)) {
			/* assure that branches can be executed last */
			d = 0;
		}
		else {
			if (is_root_node(env, curr))
				d = exectime(env, curr);
			else {
				d = 0;
				foreach_out_edge(curr, edge) {
					ir_node *n = get_edge_src_irn(edge);

					if (get_nodes_block(n) == block) {
						sched_timestep_t ld;

						ld = latency(env, curr, 1, n, 0) + get_irn_delay(env, n);
						d = ld > d ? ld : d;
					}
				}
			}
		}

		set_irn_delay(env, curr, d);
		DB((env->dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

		/* set the etime of all nodes to 0 */
		set_irn_etime(env, curr, 0);

		set_irn_preorder(env, curr, cur_pos);
	}
}
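/*
 * The delay recurrence computed above, as a worked example (illustrative,
 * assuming unit exec times and latencies): for the in-block chain
 * a -> b -> c with root c, delay(c) = exectime(c) = 1, then
 * delay(b) = latency(b, c) + delay(c) = 2 and delay(a) = 3, so nodes on
 * the longest remaining path carry the largest delay and are preferred
 * by muchnik_select below.
 */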
/**
 * This function gets called after a node finally has been made ready.
 */
static void trace_node_ready(void *data, ir_node *irn, ir_node *pred) {
	trace_env_t *env = data;
	sched_timestep_t etime_p, etime;

	etime = env->curr_time;
	if (pred) {
		etime_p = get_irn_etime(env, pred);
		etime  += latency(env, pred, 1, irn, 0);
		etime   = etime_p > etime ? etime_p : etime;
	}

	set_irn_etime(env, irn, etime);
	DB((env->dbg, LEVEL_2, "\tset etime of %+F to %u\n", irn, etime));
}
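/*
 * Worked example (illustrative): assume curr_time = 5 and pred became
 * ready with etime 7. With latency(pred, irn) = 1 the candidate time is
 * 5 + 1 = 6, but since etime_p = 7 is larger, irn's etime becomes 7:
 * a node is never considered earlier than the node that made it ready.
 */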
/**
 * Update the current time after irn has been selected.
 */
static void trace_update_time(void *data, ir_node *irn) {
	trace_env_t *env = data;
	if (is_Phi(irn) || get_irn_opcode(irn) == iro_Start) {
		env->curr_time += get_irn_etime(env, irn);
	}
	else {
		env->curr_time += exectime(env, irn);
	}
}
/**
 * Allocates memory and initializes the trace scheduling environment.
 * @param arch_env The architecture environment
 * @param irg      The graph to schedule
 * @return The environment
 */
static trace_env_t *trace_init(const arch_env_t *arch_env, ir_graph *irg) {
	trace_env_t *env = xcalloc(1, sizeof(*env));
	int         nn   = get_irg_last_idx(irg);

	env->arch_env   = arch_env;
	env->curr_time  = 0;
	env->sched_info = NEW_ARR_F(trace_irn_t, nn);
	env->liveness   = be_liveness(irg);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");

	memset(env->sched_info, 0, nn * sizeof(*(env->sched_info)));

	return env;
}
/**
 * Frees all memory allocated for the trace scheduling environment.
 * @param data The environment
 */
static void trace_free(void *data) {
	trace_env_t *env = data;
	be_liveness_free(env->liveness);
	DEL_ARR_F(env->sched_info);
	free(env);
}
/**
 * Simple selector. Just assure that jumps are scheduled last.
 */
static ir_node *basic_selection(const arch_env_t *arch_env, nodeset *ready_set) {
	ir_node *irn = NULL;

	/* assure that branches and constants are executed last */
	for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
		if (! arch_irn_class_is(arch_env, irn, branch)) {
			nodeset_break(ready_set);
			return irn;
		}
	}

	/* at last: schedule branches */
	irn = nodeset_first(ready_set);
	nodeset_break(ready_set);

	return irn;
}
/**
 * The Muchnick selector.
 */
static ir_node *muchnik_select(void *block_env, nodeset *ready_set, nodeset *live_set)
{
	trace_env_t *env = block_env;
	nodeset *mcands, *ecands;
	sched_timestep_t max_delay = 0;
	ir_node *irn;

	/* calculate the max delay of all candidates */
	foreach_nodeset(ready_set, irn) {
		sched_timestep_t d = get_irn_delay(env, irn);

		max_delay = d > max_delay ? d : max_delay;
	}

	mcands = new_nodeset(8);
	ecands = new_nodeset(8);

	/* build mcands and ecands */
	foreach_nodeset(ready_set, irn) {
		if (get_irn_delay(env, irn) == max_delay) {
			nodeset_insert(mcands, irn);
			if (get_irn_etime(env, irn) <= env->curr_time)
				nodeset_insert(ecands, irn);
		}
	}

	if (nodeset_count(mcands) == 1) {
		irn = nodeset_first(mcands);
		DB((env->dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
	}
	else {
		int cnt = nodeset_count(ecands);
		if (cnt == 1) {
			irn = nodeset_first(ecands);

			if (arch_irn_class_is(env->arch_env, irn, branch)) {
				/* BEWARE: don't select a JUMP if others are still possible */
				goto force_mcands;
			}
			DB((env->dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
		}
		else if (cnt > 1) {
			DB((env->dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
			irn = basic_selection(env->arch_env, ecands);
		}
		else {
force_mcands:
			DB((env->dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
			irn = basic_selection(env->arch_env, mcands);
		}
	}

	del_nodeset(mcands);
	del_nodeset(ecands);

	return irn;
}
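/*
 * Selection sketch (illustrative): with ready set {a: delay 3, b: delay 3,
 * c: delay 1}, mcands = {a, b}. If only a satisfies etime <= curr_time it
 * is the single ecand and is picked directly, unless it is a branch, in
 * which case the code above falls back to mcands so that jumps stay last.
 */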
static void *muchnik_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
{
	trace_env_t *env  = trace_init(arch_env, irg);
	env->selector     = vtab;
	env->selector_env = (void*) arch_env;
	return (void*) env;
}
static void *muchnik_init_block(void *graph_env, ir_node *bl)
{
	trace_preprocess_block(graph_env, bl);
	return graph_env;
}
static const list_sched_selector_t muchnik_selector_struct = {
	muchnik_init_graph,
	muchnik_init_block,
	muchnik_select,
	NULL,                /* to_appear_in_schedule */
	trace_node_ready,    /* node_ready */
	trace_update_time,   /* node_selected */
	NULL,                /* exectime */
	NULL,                /* latency */
	NULL,                /* finish_block */
	trace_free           /* finish_graph */
};

const list_sched_selector_t *muchnik_selector = &muchnik_selector_struct;
/**
 * Execute the heuristic function.
 */
static ir_node *heuristic_select(void *block_env, nodeset *ns, nodeset *lv)
{
	trace_env_t *trace_env   = block_env;
	ir_node     *irn, *cand  = NULL;
	int         max_prio     = INT_MIN;
	int         cur_prio     = INT_MIN;
	int         cur_pressure = nodeset_count(lv);
	int         reg_fact, cand_reg_fact;
	/* prefer instructions which can be scheduled early */
#define PRIO_TIME        3
	/* prefer instructions with lots of successors */
#define PRIO_NUMSUCCS    8
	/* prefer instructions with long critical path */
#define PRIO_LEVEL      12
	/* prefer instructions coming early in preorder */
#define PRIO_PREORD      8
	/* weight of current register pressure */
#define PRIO_CUR_PRESS  20
	/* weight of register pressure difference */
#define PRIO_CHG_PRESS   8
	/* priority based selection, heuristic inspired by Mueller's dissertation */
	foreach_nodeset(ns, irn) {
		/* make sure that branches are scheduled last */
		if (! arch_irn_class_is(trace_env->arch_env, irn, branch)) {
			int rdiff = get_irn_reg_diff(trace_env, irn);
			int sign  = rdiff < 0;
			int chg   = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;

			//reg_fact = chg << cur_pressure;
			reg_fact = chg * cur_pressure;
			if (reg_fact < chg)
				reg_fact = INT_MAX - 2;
			reg_fact = sign ? -reg_fact : reg_fact;

			cur_prio = (get_irn_critical_path_len(trace_env, irn) << PRIO_LEVEL)
				//- (get_irn_delay(trace_env, irn) << PRIO_LEVEL)
				+ (get_irn_num_user(trace_env, irn) << PRIO_NUMSUCCS)
				- (get_irn_etime(trace_env, irn) << PRIO_TIME)
				//- ((get_irn_reg_diff(trace_env, irn) >> PRIO_CHG_PRESS) << ((cur_pressure >> PRIO_CUR_PRESS) - 3))
				- reg_fact
				+ (get_irn_preorder(trace_env, irn) << PRIO_PREORD); /* high preorder means early schedule */
			if (cur_prio > max_prio) {
				cand          = irn;
				max_prio      = cur_prio;
				cand_reg_fact = reg_fact;
			}

			DBG((trace_env->dbg, LEVEL_4, "checked NODE %+F\n", irn));
			DBG((trace_env->dbg, LEVEL_4, "\tpriority: %d\n", cur_prio));
			DBG((trace_env->dbg, LEVEL_4, "\tpath len: %d (%d)\n", get_irn_critical_path_len(trace_env, irn), get_irn_critical_path_len(trace_env, irn) << PRIO_LEVEL));
			DBG((trace_env->dbg, LEVEL_4, "\tdelay: %d (%d)\n", get_irn_delay(trace_env, irn), get_irn_delay(trace_env, irn) << PRIO_LEVEL));
			DBG((trace_env->dbg, LEVEL_4, "\t#user: %d (%d)\n", get_irn_num_user(trace_env, irn), get_irn_num_user(trace_env, irn) << PRIO_NUMSUCCS));
			DBG((trace_env->dbg, LEVEL_4, "\tetime: %d (%d)\n", get_irn_etime(trace_env, irn), 0 - (get_irn_etime(trace_env, irn) << PRIO_TIME)));
			DBG((trace_env->dbg, LEVEL_4, "\tpreorder: %d (%d)\n", get_irn_preorder(trace_env, irn), get_irn_preorder(trace_env, irn) << PRIO_PREORD));
			DBG((trace_env->dbg, LEVEL_4, "\treg diff: %d (%d)\n", get_irn_reg_diff(trace_env, irn), 0 - reg_fact));
			DBG((trace_env->dbg, LEVEL_4, "\tpressure: %d\n", cur_pressure));
		}
	}

	if (cand) {
		DBG((trace_env->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
	}
	else {
		cand = basic_selection(trace_env->arch_env, ns);
	}

	return cand;
}
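/*
 * Worked priority example (illustrative, using the weights above): a node
 * with critical_path_len 2, one user, etime 0, preorder 4 and reg_diff 0
 * scores (2 << PRIO_LEVEL) + (1 << PRIO_NUMSUCCS) - 0 - 0 + (4 << PRIO_PREORD)
 * = 8192 + 256 + 1024 = 9472; the critical path term dominates by design.
 */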
static const list_sched_selector_t heuristic_selector_struct = {
	muchnik_init_graph,
	muchnik_init_block,
	heuristic_select,
	NULL,                /* to_appear_in_schedule */
	trace_node_ready,    /* node_ready */
	trace_update_time,   /* node_selected */
	NULL,                /* exectime */
	NULL,                /* latency */
	NULL,                /* finish_block */
	trace_free           /* finish_graph */
};

const list_sched_selector_t *heuristic_selector = &heuristic_selector_struct;