/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Implements a trace scheduler as presented in Muchnick [TM].
 * @author  Michael Beck
 */
#include <assert.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "array.h"
#include "debug.h"
#include "iredges_t.h"
#include "xmalloc.h"

#include "bearch.h"
#include "beirg.h"
#include "belistsched.h"
#include "belive.h"
#include "bemodule.h"
#include "benode.h"
#include "besched.h"
/* we need a special mark */
static char _mark;
#define MARK &_mark
typedef struct trace_irn {
	sched_timestep_t delay;             /**< The delay for this node if already calculated, else 0. */
	sched_timestep_t etime;             /**< The earliest time of this node. */
	unsigned         num_user;          /**< The number of real users (mode datab) of this node. */
	int              reg_diff;          /**< The difference num(out registers) - num(in registers). */
	int              preorder;          /**< The pre-order position. */
	unsigned         critical_path_len; /**< The weighted length of the longest critical path. */
	unsigned         is_root : 1;       /**< Set if this node is a root node of its block. */
} trace_irn_t;
typedef struct trace_env {
	trace_irn_t      *sched_info; /**< trace scheduling information about the nodes */
	sched_timestep_t  curr_time;  /**< current time of the scheduler */
	be_lv_t          *liveness;   /**< The liveness for the irg */
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
} trace_env_t;
/**
 * Returns an arbitrary node from a nodeset (simply the first one).
 */
static ir_node *get_nodeset_node(const ir_nodeset_t *nodeset)
{
	return ir_nodeset_first(nodeset);
}
/**
 * Returns non-zero if the node is a root node.
 */
static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].is_root;
}

/**
 * Marks a node as root node.
 */
static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].is_root = 1;
}
/**
 * Gets the current delay.
 */
static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].delay;
}

/**
 * Sets the current delay.
 */
static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].delay = delay;
}

/**
 * Gets the current etime.
 */
static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].etime;
}

/**
 * Sets the current etime.
 */
static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].etime = etime;
}
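/*
 * Reading guide (derived from how these fields are used below): "delay" is the
 * latency-weighted length of the longest path from a node to a root of its
 * block, computed once per block in trace_preprocess_block(); "etime" is the
 * earliest time step at which the node may be issued, updated in
 * trace_node_ready() whenever one of its predecessors has been scheduled.
 */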
/**
 * Gets the number of users.
 */
static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_user;
}

/**
 * Sets the number of users.
 */
static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_user = num_user;
}

/**
 * Gets the register difference.
 */
static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].reg_diff;
}

/**
 * Sets the register difference.
 */
static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].reg_diff = reg_diff;
}
/**
 * Gets the pre-order position.
 */
static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].preorder;
}

/**
 * Sets the pre-order position.
 */
static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].preorder = pos;
}
/**
 * Gets the length of the critical path.
 */
static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].critical_path_len;
}

/**
 * Sets the length of the critical path.
 */
static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
{
	unsigned const idx = get_irn_idx(n);
	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].critical_path_len = len;
}
/**
 * Returns the exec-time for node n.
 */
static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
{
	(void)env;
	if (be_is_Keep(n) || is_Proj(n))
		return 0;
#if 0 /* trace_env_t carries no selector callbacks */
	if (env->selector->exectime)
		return env->selector->exectime(env->selector_env, n);
#endif
	return 1;
}
/**
 * Calculates the latency between two ops.
 */
static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
{
	(void)pred_cycle;
	(void)curr_cycle;

	/* a Keep hides a root */
	if (be_is_Keep(curr))
		return exectime(env, pred);

	/* Projs are executed immediately */
	if (is_Proj(curr))
		return 0;

	/* a predecessor Proj must be skipped */
	if (is_Proj(pred))
		pred = get_Proj_pred(pred);

#if 0 /* trace_env_t carries no selector callbacks */
	if (env->selector->latency)
		return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
#endif
	return 1;
}
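/*
 * Example of the default cost model above (these fallback values are specific
 * to this file, not to any target description): for a node feeding a Proj,
 * latency(node, Proj) is 0 and the Proj's own exec-time is 0, so Projs never
 * stretch the critical path; latency into a Keep equals the exec-time of the
 * kept node, and every remaining edge costs one time step.
 */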
/**
 * Returns the number of users of a node having mode datab.
 */
static int get_num_successors(ir_node *irn)
{
	int sum = 0;
	const ir_edge_t *edge;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);

			if (mode == mode_T)
				sum += get_num_successors(proj);
			else if (mode_is_datab(mode))
				sum += get_irn_n_edges(proj);
		}
	}
	else {
		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
				sum++;
		}
	}

	return sum;
}
/**
 * Returns the difference num(out registers) - num(in registers).
 */
static int get_reg_difference(trace_env_t *env, ir_node *irn)
{
	int num_out = 0, num_in = 0, i;
	ir_node *block = get_nodes_block(irn);

	if (be_is_Call(irn)) {
		/* we want calls preferred */
		return -5;
	}

	if (get_irn_mode(irn) == mode_T) {
		/* mode_T nodes: num out regs == num Projs with mode datab */
		const ir_edge_t *edge;
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			if (mode_is_datab(get_irn_mode(proj)))
				num_out++;
		}
	}
	else
		num_out = 1;

	/* num in regs: number of ins with mode datab and not ignore */
	for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
		ir_node *in = get_irn_n(irn, i);

		if (!mode_is_datab(get_irn_mode(in)))
			continue;
		if (arch_irn_is_ignore(in))
			continue;
		if (be_is_live_end(env->liveness, block, in))
			continue;

		num_in++;
	}

	return num_out - num_in;
}
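/*
 * Illustration (numbers invented for the example): a node with one datab
 * result and two datab operands that both die here yields
 * reg_diff = 1 - 2 = -1, i.e. scheduling it tends to relieve register
 * pressure; operands in ignore registers or still live at the block end are
 * not counted as freed.
 */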
/**
 * Descends into a dag and creates a pre-order list.
 */
static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
{
	int i;

	if (! is_Phi(root)) {
		path_len += exectime(env, root);
		if (get_irn_critical_path_len(env, root) < path_len) {
			set_irn_critical_path_len(env, root, path_len);
		}
		/* calculate number of users (needed for heuristic) */
		set_irn_num_user(env, root, get_num_successors(root));

		/* calculate register difference (needed for heuristic) */
		set_irn_reg_diff(env, root, get_reg_difference(env, root));

		/* Phi nodes always leave the block */
		for (i = get_irn_arity(root) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_n(root, i);

			DBG((env->dbg, LEVEL_3, "   node %+F\n", pred));

			/* Blocks may happen as predecessors of End nodes */
			if (is_Block(pred))
				continue;

			/* already seen nodes are not marked */
			if (get_irn_link(pred) != MARK)
				continue;

			/* don't leave our block */
			if (get_nodes_block(pred) != block)
				continue;

			set_irn_link(pred, NULL);

			descent(pred, block, list, env, path_len);
		}
	}
	set_irn_link(root, *list);
	*list = root;
}
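/*
 * Note on the link-field protocol (set up in trace_preprocess_block() below):
 * non-root nodes carry MARK while unvisited, descent() clears the mark before
 * recursing and finally prepends the node to *list, so every reachable
 * in-block node ends up on the list exactly once.
 */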
/**
 * Returns non-zero if root is a root in the block block.
 */
static int is_root(ir_node *root, ir_node *block)
{
	const ir_edge_t *edge;

	foreach_out_edge(root, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Block(succ))
			continue;
		/* Phi nodes are always in "another" block */
		if (is_Phi(succ))
			continue;
		if (get_nodes_block(succ) == block)
			return 0;
	}
	return 1;
}
/**
 * Performs initial block calculations for trace scheduling.
 */
static void trace_preprocess_block(trace_env_t *env, ir_node *block)
{
	ir_node *root = NULL, *preord = NULL;
	ir_node *curr, *irn;
	int cur_pos;
	const ir_edge_t *edge;

	/* First step: Find the root set. */
	foreach_out_edge(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Anchor(succ)) {
			/* ignore a keep alive edge */
			continue;
		}
		if (is_root(succ, block)) {
			mark_root_node(env, succ);
			set_irn_link(succ, root);
			root = succ;
		}
		else
			set_irn_link(succ, MARK);
	}

	/* Second step: calculate the pre-order list. */
	for (curr = root; curr; curr = irn) {
		irn = (ir_node*)get_irn_link(curr);
		DBG((env->dbg, LEVEL_2, "   DAG root %+F\n", curr));
		descent(curr, block, &preord, env, 0);
	}
	root = preord;

	/* Third step: calculate the Delay. Note that our
	 * list is now in pre-order, starting at root. */
	for (cur_pos = 0, curr = root; curr; curr = (ir_node*)get_irn_link(curr), cur_pos++) {
		sched_timestep_t d;

		/* assure that branches can be executed last */
		if (is_cfop(curr))
			d = 0;
		else {
			if (is_root_node(env, curr))
				d = exectime(env, curr);
			else {
				d = 0;
				foreach_out_edge(curr, edge) {
					ir_node *n = get_edge_src_irn(edge);

					if (get_nodes_block(n) == block) {
						sched_timestep_t ld;

						ld = latency(env, curr, 1, n, 0) + get_irn_delay(env, n);
						d = ld > d ? ld : d;
					}
				}
			}
		}

		set_irn_delay(env, curr, d);
		DB((env->dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

		/* set the etime of all nodes to 0 */
		set_irn_etime(env, curr, 0);

		set_irn_preorder(env, curr, cur_pos);
	}
}
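/*
 * The recurrence computed in the third step, spelled out (using the default
 * exectime()/latency() model above): branches get d = 0, a root r gets
 * d(r) = exectime(r), and every other node gets
 *   d(n) = max over same-block users u of ( latency(n, u) + d(u) ),
 * so nodes far from a root receive a large delay and are preferred first by
 * muchnik_select().
 */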
/**
 * This function gets called after a node finally has been made ready.
 */
static void trace_node_ready(void *data, ir_node *irn, ir_node *pred)
{
	trace_env_t *env = (trace_env_t*)data;
	sched_timestep_t etime_p, etime;

	etime = env->curr_time;
	if (pred) {
		etime_p = get_irn_etime(env, pred);
		etime  += latency(env, pred, 1, irn, 0);
		etime   = etime_p > etime ? etime_p : etime;
	}

	set_irn_etime(env, irn, etime);
	DB((env->dbg, LEVEL_2, "\tset etime of %+F to %u\n", irn, etime));
}
/**
 * Updates the current time after irn has been selected.
 */
static void trace_update_time(void *data, ir_node *irn)
{
	trace_env_t *env = (trace_env_t*)data;
	if (is_Phi(irn) || get_irn_opcode(irn) == beo_Start) {
		env->curr_time += get_irn_etime(env, irn);
	}
	else {
		env->curr_time += exectime(env, irn);
	}
}
/**
 * Allocates memory and initializes the trace scheduling environment.
 * @param irg   The backend irg object
 * @return The environment
 */
static trace_env_t *trace_init(ir_graph *irg)
{
	trace_env_t *env = XMALLOCZ(trace_env_t);
	int          nn  = get_irg_last_idx(irg);

	env->sched_info = NEW_ARR_F(trace_irn_t, nn);
	env->liveness   = be_get_irg_liveness(irg);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");

	be_assure_live_chk(irg);
	memset(env->sched_info, 0, nn * sizeof(*(env->sched_info)));

	return env;
}
/**
 * Frees all memory allocated for the trace scheduling environment.
 * @param data   The environment
 */
static void trace_free(void *data)
{
	trace_env_t *env = (trace_env_t*)data;
	DEL_ARR_F(env->sched_info);
	free(env);
}
/**
 * Simple selector. Just assures that jumps are scheduled last.
 */
static ir_node *basic_selection(ir_nodeset_t *ready_set)
{
	ir_node *irn = NULL;
	ir_nodeset_iterator_t iter;

	/* assure that branches and constants are executed last */
	foreach_ir_nodeset(ready_set, irn, iter) {
		if (!is_cfop(irn)) {
			return irn;
		}
	}

	/* at last: schedule branches */
	irn = get_nodeset_node(ready_set);
	return irn;
}
/**
 * The Muchnik selector.
 */
static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set)
{
	trace_env_t *env = (trace_env_t*)block_env;
	ir_nodeset_t mcands, ecands;
	ir_nodeset_iterator_t iter;
	sched_timestep_t max_delay = 0;
	ir_node *irn;

	/* calculate the max delay of all candidates */
	foreach_ir_nodeset(ready_set, irn, iter) {
		sched_timestep_t d = get_irn_delay(env, irn);

		max_delay = d > max_delay ? d : max_delay;
	}

	ir_nodeset_init_size(&mcands, 8);
	ir_nodeset_init_size(&ecands, 8);

	/* build mcands and ecands */
	foreach_ir_nodeset(ready_set, irn, iter) {
		if (get_irn_delay(env, irn) == max_delay) {
			ir_nodeset_insert(&mcands, irn);
			if (get_irn_etime(env, irn) <= env->curr_time)
				ir_nodeset_insert(&ecands, irn);
		}
	}

	/* select a node */
	if (ir_nodeset_size(&mcands) == 1) {
		irn = get_nodeset_node(&mcands);
		DB((env->dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
	}
	else {
		size_t cnt = ir_nodeset_size(&ecands);
		if (cnt == 1) {
			irn = get_nodeset_node(&ecands);
			if (is_cfop(irn)) {
				/* BEWARE: don't select a JUMP if others are still possible */
				goto force_mcands;
			}
			DB((env->dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
		}
		else if (cnt > 1) {
			DB((env->dbg, LEVEL_3, "\tecand = %zu, max_delay = %u\n", cnt, max_delay));
			irn = basic_selection(&ecands);
		}
		else {
force_mcands:
			DB((env->dbg, LEVEL_3, "\tmcand = %zu\n", ir_nodeset_size(&mcands)));
			irn = basic_selection(&mcands);
		}
	}

	return irn;
}
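/*
 * Summary of the policy above: mcands holds the ready nodes with maximal
 * delay, ecands the subset already executable at the current time.  A unique
 * mcand wins outright; a unique ecand is taken next unless it is a jump, in
 * which case the choice falls back to the mcands; several ecands are decided
 * by basic_selection(); with no ecand the mcands are used as well.
 */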
static void *muchnik_init_graph(ir_graph *irg)
{
	trace_env_t *env = trace_init(irg);
	return (void *)env;
}

static void *muchnik_init_block(void *graph_env, ir_node *bl)
{
	trace_env_t *env = (trace_env_t*) graph_env;
	trace_preprocess_block(env, bl);
	return graph_env;
}
static void sched_muchnik(ir_graph *irg)
{
	static const list_sched_selector_t muchnik_selector = {
		muchnik_init_graph, /* init_graph */
		muchnik_init_block, /* init_block */
		muchnik_select,     /* select */
		trace_node_ready,   /* node_ready */
		trace_update_time,  /* node_selected */
		NULL,               /* finish_block */
		trace_free          /* finish_graph */
	};
	be_list_sched_graph(irg, &muchnik_selector);
}
/**
 * Executes the heuristic function.
 */
static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns)
{
	trace_env_t *trace_env = (trace_env_t*)block_env;
	ir_node     *irn, *cand = NULL;
	int          max_prio   = INT_MIN;
	int          cur_prio   = INT_MIN;
	int          reg_fact;
	ir_nodeset_iterator_t iter;
	/* Note: register pressure calculation needs an overhaul, you need correct
	 * tracking for each register class individually and weight by each class
	int cur_pressure = ir_nodeset_size(lv); */
	int          cur_pressure = 1;

	/* prefer instructions which can be scheduled early */
#define PRIO_TIME        3
	/* prefer instructions with lots of successors */
#define PRIO_NUMSUCCS    8
	/* prefer instructions with long critical path */
#define PRIO_LEVEL      12
	/* prefer instructions coming early in preorder */
#define PRIO_PREORD      8
	/* weight of current register pressure */
#define PRIO_CUR_PRESS  20
	/* weight of register pressure difference */
#define PRIO_CHG_PRESS   8

	/* priority based selection, heuristic inspired by mueller diss */
	foreach_ir_nodeset(ns, irn, iter) {
		/* make sure that branches are scheduled last */
		if (!is_cfop(irn)) {
			int rdiff = get_irn_reg_diff(trace_env, irn);
			int sign  = rdiff < 0;
			int chg   = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;

			reg_fact = chg * cur_pressure;
			if (reg_fact < chg)
				reg_fact = INT_MAX - 2;
			reg_fact = sign ? -reg_fact : reg_fact;

			cur_prio = (get_irn_critical_path_len(trace_env, irn) << PRIO_LEVEL)
				//- (get_irn_delay(trace_env, irn) << PRIO_LEVEL)
				+ (get_irn_num_user(trace_env, irn) << PRIO_NUMSUCCS)
				- (get_irn_etime(trace_env, irn) << PRIO_TIME)
				//- ((get_irn_reg_diff(trace_env, irn) >> PRIO_CHG_PRESS) << ((cur_pressure >> PRIO_CUR_PRESS) - 3))
				- reg_fact
				+ (get_irn_preorder(trace_env, irn) << PRIO_PREORD); /* high preorder means early schedule */
			if (cur_prio > max_prio) {
				cand     = irn;
				max_prio = cur_prio;
			}

			DBG((trace_env->dbg, LEVEL_4, "checked NODE %+F\n", irn));
			DBG((trace_env->dbg, LEVEL_4, "\tpriority: %d\n", cur_prio));
			DBG((trace_env->dbg, LEVEL_4, "\tpath len: %d (%d)\n", get_irn_critical_path_len(trace_env, irn), get_irn_critical_path_len(trace_env, irn) << PRIO_LEVEL));
			DBG((trace_env->dbg, LEVEL_4, "\tdelay: %d (%d)\n", get_irn_delay(trace_env, irn), get_irn_delay(trace_env, irn) << PRIO_LEVEL));
			DBG((trace_env->dbg, LEVEL_4, "\t#user: %d (%d)\n", get_irn_num_user(trace_env, irn), get_irn_num_user(trace_env, irn) << PRIO_NUMSUCCS));
			DBG((trace_env->dbg, LEVEL_4, "\tetime: %d (%d)\n", get_irn_etime(trace_env, irn), 0 - (get_irn_etime(trace_env, irn) << PRIO_TIME)));
			DBG((trace_env->dbg, LEVEL_4, "\tpreorder: %d (%d)\n", get_irn_preorder(trace_env, irn), get_irn_preorder(trace_env, irn) << PRIO_PREORD));
			DBG((trace_env->dbg, LEVEL_4, "\treg diff: %d (%d)\n", get_irn_reg_diff(trace_env, irn), 0 - reg_fact));
			DBG((trace_env->dbg, LEVEL_4, "\tpressure: %d\n", cur_pressure));
		}
	}

	if (cand) {
		DBG((trace_env->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
	}
	else {
		cand = basic_selection(ns);
	}

	return cand;
}
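/*
 * Worked example of the priority formula above (all input numbers invented
 * for illustration): a non-branch node with critical_path_len = 2,
 * num_user = 3, etime = 1, preorder = 4 and reg_diff = 0 gets
 *   (2 << PRIO_LEVEL) + (3 << PRIO_NUMSUCCS) - (1 << PRIO_TIME) - 0 + (4 << PRIO_PREORD)
 *   = 8192 + 768 - 8 - 0 + 1024 = 9976,
 * so the critical-path term dominates, followed by pre-order position and
 * user count.
 */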
static void sched_heuristic(ir_graph *irg)
{
	static const list_sched_selector_t heuristic_selector = {
		muchnik_init_graph, /* init_graph */
		muchnik_init_block, /* init_block */
		heuristic_select,   /* select */
		trace_node_ready,   /* node_ready */
		trace_update_time,  /* node_selected */
		NULL,               /* finish_block */
		trace_free          /* finish_graph */
	};
	be_list_sched_graph(irg, &heuristic_selector);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_trace)
void be_init_sched_trace(void)
{
	be_register_scheduler("heur",    sched_heuristic);
	be_register_scheduler("muchnik", sched_muchnik);
}