2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief Register pressure node selector.
9 * @author Sebastian Hack
16 #include "iredges_t.h"
22 #include "belistsched.h"
/**
 * Per-value usage statistics (kept in a linked list via ->next, cached in
 * each node's link field) and the block-local selector environment.
 * NOTE(review): this excerpt is incomplete — the struct opens as
 * usage_stats_t but the closing brace below names
 * reg_pressure_selector_env_t, so the lines separating the two typedefs
 * are missing from this view; confirm the field lists against the full
 * source file.
 */
27 typedef struct usage_stats_t {
29 struct usage_stats_t *next;
31 int uses_in_block; /**< Number of uses inside the current block. */
32 int already_consumed; /**< Number of insns using this value already
/* The fields below belong to the selector-environment typedef. */
39 ir_nodeset_t already_scheduled;
40 } reg_pressure_selector_env_t;
/**
 * Return the usage_stats_t stored in the link field of @p irn; if none is
 * attached yet, allocate a fresh one on the environment's obstack,
 * initialize already_consumed to 0 and max_hops to INT_MAX, and cache it
 * in the node's link field.
 * NOTE(review): the null-check and return lines are missing from this
 * excerpt — confirm against the full source.
 */
43 static inline usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
45 usage_stats_t *us = (usage_stats_t*)get_irn_link(irn);
/* Lazily allocate stats the first time a node is seen. */
48 us = OALLOC(&env->obst, usage_stats_t);
50 us->already_consumed = 0;
51 us->max_hops = INT_MAX;
/* Cache on the node so later lookups are O(1). */
54 set_irn_link(irn, us);
/**
 * Look up the usage stats previously attached to @p irn by
 * get_or_set_usage_stats(); asserts that they exist.
 * (Return statement not visible in this excerpt.)
 */
60 static inline usage_stats_t *get_usage_stats(ir_node *irn)
62 usage_stats_t *us = (usage_stats_t*)get_irn_link(irn);
63 assert(us && "This node must have usage stats");
/**
 * Recursive DFS helper for compute_max_hops(): starting from a user node,
 * walks backwards over operands/dependencies of unscheduled nodes in the
 * same block, using the per-graph visited counter (@p visited_nr) to
 * avoid revisiting; @p depth counts the steps taken so far.
 *
 * NOTE(review): `bl` is computed from `irn` directly above, so the
 * condition `get_nodes_block(irn) != bl` below can never be true —
 * presumably one side was meant to be `curr_bl`. Verify against the
 * upstream source before changing; several interior lines (declarations,
 * MAX accumulation, returns) are missing from this excerpt.
 */
67 static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
69 ir_node *bl = get_nodes_block(irn);
71 * If the reached node is not in the block desired,
72 * return the value passed for this situation.
74 if (get_nodes_block(irn) != bl)
75 return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
78 * If the node is in the current block but not
79 * yet scheduled, we keep on searching from that node.
81 if (!ir_nodeset_contains(&env->already_scheduled, irn)) {
84 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
85 ir_node *operand = get_irn_in_or_dep(irn, i);
/* Only descend into operands not yet seen in this visited generation. */
87 if (get_irn_visited(operand) < visited_nr) {
90 set_irn_visited(operand, visited_nr);
91 tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
100 * If the node is in the current block and scheduled, return
101 * the depth which indicates the number of steps to the
102 * region of scheduled nodes.
/**
 * For each user (out edge) of @p irn, start a fresh visited generation on
 * the graph and run max_hops_walker() from that user; the result is the
 * maximum over all users. (Declarations of `res`/`max_hops` and the
 * return are missing from this excerpt.)
 */
107 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
109 ir_node *bl = get_nodes_block(irn);
110 ir_graph *irg = get_irn_irg(bl);
113 foreach_out_edge(irn, edge) {
114 ir_node *user = get_edge_src_irn(edge);
/* Bump the graph's visited counter so each walk starts clean. */
115 unsigned visited_nr = get_irg_visited(irg) + 1;
118 set_irg_visited(irg, visited_nr);
119 max_hops = max_hops_walker(env, user, irn, 0, visited_nr);
120 res = MAX(res, max_hops);
/**
 * Graph-init callback: clear every node's link field so the usage-stats
 * cache (see get_or_set_usage_stats) starts empty for this graph.
 * (Return statement not visible in this excerpt.)
 */
126 static void *reg_pressure_graph_init(ir_graph *irg)
128 irg_walk_graph(irg, firm_clear_link, NULL, NULL);
/**
 * Block-init callback: allocate the per-block selector environment,
 * initialize its obstack and already-scheduled set, then walk the block's
 * schedule collecting usage statistics.
 * NOTE(review): inside the operand loop the stats are fetched for `irn`
 * itself, not for the operand `get_irn_n(irn, i)` — verify against the
 * full source whether the operand was intended. Interior lines (env->root
 * init, stat updates, return) are missing from this excerpt.
 */
133 static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
135 reg_pressure_selector_env_t *env = XMALLOC(reg_pressure_selector_env_t);
138 obstack_init(&env->obst);
139 ir_nodeset_init(&env->already_scheduled);
143 * Collect usage statistics.
145 sched_foreach(bl, irn) {
146 for (int i = 0, n = get_irn_arity(irn); i < n; ++i) {
147 usage_stats_t *us = get_or_set_usage_stats(env, irn);
/**
 * Block-free callback: detach every cached usage_stats_t from its node's
 * link field, release the obstack that owns them, and destroy the
 * already-scheduled set.
 * NOTE(review): `free(env)` is not visible in this excerpt — confirm the
 * environment itself is released in the full source.
 */
155 static void reg_pressure_block_free(void *block_env)
157 reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
/* Walk the stats list and clear each node's link pointer. */
160 for (us = env->root; us; us = us->next)
161 set_irn_link(us->irn, NULL);
163 obstack_free(&env->obst, NULL);
164 ir_nodeset_destroy(&env->already_scheduled);
/**
 * Sum of max-hops contributions over a node's results: for a mode_T node
 * recurse into each user reached via the out edges; for a plain data-mode
 * node compute the hops for the node itself. (Declaration of `res` and
 * the return are missing from this excerpt.)
 */
168 static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
171 if (get_irn_mode(irn) == mode_T) {
172 foreach_out_edge(irn, edge)
173 res += get_result_hops_sum(env, get_edge_src_irn(edge));
176 else if (mode_is_data(get_irn_mode(irn)))
177 res = compute_max_hops(env, irn);
/**
 * Scheduling cost of @p irn: accumulate compute_max_hops() over its
 * operands — skipping operands matching the condition below, which
 * includes the arch_irn_flags_not_scheduled flag (the first half of the
 * condition and the skip statement are missing from this excerpt) — plus
 * the hops sum of the node's own results.
 */
183 static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
188 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
189 ir_node *op = get_irn_n(irn, i);
192 || (arch_get_irn_flags(op) & arch_irn_flags_not_scheduled))
195 sum += compute_max_hops(env, op);
198 sum += get_result_hops_sum(env, irn);
/**
 * Selector callback: among the ready nodes pick one with minimal
 * reg_pr_costs(). Branches are deferred per the comment inside the loop;
 * if nothing was picked (only branches were ready), fall back to the
 * first node of the ready set. The chosen node is recorded in the
 * already-scheduled set so later cost walks treat it as placed.
 * Note the `<=` comparison: a later node with equal cost replaces an
 * earlier pick. (Branch filter, assignment of `res`, and the return are
 * missing from this excerpt.)
 */
203 static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set)
205 reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
207 int curr_cost = INT_MAX;
209 assert(ir_nodeset_size(ready_set) > 0);
211 foreach_ir_nodeset(ready_set, irn, iter) {
213 Ignore branch instructions for the time being.
214 They should only be scheduled if there is nothing else.
217 int costs = reg_pr_costs(env, irn);
218 if (costs <= curr_cost) {
226 There was no result so we only saw a branch.
/* Fallback: take any ready node (only branches remained). */
231 res = ir_nodeset_first(ready_set);
232 assert(res && "There must be a node scheduled.");
235 ir_nodeset_insert(&env->already_scheduled, res);
/**
 * Entry point of the register-pressure scheduler: builds the selector
 * callback table and hands it to the generic list scheduler.
 * Fix: the last line contained the mojibake `®_pressure_selector` —
 * `®` (U+00AE) is a mis-decoding of the two characters `&reg`; restored
 * the intended address-of expression `&reg_pressure_selector`.
 * NOTE(review): this excerpt is missing lines (the select-callback table
 * entry and closing braces); only the encoding defect was repaired here.
 */
239 static void sched_reg_pressure(ir_graph *irg)
241 static const list_sched_selector_t reg_pressure_selector = {
242 reg_pressure_graph_init,
243 reg_pressure_block_init,
245 NULL, /* node_ready */
246 NULL, /* node_selected */
247 reg_pressure_block_free,
250 be_list_sched_graph(irg, &reg_pressure_selector);
/**
 * Module constructor: registers this selector with the backend scheduler
 * registry under the name "regpress". (Function braces are missing from
 * this excerpt.)
 */
253 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_regpress)
254 void be_init_sched_regpress(void)
256 be_register_scheduler("regpress", sched_reg_pressure);