2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Register pressure node selector.
23 * @author Sebastian Hack
30 #include "iredges_t.h"
36 #include "belistsched.h"
/*
 * Per-value usage statistics, cached in each node's link field (see
 * get_or_set_usage_stats()), plus the tail of the per-block selector
 * environment.
 * NOTE(review): this is a line-sampled excerpt -- the remaining members of
 * usage_stats_t and the head of reg_pressure_selector_env_t are not visible
 * here, and the leading numbers on each line are extraction residue.
 */
41 typedef struct usage_stats_t {
43 struct usage_stats_t *next;
45 int uses_in_block; /**< Number of uses inside the current block. */
46 int already_consumed; /**< Number of insns using this value already
53 ir_nodeset_t already_scheduled; /**< Nodes already picked by reg_pressure_select(). */
54 } reg_pressure_selector_env_t;
/*
 * NOTE(review): the comment below lost its opening marker in this sampled
 * excerpt.  curr_bl is shared, mutable file-scope state read by cmp_usage();
 * confirm it is assigned before any qsort(3) call that uses the comparator.
 */
59 * Ugly global variable for the compare function
60 * since qsort(3) does not pass an extra pointer.
62 static ir_node *curr_bl = NULL;
/*
 * qsort(3) comparator: orders two nodes by whether each is live at the end
 * of the current block (the difference of the two is_live_end() results).
 * NOTE(review): env is declared but no initialization is visible in this
 * sampled excerpt, yet env->curr_bl is read below; the global curr_bl above
 * exists precisely because qsort passes no context pointer -- confirm the
 * missing lines point env at it (or that the global is read directly).
 */
64 static int cmp_usage(const void *a, const void *b)
66 struct trivial_sched_env *env;
71 res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
74 * One of them is live at the end of the block.
75 * Then, that one shall be scheduled after the other
/*
 * Fetch the usage_stats_t cached in irn's link field, allocating a fresh
 * record on env->obst and storing it in the link on first access.
 * Visible initialization: already_consumed = 0, max_hops = INT_MAX (further
 * member setup lines are not visible in this sampled excerpt).
 */
85 static inline usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
87 usage_stats_t *us = (usage_stats_t*)get_irn_link(irn);
90 us = OALLOC(&env->obst, usage_stats_t); /* first access: allocate on the block obstack */
92 us->already_consumed = 0;
93 us->max_hops = INT_MAX;
96 set_irn_link(irn, us); /* cache for subsequent lookups */
/*
 * Fetch the usage_stats_t previously attached to irn's link by
 * get_or_set_usage_stats(); asserts that the record already exists.
 */
102 static inline usage_stats_t *get_usage_stats(ir_node *irn)
104 usage_stats_t *us = (usage_stats_t*)get_irn_link(irn);
105 assert(us && "This node must have usage stats");
/*
 * Depth-first walk from irn back through its operands (and dependency
 * edges) towards nodes already scheduled in curr_bl, counting the number
 * of hops.  visited_nr marks nodes seen in the current walk; a fresh
 * number is taken per walk by compute_max_hops().
 * NOTE(review): the guard "get_nodes_block(irn) != bl" compares against bl,
 * which was just computed from irn itself, so it can never be true as
 * written; the comment and the block_dominates() fallback suggest the
 * intended comparison is against curr_bl -- confirm against the unsampled
 * source (body lines are missing from this excerpt, so the code is left
 * untouched here).
 */
109 static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
111 ir_node *bl = get_nodes_block(irn);
113 * If the reached node is not in the block desired,
114 * return the value passed for this situation.
116 if (get_nodes_block(irn) != bl)
117 return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
120 * If the node is in the current block but not
121 * yet scheduled, we keep on searching from that node.
123 if (!ir_nodeset_contains(&env->already_scheduled, irn)) {
126 for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
127 ir_node *operand = get_irn_in_or_dep(irn, i);
129 if (get_irn_visited(operand) < visited_nr) {
132 set_irn_visited(operand, visited_nr); /* mark before recursing to stop cycles */
133 tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
142 * If the node is in the current block and scheduled, return
143 * the depth which indicates the number of steps to the
144 * region of scheduled nodes.
/*
 * Maximum over all users of irn of the hop distance from that user back to
 * the region of already-scheduled nodes.  Each per-user walk bumps the
 * graph's visited number so max_hops_walker() starts with a clean visited
 * state.
 * NOTE(review): max_hops_walker()'s third parameter is a block (curr_bl),
 * but irn -- a node -- is passed here while bl sits unused above; likely bl
 * was intended.  Sampled excerpt, confirm before changing.
 */
149 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
151 ir_node *bl = get_nodes_block(irn);
152 ir_graph *irg = get_irn_irg(bl);
155 foreach_out_edge(irn, edge) {
156 ir_node *user = get_edge_src_irn(edge);
157 unsigned visited_nr = get_irg_visited(irg) + 1;
160 set_irg_visited(irg, visited_nr); /* fresh visited epoch for this walk */
161 max_hops = max_hops_walker(env, user, irn, 0, visited_nr);
162 res = MAX(res, max_hops);
168 static void *reg_pressure_graph_init(ir_graph *irg)
170 irg_walk_graph(irg, firm_clear_link, NULL, NULL);
/*
 * Selector hook: allocate the per-block environment (obstack plus the
 * already_scheduled set) and collect usage statistics over the block's
 * existing schedule, skipping nodes flagged arch_irn_flags_not_scheduled.
 * NOTE(review): inside the arity loop the stats record is looked up for
 * irn rather than for the i-th operand, and the disabled #if 0 code
 * references an 'op' that is not declared in the visible lines -- sampled
 * excerpt, confirm against the unsampled source.
 */
175 static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
177 reg_pressure_selector_env_t *env = XMALLOC(reg_pressure_selector_env_t);
180 obstack_init(&env->obst);
181 ir_nodeset_init(&env->already_scheduled);
185 * Collect usage statistics.
187 sched_foreach(bl, irn) {
190 || (arch_get_irn_flags(irn) & arch_irn_flags_not_scheduled))
193 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
194 usage_stats_t *us = get_or_set_usage_stats(env, irn);
195 #if 0 /* Liveness is not computed here! */
196 if (is_live_end(bl, op))
197 us->uses_in_block = 99999;
/*
 * Selector hook: tear down the per-block environment -- clear every node
 * link set by get_or_set_usage_stats(), release the stats obstack and
 * destroy the already_scheduled set.
 * NOTE(review): no free(env) is visible in this sampled excerpt; confirm
 * the environment allocated by reg_pressure_block_init() is itself freed.
 */
207 static void reg_pressure_block_free(void *block_env)
209 reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
212 for (us = env->root; us; us = us->next)
213 set_irn_link(us->irn, NULL); /* undo the link caching */
215 obstack_free(&env->obst, NULL);
216 ir_nodeset_destroy(&env->already_scheduled);
/*
 * Sum of compute_max_hops() over irn's data results: a mode_T node
 * recurses into its users via out-edges (its Projs); a plain data-mode
 * node contributes its own max-hops value.
 */
220 static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
223 if (get_irn_mode(irn) == mode_T) {
224 foreach_out_edge(irn, edge)
225 res += get_result_hops_sum(env, get_edge_src_irn(edge));
228 else if (mode_is_data(get_irn_mode(irn)))
229 res = compute_max_hops(env, irn);
/*
 * Cost estimate for scheduling irn now: the max-hops value of each
 * schedulable operand (operands flagged arch_irn_flags_not_scheduled are
 * skipped) plus the hop sum of irn's own results.
 */
235 static inline int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
240 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
241 ir_node *op = get_irn_n(irn, i);
244 || (arch_get_irn_flags(op) & arch_irn_flags_not_scheduled))
247 sum += compute_max_hops(env, op);
250 sum += get_result_hops_sum(env, irn);
/*
 * Selector hook: pick the cheapest ready node according to reg_pr_costs().
 * Branch instructions are skipped in the cost loop and only chosen (via
 * ir_nodeset_first()) when nothing else is ready.  The chosen node is
 * recorded in env->already_scheduled so max_hops_walker() treats it as
 * part of the scheduled region.
 */
255 static ir_node *reg_pressure_select(void *block_env, ir_nodeset_t *ready_set)
257 reg_pressure_selector_env_t *env = (reg_pressure_selector_env_t*)block_env;
259 int curr_cost = INT_MAX;
261 assert(ir_nodeset_size(ready_set) > 0);
263 foreach_ir_nodeset(ready_set, irn, iter) {
265 Ignore branch instructions for the time being.
266 They should only be scheduled if there is nothing else.
269 int costs = reg_pr_costs(env, irn);
270 if (costs <= curr_cost) { /* <= keeps the latest node among equal costs */
278 There was no result so we only saw a branch.
283 res = ir_nodeset_first(ready_set);
284 assert(res && "There must be a node scheduled.");
287 ir_nodeset_insert(&env->already_scheduled, res);
291 static void sched_reg_pressure(ir_graph *irg)
293 static const list_sched_selector_t reg_pressure_selector = {
294 reg_pressure_graph_init,
295 reg_pressure_block_init,
297 NULL, /* node_ready */
298 NULL, /* node_selected */
299 reg_pressure_block_free,
302 be_list_sched_graph(irg, ®_pressure_selector);
305 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_regpress)
306 void be_init_sched_regpress(void)
308 be_register_scheduler("regpress", sched_reg_pressure);