+static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+{
+	/* Per-graph environment of the register pressure selector. */
+	reg_pressure_main_env_t *res = xmalloc(sizeof(res[0]));
+
+	res->vtab     = vtab;
+	res->arch_env = arch_env;
+
+	/* Reset the link fields of all nodes; the selector stores
+	 * per-node data there later on. */
+	irg_walk_graph(irg, firm_clear_link, NULL, NULL);
+
+	return res;
+}
+
+static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
+{
+	ir_node *irn;
+	reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
+
+	obstack_init(&env->obst);
+	env->already_scheduled = new_nodeset(32);
+	env->root              = NULL;
+	env->main_env          = graph_env;
+
+	/*
+	 * Collect usage statistics: for every operand which itself appears
+	 * in the schedule, count how often it is used inside this block.
+	 */
+	sched_foreach(bl, irn) {
+		if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
+			int i, n;
+
+			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+				ir_node *op = get_irn_n(irn, i);
+
+				/*
+				 * BUGFIX: the statistics belong to the *operand*, not to
+				 * the using node. The old code passed irn to both calls,
+				 * so it redundantly re-tested the user (already checked
+				 * by the outer if) and never accumulated the operands'
+				 * use counts.
+				 */
+				if(must_appear_in_schedule(env->main_env->vtab, env, op)) {
+					usage_stats_t *us = get_or_set_usage_stats(env, op);
+
+					/* An operand live past the block end gets a huge use
+					 * count, so it is always considered heavily used. */
+					if(is_live_end(bl, op))
+						us->uses_in_block = 99999;
+					else
+						us->uses_in_block++;
+				}
+			}
+		}
+	}
+
+	return env;
+}
+
+static void reg_pressure_block_free(void *block_env)
+{
+	reg_pressure_selector_env_t *env = block_env;
+	usage_stats_t *stats;
+
+	/* Clear the node links which point into the obstack
+	 * before that memory is released below. */
+	for(stats = env->root; stats != NULL; stats = stats->next)
+		set_irn_link(stats->irn, NULL);
+
+	/* Release all per-block resources. */
+	obstack_free(&env->obst, NULL);
+	del_nodeset(env->already_scheduled);
+	free(env);
+}
+
+static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	ir_mode *mode = get_irn_mode(irn);
+
+	/* A tuple carries no value itself: recurse into all of its
+	 * out edges and sum up the results. */
+	if(mode == mode_T) {
+		const ir_edge_t *edge;
+		int sum = 0;
+
+		foreach_out_edge(irn, edge)
+			sum += get_result_hops_sum(env, get_edge_src_irn(edge));
+
+		return sum;
+	}
+
+	/* A data value contributes its maximal hop count. */
+	if(mode_is_data(mode))
+		return compute_max_hops(env, irn);
+
+	/* Anything else (memory, control flow, ...) costs nothing. */
+	return 0;
+}
+
+static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	int costs = 0;
+	int i, arity;
+
+	/* Charge the maximal hop count of every operand that itself
+	 * appears in the schedule ... */
+	for(i = 0, arity = get_irn_arity(irn); i < arity; ++i) {
+		ir_node *op = get_irn_n(irn, i);
+
+		if(must_appear_in_schedule(env->main_env->vtab, env, op))
+			costs += compute_max_hops(env, op);
+	}
+
+	/* ... plus the hop counts of all values the node produces. */
+	costs += get_result_hops_sum(env, irn);
+
+	return costs;
+}
+
+static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
+{
+	reg_pressure_selector_env_t *env = block_env;
+	ir_node *cand;
+	ir_node *best       = NULL;
+	int      best_costs = INT_MAX;
+
+	assert(nodeset_count(ready_set) > 0);
+
+	/* Pick the non-branch candidate with minimal register pressure costs. */
+	for (cand = nodeset_first(ready_set); cand != NULL; cand = nodeset_next(ready_set)) {
+		/*
+		 * Branches are skipped for now; they shall only be
+		 * scheduled when nothing else remains.
+		 */
+		if (arch_irn_classify(env->main_env->arch_env, cand) != arch_irn_class_branch) {
+			int costs = reg_pr_costs(env, cand);
+
+			if (costs <= best_costs) {
+				best       = cand;
+				best_costs = costs;
+			}
+		}
+	}
+
+	/*
+	 * No candidate was picked, so only branches are ready:
+	 * take the first one and abort the set iteration.
+	 */
+	if(best == NULL) {
+		best = nodeset_first(ready_set);
+		nodeset_break(ready_set);
+
+		assert(best && "There must be a node scheduled.");
+	}
+
+	nodeset_insert(env->already_scheduled, best);
+	return best;
+}