+#if 0
+/*
+ * Ugly global variable for the compare function
+ * since qsort(3) does not pass an extra pointer.
+ */
+static ir_node *curr_bl = NULL;
+
+/*
+ * NOTE(review): this whole section is disabled with #if 0. If it is
+ * ever re-enabled, beware that `env` below is read without ever being
+ * initialized (it presumably should be derived from the global above
+ * or from a qsort_r-style context argument) and that the comparison
+ * uses `a`/`b` directly while the typed aliases `p`/`q` go unused.
+ */
+static int cmp_usage(const void *a, const void *b)
+{
+ struct trivial_sched_env *env;
+ const ir_node *p = a;
+ const ir_node *q = b;
+ int res = 0;
+
+ res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
+
+ /*
+ * One of them is live at the end of the block.
+ * Then, that one shall be scheduled after the other.
+ */
+ if(res != 0)
+ return res;
+
+
+ return res;
+}
+#endif
+
+/**
+ * The trivial selector:
+ * Just assure that branches are executed last, otherwise select
+ * the first node ready.
+ *
+ * @param block_env The per-block environment; for this selector it is
+ *                  simply the architecture environment.
+ * @param ready_set The set of nodes currently ready to be scheduled.
+ * @return The node to be scheduled next.
+ */
+static ir_node *trivial_select(void *block_env, nodeset *ready_set)
+{
+ const arch_env_t *arch_env = block_env;
+ ir_node *irn = NULL;
+ int const_last = 0;
+
+ /*
+ * NOTE(review): const_last is initialized to 0 and never changed in
+ * this function, so the constant-specific filter below and the whole
+ * second loop are currently dead code -- confirm whether it was meant
+ * to become a selector option.
+ */
+
+ /* assure that branches and constants are executed last */
+ for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
+ arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);
+
+ if (irn_class != arch_irn_class_branch && (const_last ? (irn_class != arch_irn_class_const) : 1)) {
+ /* leaving the nodeset iteration early requires a break call */
+ nodeset_break(ready_set);
+ return irn;
+ }
+ }
+
+ /* assure that constants are executed before branches */
+ if (const_last) {
+ for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
+ if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
+ nodeset_break(ready_set);
+ return irn;
+ }
+ }
+ }
+
+
+ /* at last: schedule branches */
+ irn = nodeset_first(ready_set);
+ nodeset_break(ready_set);
+
+ return irn;
+}
+
+/**
+ * Graph-level initialization for the trivial selector.
+ * No per-graph state is needed; the architecture environment itself
+ * serves as the environment handed to the per-block callbacks.
+ */
+static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+{
+	return (void *) arch_env;
+}
+
+/**
+ * Block-level initialization for the trivial selector.
+ * The graph environment (the architecture environment) is simply
+ * passed through unchanged.
+ */
+static void *trivial_init_block(void *graph_env, ir_node *bl)
+{
+	return graph_env;
+}
+
+/**
+ * Check whether a node has to show up in the final schedule.
+ *
+ * A node must appear if the selector's optional to_appear_in_schedule
+ * callback says so, or if any of the generic criteria (the global
+ * to_appear_in_schedule() predicate, Keep nodes, RegParams nodes) hold.
+ *
+ * @param sel       The selector vtable (its callback may be NULL).
+ * @param block_env The block environment passed to the callback.
+ * @param irn       The node in question.
+ * @return Non-zero if the node must be scheduled, 0 otherwise.
+ */
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
+{
+	/* The selector-specific callback gets the first say, if installed. */
+	if(sel->to_appear_in_schedule && sel->to_appear_in_schedule(block_env, irn))
+		return 1;
+
+	/* Fall back to the generic criteria. */
+	return to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
+}
+
+/**
+ * Virtual function table of the trivial list-scheduling selector.
+ * Only the three mandatory callbacks are provided; all optional hooks
+ * are left NULL.
+ */
+static const list_sched_selector_t trivial_selector_struct = {
+ trivial_init_graph,
+ trivial_init_block,
+ trivial_select,
+ NULL, /* to_appear_in_schedule */
+ NULL, /* exectime */
+ NULL, /* latency */
+ NULL, /* finish_block */
+ NULL /* finish_graph */
+};
+
+/** Publicly visible handle to the trivial selector. */
+const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
+
+/**
+ * Per-node usage statistics for the register pressure selector.
+ * Records live on the block environment's obstack, are linked into a
+ * list rooted in the environment and are cached in the node's link
+ * field (see get_or_set_usage_stats()).
+ */
+typedef struct _usage_stats_t {
+ ir_node *irn; /**< The node these statistics belong to. */
+ struct _usage_stats_t *next; /**< Next record in the per-block list. */
+ int max_hops; /**< INT_MAX until computed; presumably the result of
+ compute_max_hops() -- confirm at the call site. */
+ int uses_in_block; /**< Number of uses inside the current block. */
+ int already_consumed; /**< Number of insns using this value already
+ scheduled. */
+} usage_stats_t;
+
+/**
+ * Graph-level environment of the register pressure selector,
+ * shared by all blocks of the graph.
+ */
+typedef struct {
+ const list_sched_selector_t *vtab; /**< The selector vtable in use. */
+ const arch_env_t *arch_env; /**< The architecture environment. */
+} reg_pressure_main_env_t;
+
+/**
+ * Per-block environment of the register pressure selector.
+ */
+typedef struct {
+ struct obstack obst; /**< Obstack the usage_stats_t records are allocated on. */
+ const reg_pressure_main_env_t *main_env; /**< The graph-level environment. */
+ usage_stats_t *root; /**< Head of the list of all usage stats records. */
+ nodeset *already_scheduled; /**< Set of nodes already scheduled in this block. */
+} reg_pressure_selector_env_t;
+
+/**
+ * Get the usage statistics record of a node, creating a fresh one on
+ * the environment's obstack if the node has none yet.
+ *
+ * A new record is cached in the node's link field and prepended to
+ * env->root so all records can be walked later.
+ *
+ * @param env The per-block selector environment owning the obstack.
+ * @param irn The node whose statistics are requested.
+ * @return The (possibly freshly created) usage statistics record.
+ */
+static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	usage_stats_t *us = get_irn_link(irn);
+
+	if(!us) {
+		us = obstack_alloc(&env->obst, sizeof(us[0]));
+		us->irn              = irn;
+		/* BUGFIX: obstack_alloc() does not zero memory; uses_in_block
+		 * was left uninitialized before. */
+		us->uses_in_block    = 0;
+		us->already_consumed = 0;
+		us->max_hops         = INT_MAX;
+		us->next             = env->root;
+		env->root            = us;
+		set_irn_link(irn, us);
+	}
+
+	return us;
+}
+
+/**
+ * Fetch the usage statistics record already attached to a node's
+ * link field. The record must have been created beforehand with
+ * get_or_set_usage_stats().
+ */
+static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
+{
+	usage_stats_t *stats = get_irn_link(irn);
+
+	assert(stats && "This node must have usage stats");
+	return stats;
+}
+
+/**
+ * Walk upwards over the operands of @p irn and determine the number of
+ * steps ("hops") until the region of already scheduled nodes of the
+ * block @p curr_bl is reached.
+ *
+ * @param env        The per-block selector environment.
+ * @param irn        The node the walk currently stands on.
+ * @param curr_bl    The block currently being scheduled.
+ * @param depth      Number of hops taken so far.
+ * @param visited_nr Visited counter guarding against re-walking nodes.
+ * @return 0 if the walk left the block into a dominating block,
+ *         INT_MAX if it left into a non-dominating block, otherwise
+ *         the maximal depth at which a scheduled node was reached.
+ */
+static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
+{
+	ir_node *bl = get_nodes_block(irn);
+
+	/*
+	 * If the reached node is not in the block desired,
+	 * return the value passed for this situation.
+	 *
+	 * BUGFIX: the old code compared get_nodes_block(irn) against bl,
+	 * i.e. against itself, so this branch was never taken.
+	 */
+	if(bl != curr_bl)
+		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
+
+	/*
+	 * If the node is in the current block but not
+	 * yet scheduled, we keep on searching from that node.
+	 */
+	if(!nodeset_find(env->already_scheduled, irn)) {
+		int i, n;
+		int res = 0;
+		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+			ir_node *operand = get_irn_n(irn, i);
+
+			if(get_irn_visited(operand) < visited_nr) {
+				int tmp;
+
+				set_irn_visited(operand, visited_nr);
+				tmp = max_hops_walker(env, operand, curr_bl, depth + 1, visited_nr);
+				res = MAX(tmp, res);
+			}
+		}
+
+		return res;
+	}
+
+	/*
+	 * If the node is in the current block and scheduled, return
+	 * the depth which indicates the number of steps to the
+	 * region of scheduled nodes.
+	 */
+	return depth;
+}
+
+/**
+ * Compute, over all users of @p irn, the maximal hop count reported by
+ * max_hops_walker() when walking from the user back towards the region
+ * of already scheduled nodes of irn's block.
+ *
+ * A fresh visited counter is used for every user so that each walk is
+ * independent.
+ *
+ * @param env The per-block selector environment.
+ * @param irn The node whose users are examined.
+ * @return The maximal hop count over all users (0 if irn has none).
+ */
+static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+	ir_node *bl = get_nodes_block(irn);
+	ir_graph *irg = get_irn_irg(bl);
+	int res = 0;
+
+	const ir_edge_t *edge;
+
+	foreach_out_edge(irn, edge) {
+		ir_node *user = get_edge_src_irn(edge);
+		unsigned visited_nr = get_irg_visited(irg) + 1;
+		int max_hops;
+
+		set_irg_visited(irg, visited_nr);
+		/* BUGFIX: the walker expects the current *block* as its third
+		 * argument; the old code passed irn itself. */
+		max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
+		res = MAX(res, max_hops);
+	}
+
+	return res;
+}
+
+/**
+ * Graph-level initialization for the register pressure selector.
+ *
+ * Allocates the main environment, remembers the vtable and the
+ * architecture environment, and clears every node's link field since
+ * the selector stores its usage statistics there.
+ *
+ * Ownership: the returned environment is heap-allocated and must be
+ * released by the corresponding finish/done hook.
+ */
+static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+{
+	reg_pressure_main_env_t *main_env = xmalloc(sizeof(*main_env));
+
+	main_env->vtab     = vtab;
+	main_env->arch_env = arch_env;
+
+	/* Reset all link fields; they will carry usage_stats_t pointers. */
+	irg_walk_graph(irg, firm_clear_link, NULL, NULL);
+
+	return main_env;
+}
+
+static void *reg_pressure_block_init(void *graph_env, ir_node *bl)