fixed debug output of unary x87 nodes
[libfirm] / ir / be / belistsched.c
index 3aedf5b..f5796dd 100644 (file)
@@ -1,8 +1,9 @@
 /**
  * Scheduling algorithms.
  * Just a simple list scheduling algorithm is here.
- * @date 20.10.2004
+ * @date   20.10.2004
  * @author Sebastian Hack
+ * @cvs-id $Id$
  */
 
 #ifdef HAVE_CONFIG_H
@@ -15,6 +16,7 @@
 #include <limits.h>
 
 #include "benode_t.h"
+#include "be_t.h"
 
 #include "obst.h"
 #include "list.h"
  * All scheduling info needed per node.
  */
 typedef struct _sched_irn_t {
-       sched_timestep_t delay;     /**< The delay for this node if already calculated, else 0. */
-       sched_timestep_t etime;     /**< The earliest time of this node. */
-       unsigned already_sched : 1; /**< Set if this node is already scheduled */
-       unsigned is_root       : 1; /**< is a root node of a block */
+       sched_timestep_t delay;      /**< The delay for this node if already calculated, else 0. */
+       sched_timestep_t etime;      /**< The earliest time of this node. */
+       unsigned num_user;           /**< The number of real users (mode datab) of this node */
+       unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
+       int      reg_diff;           /**< The difference of num(out registers) - num(in registers) */
+       int      preorder;           /**< The pre-order position */
+       unsigned critical_path_len;  /**< The weighted length of the longest critical path */
+       unsigned already_sched : 1;  /**< Set if this node is already scheduled */
+       unsigned is_root       : 1;  /**< is a root node of a block */
 } sched_irn_t;
 
 /**
@@ -57,6 +64,7 @@ typedef struct _sched_env_t {
        const arch_env_t *arch_env;                 /**< The architecture environment. */
        const ir_graph *irg;                        /**< The graph to schedule. */
        void *selector_env;                         /**< A pointer to give to the selector. */
+       int sel_strategy;                           /**< Node selection strategy (muchnik, mueller, isa) */
 } sched_env_t;
 
 #if 0
@@ -96,28 +104,15 @@ static ir_node *trivial_select(void *block_env, nodeset *ready_set)
 {
        const arch_env_t *arch_env = block_env;
        ir_node *irn = NULL;
-       int const_last = 0;
 
        /* assure that branches and constants are executed last */
        for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
-               arch_irn_class_t irn_class = arch_irn_classify(arch_env, irn);
-
-               if (irn_class != arch_irn_class_branch && (const_last ? (irn_class != arch_irn_class_const) : 1)) {
+               if (! arch_irn_class_is(arch_env, irn, branch)) {
                        nodeset_break(ready_set);
                        return irn;
                }
        }
 
-       /* assure that constants are executed before branches */
-       if (const_last) {
-               for (irn = nodeset_first(ready_set); irn; irn = nodeset_next(ready_set)) {
-                       if (arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
-                               nodeset_break(ready_set);
-                               return irn;
-                       }
-               }
-       }
-
 
        /* at last: schedule branches */
        irn = nodeset_first(ready_set);
@@ -138,12 +133,12 @@ static void *trivial_init_block(void *graph_env, ir_node *bl)
 
 static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
 {
-       int res = 0;
+       int res = -1;
 
        if(sel->to_appear_in_schedule)
                res = sel->to_appear_in_schedule(block_env, irn);
 
-       return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
+       return res >= 0 ? res : (to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn));
 }
 
 static const list_sched_selector_t trivial_selector_struct = {
@@ -294,12 +289,14 @@ static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
                        int i, n;
 
                        for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
-                               ir_node *op = get_irn_n(irn, i);
+                               //ir_node *op = get_irn_n(irn, i);
                                if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
                                        usage_stats_t *us = get_or_set_usage_stats(env, irn);
+#if 0 /* Liveness is not computed here! */
                                        if(is_live_end(bl, op))
                                                us->uses_in_block = 99999;
                                        else
+#endif
                                                us->uses_in_block++;
                                }
                        }
@@ -369,7 +366,7 @@ static ir_node *reg_pressure_select(void *block_env, nodeset *ready_set)
                        Ignore branch instructions for the time being.
                        They should only be scheduled if there is nothing else.
                */
-               if (arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
+               if (! arch_irn_class_is(env->main_env->arch_env, irn, branch)) {
                        int costs = reg_pr_costs(env, irn);
                        if (costs <= curr_cost) {
                                res       = irn;
@@ -403,6 +400,7 @@ typedef struct _block_sched_env_t {
        nodeset *cands;                             /**< the set of candidates */
        ir_node *block;                             /**< the current block */
        sched_env_t *sched_env;                     /**< the scheduler environment */
+       nodeset *live;                              /**< simple liveness during scheduling */
        const list_sched_selector_t *selector;
        void *selector_block_env;
        DEBUG_ONLY(firm_dbg_module_t *dbg;)
@@ -455,7 +453,7 @@ static INLINE void mark_root_node(block_sched_env_t *env, ir_node *n)
 /**
  * Get the current delay.
  */
-static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
+static INLINE sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
        int idx = get_irn_idx(n);
 
        assert(idx < ARR_LEN(env->sched_info));
@@ -465,7 +463,7 @@ static sched_timestep_t get_irn_delay(block_sched_env_t *env, ir_node *n) {
 /**
  * Set the current delay.
  */
-static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
+static INLINE void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t delay) {
        int idx = get_irn_idx(n);
 
        assert(idx < ARR_LEN(env->sched_info));
@@ -475,7 +473,7 @@ static void set_irn_delay(block_sched_env_t *env, ir_node *n, sched_timestep_t d
 /**
  * Get the current etime.
  */
-static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
+static INLINE sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
        int idx = get_irn_idx(n);
 
        assert(idx < ARR_LEN(env->sched_info));
@@ -485,13 +483,93 @@ static sched_timestep_t get_irn_etime(block_sched_env_t *env, ir_node *n) {
 /**
  * Set the current etime.
  */
-static void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
+static INLINE void set_irn_etime(block_sched_env_t *env, ir_node *n, sched_timestep_t etime) {
        int idx = get_irn_idx(n);
 
        assert(idx < ARR_LEN(env->sched_info));
        env->sched_info[idx].etime = etime;
 }
 
+/**
+ * Get the number of users.
+ */
+static INLINE unsigned get_irn_num_user(block_sched_env_t *env, ir_node *n) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].num_user;
+}
+
+/**
+ * Set the number of users.
+ */
+static INLINE void set_irn_num_user(block_sched_env_t *env, ir_node *n, unsigned num_user) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].num_user = num_user;
+}
+
+/**
+ * Get the register difference.
+ */
+static INLINE int get_irn_reg_diff(block_sched_env_t *env, ir_node *n) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].reg_diff;
+}
+
+/**
+ * Set the register difference.
+ */
+static INLINE void set_irn_reg_diff(block_sched_env_t *env, ir_node *n, int reg_diff) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].reg_diff = reg_diff;
+}
+
+/**
+ * Get the pre-order position.
+ */
+static INLINE int get_irn_preorder(block_sched_env_t *env, ir_node *n) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].preorder;
+}
+
+/**
+ * Set the pre-order position.
+ */
+static INLINE void set_irn_preorder(block_sched_env_t *env, ir_node *n, int pos) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].preorder = pos;
+}
+
+/**
+ * Get the pre-order position.
+ */
+static INLINE unsigned get_irn_critical_path_len(block_sched_env_t *env, ir_node *n) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].critical_path_len;
+}
+
+/**
+ * Set the critical path length.
+ */
+static INLINE void set_irn_critical_path_len(block_sched_env_t *env, ir_node *n, unsigned len) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].critical_path_len = len;
+}
+
 /**
  * returns the exec-time for node n.
  */
@@ -508,16 +586,16 @@ static sched_timestep_t exectime(sched_env_t *env, ir_node *n) {
  */
 static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle) {
        /* a Keep hides a root */
-  if (be_is_Keep(curr))
+       if (be_is_Keep(curr))
                return exectime(env, pred);
 
        /* Proj's are executed immediately */
        if (is_Proj(curr))
-    return 0;
+               return 0;
 
        /* predecessors Proj's must be skipped */
-  if (is_Proj(pred))
-    pred = get_Proj_pred(pred);
+       if (is_Proj(pred))
+               pred = get_Proj_pred(pred);
 
        if (env->selector->latency)
                return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
@@ -533,8 +611,8 @@ static sched_timestep_t latency(sched_env_t *env, ir_node *pred, int pred_cycle,
  */
 static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
 {
-    int i, n;
-               sched_timestep_t etime_p, etime;
+       int i, n;
+       sched_timestep_t etime_p, etime;
 
     /* Blocks cannot be scheduled. */
     if (is_Block(irn))
@@ -564,16 +642,16 @@ static INLINE int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn
 
     nodeset_insert(env->cands, irn);
 
-               /* calculate the etime of this node */
-               etime = env->curr_time;
-               if (pred) {
-                       etime_p  = get_irn_etime(env, pred);
-                       etime   += latency(env->sched_env, pred, 1, irn, 0);
+       /* calculate the etime of this node */
+       etime = env->curr_time;
+       if (pred) {
+               etime_p  = get_irn_etime(env, pred);
+               etime   += latency(env->sched_env, pred, 1, irn, 0);
 
-                       etime = etime_p > etime ? etime_p : etime;
-               }
+               etime = etime_p > etime ? etime_p : etime;
+       }
 
-               set_irn_etime(env, irn, etime);
+       set_irn_etime(env, irn, etime);
 
     DB((env->dbg, LEVEL_2, "\tmaking ready: %+F etime %u\n", irn, etime));
 
@@ -599,14 +677,117 @@ static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
 }
 
 /**
- * Compare to nodes using pointer equality.
- * @param p1 Node one.
- * @param p2 Node two.
- * @return 0 if they are identical.
+ * Returns the number of not yet scheduled users.
  */
-static int node_cmp_func(const void *p1, const void *p2)
-{
-    return p1 != p2;
+static INLINE int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].num_not_sched_user;
+}
+
+/**
+ * Sets the number of not yet scheduled users.
+ */
+static INLINE void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].num_not_sched_user = num;
+}
+
+/**
+ * Adds @p num to the number of not yet scheduled users and returns the result.
+ */
+static INLINE int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num) {
+       int idx = get_irn_idx(n);
+
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].num_not_sched_user += num;
+       return env->sched_info[idx].num_not_sched_user;
+}
+
+/**
+ * Returns the number of users of a node having mode datab.
+ */
+static int get_num_successors(ir_node *irn) {
+       int sum = 0;
+       const ir_edge_t *edge;
+
+       if (get_irn_mode(irn) == mode_T) {
+               /* for mode_T nodes: count the users of all Projs */
+               foreach_out_edge(irn, edge) {
+                       ir_node *proj = get_edge_src_irn(edge);
+                       ir_mode *mode = get_irn_mode(proj);
+
+                       if (mode == mode_T)
+                               sum += get_num_successors(proj);
+                       else if (mode_is_datab(mode))
+                               sum += get_irn_n_edges(proj);
+               }
+       }
+       else {
+               /* do not count keep-alive edges */
+               foreach_out_edge(irn, edge) {
+                       if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
+                               sum++;
+               }
+       }
+
+       return sum;
+}
+
+/**
+ * Adds irn to @p live, updates all inputs that this user is scheduled
+ * and counts all of its not yet scheduled users.
+ */
+static void update_sched_liveness(block_sched_env_t *env, ir_node *irn) {
+       int i;
+
+       /* ignore Projs */
+       if (is_Proj(irn))
+               return;
+
+       for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
+               ir_node *in = get_irn_n(irn, i);
+
+               /* if in is a proj: update predecessor */
+               while (is_Proj(in))
+                       in = get_Proj_pred(in);
+
+               /* if in is still in the live set: reduce number of users by one */
+               if (nodeset_find(env->live, in)) {
+                       if (add_irn_not_sched_user(env, in, -1) <= 0)
+                               nodeset_remove(env->live, in);
+               }
+       }
+
+       /*
+               get_num_successors returns the number of all users. This includes
+               users in different blocks as well. As each block is scheduled separately,
+               the liveness info of those users will not be updated and so these
+               users will keep up the register pressure as it is desired.
+       */
+       i = get_num_successors(irn);
+       if (i > 0) {
+               set_irn_not_sched_user(env, irn, i);
+               nodeset_insert(env->live, irn);
+       }
+}
+
+/**
+ * Returns the current register pressure for the current block
+ * while scheduling.
+ * This is the number of already scheduled nodes having not yet
+ * scheduled users.
+ */
+static INLINE int get_cur_reg_pressure(block_sched_env_t *env) {
+       /*
+               Nodes with all users scheduled are removed from live set,
+               so the number of nodes in this set represent the current
+               register pressure in this block.
+       */
+       return nodeset_count(env->live);
 }
 
 /**
@@ -623,6 +804,7 @@ static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
         sched_info_t *info = get_irn_sched_info(irn);
         INIT_LIST_HEAD(&info->list);
         info->scheduled = 1;
+               update_sched_liveness(env, irn);
         sched_add_before(env->block, irn);
 
         DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
@@ -671,21 +853,121 @@ static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
        }
 }
 
+/**
+ * Returns the difference of regs_output - regs_input.
+ */
+static int get_reg_difference(block_sched_env_t *be, ir_node *irn) {
+       int num_out = 0;
+       int num_in  = 0;
+       int i;
+
+       if (get_irn_mode(irn) == mode_T) {
+               /* mode_T nodes: num out regs == num Projs with mode datab */
+               const ir_edge_t *edge;
+               foreach_out_edge(irn, edge) {
+                       ir_node *proj = get_edge_src_irn(edge);
+                       if (mode_is_datab(get_irn_mode(proj)))
+                               num_out++;
+               }
+       }
+       else
+               num_out = 1;
+
+       /* num in regs: number of ins with mode datab and not ignore */
+       for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
+               ir_node *in = get_irn_n(irn, i);
+               if (mode_is_datab(get_irn_mode(in)) && ! arch_irn_is(be->sched_env->arch_env, in, ignore))
+                       num_in++;
+       }
+
+       return num_out - num_in;
+}
+
 /**
  * Execute the heuristic function,
  */
 static ir_node *select_node_heuristic(block_sched_env_t *be, nodeset *ns)
 {
-       ir_node *irn;
-
-       for (irn = nodeset_first(ns); irn; irn = nodeset_next(ns)) {
+       ir_node *irn, *cand = NULL;
+
+/* prefer instructions which can be scheduled early */
+#define PRIO_TIME       16
+/* prefer instructions with lots of successors */
+#define PRIO_NUMSUCCS    8
+/* prefer instructions with long critical path */
+#define PRIO_LEVEL      12
+/* prefer instructions coming early in preorder */
+#define PRIO_PREORD      8
+/* weight of current register pressure */
+#define PRIO_CUR_PRESS  20
+/* weight of register pressure difference */
+#define PRIO_CHG_PRESS   8
+
+       /* schedule keeps as early as possible */
+       foreach_nodeset(ns, irn) {
                if (be_is_Keep(irn)) {
                        nodeset_break(ns);
                        return irn;
                }
        }
 
-       return be->selector->select(be->selector_block_env, ns);
+       if (be->sched_env->sel_strategy & BE_SCHED_SELECT_HEUR) {
+               int max_prio     = INT_MIN;
+               int cur_prio     = INT_MIN;
+               int cur_pressure = get_cur_reg_pressure(be);
+               int reg_fact, cand_reg_fact;
+
+               /* priority based selection, heuristic inspired by mueller diss */
+               foreach_nodeset(ns, irn) {
+                       /* make sure that branches are scheduled last */
+                       if (! arch_irn_class_is(be->sched_env->arch_env, irn, branch)) {
+                               int rdiff = get_irn_reg_diff(be, irn);
+                               int sign  = rdiff < 0;
+                               int chg   = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;
+
+                               reg_fact = chg << cur_pressure;
+                               if (reg_fact < chg)
+                                       reg_fact = INT_MAX - 2;
+                               reg_fact = sign ? -reg_fact : reg_fact;
+
+                               cur_prio = (get_irn_critical_path_len(be, irn) << PRIO_LEVEL)
+                                       //- (get_irn_delay(be, irn) << PRIO_LEVEL)
+                                       + (get_irn_num_user(be, irn) << PRIO_NUMSUCCS)
+                                       - (get_irn_etime(be, irn) << PRIO_TIME)
+//                                     - ((get_irn_reg_diff(be, irn) >> PRIO_CHG_PRESS) << ((cur_pressure >> PRIO_CUR_PRESS) - 3))
+                                       - reg_fact
+                                       + (get_irn_preorder(be, irn) << PRIO_PREORD); /* high preorder means early schedule */
+                               if (cur_prio > max_prio) {
+                                       cand          = irn;
+                                       max_prio      = cur_prio;
+                                       cand_reg_fact = reg_fact;
+                               }
+
+                               DBG((be->dbg, LEVEL_4, "checked NODE %+F\n", irn));
+                               DBG((be->dbg, LEVEL_4, "\tpriority: %d\n", cur_prio));
+                               DBG((be->dbg, LEVEL_4, "\tpath len: %d (%d)\n", get_irn_critical_path_len(be, irn), get_irn_critical_path_len(be, irn) << PRIO_LEVEL));
+                               DBG((be->dbg, LEVEL_4, "\tdelay:    %d (%d)\n", get_irn_delay(be, irn), get_irn_delay(be, irn) << PRIO_LEVEL));
+                               DBG((be->dbg, LEVEL_4, "\t#user:    %d (%d)\n", get_irn_num_user(be, irn), get_irn_num_user(be, irn) << PRIO_NUMSUCCS));
+                               DBG((be->dbg, LEVEL_4, "\tetime:    %d (%d)\n", get_irn_etime(be, irn), -(get_irn_etime(be, irn) << PRIO_TIME)));
+                               DBG((be->dbg, LEVEL_4, "\tpreorder: %d (%d)\n", get_irn_preorder(be, irn), get_irn_preorder(be, irn) << PRIO_PREORD));
+                               DBG((be->dbg, LEVEL_4, "\treg diff: %d (%d)\n", get_irn_reg_diff(be, irn), -cand_reg_fact));
+                               DBG((be->dbg, LEVEL_4, "\tpressure: %d\n", cur_pressure));
+                       }
+               }
+
+               if (cand) {
+                       DBG((be->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
+               }
+               else {
+                       cand = nodeset_first(ns);
+               }
+       }
+       else {
+               /* use backend selector */
+               cand = be->selector->select(be->selector_block_env, ns);
+       }
+
+       return cand;
 }
 
 /**
@@ -712,17 +994,25 @@ static int is_root(ir_node *root, ir_node *block) {
 static char _mark;
 #define MARK   &_mark
 
+static firm_dbg_module_t *xxxdbg;
+
 /**
  * descent into a dag and create a pre-order list.
  */
-static void descent(ir_node *root, ir_node *block, ir_node **list) {
+static void descent(ir_node *root, ir_node *block, ir_node **list, block_sched_env_t *env, unsigned path_len) {
        int i;
 
        if (! is_Phi(root)) {
+               path_len += exectime(env->sched_env, root);
+               if (get_irn_critical_path_len(env, root) < path_len) {
+                       set_irn_critical_path_len(env, root, path_len);
+               }
+
                /* Phi nodes always leave the block */
                for (i = get_irn_arity(root) - 1; i >= 0; --i) {
                        ir_node *pred = get_irn_n(root, i);
 
+                       DBG((xxxdbg, LEVEL_3, "   node %+F\n", pred));
                        /* Blocks may happen as predecessors of End nodes */
                        if (is_Block(pred))
                                continue;
@@ -735,7 +1025,9 @@ static void descent(ir_node *root, ir_node *block, ir_node **list) {
                        if (get_nodes_block(pred) != block)
                                continue;
 
-                       descent(pred, block, list);
+                       set_irn_link(pred, NULL);
+
+                       descent(pred, block, list, env, path_len);
                }
        }
        set_irn_link(root, *list);
@@ -763,7 +1055,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
        block_sched_env_t be;
        const ir_edge_t *edge;
        ir_node *irn;
-       int j, m;
+       int j, m, cur_pos;
 
        ir_node *root = NULL, *preord = NULL;
        ir_node *curr;
@@ -776,11 +1068,13 @@ static void list_sched_block(ir_node *block, void *env_ptr)
        be.block             = block;
        be.curr_time         = 0;
        be.cands             = new_nodeset(get_irn_n_edges(block));
+       be.live              = new_nodeset(get_irn_n_edges(block));
        be.selector          = selector;
        be.sched_env         = env;
        FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
+       FIRM_DBG_REGISTER(xxxdbg, "firm.be.sched");
 
-//     firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
+       //      firm_dbg_set_mask(be.dbg, SET_LEVEL_3);
 
        if (selector->init_block)
                be.selector_block_env = selector->init_block(env->selector_env, block);
@@ -804,17 +1098,18 @@ static void list_sched_block(ir_node *block, void *env_ptr)
        preord = NULL;
        for (curr = root; curr; curr = irn) {
                irn = get_irn_link(curr);
-               descent(curr, block, &preord);
+               DBG((be.dbg, LEVEL_2, "   DAG root %+F\n", curr));
+               descent(curr, block, &preord, &be, 0);
        }
        root = preord;
 
        /* Third step: calculate the Delay. Note that our
-        * list is now in pre-order, starting at root
-        */
-       for (curr = root; curr; curr = get_irn_link(curr)) {
+       * list is now in pre-order, starting at root
+       */
+       for (cur_pos = 0, curr = root; curr; curr = get_irn_link(curr), cur_pos++) {
                sched_timestep_t d;
 
-               if (arch_irn_classify(env->arch_env, curr) == arch_irn_class_branch) {
+               if (arch_irn_class_is(env->arch_env, curr, branch)) {
                        /* assure, that branches can be executed last */
                        d = 0;
                }
@@ -840,6 +1135,8 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 
                /* set the etime of all nodes to 0 */
                set_irn_etime(&be, curr, 0);
+
+               set_irn_preorder(&be, curr, cur_pos);
        }
 
 
@@ -853,7 +1150,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 
                if (is_Phi(irn)) {
                        /* Phi functions are scheduled immediately, since they only transfer
-                        * data flow from the predecessors to this block. */
+                       * data flow from the predecessors to this block. */
 
                        /* Increase the time step. */
                        be.curr_time += get_irn_etime(&be, irn);
@@ -869,7 +1166,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
                }
                else {
                        /* Other nodes must have all operands in other blocks to be made
-                        * ready */
+                       * ready */
                        int ready = 1;
 
                        /* Check, if the operands of a node are not local to this block */
@@ -880,6 +1177,10 @@ static void list_sched_block(ir_node *block, void *env_ptr)
                                        ready = 0;
                                        break;
                                }
+                               else {
+                                       /* live in values increase register pressure */
+                                       update_sched_liveness(&be, operand);
+                               }
                        }
 
                        /* Make the node ready, if all operands live in a foreign block */
@@ -887,12 +1188,19 @@ static void list_sched_block(ir_node *block, void *env_ptr)
                                DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
                                make_ready(&be, NULL, irn);
                        }
+
+                       /* calculate number of users (needed for heuristic) */
+                       set_irn_num_user(&be, irn, get_num_successors(irn));
+
+                       /* calculate register difference (needed for heuristic) */
+                       set_irn_reg_diff(&be, irn, get_reg_difference(&be, irn));
                }
        }
 
        while (nodeset_count(be.cands) > 0) {
                nodeset *mcands;                            /**< the set of candidates with maximum delay time */
                nodeset *ecands;                            /**< the set of nodes in mcands whose etime <= curr_time  */
+               nodeset *local_cands;
                sched_timestep_t max_delay = 0;
 
                /* collect statistics about amount of ready nodes */
@@ -904,60 +1212,78 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 
                        max_delay = d > max_delay ? d : max_delay;
                }
-               mcands = new_nodeset(8);
-               ecands = new_nodeset(8);
+
+               if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+                       mcands = new_nodeset(8);
+                       ecands = new_nodeset(8);
+               }
+               else {
+                       local_cands = new_nodeset(8);
+               }
 
                /* calculate mcands and ecands */
                foreach_nodeset(be.cands, irn) {
-      if (be_is_Keep(irn)) {
-        nodeset_break(be.cands);
-        break;
-      }
-                       if (get_irn_delay(&be, irn) == max_delay) {
-                               nodeset_insert(mcands, irn);
-                               if (get_irn_etime(&be, irn) <= be.curr_time)
-                                       nodeset_insert(ecands, irn);
+                       if (be_is_Keep(irn)) {
+                               nodeset_break(be.cands);
+                               break;
+                       }
+                       if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+                               if (get_irn_delay(&be, irn) == max_delay) {
+                                       nodeset_insert(mcands, irn);
+                                       if (get_irn_etime(&be, irn) <= be.curr_time)
+                                               nodeset_insert(ecands, irn);
+                               }
+                       }
+                       else {
+                               nodeset_insert(local_cands, irn);
                        }
                }
 
-    if (irn) {
-      /* Keeps must be immediately scheduled */
-    }
-    else {
-                 DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
-
-                 /* select a node to be scheduled and check if it was ready */
-                 if (nodeset_count(mcands) == 1) {
-                         DB((be.dbg, LEVEL_3, "\tmcand = 1, max_delay = %u\n", max_delay));
-                         irn = nodeset_first(mcands);
-                 }
-                 else {
-                         int cnt = nodeset_count(ecands);
-                         if (cnt == 1) {
-                                       arch_irn_class_t irn_class;
-
-                                 irn = nodeset_first(ecands);
-                                       irn_class = arch_irn_classify(env->arch_env, irn);
-
-                                       if (irn_class == arch_irn_class_branch) {
+               if (irn) {
+                       /* Keeps must be immediately scheduled */
+               }
+               else {
+                       DB((be.dbg, LEVEL_2, "\tbe.curr_time = %u\n", be.curr_time));
+
+                       /* select a node to be scheduled and check if it was ready */
+                       if (! (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK)) {
+                               DB((be.dbg, LEVEL_3, "\tlocal_cands = %d\n", nodeset_count(local_cands)));
+                               irn = select_node_heuristic(&be, local_cands);
+                       }
+                       else if (nodeset_count(mcands) == 1) {
+                               irn = nodeset_first(mcands);
+                               DB((be.dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
+                       }
+                       else {
+                               int cnt = nodeset_count(ecands);
+                               if (cnt == 1) {
+                                       irn = nodeset_first(ecands);
+
+                                       if (arch_irn_class_is(env->arch_env, irn, branch)) {
                                                /* BEWARE: don't select a JUMP if others are still possible */
                                                goto force_mcands;
                                        }
-                                 DB((be.dbg, LEVEL_3, "\tecand = 1, max_delay = %u\n", max_delay));
-                         }
-                         else if (cnt > 1) {
-                                 DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
-                                 irn = select_node_heuristic(&be, ecands);
-                         }
-                         else {
+                                       DB((be.dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
+                               }
+                               else if (cnt > 1) {
+                                       DB((be.dbg, LEVEL_3, "\tecand = %d, max_delay = %u\n", cnt, max_delay));
+                                       irn = select_node_heuristic(&be, ecands);
+                               }
+                               else {
 force_mcands:
-                                 DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
-                                 irn = select_node_heuristic(&be, mcands);
-                         }
-                 }
-    }
-               del_nodeset(mcands);
-               del_nodeset(ecands);
+                                       DB((be.dbg, LEVEL_3, "\tmcand = %d\n", nodeset_count(mcands)));
+                                       irn = select_node_heuristic(&be, mcands);
+                               }
+                       }
+               }
+
+               if (be.sched_env->sel_strategy & BE_SCHED_SELECT_MUCHNIK) {
+                       del_nodeset(mcands);
+                       del_nodeset(ecands);
+               }
+               else {
+                       del_nodeset(local_cands);
+               }
 
                DB((be.dbg, LEVEL_2, "\tpicked node %+F\n", irn));
 
@@ -981,6 +1307,7 @@ force_mcands:
                selector->finish_block(be.selector_block_env);
 
        del_nodeset(be.cands);
+       del_nodeset(be.live);
 }
 
 static const list_sched_selector_t reg_pressure_selector_struct = {
@@ -997,7 +1324,7 @@ static const list_sched_selector_t reg_pressure_selector_struct = {
 const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
 
 /* List schedule a graph. */
-void list_sched(const be_irg_t *birg, int enable_mris)
+void list_sched(const be_irg_t *birg, be_options_t *be_opts)
 {
        const arch_env_t *arch_env = birg->main_env->arch_env;
        ir_graph *irg              = birg->irg;
@@ -1009,16 +1336,17 @@ void list_sched(const be_irg_t *birg, int enable_mris)
        /* Assure, that the out edges are computed */
        edges_assure(irg);
 
-       if(enable_mris)
+       if(be_opts->mris)
                mris = be_sched_mris_preprocess(birg);
 
        num_nodes = get_irg_last_idx(irg);
 
        memset(&env, 0, sizeof(env));
-       env.selector   = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
-       env.arch_env   = arch_env;
-       env.irg        = irg;
-       env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
+       env.selector     = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
+       env.arch_env     = arch_env;
+       env.irg          = irg;
+       env.sel_strategy = be_opts->sched_select;
+       env.sched_info   = NEW_ARR_F(sched_irn_t, num_nodes);
 
        memset(env.sched_info, 0, num_nodes * sizeof(*env.sched_info));
 
@@ -1031,7 +1359,7 @@ void list_sched(const be_irg_t *birg, int enable_mris)
        if (env.selector->finish_graph)
                env.selector->finish_graph(env.selector_env);
 
-       if(enable_mris)
+       if(be_opts->mris)
                be_sched_mris_free(mris);
 
        DEL_ARR_F(env.sched_info);