backend: created a (not so nice) macro to iterate over all values defined by an instr...
[libfirm] / ir / be / belistsched.c
index 9a9c690..4690990 100644
@@ -1,20 +1,38 @@
-/**
- * Scheduling algorithms.
- * Just a simple list scheduling algorithm is here.
- * @date 20.10.2004
- * @author Sebastian Hack
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
  */
 
-#ifdef HAVE_CONFIG_H
+/**
+ * @file
+ * @brief       Primitive list scheduling with different node selectors.
+ * @author      Sebastian Hack
+ * @date        20.10.2004
+ * @version     $Id$
+ */
 #include "config.h"
-#endif
 
 #include <stdio.h>
 #include <stdarg.h>
 #include <string.h>
 #include <limits.h>
 
-#include "benode_t.h"
+#include "benode.h"
+#include "be_t.h"
 
 #include "obst.h"
 #include "list.h"
 #include "irmode_t.h"
 #include "irdump.h"
 #include "irprintf_t.h"
+#include "array.h"
 #include "debug.h"
+#include "irtools.h"
 
-#include "besched_t.h"
+#include "bemodule.h"
+#include "besched.h"
 #include "beutil.h"
 #include "belive_t.h"
 #include "belistsched.h"
+#include "beschedmris.h"
+#include "beschedrss.h"
 #include "bearch.h"
+#include "bestat.h"
+#include "beirg.h"
 
-#define MAX(x,y) ((x) > (y) ? (x) : (y))
-#define MIN(x,y) ((x) < (y) ? (x) : (y))
+#include "lc_opts.h"
+#include "lc_opts_enum.h"
 
-/**
- * Scheduling environment for the whole graph.
- */
-typedef struct _sched_env_t {
-    const list_sched_selector_t *selector;      /**< The node selector. */
-       const arch_env_t *arch_env;                 /**< The architecture enviromnent. */
-       const ir_graph *irg;                        /**< The graph to schedule. */
-    void *selector_env;                         /**< A pointer to give to the selector. */
-} sched_env_t;
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL);
 
-#if 0
-/*
- * Ugly global variable for the compare function
- * since qsort(3) does not pass an extra pointer.
- */
-static ir_node *curr_bl = NULL;
-
-static int cmp_usage(const void *a, const void *b)
-{
-       struct trivial_sched_env *env;
-       const ir_node *p = a;
-       const ir_node *q = b;
-       int res = 0;
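+/** Checks for backend nodes (Keep, CopyKeep, Start) that are placed in the
+ * schedule by default, see must_appear_in_schedule(). */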
+#define BE_SCHED_NODE(irn) (be_is_Keep(irn) || be_is_CopyKeep(irn) || be_is_Start(irn))
 
-       res = is_live_end(env->curr_bl, a) - is_live_end(env->curr_bl, b);
-
-       /*
-        * One of them is live at the end of the block.
-        * Then, that one shall be scheduled at after the other
-        */
-       if(res != 0)
-               return res;
-
-
-       return res;
-}
-#endif
-
-static ir_node *trivial_select(void *block_env, pset *ready_set)
-{
-       const arch_env_t *arch_env = block_env;
-       ir_node *irn = NULL;
-
-       for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
-               if(arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
-                       pset_break(ready_set);
-                       return irn;
-               }
-       }
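+/** The node selectors that can be chosen via the "select" option below. */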
+enum {
+       BE_SCHED_SELECT_TRIVIAL,
+       BE_SCHED_SELECT_REGPRESS,
+       BE_SCHED_SELECT_MUCHNIK,
+       BE_SCHED_SELECT_HEUR,
+       BE_SCHED_SELECT_HMUCHNIK,
+       BE_SCHED_SELECT_RANDOM,
+       BE_SCHED_SELECT_NORMAL,
+};
 
-       irn = pset_first(ready_set);
-       pset_break(ready_set);
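+/** The schedule preparation phases that can be chosen via the "prep" option below. */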
+enum {
+       BE_SCHED_PREP_NONE = 0,
+       BE_SCHED_PREP_MRIS = 2,
+       BE_SCHED_PREP_RSS  = 3
+};
 
-       return irn;
-}
+typedef struct _list_sched_options_t {
+       int select;  /**< the node selector */
+       int prep;    /**< schedule preparation */
+} list_sched_options_t;
 
-static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
-{
-       return (void *) arch_env;
-}
+static list_sched_options_t list_sched_options = {
+	BE_SCHED_SELECT_NORMAL,   /* default: normal selector */
+       BE_SCHED_PREP_NONE,       /* no scheduling preparation */
+};
 
-static void *trivial_init_block(void *graph_env, ir_node *bl)
-{
-       return graph_env;
-}
+/* schedule selector options. */
+static const lc_opt_enum_int_items_t sched_select_items[] = {
+       { "trivial",  BE_SCHED_SELECT_TRIVIAL  },
+       { "random",   BE_SCHED_SELECT_RANDOM   },
+       { "regpress", BE_SCHED_SELECT_REGPRESS },
+       { "normal",   BE_SCHED_SELECT_NORMAL   },
+       { "muchnik",  BE_SCHED_SELECT_MUCHNIK  },
+       { "heur",     BE_SCHED_SELECT_HEUR     },
+       { "hmuchnik", BE_SCHED_SELECT_HMUCHNIK },
+       { NULL,       0 }
+};
 
-static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
-{
-       int res = 0;
+/* schedule preparation options. */
+static const lc_opt_enum_int_items_t sched_prep_items[] = {
+       { "none", BE_SCHED_PREP_NONE },
+       { "mris", BE_SCHED_PREP_MRIS },
+       { "rss",  BE_SCHED_PREP_RSS  },
+       { NULL,   0 }
+};
 
-       if(sel->to_appear_in_schedule)
-               res = sel->to_appear_in_schedule(block_env, irn);
+static lc_opt_enum_int_var_t sched_select_var = {
+       &list_sched_options.select, sched_select_items
+};
 
-       return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
-}
+static lc_opt_enum_int_var_t sched_prep_var = {
+       &list_sched_options.prep, sched_prep_items
+};
 
-static const list_sched_selector_t trivial_selector_struct = {
-       trivial_init_graph,
-       trivial_init_block,
-       trivial_select,
-       NULL,
-       NULL,
-       NULL
+static const lc_opt_table_entry_t list_sched_option_table[] = {
+       LC_OPT_ENT_ENUM_PTR("prep",   "schedule preparation",   &sched_prep_var),
+       LC_OPT_ENT_ENUM_PTR("select", "node selector",          &sched_select_var),
+       LC_OPT_LAST
 };
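+/* Both options are registered under the "be.listsched" option group in
+ * be_init_listsched() below. */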
 
-const list_sched_selector_t *trivial_selector = &trivial_selector_struct;
+/**
+ * All scheduling info needed per node.
+ */
+typedef struct _sched_irn_t {
+       unsigned num_not_sched_user; /**< The number of not yet scheduled users of this node */
+       unsigned already_sched : 1;  /**< Set if this node is already scheduled */
+} sched_irn_t;
 
-typedef struct _usage_stats_t {
-       ir_node *irn;
-       struct _usage_stats_t *next;
-       int max_hops;
-       int uses_in_block;      /**< Number of uses inside the current block. */
-       int already_consumed;   /**< Number of insns using this value already
-                                                         scheduled. */
-} usage_stats_t;
-
-typedef struct {
-       const list_sched_selector_t *vtab;
-       const arch_env_t *arch_env;
-} reg_pressure_main_env_t;
-
-typedef struct {
-       struct obstack obst;
-       const reg_pressure_main_env_t *main_env;
-       usage_stats_t *root;
-       pset *already_scheduled;
-} reg_pressure_selector_env_t;
-
-static INLINE usage_stats_t *get_or_set_usage_stats(reg_pressure_selector_env_t *env, ir_node *irn)
-{
-       usage_stats_t *us = get_irn_link(irn);
-
-       if(!us) {
-               us                   = obstack_alloc(&env->obst, sizeof(us[0]));
-               us->irn              = irn;
-               us->already_consumed = 0;
-               us->max_hops         = INT_MAX;
-               us->next             = env->root;
-               env->root            = us;
-               set_irn_link(irn, us);
-       }
+/**
+ * Scheduling environment for the whole graph.
+ */
+typedef struct _sched_env_t {
+       sched_irn_t *sched_info;                    /**< scheduling info per node */
+       const list_sched_selector_t *selector;      /**< The node selector. */
+       void *selector_env;                         /**< A pointer to give to the selector. */
+} sched_env_t;
 
-       return us;
-}
+/**
+ * Environment for a block scheduler.
+ */
+typedef struct _block_sched_env_t {
+       sched_irn_t *sched_info;                    /**< scheduling info per node, copied from the global scheduler object */
+       ir_nodeset_t cands;                         /**< the set of candidates */
+       ir_node *block;                             /**< the current block */
+       sched_env_t *sched_env;                     /**< the scheduler environment */
+       ir_nodeset_t live;                          /**< simple liveness during scheduling */
+	const list_sched_selector_t *selector;      /**< the node selector */
+	void *selector_block_env;                   /**< the block environment of the selector */
+} block_sched_env_t;
 
-static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
+/**
+ * Returns non-zero if a node must be placed in the schedule.
+ */
+static inline int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
 {
-       usage_stats_t *us = get_irn_link(irn);
-       assert(us && "This node must have usage stats");
-       return us;
-}
+       int res = -1;
 
-static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
-{
-       ir_node *bl = get_nodes_block(irn);
-       /*
-        * If the reached node is not in the block desired,
-        * return the value passed for this situation.
-        */
-       if(get_nodes_block(irn) != bl)
-               return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
+       /* if there are no uses, don't schedule */
+       if (get_irn_n_edges(irn) < 1)
+               return 0;
 
-       /*
-        * If the node is in the current block but not
-        * yet scheduled, we keep on searching from that node.
-        */
-       if(!pset_find_ptr(env->already_scheduled, irn)) {
-               int i, n;
-               int res = 0;
-               for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
-                       ir_node *operand = get_irn_n(irn, i);
-
-                       if(get_irn_visited(operand) < visited_nr) {
-                               int tmp;
-
-                               set_irn_visited(operand, visited_nr);
-                               tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
-                               res = MAX(tmp, res);
-                       }
-               }
-
-               return res;
-       }
+	/* else ask the selector */
+       if (sel->to_appear_in_schedule)
+               res = sel->to_appear_in_schedule(block_env, irn);
 
-       /*
-        * If the node is in the current block and scheduled, return
-        * the depth which indicates the number of steps to the
-        * region of scheduled nodes.
-        */
-       return depth;
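+	/* the selector had no opinion (res < 0): fall back to the generic check */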
+       return res >= 0 ? res : ((to_appear_in_schedule(irn) || BE_SCHED_NODE(irn)) && ! is_Unknown(irn));
 }
 
-static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
+/**
+ * Returns non-zero if the node is already scheduled
+ */
+static inline int is_already_scheduled(block_sched_env_t *env, ir_node *n)
 {
-       ir_node *bl   = get_nodes_block(irn);
-       ir_graph *irg = get_irn_irg(bl);
-       int res       = 0;
-
-       const ir_edge_t *edge;
-
-       foreach_out_edge(irn, edge) {
-               ir_node *user       = get_edge_src_irn(edge);
-               unsigned visited_nr = get_irg_visited(irg) + 1;
-               int max_hops;
-
-               set_irg_visited(irg, visited_nr);
-               max_hops = max_hops_walker(env, user, irn, 0, visited_nr);
-               res      = MAX(res, max_hops);
-       }
+       int idx = get_irn_idx(n);
 
-       return res;
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].already_sched;
 }
 
-static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+/**
+ * Mark a node as already scheduled
+ */
+static inline void set_already_scheduled(block_sched_env_t *env, ir_node *n)
 {
-       reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));
-
-       main_env->arch_env = arch_env;
-       main_env->vtab     = vtab;
-       irg_walk_graph(irg, firm_clear_link, NULL, NULL);
+       int idx = get_irn_idx(n);
 
-       return main_env;
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].already_sched = 1;
 }
 
-static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
+static void add_to_sched(block_sched_env_t *env, ir_node *irn);
+
+/**
+ * Try to put a node in the ready set.
+ * @param env   The block scheduler environment.
+ * @param pred  The previously scheduled node.
+ * @param irn   The node to make ready.
+ * @return      1 if the node could be made ready, 0 otherwise.
+ */
+static inline int make_ready(block_sched_env_t *env, ir_node *pred, ir_node *irn)
 {
-       ir_node *irn;
-       reg_pressure_selector_env_t *env  = xmalloc(sizeof(env[0]));
+       int i, n;
 
-       obstack_init(&env->obst);
-       env->already_scheduled = pset_new_ptr(32);
-       env->root              = NULL;
-       env->main_env          = graph_env;
+       /* Blocks cannot be scheduled. */
+       if (is_Block(irn) || get_irn_n_edges(irn) == 0)
+               return 0;
 
        /*
-        * Collect usage statistics.
+        * Check if the given ir node is in a different block than the
+        * currently scheduled one. If so, don't make the node ready.
         */
-       sched_foreach(bl, irn) {
-               if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
-                       int i, n;
-
-                       for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
-                               ir_node *op = get_irn_n(irn, i);
-                               if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
-                                       usage_stats_t *us = get_or_set_usage_stats(env, irn);
-                                       if(is_live_end(bl, op))
-                                               us->uses_in_block = 99999;
-                                       else
-                                               us->uses_in_block++;
-                               }
-                       }
-               }
-       }
+       if (env->block != get_nodes_block(irn))
+               return 0;
 
-       return env;
-}
+       for (i = 0, n = get_irn_ins_or_deps(irn); i < n; ++i) {
+               ir_node *op = get_irn_in_or_dep(irn, i);
 
-static void reg_pressure_block_free(void *block_env)
-{
-       reg_pressure_selector_env_t *env = block_env;
-       usage_stats_t *us;
+		/* if irn is an End node, it has keep-alive edges and op might be a Block; skip it */
+               if (is_Block(op)) {
+                       assert(is_End(irn));
+                       continue;
+               }
 
-       for(us = env->root; us; us = us->next)
-               set_irn_link(us->irn, NULL);
+               /* If the operand is local to the scheduled block and not yet
+		 * scheduled, this node cannot be made ready, so exit. */
+               if (! is_already_scheduled(env, op) && get_nodes_block(op) == env->block)
+                       return 0;
+       }
 
-       obstack_free(&env->obst, NULL);
-       del_pset(env->already_scheduled);
-       free(env);
-}
+       if (! must_appear_in_schedule(env->selector, env, irn)) {
+               add_to_sched(env, irn);
+               DB((dbg, LEVEL_3, "\tmaking immediately available: %+F\n", irn));
+       } else {
+               ir_nodeset_insert(&env->cands, irn);
 
-static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
-{
-       int res = 0;
-       if(get_irn_mode(irn) == mode_T) {
-               const ir_edge_t *edge;
+               /* Notify selector about the ready node. */
+               if (env->selector->node_ready)
+                       env->selector->node_ready(env->selector_block_env, irn, pred);
 
-               foreach_out_edge(irn, edge)
-                       res += get_result_hops_sum(env, get_edge_src_irn(edge));
+               DB((dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
        }
 
-       else if(mode_is_data(get_irn_mode(irn)))
-               res = compute_max_hops(env, irn);
-
-
-       return res;
+    return 1;
 }
 
-static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
+/**
+ * Try to make all users of a node ready.
+ * In fact, a user can only be made ready if all of its operands
+ * have already been scheduled. This is checked by make_ready().
+ * @param env The block schedule environment.
+ * @param irn The node whose users (successors) are to be made ready.
+ */
+static void make_users_ready(block_sched_env_t *env, ir_node *irn)
 {
-       int i, n;
-       int sum = 0;
-
-       for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
-               ir_node *op = get_irn_n(irn, i);
-
-               if(must_appear_in_schedule(env->main_env->vtab, env, op))
-                       sum += compute_max_hops(env, op);
-       }
-
-       sum += get_result_hops_sum(env, irn);
+       const ir_edge_t *edge;
 
-       return sum;
-}
+       /* make all data users ready */
+       foreach_out_edge(irn, edge) {
+               ir_node *user = get_edge_src_irn(edge);
 
-static ir_node *reg_pressure_select(void *block_env, pset *ready_set)
-{
-       reg_pressure_selector_env_t *env = block_env;
-       ir_node *irn, *res     = NULL;
-       int curr_cost          = INT_MAX;
-
-       assert(pset_count(ready_set) > 0);
-
-       for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
-               /*
-                       Ignore branch instructions for the time being.
-                       They should only be scheduled if there is nothing else.
-               */
-               if(arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
-                       int costs = reg_pr_costs(env, irn);
-                       if(costs <= curr_cost) {
-                               res       = irn;
-                               curr_cost = costs;
-                       }
-               }
+               if (! is_Phi(user))
+                       make_ready(env, irn, user);
        }
 
-       /*
-               There was no result so we only saw a branch.
-               Take it and finish.
-       */
-
-       if(!res) {
-               res = pset_first(ready_set);
-               pset_break(ready_set);
+       /* and the dependent nodes as well */
+       foreach_out_edge_kind(irn, edge, EDGE_KIND_DEP) {
+               ir_node *user = get_edge_src_irn(edge);
 
-               assert(res && "There must be a node scheduled.");
+               if (! is_Phi(user))
+                       make_ready(env, irn, user);
        }
-
-       pset_insert_ptr(env->already_scheduled, res);
-       return res;
 }
 
-static const list_sched_selector_t reg_pressure_selector_struct = {
-       reg_pressure_graph_init,
-       reg_pressure_block_init,
-       reg_pressure_select,
-       NULL,
-       reg_pressure_block_free,
-       free
-};
-
-const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
-
-static void list_sched_block(ir_node *block, void *env_ptr);
-
-void list_sched(const arch_env_t *arch_env, ir_graph *irg)
+/**
+ * Returns the number of not yet scheduled users.
+ */
+static inline int get_irn_not_sched_user(block_sched_env_t *env, ir_node *n)
 {
-       sched_env_t env;
-
-       memset(&env, 0, sizeof(env));
-       env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
-       env.arch_env = arch_env;
-       env.irg      = irg;
-
-       if(env.selector->init_graph)
-               env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
-
-       /* Assure, that the out edges are computed */
-       edges_assure(irg);
-
-       /* Schedule each single block. */
-       irg_block_walk_graph(irg, list_sched_block, NULL, &env);
+       int idx = get_irn_idx(n);
 
-       if(env.selector->finish_graph)
-               env.selector->finish_graph(env.selector_env);
+       assert(idx < ARR_LEN(env->sched_info));
+       return env->sched_info[idx].num_not_sched_user;
 }
 
-
 /**
- * Environment for a block scheduler.
+ * Sets the number of not yet scheduled users.
  */
-typedef struct _block_sched_env_t {
-       int curr_time;
-       pset *ready_set;
-       pset *already_scheduled;
-       ir_node *block;
-       firm_dbg_module_t *dbg;
-       const list_sched_selector_t *selector;
-       void *selector_block_env;
-} block_sched_env_t;
-
-/**
- * Try to put a node in the ready set.
- * @param env The block scheduler environment.
- * @param irn The node to make ready.
- * @return 1, if the node could be made ready, 0 else.
- */
-static INLINE int make_ready(block_sched_env_t *env, ir_node *irn)
+static inline void set_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
 {
-    int i, n;
-
-    /* Blocks cannot be scheduled. */
-    if(is_Block(irn))
-        return 0;
-
-    /*
-     * Check, if the given ir node is in a different block as the
-     * currently scheduled one. If that is so, don't make the node ready.
-     */
-    if(env->block != get_nodes_block(irn))
-        return 0;
-
-    for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
-        ir_node *op = get_irn_n(irn, i);
-
-        /* If the operand is local to the scheduled block and not yet
-         * scheduled, this nodes cannot be made ready, so exit. */
-        if(!pset_find_ptr(env->already_scheduled, op) && get_nodes_block(op) == env->block)
-            return 0;
-    }
+       int idx = get_irn_idx(n);
 
-    DBG((env->dbg, LEVEL_2, "\tmaking ready: %+F\n", irn));
-    pset_insert_ptr(env->ready_set, irn);
-
-    return 1;
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].num_not_sched_user = num;
 }
 
 /**
- * Check, if a node is ready in a block schedule.
- * @param env The block schedule environment.
- * @param irn The node to check for.
- * @return 1 if the node was ready, 0 if not.
+ * Adds @p num to the number of not yet scheduled users and returns the result.
  */
-#define is_ready(env,irn) \
-  (pset_find_ptr((env)->ready_set, irn) != NULL)
+static inline int add_irn_not_sched_user(block_sched_env_t *env, ir_node *n, int num)
+{
+       int idx = get_irn_idx(n);
 
-/**
- * Check, if a node has already been schedules.
- * @param env The block schedule environment.
- * @param irn The node to check for.
- * @return 1 if the node was already scheduled, 0 if not.
- */
-#define is_scheduled(env,irn) \
-  (pset_find_ptr((env)->already_scheduled, irn) != NULL)
+       assert(idx < ARR_LEN(env->sched_info));
+       env->sched_info[idx].num_not_sched_user += num;
+       return env->sched_info[idx].num_not_sched_user;
+}
 
 /**
- * Try, to make all users of a node ready.
- * In fact, a usage node can only be made ready, if all its operands
- * have already been scheduled yet. This is checked my make_ready().
- * @param env The block schedule environment.
- * @param irn The node, which usages (successors) are to be made ready.
+ * Returns the number of users of a node that have a datab mode.
  */
-static INLINE void make_users_ready(block_sched_env_t *env, ir_node *irn)
+static int get_num_successors(ir_node *irn)
 {
+       int             sum = 0;
        const ir_edge_t *edge;
 
-       foreach_out_edge(irn, edge) {
-               ir_node *user = edge->src;
-               if(!is_Phi(user))
-                       make_ready(env, user);
+       if (get_irn_mode(irn) == mode_T) {
+               /* for mode_T nodes: count the users of all Projs */
+               foreach_out_edge(irn, edge) {
+                       ir_node *proj = get_edge_src_irn(edge);
+                       ir_mode *mode = get_irn_mode(proj);
+
+                       if (mode == mode_T) {
+                               sum += get_num_successors(proj);
+                       } else if (mode_is_datab(mode)) {
+                               sum += get_irn_n_edges(proj);
+                       }
+               }
        }
+       else {
+               /* do not count keep-alive edges */
+               foreach_out_edge(irn, edge) {
+                       if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
+                               sum++;
+               }
+       }
+
+       return sum;
 }
 
 /**
- * Compare to nodes using pointer equality.
- * @param p1 Node one.
- * @param p2 Node two.
- * @return 0 if they are identical.
+ * Adds irn to @p live, records for all of its inputs that this user has
+ * been scheduled and counts all of its not yet scheduled users.
  */
-static int node_cmp_func(const void *p1, const void *p2)
+static void update_sched_liveness(block_sched_env_t *env, ir_node *irn)
 {
-    return p1 != p2;
+       int i;
+
+       /* ignore Projs */
+       if (is_Proj(irn))
+               return;
+
+       for (i = get_irn_ins_or_deps(irn) - 1; i >= 0; --i) {
+               ir_node *in = get_irn_in_or_dep(irn, i);
+
+               /* if in is a proj: update predecessor */
+               in = skip_Proj(in);
+
+               /* if in is still in the live set: reduce number of users by one */
+               if (ir_nodeset_contains(&env->live, in)) {
+                       if (add_irn_not_sched_user(env, in, -1) <= 0)
+                               ir_nodeset_remove(&env->live, in);
+               }
+       }
+
+       /*
+               get_num_successors returns the number of all users. This includes
+		users in different blocks as well. As each block is scheduled separately,
+		the liveness info of those users will not be updated, so these
+		users keep up the register pressure, as desired.
+       */
+       i = get_num_successors(irn);
+       if (i > 0) {
+               set_irn_not_sched_user(env, irn, i);
+               ir_nodeset_insert(&env->live, irn);
+       }
 }
 
 /**
@@ -497,79 +389,28 @@ static int node_cmp_func(const void *p1, const void *p2)
  * @param irn The node to add to the schedule.
  * @return    The given node.
  */
-static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
+static void add_to_sched(block_sched_env_t *env, ir_node *irn)
 {
     /* If the node consumes/produces data, it is appended to the schedule
      * list, otherwise, it is not put into the list */
-    if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
-        sched_info_t *info = get_irn_sched_info(irn);
-        INIT_LIST_HEAD(&info->list);
-        info->scheduled = 1;
-        sched_add_before(env->block, irn);
-
-        DBG((env->dbg, LEVEL_2, "\tadding %+F\n", irn));
-    }
+    if (must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
+               update_sched_liveness(env, irn);
+               sched_add_before(env->block, irn);
 
-    /* Insert the node in the set of all already scheduled nodes. */
-    pset_insert_ptr(env->already_scheduled, irn);
+               DBG((dbg, LEVEL_2, "\tadding %+F\n", irn));
 
-    /* Remove the node from the ready set */
-    if(pset_find_ptr(env->ready_set, irn))
-        pset_remove_ptr(env->ready_set, irn);
-
-    return irn;
-}
-
-/**
- * Add the proj nodes of a tuple-mode irn to the schedule immediately
- * after the tuple-moded irn. By pinning the projs after the irn, no
- * other nodes can create a new lifetime between the tuple-moded irn and
- * one of its projs. This should render a realistic image of a
- * tuple-moded irn, which in fact models a node which defines multiple
- * values.
- *
- * @param irn The tuple-moded irn.
- * @param list The schedule list to append all the projs.
- * @param time The time step to which the irn and all its projs are
- * related to.
- * @param obst The obstack the scheduling data structures shall be
- * created upon.
- * @param ready_set The ready set of the list scheduler.
- * @param already_scheduled A set containing all nodes already
- * scheduled.
- */
-static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
-{
-       const ir_edge_t *edge;
-
-       assert(get_irn_mode(irn) == mode_T && "Mode of node must be tuple");
-
-       foreach_out_edge(irn, edge) {
-               ir_node *out = edge->src;
-
-               assert(is_Proj(out) && "successor of a modeT node must be a proj");
-
-               if(get_irn_mode(out) == mode_T)
-                       add_tuple_projs(env, out);
-               else {
-                       add_to_sched(env, out);
-                       make_users_ready(env, out);
-               }
-       }
-}
+               /* Remove the node from the ready set */
+               ir_nodeset_remove(&env->cands, irn);
+    }
 
-static ir_node *select_node(block_sched_env_t *be)
-{
-       ir_node *irn;
+       /* notify the selector about the finally selected node. */
+       if (env->selector->node_selected)
+               env->selector->node_selected(env->selector_block_env, irn);
 
-       for(irn = pset_first(be->ready_set); irn; irn = pset_next(be->ready_set)) {
-               if(be_is_Keep(irn)) {
-                       pset_break(be->ready_set);
-                       return irn;
-               }
-       }
+    /* Mark the node as already scheduled. */
+    set_already_scheduled(env, irn);
 
-       return be->selector->select(be->selector_block_env, be->ready_set);
+       make_users_ready(env, irn);
 }
 
 /**
@@ -587,9 +428,6 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 {
        sched_env_t *env                      = env_ptr;
        const list_sched_selector_t *selector = env->selector;
-       ir_node *start_node                   = get_irg_start(get_irn_irg(block));
-       int phi_seen                          = 0;
-       sched_info_t *info                    = get_irn_sched_info(block);
 
        block_sched_env_t be;
        const ir_edge_t *edge;
@@ -597,95 +435,231 @@ static void list_sched_block(ir_node *block, void *env_ptr)
        int j, m;
 
        /* Initialize the block's list head that will hold the schedule. */
-       INIT_LIST_HEAD(&info->list);
+       sched_init_block(block);
 
        /* Initialize the block scheduling environment */
-       be.dbg               = firm_dbg_register("firm.be.sched");
-       be.block             = block;
-       be.curr_time         = 0;
-       be.ready_set         = new_pset(node_cmp_func, get_irn_n_edges(block));
-       be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
-       be.selector          = selector;
-
-       if(selector->init_block)
-               be.selector_block_env = selector->init_block(env->selector_env, block);
+       be.sched_info = env->sched_info;
+       be.block      = block;
+       ir_nodeset_init_size(&be.cands, get_irn_n_edges(block));
+       ir_nodeset_init_size(&be.live, get_irn_n_edges(block));
+       be.selector   = selector;
+       be.sched_env  = env;
 
-       DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
+       DBG((dbg, LEVEL_1, "scheduling %+F\n", block));
+
+       if (selector->init_block)
+               be.selector_block_env = selector->init_block(env->selector_env, block);
 
 	/* Then one can add all nodes that are ready to the set. */
        foreach_out_edge(block, edge) {
-               ir_node *irn = get_edge_src_irn(edge);
+               ir_node   *irn = get_edge_src_irn(edge);
+               ir_opcode code = get_irn_opcode(irn);
+               int users;
 
-               /* Skip the end node because of keepalive edges. */
-               if(get_irn_opcode(irn) == iro_End)
+               if (code == iro_End) {
+                       /* Skip the end node because of keep-alive edges. */
+                       continue;
+               } else if (code == iro_Block) {
+                       /* A Block-Block edge. This should be the MacroBlock
+                        * edge, ignore it. */
+                       assert(get_Block_MacroBlock(irn) == block && "Block-Block edge found");
                        continue;
-
-               /* Phi functions are scheduled immediately, since they only transfer
-                * data flow from the predecessors to this block. */
-               if(is_Phi(irn)) {
-                       add_to_sched(&be, irn);
-                       make_users_ready(&be, irn);
-                       phi_seen = 1;
                }
 
-               /* The start block will be scheduled as the first node */
-               else if(irn == start_node) {
-                       add_to_sched(&be, irn);
-                       add_tuple_projs(&be, irn);
+               users = get_irn_n_edges(irn);
+               if (users == 0)
+                       continue;
+		else if (users == 1) { /* ignore nodes that are only held by the anchor */
+                       const ir_edge_t *edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
+                       ir_node *user = get_edge_src_irn(edge);
+                       if (is_Anchor(user))
+                               continue;
                }
 
-
-               /* Other nodes must have all operands in other blocks to be made
-                * ready */
-               else {
+               if (is_Phi(irn)) {
+                       /*
+				Phi functions are scheduled immediately, since they only
+                               transfer data flow from the predecessors to this block.
+                       */
+                       add_to_sched(&be, irn);
+               } else if (be_is_Start(irn)) {
+			/* The Start node will be scheduled as the first node */
+                       add_to_sched(&be, irn);
+               } else {
+                       /* Other nodes must have all operands in other blocks to be made
+                        * ready */
                        int ready = 1;
 
                        /* Check, if the operands of a node are not local to this block */
-                       for(j = 0, m = get_irn_arity(irn); j < m; ++j) {
-                               ir_node *operand = get_irn_n(irn, j);
+                       for (j = 0, m = get_irn_ins_or_deps(irn); j < m; ++j) {
+                               ir_node *operand = get_irn_in_or_dep(irn, j);
 
-                               if(get_nodes_block(operand) == block) {
+                               if (get_nodes_block(operand) == block) {
                                        ready = 0;
                                        break;
+                               } else {
+                                       /* live in values increase register pressure */
+                                       ir_nodeset_insert(&be.live, operand);
                                }
                        }
 
                        /* Make the node ready, if all operands live in a foreign block */
-                       if(ready) {
-                               DBG((be.dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
-                               make_ready(&be, irn);
+                       if (ready) {
+                               DBG((dbg, LEVEL_2, "\timmediately ready: %+F\n", irn));
+                               make_ready(&be, NULL, irn);
                        }
                }
        }
 
-       /* Increase the time, if some phi functions have been scheduled */
-       be.curr_time += phi_seen;
+       /* Iterate over all remaining nodes */
+       while (ir_nodeset_size(&be.cands) > 0) {
+               ir_nodeset_iterator_t iter;
 
-       while(pset_count(be.ready_set) > 0) {
-               /* select a node to be scheduled and check if it was ready */
-               irn = select_node(&be);
+		/* Keeps, CopyKeeps and Syncs must be scheduled immediately */
+               foreach_ir_nodeset(&be.cands, irn, iter) {
+                       if (be_is_Keep(irn) || be_is_CopyKeep(irn) || is_Sync(irn)) {
+                               break;
+                       }
+               }
+
+               if (! irn) {
+			/* no Keep, CopyKeep or Sync among the candidates, ask the selector */
+                       irn = be.selector->select(be.selector_block_env, &be.cands, &be.live);
+               }
 
-               DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
+               DB((dbg, LEVEL_2, "\tpicked node %+F\n", irn));
 
                /* Add the node to the schedule. */
                add_to_sched(&be, irn);
 
-               if(get_irn_mode(irn) == mode_T)
-                       add_tuple_projs(&be, irn);
-               else
-                       make_users_ready(&be, irn);
-
-               /* Increase the time step. */
-               be.curr_time += 1;
-
                /* remove the scheduled node from the ready list. */
-               if(pset_find_ptr(be.ready_set, irn))
-                       pset_remove_ptr(be.ready_set, irn);
+               ir_nodeset_remove(&be.cands, irn);
        }
 
-       if(selector->finish_block)
+       if (selector->finish_block)
                selector->finish_block(be.selector_block_env);
 
-       del_pset(be.ready_set);
-       del_pset(be.already_scheduled);
+       ir_nodeset_destroy(&be.cands);
+       ir_nodeset_destroy(&be.live);
+}
+
+/* List schedule a graph. */
+void list_sched(ir_graph *irg)
+{
+       int num_nodes;
+       sched_env_t env;
+       mris_env_t *mris = NULL;
+       list_sched_selector_t sel;
+
+       /* Select a scheduler based on backend options */
+       switch (list_sched_options.select) {
+               case BE_SCHED_SELECT_TRIVIAL:  sel = trivial_selector;      break;
+               case BE_SCHED_SELECT_RANDOM:   sel = random_selector;       break;
+               case BE_SCHED_SELECT_REGPRESS: sel = reg_pressure_selector; break;
+               case BE_SCHED_SELECT_MUCHNIK:  sel = muchnik_selector;      break;
+               case BE_SCHED_SELECT_HEUR:     sel = heuristic_selector;    break;
+               case BE_SCHED_SELECT_NORMAL:   sel = normal_selector;       break;
+               default:
+               case BE_SCHED_SELECT_HMUCHNIK: sel = heuristic_selector;    break;
+       }
+
+#if 1
+       /* Matze: This is very slow, we should avoid it to improve backend speed,
+        * we just have to make sure that we have no dangling out-edges at this
+        * point...
+        */
+
+	/* Ensure that we have no dangling out-edges to deleted nodes */
+       edges_deactivate(irg);
+       edges_activate(irg);
+#endif
+
+       switch (list_sched_options.prep) {
+               case BE_SCHED_PREP_MRIS:
+                       mris = be_sched_mris_preprocess(irg);
+                       break;
+               case BE_SCHED_PREP_RSS:
+                       rss_schedule_preparation(irg);
+                       break;
+               default:
+                       break;
+       }
+
+       num_nodes = get_irg_last_idx(irg);
+
+       /* initialize environment for list scheduler */
+       memset(&env, 0, sizeof(env));
+       env.selector   = arch_env_get_list_sched_selector(be_get_irg_arch_env(irg), &sel);
+       env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
+
+       memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
+
+       if (env.selector->init_graph)
+               env.selector_env = env.selector->init_graph(env.selector, irg);
+
+       /* Schedule each single block. */
+       irg_block_walk_graph(irg, list_sched_block, NULL, &env);
+
+       if (env.selector->finish_graph)
+               env.selector->finish_graph(env.selector_env);
+
+       if (list_sched_options.prep == BE_SCHED_PREP_MRIS)
+               be_sched_mris_free(mris);
+
+       DEL_ARR_F(env.sched_info);
+}
+
+/* List schedule a block. */
+void list_sched_single_block(ir_graph *irg, ir_node *block)
+{
+       int num_nodes;
+       sched_env_t env;
+       list_sched_selector_t sel;
+
+       /* Select a scheduler based on backend options */
+       switch (list_sched_options.select) {
+               case BE_SCHED_SELECT_TRIVIAL:  sel = trivial_selector;      break;
+               case BE_SCHED_SELECT_RANDOM:   sel = random_selector;       break;
+               case BE_SCHED_SELECT_REGPRESS: sel = reg_pressure_selector; break;
+               case BE_SCHED_SELECT_MUCHNIK:  sel = muchnik_selector;      break;
+               case BE_SCHED_SELECT_HEUR:     sel = heuristic_selector;    break;
+               case BE_SCHED_SELECT_NORMAL:   sel = normal_selector;       break;
+               default:
+               case BE_SCHED_SELECT_HMUCHNIK: sel = trivial_selector;      break;
+       }
+
+	/* Ensure that the out edges are computed */
+       edges_deactivate(irg);
+       edges_activate(irg);
+
+       num_nodes = get_irg_last_idx(irg);
+
+       /* initialize environment for list scheduler */
+       memset(&env, 0, sizeof(env));
+       env.selector   = arch_env_get_list_sched_selector(be_get_irg_arch_env(irg), &sel);
+       env.sched_info = NEW_ARR_F(sched_irn_t, num_nodes);
+
+       memset(env.sched_info, 0, num_nodes * sizeof(env.sched_info[0]));
+
+       if (env.selector->init_graph)
+               env.selector_env = env.selector->init_graph(env.selector, irg);
+
+       /* Schedule block. */
+       list_sched_block(block, &env);
+
+       if (env.selector->finish_graph)
+               env.selector->finish_graph(env.selector_env);
+
+       DEL_ARR_F(env.sched_info);
+}
+
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_listsched);
+void be_init_listsched(void)
+{
+       lc_opt_entry_t *be_grp    = lc_opt_get_grp(firm_opt_get_root(), "be");
+       lc_opt_entry_t *sched_grp = lc_opt_get_grp(be_grp, "listsched");
+
+       lc_opt_add_table(sched_grp, list_sched_option_table);
+
+       FIRM_DBG_REGISTER(dbg, "firm.be.sched");
 }