fixed addressmode bug
[libfirm] / ir / be / belistsched.c
index 6424fd0..45e6c44 100644
@@ -4,6 +4,7 @@
  * @date 20.10.2004
  * @author Sebastian Hack
  */
+
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #endif
@@ -13,7 +14,8 @@
 #include <string.h>
 #include <limits.h>
 
-#include "fourcc.h"
+#include "benode_t.h"
+
 #include "obst.h"
 #include "list.h"
 #include "iterator.h"
@@ -39,8 +41,9 @@
  * Scheduling environment for the whole graph.
  */
 typedef struct _sched_env_t {
-    const ir_graph *irg;                        /**< The graph to schedule. */
-    const list_sched_selector_t *selector;               /**< The node selector. */
+    const list_sched_selector_t *selector;      /**< The node selector. */
+	const arch_env_t *arch_env;                 /**< The architecture environment. */
+       const ir_graph *irg;                        /**< The graph to schedule. */
     void *selector_env;                         /**< A pointer to give to the selector. */
 } sched_env_t;
 
@@ -72,31 +75,56 @@ static int cmp_usage(const void *a, const void *b)
 }
 #endif
 
-static ir_node *trivial_select(void *env, void *block_env,
-               const struct list_head *sched_head,
-               int curr_time, pset *ready_set)
+/**
+ * The trivial selector:
+ * Just make sure that branches are scheduled last; apart from that,
+ * select the first ready node.
+ */
+static ir_node *trivial_select(void *block_env, pset *ready_set)
 {
-       ir_node *res;
+       const arch_env_t *arch_env = block_env;
+       ir_node *irn = NULL;
 
-#if 0
-       int i, n = pset_count(ready_set);
-       ir_node *irn;
-       ir_node **ready = alloca(n * sizeof(ready[0]));
-
-       for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set))
-               ready[i++] = irn;
-#endif
+	/* make sure that branches are scheduled last */
+       for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
+               if(arch_irn_classify(arch_env, irn) != arch_irn_class_branch) {
+                       pset_break(ready_set);
+                       return irn;
+               }
+       }
 
-       res = pset_first(ready_set);
+       irn = pset_first(ready_set);
        pset_break(ready_set);
-       return res;
+
+       return irn;
+}
+
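+/**
+ * Graph initializer of the trivial selector.
+ * Nothing is set up per graph; the architecture environment itself is
+ * handed through as the selector environment.
+ */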
+static void *trivial_init_graph(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
+{
+       return (void *) arch_env;
+}
+
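+/**
+ * Block initializer of the trivial selector.
+ * Simply passes the graph environment (the arch_env) down to the block level.
+ */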
+static void *trivial_init_block(void *graph_env, ir_node *bl)
+{
+       return graph_env;
+}
+
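+/**
+ * Check, if a node must appear in a schedule.
+ * Asks the selector's to_appear_in_schedule() callback (if implemented),
+ * falls back to the generic check and always schedules Keep and
+ * RegParams nodes.
+ */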
+static INLINE int must_appear_in_schedule(const list_sched_selector_t *sel, void *block_env, const ir_node *irn)
+{
+       int res = 0;
+
+       if(sel->to_appear_in_schedule)
+               res = sel->to_appear_in_schedule(block_env, irn);
+
+       return res || to_appear_in_schedule(irn) || be_is_Keep(irn) || be_is_RegParams(irn);
 }
 
 static const list_sched_selector_t trivial_selector_struct = {
-       NULL,
-       NULL,
+       trivial_init_graph,
+       trivial_init_block,
        trivial_select,
        NULL,
+       NULL,
        NULL
 };
 
@@ -111,8 +139,14 @@ typedef struct _usage_stats_t {
                                                          scheduled. */
 } usage_stats_t;
 
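+/**
+ * Graph-wide environment of the register pressure selector.
+ */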
+typedef struct {
+       const list_sched_selector_t *vtab;
+       const arch_env_t *arch_env;
+} reg_pressure_main_env_t;
+
 typedef struct {
        struct obstack obst;
+       const reg_pressure_main_env_t *main_env;
        usage_stats_t *root;
        pset *already_scheduled;
 } reg_pressure_selector_env_t;
@@ -141,14 +175,23 @@ static INLINE usage_stats_t *get_usage_stats(ir_node *irn)
        return us;
 }
 
-static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visited_nr)
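+/**
+ * Walk over the operands of a node and compute the maximal number of
+ * steps needed to reach an already scheduled node of the current block
+ * (or to leave the block).
+ */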
+static int max_hops_walker(reg_pressure_selector_env_t *env, ir_node *irn, ir_node *curr_bl, int depth, unsigned visited_nr)
 {
-       int i, n;
-       int res = 0;
-
-       if(irn != tgt) {
-               res = INT_MAX;
+	ir_node *bl = get_nodes_block(irn);
+
+	/*
+	 * If the reached node is not in the desired block, its value is
+	 * available for free if its block dominates the current one,
+	 * otherwise it cannot be reached from here at all.
+	 */
+	if(bl != curr_bl)
+		return block_dominates(bl, curr_bl) ? 0 : INT_MAX;
 
+       /*
+        * If the node is in the current block but not
+        * yet scheduled, we keep on searching from that node.
+        */
+       if(!pset_find_ptr(env->already_scheduled, irn)) {
+               int i, n;
+               int res = 0;
                for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
                        ir_node *operand = get_irn_n(irn, i);
 
@@ -156,64 +199,74 @@ static int max_hops_walker(ir_node *irn, ir_node *tgt, int depth, unsigned visit
                                int tmp;
 
                                set_irn_visited(operand, visited_nr);
-                               tmp = max_hops_walker(operand, tgt, depth + 1, visited_nr);
+                               tmp = max_hops_walker(env, operand, bl, depth + 1, visited_nr);
                                res = MAX(tmp, res);
                        }
                }
+
+               return res;
        }
 
-       return res;
+       /*
+        * If the node is in the current block and scheduled, return
+        * the depth which indicates the number of steps to the
+        * region of scheduled nodes.
+        */
+       return depth;
 }
 
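+/**
+ * Compute, over all users of a node, the maximal number of steps from
+ * a user back to the already scheduled region of the current block.
+ */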
 static int compute_max_hops(reg_pressure_selector_env_t *env, ir_node *irn)
 {
        ir_node *bl   = get_nodes_block(irn);
        ir_graph *irg = get_irn_irg(bl);
-       int res       = INT_MAX;
+       int res       = 0;
 
        const ir_edge_t *edge;
 
        foreach_out_edge(irn, edge) {
-               ir_node *user = get_edge_src_irn(edge);
-
-               if(get_nodes_block(user) == bl && !pset_find_ptr(env->already_scheduled, user)) {
-                       unsigned visited_nr = get_irg_visited(irg) + 1;
-                       int max_hops;
+               ir_node *user       = get_edge_src_irn(edge);
+               unsigned visited_nr = get_irg_visited(irg) + 1;
+               int max_hops;
 
-                       set_irg_visited(irg, visited_nr);
-                       max_hops = max_hops_walker(user, irn, 0, visited_nr);
-                       res = MAX(res, max_hops);
-               }
+               set_irg_visited(irg, visited_nr);
+		max_hops = max_hops_walker(env, user, bl, 0, visited_nr);
+               res      = MAX(res, max_hops);
        }
 
        return res;
 }
 
-static void *reg_pressure_graph_init(const arch_isa_t *isa, ir_graph *irg)
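+/**
+ * Graph initializer of the register pressure selector.
+ * Allocates the main environment and clears the link fields of all nodes.
+ */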
+static void *reg_pressure_graph_init(const list_sched_selector_t *vtab, const arch_env_t *arch_env, ir_graph *irg)
 {
+       reg_pressure_main_env_t *main_env = xmalloc(sizeof(main_env[0]));
+
+       main_env->arch_env = arch_env;
+       main_env->vtab     = vtab;
        irg_walk_graph(irg, firm_clear_link, NULL, NULL);
-       return NULL;
+
+       return main_env;
 }
 
 static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
 {
        ir_node *irn;
-       reg_pressure_selector_env_t *env = xmalloc(sizeof(env[0]));
+       reg_pressure_selector_env_t *env  = xmalloc(sizeof(env[0]));
 
        obstack_init(&env->obst);
-       env->root = NULL;
        env->already_scheduled = pset_new_ptr(32);
+       env->root              = NULL;
+       env->main_env          = graph_env;
 
        /*
         * Collect usage statistics.
         */
        sched_foreach(bl, irn) {
-               if(to_appear_in_schedule(irn)) {
+               if(must_appear_in_schedule(env->main_env->vtab, env, irn)) {
                        int i, n;
 
                        for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
                                ir_node *op = get_irn_n(irn, i);
-                               if(to_appear_in_schedule(op)) {
+				if(must_appear_in_schedule(env->main_env->vtab, env, op)) {
                                        usage_stats_t *us = get_or_set_usage_stats(env, irn);
                                        if(is_live_end(bl, op))
                                                us->uses_in_block = 99999;
@@ -227,7 +280,7 @@ static void *reg_pressure_block_init(void *graph_env, ir_node *bl)
        return env;
 }
 
-static void reg_pressure_block_free(void *graph_env, void *block_env, ir_node *bl)
+static void reg_pressure_block_free(void *block_env)
 {
        reg_pressure_selector_env_t *env = block_env;
        usage_stats_t *us;
@@ -240,6 +293,23 @@ static void reg_pressure_block_free(void *graph_env, void *block_env, ir_node *b
        free(env);
 }
 
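+/**
+ * Sum up compute_max_hops() over all data results of a node,
+ * following the out edges (Projs) of mode_T results.
+ */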
+static int get_result_hops_sum(reg_pressure_selector_env_t *env, ir_node *irn)
+{
+       int res = 0;
+       if(get_irn_mode(irn) == mode_T) {
+               const ir_edge_t *edge;
+
+               foreach_out_edge(irn, edge)
+                       res += get_result_hops_sum(env, get_edge_src_irn(edge));
+       }
+
+       else if(mode_is_data(get_irn_mode(irn)))
+               res = compute_max_hops(env, irn);
+
+	return res;
+}
+
 static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
 {
        int i, n;
@@ -248,16 +318,16 @@ static INLINE int reg_pr_costs(reg_pressure_selector_env_t *env, ir_node *irn)
        for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
                ir_node *op = get_irn_n(irn, i);
 
-               if(to_appear_in_schedule(op))
+               if(must_appear_in_schedule(env->main_env->vtab, env, op))
                        sum += compute_max_hops(env, op);
        }
 
+       sum += get_result_hops_sum(env, irn);
+
        return sum;
 }
 
-ir_node *reg_pressure_select(void *graph_env, void *block_env,
-                                                        const struct list_head *sched_head,
-                                                        int curr_time, pset *ready_set)
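+/**
+ * Select the ready node with the smallest register pressure costs.
+ * Branches are deferred and only taken if nothing else is ready.
+ */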
+static ir_node *reg_pressure_select(void *block_env, pset *ready_set)
 {
        reg_pressure_selector_env_t *env = block_env;
        ir_node *irn, *res     = NULL;
@@ -266,13 +336,31 @@ ir_node *reg_pressure_select(void *graph_env, void *block_env,
        assert(pset_count(ready_set) > 0);
 
        for(irn = pset_first(ready_set); irn; irn = pset_next(ready_set)) {
-               int costs = reg_pr_costs(env, irn);
-               if(costs <= curr_cost) {
-                       res       = irn;
-                       curr_cost = costs;
+               /*
+                       Ignore branch instructions for the time being.
+                       They should only be scheduled if there is nothing else.
+               */
+               if(arch_irn_classify(env->main_env->arch_env, irn) != arch_irn_class_branch) {
+                       int costs = reg_pr_costs(env, irn);
+                       if(costs <= curr_cost) {
+                               res       = irn;
+                               curr_cost = costs;
+                       }
                }
        }
 
+	/*
+		If there was no result, the ready set contained only branches.
+		Take the first one and finish.
+	*/
+	if(!res) {
+               res = pset_first(ready_set);
+               pset_break(ready_set);
+
+               assert(res && "There must be a node scheduled.");
+       }
+
        pset_insert_ptr(env->already_scheduled, res);
        return res;
 }
@@ -281,23 +369,26 @@ static const list_sched_selector_t reg_pressure_selector_struct = {
        reg_pressure_graph_init,
        reg_pressure_block_init,
        reg_pressure_select,
+       NULL,
        reg_pressure_block_free,
-       NULL
+       free
 };
 
 const list_sched_selector_t *reg_pressure_selector = &reg_pressure_selector_struct;
 
 static void list_sched_block(ir_node *block, void *env_ptr);
 
-void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
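+/**
+ * Schedule a whole graph: obtain the selector from the ISA, set up the
+ * scheduling environment and list schedule each block.
+ */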
+void list_sched(const arch_env_t *arch_env, ir_graph *irg)
 {
        sched_env_t env;
-       const list_sched_selector_t *selector;
 
        memset(&env, 0, sizeof(env));
-       selector = env.selector = isa->impl->get_list_sched_selector(isa);
-       env.selector_env = selector->init_graph ? selector->init_graph(isa, irg) : NULL;
-       env.irg = irg;
+       env.selector = arch_env->isa->impl->get_list_sched_selector(arch_env->isa);
+       env.arch_env = arch_env;
+       env.irg      = irg;
+
+       if(env.selector->init_graph)
+               env.selector_env = env.selector->init_graph(env.selector, arch_env, irg);
 
        /* Assure, that the out edges are computed */
        edges_assure(irg);
@@ -305,8 +396,8 @@ void list_sched(const struct _arch_isa_t *isa, ir_graph *irg)
        /* Schedule each single block. */
        irg_block_walk_graph(irg, list_sched_block, NULL, &env);
 
-       if(selector->finish_graph)
-               selector->finish_graph(env.selector_env, irg);
+       if(env.selector->finish_graph)
+               env.selector->finish_graph(env.selector_env);
 }
 
 
@@ -318,7 +409,9 @@ typedef struct _block_sched_env_t {
        pset *ready_set;
        pset *already_scheduled;
        ir_node *block;
-       firm_dbg_module_t *dbg;
+       const list_sched_selector_t *selector;
+       void *selector_block_env;
+       DEBUG_ONLY(firm_dbg_module_t *dbg;)
 } block_sched_env_t;
 
 /**
@@ -406,15 +499,15 @@ static int node_cmp_func(const void *p1, const void *p2)
 
 /**
  * Append an instruction to a schedule.
- * @param env The block scheduleing environment.
+ * @param env The block scheduling environment.
  * @param irn The node to add to the schedule.
- * @return The given node.
+ * @return    The given node.
  */
 static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
 {
     /* If the node consumes/produces data, it is appended to the schedule
      * list, otherwise, it is not put into the list */
-    if(to_appear_in_schedule(irn)) {
+    if(must_appear_in_schedule(env->selector, env->selector_block_env, irn)) {
         sched_info_t *info = get_irn_sched_info(irn);
         INIT_LIST_HEAD(&info->list);
         info->scheduled = 1;
@@ -433,7 +526,6 @@ static ir_node *add_to_sched(block_sched_env_t *env, ir_node *irn)
     return irn;
 }
 
-
 /**
  * Add the proj nodes of a tuple-mode irn to the schedule immediately
  * after the tuple-moded irn. By pinning the projs after the irn, no
@@ -472,6 +564,20 @@ static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
        }
 }
 
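+/**
+ * Pick the next node to be scheduled: Keep nodes are always taken first;
+ * everything else is delegated to the selector.
+ */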
+static ir_node *select_node(block_sched_env_t *be)
+{
+       ir_node *irn;
+
+       for(irn = pset_first(be->ready_set); irn; irn = pset_next(be->ready_set)) {
+               if(be_is_Keep(irn)) {
+                       pset_break(be->ready_set);
+                       return irn;
+               }
+       }
+
+       return be->selector->select(be->selector_block_env, be->ready_set);
+}
+
 /**
  * Perform list scheduling on a block.
  *
@@ -485,30 +591,30 @@ static void add_tuple_projs(block_sched_env_t *env, ir_node *irn)
  */
 static void list_sched_block(ir_node *block, void *env_ptr)
 {
-       void *block_env = NULL;
-       sched_env_t *env = env_ptr;
-       block_sched_env_t be;
+       sched_env_t *env                      = env_ptr;
        const list_sched_selector_t *selector = env->selector;
+       ir_node *start_node                   = get_irg_start(get_irn_irg(block));
+       int phi_seen                          = 0;
+       sched_info_t *info                    = get_irn_sched_info(block);
+
+       block_sched_env_t be;
        const ir_edge_t *edge;
        ir_node *irn;
        int j, m;
-       int phi_seen = 0;
-       sched_info_t *info = get_irn_sched_info(block);
 
        /* Initialize the block's list head that will hold the schedule. */
        INIT_LIST_HEAD(&info->list);
 
        /* Initialize the block scheduling environment */
-       be.dbg = firm_dbg_register("firm.be.sched");
-       be.block = block;
-       be.curr_time = 0;
-       be.ready_set = new_pset(node_cmp_func, get_irn_n_edges(block));
+       be.block             = block;
+       be.curr_time         = 0;
+       be.ready_set         = new_pset(node_cmp_func, get_irn_n_edges(block));
        be.already_scheduled = new_pset(node_cmp_func, get_irn_n_edges(block));
-
-       firm_dbg_set_mask(be.dbg, 0);
+       be.selector          = selector;
+       FIRM_DBG_REGISTER(be.dbg, "firm.be.sched");
 
        if(selector->init_block)
-               block_env = selector->init_block(env->selector_env, block);
+		be.selector_block_env = selector->init_block(env->selector_env, block);
+	else
+		be.selector_block_env = NULL;
 
        DBG((be.dbg, LEVEL_1, "scheduling %+F\n", block));
 
@@ -528,6 +634,13 @@ static void list_sched_block(ir_node *block, void *env_ptr)
                        phi_seen = 1;
                }
 
+		/* The Start node is always scheduled first */
+               else if(irn == start_node) {
+                       add_to_sched(&be, irn);
+                       add_tuple_projs(&be, irn);
+               }
+
                /* Other nodes must have all operands in other blocks to be made
                 * ready */
                else {
@@ -556,7 +669,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
 
        while(pset_count(be.ready_set) > 0) {
                /* select a node to be scheduled and check if it was ready */
-               irn = selector->select(env->selector_env, block_env, &info->list, be.curr_time, be.ready_set);
+               irn = select_node(&be);
 
                DBG((be.dbg, LEVEL_3, "\tpicked node %+F\n", irn));
 
@@ -577,7 +690,7 @@ static void list_sched_block(ir_node *block, void *env_ptr)
        }
 
        if(selector->finish_block)
-               selector->finish_block(env->selector_env, block_env, block);
+               selector->finish_block(be.selector_block_env);
 
        del_pset(be.ready_set);
        del_pset(be.already_scheduled);