Added support for SymConst(ofs_ent)
[libfirm] / ir / be / besched.c
index a588458..d498843 100644 (file)
@@ -1,3 +1,5 @@
+/* $Id$ */
+
 #ifdef HAVE_CONFIG_H
 # include "config.h"
 #endif
@@ -12,6 +14,8 @@
 #include "irnode_t.h"
 #include "irgraph_t.h"
 #include "iredges_t.h"
+#include "ircons.h"
+#include "irextbb.h"
 #include "debug.h"
 
 #include "bearch.h"
@@ -60,7 +64,7 @@ void sched_renumber(const ir_node *block)
 {
        ir_node *irn;
        sched_info_t *inf;
-       sched_timestep_t step = 0;
+       sched_timestep_t step = SCHED_INITIAL_GRANULARITY;
 
        sched_foreach(block, irn) {
                inf = get_irn_sched_info(irn);
@@ -180,7 +184,7 @@ int sched_verify_irg(ir_graph *irg)
 
 int sched_skip_cf_predicator(const ir_node *irn, void *data) {
        arch_env_t *ae = data;
-       return arch_irn_classify(ae, irn) == arch_irn_class_branch;
+       return arch_irn_class_is(ae, irn, branch);
 }
 
 int sched_skip_phi_predicator(const ir_node *irn, void *data) {
@@ -208,53 +212,127 @@ typedef struct {
        unsigned n_blks;  /**< number of blocks in the list */
 } anchor;
 
-/**
- * Ext-Block walker: create a block schedule
- */
-static void create_block_list(ir_extblk *blk, void *env) {
-       anchor *list = env;
-       int i, n;
+static void add_block(anchor *list, ir_node *block) {
+       if(list->start == NULL) {
+               list->start = block;
+               list->end = block;
+       } else {
+               set_irn_link(list->end, block);
+               list->end = block;
+       }
 
-       for (i = 0, n = get_extbb_n_blocks(blk); i < n; ++i) {
-               ir_node *block = get_extbb_block(blk, i);
+       list->n_blks++;
+}
 
-               set_irn_link(block, NULL);
-               if (list->start)
-                       set_irn_link(list->end, block);
-               else
-                       list->start = block;
+static void create_block_list(ir_node *leader_block, anchor *list) {
+       int i;
+       ir_node *block = NULL;
+       const ir_edge_t *edge;
 
-               list->end = block;
-               ++list->n_blks;
+       ir_extblk *extbb = get_Block_extbb(leader_block);
+       if(extbb_visited(extbb))
+               return;
+       mark_extbb_visited(extbb);
+
+       for(i = 0; i < get_extbb_n_blocks(extbb); ++i) {
+               block = get_extbb_block(extbb, i);
+               add_block(list, block);
+       }
+
+       assert(block != NULL);
+
+       // recurse into the extended basic blocks reachable from the exit block
+       foreach_block_succ(block, edge) {
+               ir_node *succ = get_edge_src_irn(edge);
+
+               create_block_list(succ, list);
+       }
+
+       for(i = 0; i < get_extbb_n_blocks(extbb) - 1; ++i) {
+               block = get_extbb_block(extbb, i);
+               foreach_block_succ(block, edge) {
+                       ir_node *succ = get_edge_src_irn(edge);
+
+                       create_block_list(succ, list);
+               }
        }
 }
 
+void compute_extbb_execfreqs(ir_graph *irg, exec_freq_t *execfreqs);
+
 /*
  * Calculates a block schedule. The schedule is stored as a linked
  * list starting at the start_block of the irg.
  */
-ir_node **sched_create_block_schedule(ir_graph *irg)
+ir_node **sched_create_block_schedule(ir_graph *irg, exec_freq_t *execfreqs)
 {
        anchor list;
        ir_node **blk_list, *b, *n;
        unsigned i;
 
        /* schedule extended basic blocks */
-       compute_extbb(irg);
+       compute_extbb_execfreqs(irg, execfreqs);
+       // plain compute_extbb(irg) superseded by the execfreq-aware variant above
 
        list.start  = NULL;
        list.end    = NULL;
        list.n_blks = 0;
-       irg_extblock_walk_graph(irg, NULL, create_block_list, &list);
+       inc_irg_block_visited(irg);
+       create_block_list(get_irg_start_block(irg), &list);
 
        /** create an array, so we can go forward and backward */
        blk_list = NEW_ARR_D(ir_node *, irg->obst,list.n_blks);
 
        for (i = 0, b = list.start; b; b = n, ++i) {
                n = get_irn_link(b);
-               set_irn_link(b, INT_TO_PTR(i));
-
                blk_list[i] = b;
        }
+
        return blk_list;
 }
+
+typedef struct remove_dead_nodes_env_t_ {
+       ir_graph *irg;
+       bitset_t *reachable;
+} remove_dead_nodes_env_t;
+
+static void mark_dead_nodes_walker(ir_node *node, void *data)
+{
+       remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
+       bitset_set(env->reachable, get_irn_idx(node));
+}
+
+static void remove_dead_nodes_walker(ir_node *block, void *data)
+{
+       remove_dead_nodes_env_t *env = (remove_dead_nodes_env_t*) data;
+       ir_node *node, *next;
+
+       for(node = sched_first(block); !sched_is_end(node); node = next) {
+               int i, arity;
+
+               // fetch the successor first: sched_remove() below invalidates node's links
+               next = sched_next(node);
+
+               if(bitset_is_set(env->reachable, get_irn_idx(node)))
+                       continue;
+
+               arity = get_irn_arity(node);
+               for(i = 0; i < arity; ++i)
+                       set_irn_n(node, i, new_r_Bad(env->irg));
+
+               sched_remove(node);
+       }
+}
+
+void be_remove_dead_nodes_from_schedule(ir_graph *irg)
+{
+       remove_dead_nodes_env_t env;
+       env.irg = irg;
+       env.reachable = bitset_alloca(get_irg_last_idx(irg));
+
+       // mark all reachable nodes
+       irg_walk_graph(irg, mark_dead_nodes_walker, NULL, &env);
+
+       // walk schedule and remove non-marked nodes
+       irg_block_walk_graph(irg, remove_dead_nodes_walker, NULL, &env);
+}