end block can also have 0 predecessors
[libfirm] / ir / be / beblocksched.c
index c409677..69422b5 100644
  * @author      Matthias Braun, Christoph Mallon
  * @date        27.09.2006
  * @version     $Id$
+ *
+ * The greedy (and ILP) algorithms here work on the assumption that we want to
+ * turn as many jumps into fallthroughs as possible (more precisely, as many
+ * executed jumps as possible, so execution frequencies have to be taken into
+ * account). To this end they collect the execution frequencies of all branches
+ * (which is easy once all critical edges are split), then remove critical
+ * edges where possible, since they are neither needed nor wanted anymore. They
+ * then try to turn as many edges as possible into fallthroughs by setting next
+ * and prev pointers on the blocks. The greedy algorithm sorts the edges by
+ * execution frequency and tries to turn them into fallthroughs in that order.
  */
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #include "bemodule.h"
 #include "be.h"
 
+#include <libcore/lc_opts.h>
+#include <libcore/lc_opts_enum.h>
+#include <libcore/lc_timing.h>
+
 #ifdef WITH_ILP
 #include <lpp/lpp.h>
 #include <lpp/lpp_net.h>
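
Not part of the patch: a rough sketch of the greedy strategy the new header comment describes, using simplified, made-up types (sketch_block_t, sketch_edge_t) instead of the real ir_node/edge_t machinery, and ignoring critical-edge splitting and the ILP variant. It only illustrates sorting edges by execution frequency and chaining blocks through next/prev pointers.

#include <stdlib.h>

/* Hypothetical illustration only; not the code in this file. */
typedef struct sketch_block_t sketch_block_t;
struct sketch_block_t {
	sketch_block_t *next; /* fallthrough successor, if already chosen */
	sketch_block_t *prev; /* fallthrough predecessor, if already chosen */
};

typedef struct sketch_edge_t {
	sketch_block_t *from;
	sketch_block_t *to;
	double          execfreq;
} sketch_edge_t;

/* sort descending by execution frequency */
static int cmp_execfreq(const void *a, const void *b)
{
	const sketch_edge_t *ea = a;
	const sketch_edge_t *eb = b;
	if (ea->execfreq < eb->execfreq) return 1;
	if (ea->execfreq > eb->execfreq) return -1;
	return 0;
}

static void greedy_fallthroughs(sketch_edge_t *edges, size_t n_edges)
{
	size_t i;

	/* the most frequently executed edges get the first chance */
	qsort(edges, n_edges, sizeof(edges[0]), cmp_execfreq);
	for (i = 0; i < n_edges; ++i) {
		sketch_edge_t *e = &edges[i];
		/* an edge becomes a fallthrough iff its source block has no
		 * fallthrough successor yet and its target block has no
		 * fallthrough predecessor yet */
		if (e->from->next == NULL && e->to->prev == NULL) {
			e->from->next = e->to;
			e->to->prev   = e->from;
		}
	}
}
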
@@ -62,13 +76,6 @@ typedef enum _blocksched_algos_t {
 
 static int algo = BLOCKSCHED_GREEDY;
 
-
-#ifdef WITH_LIBCORE
-
-#include <libcore/lc_opts.h>
-#include <libcore/lc_opts_enum.h>
-#include <libcore/lc_timing.h>
-
 static const lc_opt_enum_int_items_t blockschedalgo_items[] = {
        { "naiv",       BLOCKSCHED_NAIV },
        { "extbb",      BLOCKSCHED_EXTBB },
@@ -87,7 +94,6 @@ static const lc_opt_table_entry_t be_blocksched_options[] = {
        LC_OPT_ENT_ENUM_INT ("algo", "the block scheduling algorithm", &algo_var),
        LC_OPT_LAST
 };
-#endif /* WITH_LIBCORE */
 
 /*
  *   ____                   _
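
For context, a hypothetical sketch of how the algo value chosen through this option table might be consumed. Only create_extbb_block_schedule() is visible in this diff (in the last hunk); the other callees are made-up placeholder names, not functions confirmed by this file.

/* Hypothetical dispatch on the block scheduling algorithm option. */
static ir_node **sketch_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs)
{
	switch (algo) {
	case BLOCKSCHED_EXTBB:
		return create_extbb_block_schedule(irg, execfreqs);
	case BLOCKSCHED_NAIV:
		return create_naiv_block_schedule(irg);              /* placeholder name */
	case BLOCKSCHED_GREEDY:
	default:
		return create_block_schedule_greedy(irg, execfreqs); /* placeholder name */
	}
}
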
@@ -138,12 +144,15 @@ static void collect_egde_frequency(ir_node *block, void *data)
        entry->prev  = NULL;
        set_irn_link(block, entry);
 
-       if (block == get_irg_start_block(env->irg))
-               return;
-
        arity = get_Block_n_cfgpreds(block);
 
-       if (arity == 1) {
+       if (arity == 0) {
+               assert(block == get_irg_start_block(env->irg)
+                               || block == get_irg_end_block(env->irg));
+               /* must be the start block (or end-block for endless loops), nothing to
+                * do here */
+               return;
+       } else if (arity == 1) {
                edge.block            = block;
                edge.pos              = 0;
                edge.execfreq         = get_block_execfreq(env->execfreqs, block);
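
The new arity == 0 case reflects the commit message: not only the start block but also the end block can have zero CFG predecessors, for instance when the graph is an endless loop and no Return node ever reaches the end block. A hypothetical helper (not part of the patch) stating the invariant the assert relies on, using only predicates already used in this file:

/* Hypothetical: a block with no CFG predecessors must be either the start
 * block or the end block; anything else would indicate broken graph
 * construction. */
static int zero_arity_block_is_expected(ir_graph *irg, ir_node *block)
{
	return get_Block_n_cfgpreds(block) != 0
	    || block == get_irg_start_block(irg)
	    || block == get_irg_end_block(irg);
}
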
@@ -194,18 +203,19 @@ static void coalesce_blocks(blocksched_env_t *env)
        for (i = 0; i < edge_count; ++i) {
                const edge_t *edge  = &env->edges[i];
                ir_node      *block = edge->block;
+               int           pos   = edge->pos;
                ir_node      *pred_block;
                blocksched_entry_t *entry, *pred_entry;
 
-               /* the block might have been removed already... */
-               if (is_Bad(get_Block_cfgpred(block, 0)))
-                       continue;
-
                /* only check edge with highest frequency */
                if (! edge->highest_execfreq)
                        continue;
 
-               pred_block = get_Block_cfgpred_block(block, edge->pos);
+               /* the block might have been removed already... */
+               if (is_Bad(get_Block_cfgpred(block, 0)))
+                       continue;
+
+               pred_block = get_Block_cfgpred_block(block, pos);
                entry      = get_irn_link(block);
                pred_entry = get_irn_link(pred_block);
 
@@ -227,6 +237,7 @@ static void coalesce_blocks(blocksched_env_t *env)
        for (i = 0; i < edge_count; ++i) {
                const edge_t *edge  = &env->edges[i];
                ir_node      *block = edge->block;
+               int           pos   = edge->pos;
                ir_node      *pred_block;
                blocksched_entry_t *entry, *pred_entry;
 
@@ -234,7 +245,11 @@ static void coalesce_blocks(blocksched_env_t *env)
                if (is_Bad(get_Block_cfgpred(block, 0)))
                        continue;
 
-               pred_block = get_Block_cfgpred_block(block, edge->pos);
+               /* we can't turn backedges into fallthroughs */
+               if (is_backedge(block, pos))
+                       continue;
+
+               pred_block = get_Block_cfgpred_block(block, pos);
                entry      = get_irn_link(block);
                pred_entry = get_irn_link(pred_block);
 
@@ -728,12 +743,11 @@ static ir_node **create_extbb_block_schedule(ir_graph *irg, ir_exec_freq *execfr
  */
 void be_init_blocksched(void)
 {
-#ifdef WITH_LIBCORE
        lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
        lc_opt_entry_t *blocksched_grp = lc_opt_get_grp(be_grp, "blocksched");
 
        lc_opt_add_table(blocksched_grp, be_blocksched_options);
-#endif
+
        FIRM_DBG_REGISTER(dbg, "firm.be.blocksched");
 }
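
Now that option registration is unconditional (the WITH_LIBCORE guard is gone), the same pattern can serve as a template for other backend modules. A hypothetical module "foo" would register its options the same way; be_foo_options, the "foo" group name and the dbg module are made up, while the lc_opt and FIRM_DBG_REGISTER calls are the ones used above.

/* Hypothetical init function of another backend module, mirroring
 * be_init_blocksched() above; be_foo_options would be an option table
 * analogous to be_blocksched_options, dbg the module's debug handle. */
void be_init_foo(void)
{
	lc_opt_entry_t *be_grp  = lc_opt_get_grp(firm_opt_get_root(), "be");
	lc_opt_entry_t *foo_grp = lc_opt_get_grp(be_grp, "foo");

	lc_opt_add_table(foo_grp, be_foo_options);

	FIRM_DBG_REGISTER(dbg, "firm.be.foo");
}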