remove sel_based_null_check flag
[libfirm] / ir / opt / jumpthreading.c
index 7561ded..3a18401 100644
@@ -22,7 +22,6 @@
  * @brief   Path-Sensitive Jump Threading
  * @date    10. Sep. 2006
  * @author  Christoph Mallon, Matthias Braun
- * @version $Id$
  */
 #include "config.h"
 
 #include "iropt_dbg.h"
 #include "irpass.h"
 #include "vrp.h"
+#include "opt_manage.h"
 
 #undef AVOID_PHIB
 
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
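The semicolon moves inside the macro because DEBUG_ONLY() expands to nothing in non-debug builds. Roughly how the macro is defined in debug.h (a sketch; the exact definition may differ):

#ifdef DEBUG_libfirm
#define DEBUG_ONLY(code) code   /* debug build: keep the declaration */
#else
#define DEBUG_ONLY(code)        /* release build: drop it entirely */
#endif

With the ';' outside the parentheses, a release build is left with a stray ';' at file scope, which pedantic compilers reject as an empty declaration; with it inside, the whole statement vanishes cleanly.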
 
 /**
  * Add the new predecessor x to node node, which is either a Block or a Phi
@@ -60,7 +60,7 @@ static void add_pred(ir_node* node, ir_node* x)
        int n;
        int i;
 
-       assert(is_Block(node) || is_Phi(node));
+       assert(is_Block(node));
 
        n = get_irn_arity(node);
        NEW_ARR_A(ir_node*, ins, n + 1);
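The hunk ends here, but for context the remainder of add_pred copies the old predecessors into the enlarged array and installs it. A sketch of the elided body, assuming the usual libfirm accessors:

	for (i = 0; i < n; i++)
		ins[i] = get_irn_n(node, i);   /* copy existing predecessors */
	ins[n] = x;                        /* append the new one */
	set_irn_in(node, n + 1, ins);      /* install the enlarged in-array */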
@@ -194,6 +194,16 @@ static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
        }
 }
 
+/**
+ * jumpthreading produces critical edges, e.g. B-C:
+ *     A         A
+ *  \ /       \  |
+ *   B    =>   B |
+ *  / \       / \|
+ *     C         C
+ *
+ * Splitting this critical edge may enable further jump threading.
+ */
 static void split_critical_edge(ir_node *block, int pos)
 {
        ir_graph *irg = get_irn_irg(block);
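The rest of the function is elided; splitting the edge at pos amounts to inserting a fresh block that contains nothing but a Jmp. A sketch using the public constructors (the committed body may differ in detail):

	ir_node *in[1]  = { get_Block_cfgpred(block, pos) };
	ir_node *mblock = new_r_Block(irg, 1, in);  /* new block on the B-C edge */
	ir_node *jmp    = new_r_Jmp(mblock);        /* its only content: a jump */
	set_Block_cfgpred(block, pos, jmp);         /* reroute the edge through it */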
@@ -284,7 +294,7 @@ static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                }
                /* ignore control flow */
                mode = get_irn_mode(node);
-               if (mode == mode_X || is_Cond(node))
+               if (mode == mode_X || is_Cond(node) || is_Switch(node))
                        continue;
 #ifdef AVOID_PHIB
                /* we may not copy mode_b nodes, because this could produce Phi with
@@ -337,7 +347,7 @@ static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                ir_mode *mode;
 
                mode = get_irn_mode(node);
-               if (mode == mode_X || is_Cond(node))
+               if (mode == mode_X || is_Cond(node) || is_Switch(node))
                        continue;
 #ifdef AVOID_PHIB
                if (mode == mode_b)
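Both copy loops now skip the same three kinds of nodes, since the former switch Cond has become a dedicated Switch node. A hypothetical helper making the shared filter explicit (is_control_flow_node is not part of the patch):

#include <stdbool.h>

/* control flow must not be duplicated into the copied block: neither
 * mode_X results nor the Cond/Switch nodes producing them */
static bool is_control_flow_node(const ir_node *node)
{
	return get_irn_mode(node) == mode_X || is_Cond(node) || is_Switch(node);
}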
@@ -634,6 +644,7 @@ static void thread_jumps(ir_node* block, void* data)
        ir_node *badX;
        int      cnst_pos;
 
+       /* we do not deal with Phis, so restrict this to exactly one cfgpred */
        if (get_Block_n_cfgpreds(block) != 1)
                return;
 
@@ -643,15 +654,12 @@ static void thread_jumps(ir_node* block, void* data)
        assert(get_irn_mode(projx) == mode_X);
 
        cond = get_Proj_pred(projx);
-       if (!is_Cond(cond))
-               return;
-
-       selector = get_Cond_selector(cond);
        /* TODO handle switch Conds */
-       if (get_irn_mode(selector) != mode_b)
+       if (!is_Cond(cond))
                return;
 
        /* handle cases that can be immediately evaluated */
+       selector = get_Cond_selector(cond);
        selector_evaluated = -1;
        if (is_Cmp(selector)) {
                ir_node *left  = get_Cmp_left(selector);
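The evaluation that follows (beyond this hunk) tries to decide the Cmp outright when both operands are known. A minimal sketch of the constant/constant case, assuming the relation-based Cmp API of this libfirm generation:

	if (is_Const(left) && is_Const(right)) {
		ir_relation possible = tarval_cmp(get_Const_tarval(left),
		                                  get_Const_tarval(right));
		/* the branch is taken iff the actual relation between the two
		 * constants is among the relations tested by this Cmp */
		selector_evaluated
			= (possible & get_Cmp_relation(selector)) != ir_relation_false;
	}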
@@ -742,21 +750,15 @@ static void thread_jumps(ir_node* block, void* data)
        *changed = 1;
 }
 
-void opt_jumpthreading(ir_graph* irg)
+static ir_graph_state_t do_jumpthread(ir_graph* irg)
 {
        int changed, rerun;
+       ir_graph_state_t res = 0;
 
        FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");
 
        DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));
 
-       remove_critical_cf_edges(irg);
-
-       /* ugly: jump threading might get confused by garbage nodes
-        * of mode_X in copy_and_fix_node(), so remove all garbage edges. */
-       edges_deactivate(irg);
-
-       edges_assure(irg);
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
 
        changed = 0;
@@ -768,12 +770,22 @@ void opt_jumpthreading(ir_graph* irg)
 
        ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
 
-       if (changed) {
-               /* control flow changed, some blocks may become dead */
-               set_irg_doms_inconsistent(irg);
-               set_irg_extblk_inconsistent(irg);
-               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+       if (!changed) {
+               res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE;
        }
+
+       return res;
+}
+
+static optdesc_t opt_jumpthread = {
+       "jumpthreading",
+       IR_GRAPH_STATE_NO_UNREACHABLE_CODE | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_NO_CRITICAL_EDGES,
+       do_jumpthread,
+};
+
+void opt_jumpthreading(ir_graph* irg)
+{
+       perform_irg_optimization(irg, &opt_jumpthread);
 }
 
 /* Creates an ir_graph pass for opt_jumpthreading. */
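perform_irg_optimization reads the requirements mask from the optdesc_t, establishes any graph state that is still missing before calling the callback, and afterwards invalidates every state the callback did not report as preserved; this is what replaces the hand-written preamble deleted above. A rough sketch of the state-establishing step, using only the fixups the old preamble called by hand (the real logic lives in opt_manage.c):

static void establish_requirements(ir_graph *irg, ir_graph_state_t required)
{
	if (required & IR_GRAPH_STATE_NO_UNREACHABLE_CODE)
		remove_unreachable_code(irg);
	if (required & IR_GRAPH_STATE_CONSISTENT_OUT_EDGES)
		edges_assure(irg);
	if (required & IR_GRAPH_STATE_NO_CRITICAL_EDGES)
		remove_critical_cf_edges(irg);
}

Callers are unaffected: opt_jumpthreading keeps its signature and merely forwards to the manager with the descriptor above.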