remove Cast node
diff --git a/ir/opt/jumpthreading.c b/ir/opt/jumpthreading.c
index e27d0d5..34523d7 100644
  * @brief   Path-Sensitive Jump Threading
  * @date    10. Sep. 2006
  * @author  Christoph Mallon, Matthias Braun
- * @version $Id$
  */
 #include "config.h"
 
 #include "iroptimize.h"
 
 #include <assert.h>
+#include <stdbool.h>
 #include "array_t.h"
 #include "debug.h"
 #include "ircons.h"
 #include "iropt_dbg.h"
 #include "irpass.h"
 #include "vrp.h"
+#include "firmstat_t.h"
 
 #undef AVOID_PHIB
 
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
 /**
  * Add the new predecessor x to node node, which is either a Block or a Phi
@@ -57,14 +58,10 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg);
 static void add_pred(ir_node* node, ir_node* x)
 {
        ir_node** ins;
-       int n;
-       int i;
-
-       assert(is_Block(node) || is_Phi(node));
 
-       n = get_irn_arity(node);
+       int const n = get_Block_n_cfgpreds(node);
        NEW_ARR_A(ir_node*, ins, n + 1);
-       for (i = 0; i < n; i++)
+       for (int i = 0; i < n; i++)
                ins[i] = get_irn_n(node, i);
        ins[n] = x;
        set_irn_in(node, n + 1, ins);
@@ -83,10 +80,10 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
        ir_node **in;
        ir_node *dummy;
 
-       /* This is needed because we create bads sometimes */
+       /* In case of a bad input to a block we need to return the bad value */
        if (is_Bad(block)) {
                ir_graph *irg = get_irn_irg(block);
-               return new_r_Bad(irg);
+               return new_r_Bad(irg, mode);
        }
 
        /* the other defs can't be marked for cases where a user of the original
@@ -149,8 +146,6 @@ static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
 {
        ir_graph *irg;
        ir_mode *mode;
-       const ir_edge_t *edge;
-       const ir_edge_t *next;
 
        /* no need to do anything */
        if (orig_val == second_val)
@@ -167,7 +162,7 @@ static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
        ssa_second_def       = second_val;
 
        /* Only fix the users of the first, i.e. the original node */
-       foreach_out_edge_safe(orig_val, edge, next) {
+       foreach_out_edge_safe(orig_val, edge) {
                ir_node *user = get_edge_src_irn(edge);
                int j = get_edge_src_pos(edge);
                ir_node *user_block = get_nodes_block(user);
@@ -194,6 +189,16 @@ static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
        }
 }
 
+/**
+ * jumpthreading produces critical edges, e.g. B-C:
+ *     A         A
+ *  \ /       \  |
+ *   B    =>   B |
+ *  / \       / \|
+ *     C         C
+ *
+ * By splitting this critical edge, further jump threading may become possible.
+ */
 static void split_critical_edge(ir_node *block, int pos)
 {
        ir_graph *irg = get_irn_irg(block);
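Note for context: split_critical_edge (whose body lies mostly outside this hunk) reroutes the affected predecessor edge through a fresh block that contains nothing but a Jmp. A minimal sketch of that idea, assuming the standard constructors new_r_Block/new_r_Jmp; split_critical_edge_sketch is an illustrative name, not code from this patch:

    static void split_critical_edge_sketch(ir_node *block, int pos)
    {
        ir_graph *irg   = get_irn_irg(block);
        ir_node  *in[1] = { get_Block_cfgpred(block, pos) };
        /* fresh block whose only predecessor is the old control flow edge */
        ir_node  *new_block = new_r_Block(irg, 1, in);
        /* the new block does nothing but jump on to the original block */
        ir_node  *new_jmp = new_r_Jmp(new_block);
        set_Block_cfgpred(block, pos, new_jmp);
    }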
@@ -210,7 +215,7 @@ static void split_critical_edge(ir_node *block, int pos)
 typedef struct jumpthreading_env_t {
        ir_node       *true_block;
        ir_node       *cmp;        /**< The Compare node that might be partially evaluated */
-       pn_Cmp         pnc;        /**< The Compare mode of the Compare node. */
+       ir_relation    relation;   /**< The relation of the Compare node. */
        ir_node       *cnst;
        ir_tarval     *tv;
        ir_visited_t   visited_nr;
@@ -232,7 +237,7 @@ static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                copy = get_Phi_pred(node, j);
                /* we might have to evaluate a Phi-cascade */
                if (get_irn_visited(copy) >= env->visited_nr) {
-                       copy = get_irn_link(copy);
+                       copy = (ir_node*)get_irn_link(copy);
                }
        } else {
                copy = exact_copy(node);
@@ -249,7 +254,7 @@ static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                                continue;
 
                        if (get_irn_visited(pred) >= env->visited_nr) {
-                               new_pred = get_irn_link(pred);
+                               new_pred = (ir_node*)get_irn_link(pred);
                        } else {
                                new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
                        }
@@ -267,33 +272,32 @@ static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
 static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                          ir_node *copy_block, int j)
 {
-       const ir_edge_t *edge;
-
        /* Look at all nodes in the cond_block and copy them into pred */
        foreach_out_edge(block, edge) {
                ir_node *node = get_edge_src_irn(edge);
                ir_node *copy;
                ir_mode *mode;
 
+               if (is_End(node)) {
+                       /* edge is a Keep edge. If the end block is unreachable via normal
+                        * control flow, we must maintain end's reachability with Keeps.
+                        */
+                       keep_alive(copy_block);
+                       continue;
+               }
                /* ignore control flow */
                mode = get_irn_mode(node);
-               if (mode == mode_X || is_Cond(node))
+               if (mode == mode_X || is_Cond(node) || is_Switch(node))
                        continue;
 #ifdef AVOID_PHIB
                /* we may not copy mode_b nodes, because this could produce Phis with
                 * mode_b, which can't be handled in all backends. Instead we duplicate
                 * the node and move it to its users */
                if (mode == mode_b) {
-                       const ir_edge_t *edge, *next;
-                       ir_node *pred;
-                       int      pn;
-
-                       assert(is_Proj(node));
+                       ir_node *const pred = get_Proj_pred(node);
+                       long     const pn   = get_Proj_proj(node);
 
-                       pred = get_Proj_pred(node);
-                       pn   = get_Proj_proj(node);
-
-                       foreach_out_edge_safe(node, edge, next) {
+                       foreach_out_edge_safe(node, edge) {
                                ir_node *cmp_copy;
                                ir_node *user       = get_edge_src_irn(edge);
                                int pos             = get_edge_src_pos(edge);
@@ -317,7 +321,7 @@ static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                 * recursive find_phi_with_const() call */
                assert(get_irn_visited(copy) <= env->visited_nr);
                if (get_irn_visited(copy) >= env->visited_nr) {
-                       ir_node *prev_copy = get_irn_link(copy);
+                       ir_node *prev_copy = (ir_node*)get_irn_link(copy);
                        if (prev_copy != NULL)
                                set_irn_link(node, prev_copy);
                }
@@ -330,7 +334,7 @@ static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                ir_mode *mode;
 
                mode = get_irn_mode(node);
-               if (mode == mode_X || is_Cond(node))
+               if (mode == mode_X || is_Cond(node) || is_Switch(node))
                        continue;
 #ifdef AVOID_PHIB
                if (mode == mode_b)
@@ -339,56 +343,44 @@ static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
 
                DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));
 
-               copy_node = get_irn_link(node);
+               copy_node = (ir_node*)get_irn_link(node);
                construct_ssa(block, node, copy_block, copy_node);
        }
+
+       /* make sure new nodes are kept alive if old nodes were */
+       ir_graph *irg = get_irn_irg(block);
+       ir_node  *end = get_irg_end(irg);
+       for (int i = 0, arity = get_End_n_keepalives(end); i < arity; ++i) {
+               ir_node *keep = get_End_keepalive(end, i);
+               if (get_irn_visited(keep) < env->visited_nr || is_Block(keep))
+                       continue;
+               ir_node *copy = get_irn_link(keep);
+               add_End_keepalive(end, copy);
+       }
 }
 
 /**
  * returns whether the cmp evaluates to true or false, or can't be evaluated!
  * 1: true, 0: false, -1: can't evaluate
  *
- * @param pnc       the compare mode of the Compare
+ * @param relation  the relation of the Compare
  * @param tv_left   the left tarval
  * @param tv_right  the right tarval
  */
-static int eval_cmp_tv(pn_Cmp pnc, ir_tarval *tv_left, ir_tarval *tv_right)
+static int eval_cmp_tv(ir_relation relation, ir_tarval *tv_left,
+                       ir_tarval *tv_right)
 {
-       pn_Cmp cmp_result = tarval_cmp(tv_left, tv_right);
+       ir_relation cmp_result = tarval_cmp(tv_left, tv_right);
 
        /* does the compare evaluate to true? */
-       if (cmp_result == pn_Cmp_False)
+       if (cmp_result == ir_relation_false)
                return -1;
-       if ((cmp_result & pnc) != cmp_result)
-               return 0;
+       if ((cmp_result & relation) != 0)
+               return 1;
 
-       return 1;
+       return 0;
 }
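Note: ir_relation is a small bit set (less/equal/greater/unordered), so the evaluation above boils down to a mask test against the single relation bit that tarval_cmp returns for two comparable constants. A short illustrative sketch, assuming integer tarvals in mode_Is (not part of the patch):

    static void eval_cmp_tv_example(void)
    {
        ir_tarval *two   = new_tarval_from_long(2, mode_Is);
        ir_tarval *three = new_tarval_from_long(3, mode_Is);
        /* tarval_cmp(two, three) yields ir_relation_less */
        assert(eval_cmp_tv(ir_relation_less_equal, two, three) == 1);
        assert(eval_cmp_tv(ir_relation_greater,    two, three) == 0);
        /* incomparable tarvals give ir_relation_false, hence -1 */
    }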
 
-/**
- * returns whether the cmp evaluates to true or false according to vrp
- * information , or can't be evaluated!
- * 1: true, 0: false, -1: can't evaluate
- *
- * @param pnc       the compare mode of the Compare
- * @param left   the left node
- * @param right  the right node
- */
-static int eval_cmp_vrp(pn_Cmp pnc, ir_node *left, ir_node *right)
-{
-       pn_Cmp cmp_result = vrp_cmp(left, right);
-       /* does the compare evaluate to true? */
-       if (cmp_result == pn_Cmp_False) {
-               return -1;
-       }
-       if ((cmp_result & pnc) != cmp_result) {
-               if ((cmp_result & pnc) != 0) {
-                       return -1;
-               }
-               return 0;
-       }
-       return 1;
-}
 /**
  * returns whether the cmp evaluates to true or false, or can't be evaluated!
  * 1: true, 0: false, -1: can't evaluate
@@ -399,12 +391,12 @@ static int eval_cmp_vrp(pn_Cmp pnc, ir_node *left, ir_node *right)
 static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
 {
        if (is_Const(cand)) {
-               ir_tarval *tv_cand   = get_Const_tarval(cand);
-               ir_tarval *tv_cmp    = get_Const_tarval(env->cnst);
+               ir_tarval *tv_cand = get_Const_tarval(cand);
+               ir_tarval *tv_cmp  = get_Const_tarval(env->cnst);
 
-               return eval_cmp_tv(env->pnc, tv_cand, tv_cmp);
+               return eval_cmp_tv(env->relation, tv_cand, tv_cmp);
        } else { /* a Confirm */
-               ir_tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->pnc);
+               ir_tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->relation);
 
                if (res == tarval_bad)
                        return -1;
@@ -443,14 +435,13 @@ static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
                return NULL;
 
        if (is_Const_or_Confirm(value)) {
-               if (eval_cmp(env, value) <= 0) {
+               if (eval_cmp(env, value) <= 0)
                        return NULL;
-               }
 
                DB((
                        dbg, LEVEL_1,
                        "> Found jump threading candidate %+F->%+F\n",
-                       env->true_block, block
+                       block, env->true_block
                ));
 
                /* adjust true_block to point directly towards our jump */
@@ -468,9 +459,8 @@ static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
                int i, arity;
 
                /* the Phi has to be in the same Block as the Jmp */
-               if (get_nodes_block(value) != block) {
+               if (get_nodes_block(value) != block)
                        return NULL;
-               }
 
                arity = get_irn_arity(value);
                for (i = 0; i < arity; ++i) {
@@ -516,7 +506,7 @@ static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                DB((
                        dbg, LEVEL_1,
                        "> Found jump threading candidate %+F->%+F\n",
-                       env->true_block, block
+                       block, env->true_block
                ));
 
                /* adjust true_block to point directly towards our jump */
@@ -558,17 +548,11 @@ static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                        return copy_block;
                }
        }
-       if (is_Proj(value)) {
-               ir_node *left;
-               ir_node *right;
-               int      pnc;
-               ir_node *cmp = get_Proj_pred(value);
-               if (!is_Cmp(cmp))
-                       return NULL;
-
-               left  = get_Cmp_left(cmp);
-               right = get_Cmp_right(cmp);
-               pnc   = get_Proj_proj(value);
+       if (is_Cmp(value)) {
+               ir_node    *cmp      = value;
+               ir_node    *left     = get_Cmp_left(cmp);
+               ir_node    *right    = get_Cmp_right(cmp);
+               ir_relation relation = get_Cmp_relation(cmp);
 
                /* we assume that the constant is on the right side, swap left/right
                 * if needed */
@@ -577,25 +561,24 @@ static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                        left       = right;
                        right      = t;
 
-                       pnc        = get_inversed_pnc(pnc);
+                       relation   = get_inversed_relation(relation);
                }
 
                if (!is_Const(right))
-                       return 0;
+                       return NULL;
 
-               if (get_nodes_block(left) != block) {
-                       return 0;
-               }
+               if (get_nodes_block(left) != block)
+                       return NULL;
 
                /* negate condition when we're looking for the false block */
                if (env->tv == tarval_b_false) {
-                       pnc = get_negated_pnc(pnc, get_irn_mode(right));
+                       relation = get_negated_relation(relation);
                }
 
                /* (recursively) look if a pred of a Phi is a constant or a Confirm */
-               env->cmp  = cmp;
-               env->pnc  = pnc;
-               env->cnst = right;
+               env->cmp      = cmp;
+               env->relation = relation;
+               env->cnst     = right;
 
                return find_const_or_confirm(env, jump, left);
        }
@@ -619,17 +602,17 @@ static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
 static void thread_jumps(ir_node* block, void* data)
 {
        jumpthreading_env_t env;
-       int *changed = data;
+       bool *changed = (bool*)data;
        ir_node *selector;
        ir_node *projx;
        ir_node *cond;
        ir_node *copy_block;
        int      selector_evaluated;
-       const ir_edge_t *edge, *next;
        ir_graph *irg;
-       ir_node *bad;
-       size_t   cnst_pos;
+       ir_node *badX;
+       int      cnst_pos;
 
+       /* we do not deal with Phis, so restrict this to exactly one cfgpred */
        if (get_Block_n_cfgpreds(block) != 1)
                return;
 
@@ -639,37 +622,22 @@ static void thread_jumps(ir_node* block, void* data)
        assert(get_irn_mode(projx) == mode_X);
 
        cond = get_Proj_pred(projx);
-       if (!is_Cond(cond))
-               return;
-
-       selector = get_Cond_selector(cond);
        /* TODO handle switch Conds */
-       if (get_irn_mode(selector) != mode_b)
+       if (!is_Cond(cond))
                return;
 
        /* handle cases that can be immediately evaluated */
+       selector = get_Cond_selector(cond);
        selector_evaluated = -1;
-       if (is_Proj(selector)) {
-               ir_node *cmp = get_Proj_pred(selector);
-               if (is_Cmp(cmp)) {
-                       ir_node *left  = get_Cmp_left(cmp);
-                       ir_node *right = get_Cmp_right(cmp);
-                       if (is_Const(left) && is_Const(right)) {
-                               int        pnc      = get_Proj_proj(selector);
-                               ir_tarval *tv_left  = get_Const_tarval(left);
-                               ir_tarval *tv_right = get_Const_tarval(right);
-
-                               selector_evaluated = eval_cmp_tv(pnc, tv_left, tv_right);
-                       }
-                       if (selector_evaluated < 0) {
-                               /* This is only the case if the predecessor nodes are not
-                                * constant or the comparison could not be evaluated.
-                                * Try with VRP information now.
-                                */
-                               int pnc = get_Proj_proj(selector);
-
-                               selector_evaluated = eval_cmp_vrp(pnc, left, right);
-                       }
+       if (is_Cmp(selector)) {
+               ir_node *left  = get_Cmp_left(selector);
+               ir_node *right = get_Cmp_right(selector);
+               if (is_Const(left) && is_Const(right)) {
+                       ir_relation relation = get_Cmp_relation(selector);
+                       ir_tarval  *tv_left  = get_Const_tarval(left);
+                       ir_tarval  *tv_right = get_Const_tarval(right);
+
+                       selector_evaluated = eval_cmp_tv(relation, tv_left, tv_right);
                }
        } else if (is_Const_or_Confirm(selector)) {
                ir_tarval *tv = get_Const_or_Confirm_tarval(selector);
@@ -692,16 +660,16 @@ static void thread_jumps(ir_node* block, void* data)
 
        if (selector_evaluated == 0) {
                ir_graph *irg = get_irn_irg(block);
-               bad = new_r_Bad(irg);
+               ir_node  *bad = new_r_Bad(irg, mode_X);
                exchange(projx, bad);
-               *changed = 1;
+               *changed = true;
                return;
        } else if (selector_evaluated == 1) {
                dbg_info *dbgi = get_irn_dbg_info(selector);
                ir_node  *jmp  = new_rd_Jmp(dbgi, get_nodes_block(projx));
                DBG_OPT_JUMPTHREADING(projx, jmp);
                exchange(projx, jmp);
-               *changed = 1;
+               *changed = true;
                return;
        }
 
@@ -715,64 +683,63 @@ static void thread_jumps(ir_node* block, void* data)
        if (copy_block == NULL)
                return;
 
+       /* We might thread the condition block of an infinite loop,
+        * such that there is no path to End anymore. */
+       keep_alive(block);
+
        /* we have to remove the edge towards the pred as the pred now
         * jumps into the true_block. We also have to shorten Phis
         * in our block because of this */
-       bad      = new_r_Bad(irg);
+       badX     = new_r_Bad(irg, mode_X);
        cnst_pos = env.cnst_pos;
 
        /* shorten Phis */
-       foreach_out_edge_safe(env.cnst_pred, edge, next) {
+       foreach_out_edge_safe(env.cnst_pred, edge) {
                ir_node *node = get_edge_src_irn(edge);
 
-               if (is_Phi(node))
+               if (is_Phi(node)) {
+                       ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
                        set_Phi_pred(node, cnst_pos, bad);
+               }
        }
 
-       set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
+       set_Block_cfgpred(env.cnst_pred, cnst_pos, badX);
 
        /* the graph is changed now */
-       *changed = 1;
+       *changed = true;
 }
 
 void opt_jumpthreading(ir_graph* irg)
 {
-       int changed, rerun;
+       bool changed;
+       bool rerun;
+
+       assure_irg_properties(irg,
+               IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+               | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
+               | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES);
 
        FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");
 
        DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));
 
-       remove_critical_cf_edges(irg);
-
-       edges_assure(irg);
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
 
-       changed = 0;
+       changed = false;
        do {
-               rerun = 0;
+               rerun = false;
                irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
                changed |= rerun;
        } while (rerun);
 
        ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
 
-       if (changed) {
-               /* control flow changed, some blocks may become dead */
-               set_irg_outs_inconsistent(irg);
-               set_irg_doms_inconsistent(irg);
-               set_irg_extblk_inconsistent(irg);
-               set_irg_loopinfo_inconsistent(irg);
-               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-
-               /* Dead code might be created. Optimize it away as it is dangerous
-                * to call optimize_df() an dead code. */
-               optimize_cf(irg);
-       }
+       confirm_irg_properties(irg,
+               changed ? IR_GRAPH_PROPERTIES_NONE : IR_GRAPH_PROPERTIES_ALL);
 }
 
 /* Creates an ir_graph pass for opt_jumpthreading. */
 ir_graph_pass_t *opt_jumpthreading_pass(const char *name)
 {
        return def_graph_pass(name ? name : "jumpthreading", opt_jumpthreading);
-}  /* opt_jumpthreading_pass */
+}
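Usage sketch (an assumption, not shown in this patch): with the manual invalidation code gone, a driver simply runs opt_jumpthreading per graph and relies on assure_irg_properties/confirm_irg_properties to manage analysis info. Assuming the get_irp_n_irgs()/get_irp_irg() iterators from irprog.h:

    #include "irprog.h"
    #include "iroptimize.h"

    /* Hypothetical driver: thread jumps in every graph of the program. */
    static void thread_jumps_everywhere(void)
    {
        for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
            opt_jumpthreading(get_irp_irg(i));
        }
    }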