be: Simplify places that still assumed Projs are scheduled.
[libfirm] / ir / ir / iropt.c
index 694668e..3164c61 100644
@@ -51,6 +51,7 @@
 #include "bitfiddle.h"
 #include "be.h"
 #include "error.h"
+#include "firmstat_t.h"
 
 #include "entity_t.h"
 
@@ -947,15 +948,13 @@ static ir_node *equivalent_node_Sub(ir_node *n)
  *   We handle it anyway here but the better way would be a
  *   flag. This would be needed for Pascal for instance.
  */
-static ir_node *equivalent_node_idempotent_unop(ir_node *n)
+static ir_node *equivalent_node_involution(ir_node *n)
 {
        ir_node *oldn = n;
        ir_node *pred = get_unop_op(n);
-
-       /* optimize symmetric unop */
        if (get_irn_op(pred) == get_irn_op(n)) {
                n = get_unop_op(pred);
-               DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_IDEM_UNARY);
+               DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_INVOLUTION);
        }
        return n;
 }
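
The new name is the accurate one: negation and bitwise complement are involutions (f(f(x)) == x), not idempotent operations (f(f(x)) == f(x)). A minimal standalone illustration of the rule this handler implements, outside the libfirm API:

/* Involution folding, illustrated without libfirm: applying Minus or
 * Not twice yields the original operand, so both wrappers below can
 * be folded to plain x. */
#include <assert.h>

static int double_minus(int x) { return -(-x); } /* folds to x */
static int double_not(int x)   { return ~(~x); } /* folds to x */

int main(void) {
    assert(double_minus(42) == 42);
    assert(double_not(42)   == 42);
    return 0;
}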
@@ -1131,7 +1130,7 @@ static ir_node *equivalent_node_Phi(ir_node *n)
        ir_node *first_val = NULL; /* to shutup gcc */
 
        if (!get_opt_optimize() &&
-                       get_irg_phase_state(get_irn_irg(n)) != phase_building)
+           !irg_is_constrained(get_irn_irg(n), IR_GRAPH_CONSTRAINT_CONSTRUCTION))
                return n;
 
        n_preds = get_Phi_n_preds(n);
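
For context: equivalent_node_Phi folds a Phi whose predecessors all carry the same value down to that value. The rewritten guard expresses the old phase_state test in terms of the new constraints API: the fold is skipped only when optimization is off and the graph is no longer under construction. At the source level the fold amounts to this (illustrative C, not libfirm API):

/* A Phi whose predecessors all yield the same value is equivalent to
 * that value; here the join for x carries a on both edges, so the
 * Phi folds to plain a and x disappears. */
int phi_fold(int cond, int a) {
    int x;
    if (cond)
        x = a;
    else
        x = a;
    return x;
}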
@@ -3348,8 +3347,6 @@ static ir_node *transform_node_Cond(ir_node *n)
                        set_Tuple_pred(n, pn_Cond_false, jmp);
                        set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
                }
-               /* We might generate an endless loop, so keep it alive. */
-               add_End_keepalive(get_irg_end(irg), blk);
                clear_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
        }
        return n;
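
The enclosing transform folds a Cond whose selector is a known constant: the taken exit becomes a plain Jmp, the dead exit becomes Bad, and per the deleted comment the keepalive edge that used to preserve a possibly endless loop is no longer added here. A source-level sketch of the fold (hypothetical example, not libfirm API):

/* Constant-Cond folding, sketched in C: the selector is known false,
 * so pn_Cond_true becomes Bad (unreachable) and pn_Cond_false becomes
 * an unconditional Jmp. */
int constant_cond(void) {
    if (0)
        return 1;   /* true exit: provably unreachable */
    return 2;       /* false exit: always taken */
}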
@@ -6078,6 +6075,25 @@ static ir_node *transform_node_Sync(ir_node *n)
        return n;
 }
 
+static ir_node *create_load_replacement_tuple(ir_node *n, ir_node *mem,
+                                              ir_node *res)
+{
+       ir_node  *block = get_nodes_block(n);
+       ir_graph *irg   = get_irn_irg(n);
+       ir_node  *in[pn_Load_max+1];
+       size_t    n_in  = 2;
+       in[pn_Load_M]   = mem;
+       in[pn_Load_res] = res;
+       if (ir_throws_exception(n)) {
+               in[pn_Load_X_regular] = new_r_Jmp(block);
+               in[pn_Load_X_except]  = new_r_Bad(irg, mode_X);
+               n_in                  = 4;
+               assert(n_in == pn_Load_max + 1);
+       }
+       ir_node  *tuple = new_r_Tuple(block, n_in, in);
+       return tuple;
+}
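
The helper centralizes the Tuple construction that the two deleted blocks below spelled out by hand: the replaced Load's users are Projs, and handing them a Tuple lets each Proj fold through to the matching operand (mem for pn_Load_M, the forwarded value for pn_Load_res). The control-flow entries are filled in only when the Load can throw, and Bad on the exception exit records that the repeated access can no longer fault. A toy model of the Proj-through-Tuple idea, deliberately independent of the libfirm API:

/* Toy sketch (not libfirm): a Tuple stores its operands and a Proj
 * merely selects one of them, which is why Projs on the replaced
 * Load keep working once the Tuple is swapped in. */
#include <assert.h>
#include <string.h>

enum { PN_M = 0, PN_RES = 1 };

typedef struct { const char *ops[2]; } toy_tuple;

static const char *toy_proj(const toy_tuple *t, int pn) {
    return t->ops[pn]; /* Proj(Tuple, pn) folds to operand pn */
}

int main(void) {
    toy_tuple t = { { "mem", "forwarded value" } };
    assert(strcmp(toy_proj(&t, PN_M),   "mem") == 0);
    assert(strcmp(toy_proj(&t, PN_RES), "forwarded value") == 0);
    return 0;
}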
+
 static ir_node *transform_node_Load(ir_node *n)
 {
        /* don't touch volatile loads */
@@ -6107,17 +6123,9 @@ static ir_node *transform_node_Load(ir_node *n)
                        return n;
                /* all combinations of aligned/unaligned pred/n should be fine so we do
                 * not compare the unaligned attribute */
-               {
-                       ir_node  *block = get_nodes_block(n);
-                       ir_node  *jmp   = new_r_Jmp(block);
-                       ir_graph *irg   = get_irn_irg(n);
-                       ir_node  *bad   = new_r_Bad(irg, mode_X);
-                       ir_mode  *mode  = get_Load_mode(n);
-                       ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
-                       ir_node  *in[]  = { mem, res, jmp, bad };
-                       ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
-                       return tuple;
-               }
+               ir_mode  *mode  = get_Load_mode(n);
+               ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
+               return create_load_replacement_tuple(n, mem, res);
        } else if (is_Store(mem_pred)) {
                ir_node *pred_store = mem_pred;
                ir_node *value      = get_Store_value(pred_store);
@@ -6128,16 +6136,7 @@ static ir_node *transform_node_Load(ir_node *n)
                        return n;
                /* all combinations of aligned/unaligned pred/n should be fine so we do
                 * not compare the unaligned attribute */
-               {
-                       ir_node  *block = get_nodes_block(n);
-                       ir_node  *jmp   = new_r_Jmp(block);
-                       ir_graph *irg   = get_irn_irg(n);
-                       ir_node  *bad   = new_r_Bad(irg, mode_X);
-                       ir_node  *res   = value;
-                       ir_node  *in[]  = { mem, res, jmp, bad };
-                       ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
-                       return tuple;
-               }
+               return create_load_replacement_tuple(n, mem, value);
        }
 
        return n;
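
Taken together, the two branches are redundant-load elimination: a Load that re-reads an address just read or just written (subject to the mode and volatility guards above) is replaced by the earlier access's value. Roughly, at the source level (illustrative C, not libfirm API):

/* What the two rewrites achieve, expressed as source code: */
int load_after_load(const int *p) {
    int a = *p;   /* first Load remains */
    int b = *p;   /* second Load folds to a Proj on the first */
    return a + b;
}

int load_after_store(int *p) {
    *p = 7;       /* Store remains */
    return *p;    /* Load folds to the stored value 7 */
}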
@@ -6358,10 +6357,10 @@ void ir_register_opt_node_ops(void)
        register_equivalent_node_func(op_Conv,    equivalent_node_Conv);
        register_equivalent_node_func(op_Eor,     equivalent_node_Eor);
        register_equivalent_node_func(op_Id,      equivalent_node_Id);
-       register_equivalent_node_func(op_Minus,   equivalent_node_idempotent_unop);
+       register_equivalent_node_func(op_Minus,   equivalent_node_involution);
        register_equivalent_node_func(op_Mul,     equivalent_node_Mul);
        register_equivalent_node_func(op_Mux,     equivalent_node_Mux);
-       register_equivalent_node_func(op_Not,     equivalent_node_idempotent_unop);
+       register_equivalent_node_func(op_Not,     equivalent_node_involution);
        register_equivalent_node_func(op_Or,      equivalent_node_Or);
        register_equivalent_node_func(op_Phi,     equivalent_node_Phi);
        register_equivalent_node_func(op_Proj,    equivalent_node_Proj);
@@ -6752,8 +6751,6 @@ ir_node *optimize_in_place_2(ir_node *n)
 ir_node *optimize_in_place(ir_node *n)
 {
        ir_graph *irg = get_irn_irg(n);
-       /* Handle graph state */
-       assert(get_irg_phase_state(irg) != phase_building);
 
        if (get_opt_global_cse())
                set_irg_pinned(irg, op_pin_state_floats);