fix mode of call-result proj
[libfirm] / ir / ir / irgopt.c
index de85ed1..3de7ae9 100644
@@ -78,9 +78,7 @@ static inline void do_local_optimize(ir_node *n)
 
        if (get_opt_global_cse())
                set_irg_pinned(irg, op_pin_state_floats);
-       set_irg_outs_inconsistent(irg);
        set_irg_doms_inconsistent(irg);
-       set_irg_loopinfo_inconsistent(irg);
 
        /* Clean the value_table in irg for the CSE. */
        new_identities(irg);
@@ -100,32 +98,12 @@ void local_optimize_node(ir_node *n)
        current_ir_graph = rem;
 }
 
-/**
- * Block-Walker: uses dominance depth to mark dead blocks.
- */
-static void kill_dead_blocks(ir_node *block, void *env)
+static void enqueue_node(ir_node *node, pdeq *waitq)
 {
-       (void) env;
-
-       if (get_Block_dom_depth(block) < 0) {
-               /*
-                * Note that the new dominance code correctly handles
-                * the End block, i.e. it is always reachable from Start
-                */
-               ir_graph *irg = get_irn_irg(block);
-               exchange(block, get_irg_bad(irg));
-       }
-}
-
-/* Applies local optimizations (see iropt.h) to all nodes reachable from node n. */
-void local_optimize_graph(ir_graph *irg)
-{
-       ir_graph *rem = current_ir_graph;
-       current_ir_graph = irg;
-
-       do_local_optimize(get_irg_end(irg));
-
-       current_ir_graph = rem;
+       if (get_irn_link(node) == waitq)
+               return;
+       pdeq_putr(waitq, node);
+       set_irn_link(node, waitq);
 }
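
The new enqueue_node() helper centralizes the "queue each node at most once" logic that enqueue_users() previously open-coded: the node's link field doubles as a membership marker, and a node whose link already points at the wait queue is skipped. A stand-alone sketch of the same link-as-marker pattern, with toy types (toy_node, toy_queue and friends are illustrative only, not libfirm API):

#include <stdio.h>

/* Toy stand-ins for ir_node and pdeq, only to illustrate the pattern. */
typedef struct toy_node {
    int              id;
    void            *link;   /* plays the role of the ir_node link field */
    struct toy_node *next;   /* intrusive list used as the wait queue    */
} toy_node;

typedef struct toy_queue {
    toy_node *head;
    toy_node *tail;
} toy_queue;

/* Enqueue a node at most once: the link field remembers the owning queue. */
static void toy_enqueue(toy_node *node, toy_queue *waitq)
{
    if (node->link == waitq)
        return;                        /* already queued, nothing to do */
    node->link = waitq;
    node->next = NULL;
    if (waitq->tail != NULL)
        waitq->tail->next = node;
    else
        waitq->head = node;
    waitq->tail = node;
}

int main(void)
{
    toy_queue q = { NULL, NULL };
    toy_node  a = { 1, NULL, NULL };

    toy_enqueue(&a, &q);
    toy_enqueue(&a, &q);               /* second call is a no-op */

    for (toy_node *n = q.head; n != NULL; n = n->next)
        printf("queued node %d\n", n->id);   /* printed exactly once */
    return 0;
}

The marker is only meaningful while the link fields are reserved, which optimize_graph_df() does further down via ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK).
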
 
 /**
@@ -139,10 +117,7 @@ static void enqueue_users(ir_node *n, pdeq *waitq)
        foreach_out_edge(n, edge) {
                ir_node *succ = get_edge_src_irn(edge);
 
-               if (get_irn_link(succ) != waitq) {
-                       pdeq_putr(waitq, succ);
-                       set_irn_link(succ, waitq);
-               }
+               enqueue_node(succ, waitq);
                if (get_irn_mode(succ) == mode_T) {
                /* A mode_T node has Proj's. Because most optimizations
                        run on the Proj's we have to enqueue them also. */
@@ -151,6 +126,43 @@ static void enqueue_users(ir_node *n, pdeq *waitq)
        }
 }
 
+/**
+ * Block-Walker: uses dominance depth to find unreachable blocks and
+ * enqueues the affected successor blocks, their Phis and the End node
+ * for re-optimization.
+ */
+static void find_unreachable_blocks(ir_node *block, void *env)
+{
+       pdeq *waitq = (pdeq*) env;
+
+       if (get_Block_dom_depth(block) < 0) {
+               ir_graph *irg = get_irn_irg(block);
+               ir_node  *end = get_irg_end(irg);
+
+               const ir_edge_t *edge;
+               foreach_block_succ(block, edge) {
+                       const ir_edge_t *edge2;
+                       ir_node *succ_block = get_edge_src_irn(edge);
+                       enqueue_node(succ_block, waitq);
+                       foreach_out_edge(succ_block, edge2) {
+                               ir_node *succ = get_edge_src_irn(edge2);
+                               if (is_Phi(succ))
+                                       enqueue_node(succ, waitq);
+                       }
+               }
+               enqueue_node(end, waitq);
+       }
+}
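
find_unreachable_blocks() relies on the fact that dominance construction starts at the Start block, so any block it never reaches keeps a negative dominance depth; get_Block_dom_depth(block) < 0 therefore identifies unreachable blocks. Unlike the removed kill_dead_blocks(), which exchanged such blocks with Bad on the spot, the new walker only enqueues each control-flow successor block, the Phis inside it and the End node, leaving the actual cleanup to the normal optimization queue. The toy program below illustrates just the "never reached means the depth stays negative" invariant, using plain reachability from a start block instead of real dominator-tree construction (the CFG and the -1 sentinel are made up for the example):

#include <stdio.h>

enum { N_BLOCKS = 4 };

/* Toy CFG as an adjacency matrix; block 0 acts as "Start". */
static const int succ[N_BLOCKS][N_BLOCKS] = {
    /* block 0 -> block 1 */                    {0, 1, 0, 0},
    /* block 1: no successors */                {0, 0, 0, 0},
    /* block 2 -> block 3 (2 is unreachable) */ {0, 0, 0, 1},
    /* block 3: no successors */                {0, 0, 0, 0},
};

static int depth[N_BLOCKS];

/* Assign depths along a traversal from the start block. */
static void visit(int block, int d)
{
    if (depth[block] >= 0)
        return;
    depth[block] = d;
    for (int s = 0; s < N_BLOCKS; ++s)
        if (succ[block][s])
            visit(s, d + 1);
}

int main(void)
{
    for (int b = 0; b < N_BLOCKS; ++b)
        depth[b] = -1;                 /* sentinel: not reached yet */
    visit(0, 0);
    for (int b = 0; b < N_BLOCKS; ++b)
        printf("block %d: depth %d%s\n", b, depth[b],
               depth[b] < 0 ? "  (unreachable)" : "");
    return 0;                          /* blocks 2 and 3 stay at -1 */
}
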
+
+/* Applies local optimizations (see iropt.h) to all nodes in the graph. */
+void local_optimize_graph(ir_graph *irg)
+{
+       ir_graph *rem = current_ir_graph;
+       current_ir_graph = irg;
+
+       do_local_optimize(get_irg_end(irg));
+
+       current_ir_graph = rem;
+}
+
 /**
  * Data flow optimization walker.
  * Optimizes all nodes and enqueue its users
@@ -187,23 +199,20 @@ int optimize_graph_df(ir_graph *irg)
 
        if (get_opt_global_cse()) {
                set_irg_pinned(irg, op_pin_state_floats);
-       } else {
-               /* The following enables unreachable code elimination (=Blocks may be
-                * Bad). We cannot enable it in global_cse nodes since we can't
-                * determine a nodes block there and therefore can't remove all code
-                * in unreachable blocks */
-               set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
-               if (get_irg_dom_state(irg) == dom_consistent)
-                       irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
        }
 
+       /* The following enables unreachable code elimination (=Blocks may be
+        * Bad). */
+       set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
+
        /* invalidate info */
-       set_irg_outs_inconsistent(irg);
        set_irg_doms_inconsistent(irg);
-       set_irg_loopinfo_inconsistent(irg);
 
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 
+       /* Calculate dominance so we can kill unreachable code */
+       assure_doms(irg);
+
        /* walk over the graph, but don't touch keep-alives */
        irg_walk_graph(irg, NULL, opt_walker, waitq);
 
@@ -211,12 +220,17 @@ int optimize_graph_df(ir_graph *irg)
         * so if it's not empty, the graph has been changed */
        changed = !pdeq_empty(waitq);
 
-       /* finish the wait queue */
-       while (! pdeq_empty(waitq)) {
-               ir_node *n = (ir_node*)pdeq_getl(waitq);
-               if (! is_Bad(n))
+       while (!pdeq_empty(waitq)) {
+               /* finish the wait queue */
+               while (! pdeq_empty(waitq)) {
+                       ir_node *n = (ir_node*)pdeq_getl(waitq);
                        opt_walker(n, waitq);
+               }
+               /* Recompute dominance so blocks that just became unreachable are found */
+               compute_doms(irg);
+               irg_block_walk_graph(irg, NULL, find_unreachable_blocks, waitq);
        }
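
The single drain of the wait queue has become a fixed-point loop: local optimizations run until the queue is empty, then dominance is recomputed and find_unreachable_blocks() refills the queue with everything affected by blocks that have just become unreachable; the outer loop only terminates when that pass adds no new work. Iterating matters because removing unreachable code enables further folding, which can in turn make more code unreachable. A small illustrative C input (assumed example, not part of the patch) exhibiting such a cascade:

#include <stdio.h>

/* Illustrative input only: shows why one round of unreachable-code
 * detection is not enough. */
static int cascade(void)
{
    const int a = 1, b = 2;
    int x;

    if (a == b)
        x = 1;        /* dead once a == b folds to false                  */
    else
        x = 0;        /* the Phi for x in the join block folds to this    */

    if (x)            /* becomes a constant condition after the Phi folds */
        return 42;    /* ...which makes this branch unreachable as well   */
    return 0;
}

int main(void)
{
    printf("%d\n", cascade());   /* prints 0 */
    return 0;
}
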
+       set_irg_doms_inconsistent(irg);
 
        del_pdeq(waitq);
 
@@ -225,9 +239,13 @@ int optimize_graph_df(ir_graph *irg)
        if (! state)
                edges_deactivate(irg);
 
+       if (remove_bads(irg)) {
+               edges_deactivate(irg);
+       }
+
        /* Finally kill BAD and doublets from the keep alives.
           Doing this AFTER edges where deactivated saves cycles */
-       end  = get_irg_end(irg);
+       end = get_irg_end(irg);
        remove_End_Bads_and_doublets(end);
 
        clear_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
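
For context, a hedged sketch of how a client might drive the reworked pass over all graphs of a program. It assumes the usual libfirm program-iteration API (get_irp_n_irgs()/get_irp_irg()), the <libfirm/firm.h> umbrella header, and that a non-zero return of optimize_graph_df() means the graph changed, as the changed flag above suggests; treat these details as assumptions rather than documented guarantees.

#include <stddef.h>
#include <libfirm/firm.h>   /* header name assumed; adjust to the local setup */

/* Hypothetical driver (not part of the patch): rerun the data-flow
 * optimizer on every graph until it reports no further change. */
void optimize_all_graphs(void)
{
    size_t n_irgs = get_irp_n_irgs();
    for (size_t i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);
        while (optimize_graph_df(irg)) {
            /* iterate until this graph reaches a fixed point */
        }
    }
}
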