CF backedges are enough for us
diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index a006f8e..8599f39 100644
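
The title refers to the place_code() hunk near the end of this diff: code
placement only needs loop information on the control flow graph, so the full
construct_backedges() analysis can give way to the cheaper
construct_cf_backedges(). The remaining hunks are cleanups picked up along the
way: is_*() predicates instead of get_irn_op() comparisons, anchors behind
get_irg_anchor()/new_Anchor() instead of the raw array, an exc_mode enum for
the magic exception-handling numbers in inline_method(), Bad keepalives
filtered while inlining, and critical-edge splitting extended to fragile ops
other than Raise. The headline change boils down to:

    /* in place_code(); both are existing libfirm loop analyses */
    free_loop_information(irg);
    construct_cf_backedges(irg);   /* was: construct_backedges(irg) */
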
@@ -34,9 +34,9 @@
 #include "irgraph_t.h"
 #include "irprog_t.h"
 
-#include "ircons.h"
+#include "iroptimize.h"
+#include "ircons_t.h"
 #include "iropt_t.h"
-#include "cfopt.h"
 #include "irgopt.h"
 #include "irgmod.h"
 #include "irgwalk.h"
  */
 static void optimize_in_place_wrapper (ir_node *n, void *env) {
        ir_node *optimized = optimize_in_place_2(n);
-       if (optimized != n) exchange (n, optimized);
+       (void) env;
+
+       if (optimized != n) {
+               exchange (n, optimized);
+       }
 }
 
 /**
@@ -111,6 +115,8 @@ void local_optimize_node(ir_node *n) {
  * Block-Walker: uses dominance depth to mark dead blocks.
  */
 static void kill_dead_blocks(ir_node *block, void *env) {
+       (void) env;
+
        if (get_Block_dom_depth(block) < 0) {
                /*
                 * Note that the new dominance code correctly handles
@@ -282,7 +288,7 @@ compute_new_arity(ir_node *b) {
                /* compute the number of good predecessors */
                res = irn_arity = get_irn_arity(b);
                for (i = 0; i < irn_arity; i++)
-                       if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
+                       if (is_Bad(get_irn_n(b, i))) res--;
                /* save it in the flag. */
                set_Block_block_visited(b, irg_v + res);
                return res;
@@ -357,20 +363,31 @@ static void copy_node(ir_node *n, void *env) {
  * Copies new predecessors of old node to new node remembered in link.
  * Spare the Bad predecessors of Phi and Block nodes.
  */
-void
-copy_preds(ir_node *n, void *env) {
+static void copy_preds(ir_node *n, void *env) {
        ir_node *nn, *block;
        int i, j, irn_arity;
+       (void) env;
 
        nn = get_new_node(n);
 
        if (is_Block(n)) {
+               /* copy the macro block header */
+               ir_node *mbh = get_Block_MacroBlock(n);
+
+               if (mbh == n) {
+                       /* this block is a macroblock header */
+                       set_irn_n(nn, -1, nn);
+               } else {
+                       /* get the macro block header */
+                       set_irn_n(nn, -1, get_new_node(mbh));
+               }
+
                /* Don't copy Bad nodes. */
                j = 0;
                irn_arity = get_irn_arity(n);
                for (i = 0; i < irn_arity; i++) {
                        if (! is_Bad(get_irn_n(n, i))) {
-                               set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+                               set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
                                /*if (is_backedge(n, i)) set_backedge(nn, j);*/
                                j++;
                        }
@@ -385,7 +402,7 @@ copy_preds(ir_node *n, void *env) {
                   that the fields in ir_graph are set properly. */
                if ((get_opt_control_flow_straightening()) &&
                        (get_Block_n_cfgpreds(nn) == 1) &&
-                       (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+                       is_Jmp(get_Block_cfgpred(nn, 0))) {
                        ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
                        if (nn == old) {
                                /* Jmp jumps into the block it is in -- deal self cycle. */
@@ -395,7 +412,7 @@ copy_preds(ir_node *n, void *env) {
                                exchange(nn, old);
                        }
                }
-       } else if (get_irn_op(n) == op_Phi) {
+       } else if (is_Phi(n)) {
                /* Don't copy node if corresponding predecessor in block is Bad.
                   The Block itself should not be Bad. */
                block = get_nodes_block(n);
@@ -423,7 +440,7 @@ copy_preds(ir_node *n, void *env) {
        }
        /* Now the new node is complete.  We can add it to the hash table for CSE.
           @@@ inlining aborts if we identify End. Why? */
-       if (get_irn_op(nn) != op_End)
+       if (!is_End(nn))
                add_identities(current_ir_graph->value_table, nn);
 }
 
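Aside: the set_irn_n(nn, -1, ...) calls above rely on libfirm's convention
that predecessor index -1 addresses a node's block operand (for a Block, the
macroblock header). Later hunks in this commit replace the raw form with the
named accessors, which at this point in the API are equivalent:

    ir_node *blk = get_irn_n(node, -1);   /* same as get_nodes_block(node)      */
    set_irn_n(node, -1, blk);             /* same as set_nodes_block(node, blk) */
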
@@ -490,8 +507,8 @@ static void copy_graph(ir_graph *irg, int copy_node_nr) {
        /* Note: from yet, the visited flag of the graph is equal to vfl + 1 */
 
        /* visit the anchors as well */
-       for (i = anchor_max - 1; i >= 0; --i) {
-               ir_node *n = irg->anchors[i];
+       for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+               ir_node *n = get_irg_anchor(irg, i);
 
                if (n && (get_irn_visited(n) <= vfl)) {
                        set_irg_visited(irg, vfl);
@@ -548,7 +565,7 @@ static void copy_graph(ir_graph *irg, int copy_node_nr) {
 static void
 copy_graph_env(int copy_node_nr) {
        ir_graph *irg = current_ir_graph;
-       ir_node *old_end, *n;
+       ir_node *old_end, *new_anchor;
        int i;
 
        /* remove end_except and end_reg nodes */
@@ -559,9 +576,10 @@ copy_graph_env(int copy_node_nr) {
        /* Not all nodes remembered in irg might be reachable
           from the end node.  Assure their link is set to NULL, so that
           we can test whether new nodes have been computed. */
-       for (i = anchor_max - 1; i >= 0; --i) {
-               if (irg->anchors[i])
-                       set_new_node(irg->anchors[i], NULL);
+       for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+               ir_node *n = get_irg_anchor(irg, i);
+               if (n != NULL)
+                       set_new_node(n, NULL);
        }
        /* we use the block walk flag for removing Bads from Blocks ins. */
        inc_irg_block_visited(irg);
@@ -569,14 +587,20 @@ copy_graph_env(int copy_node_nr) {
        /* copy the graph */
        copy_graph(irg, copy_node_nr);
 
-       /* fix the fields in irg */
-       old_end = get_irg_end(irg);
-       for (i = anchor_max - 1; i >= 0; --i) {
-               n = irg->anchors[i];
+       /* fix the anchor */
+       old_end    = get_irg_end(irg);
+       new_anchor = new_Anchor(irg);
+
+       for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+               ir_node *n = get_irg_anchor(irg, i);
                if (n)
-                       irg->anchors[i] = get_new_node(n);
+                       set_irn_n(new_anchor, i, get_new_node(n));
        }
        free_End(old_end);
+       irg->anchor = new_anchor;
+
+       /* ensure the new anchor is placed in the endblock */
+       set_irn_n(new_anchor, -1, get_irg_end_block(irg));
 }
 
 /**
@@ -635,7 +659,7 @@ dead_node_elimination(ir_graph *irg) {
 
                /* Free memory from old unoptimized obstack */
                obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
-               xfree (graveyard_obst);           /* ... then free it.           */
+               xfree(graveyard_obst);            /* ... then free it.           */
 
                /* inform statistics that the run is over */
                hook_dead_node_elim(irg, 0);
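
For context, dead_node_elimination() works by swapping in a fresh obstack and
copying the reachable graph onto it; the xfree() just touched releases the old
arena. A rough sketch of the surrounding flow, in the function's own
"graveyard" terms (the rebirth_obst name is illustrative):

    struct obstack *graveyard_obst = irg->obst;       /* old arena            */
    struct obstack *rebirth_obst   = xmalloc(sizeof(*rebirth_obst));
    irg->obst = rebirth_obst;
    obstack_init(irg->obst);                          /* fresh arena          */
    copy_graph_env(copy_node_nr);                     /* copy live nodes over */
    obstack_free(graveyard_obst, 0);                  /* empty the graveyard  */
    xfree(graveyard_obst);                            /* free the arena too   */
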
@@ -655,12 +679,11 @@ dead_node_elimination(ir_graph *irg) {
 static void relink_bad_block_predecessors(ir_node *n, void *env) {
        ir_node **new_in, *irn;
        int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
+       (void) env;
 
        /* if link field of block is NULL, look for bad predecessors otherwise
           this is already done */
-       if (get_irn_op(n) == op_Block &&
-               get_irn_link(n) == NULL) {
-
+       if (is_Block(n) && get_irn_link(n) == NULL) {
                /* save old predecessors in link field (position 0 is the block operand)*/
                set_irn_link(n, get_irn_in(n));
 
@@ -705,12 +728,11 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
        int i, old_irn_arity, new_irn_arity;
 
        /* relink bad predecessors of a block */
-       if (get_irn_op(n) == op_Block)
+       if (is_Block(n))
                relink_bad_block_predecessors(n, env);
 
        /* If Phi node relink its block and its predecessors */
-       if (get_irn_op(n) == op_Phi) {
-
+       if (is_Phi(n)) {
                /* Relink predecessors of phi's block */
                block = get_nodes_block(n);
                if (get_irn_link(block) == NULL)
@@ -775,6 +797,7 @@ typedef struct _survive_dce_list_t {
 
 static void dead_node_hook(void *context, ir_graph *irg, int start) {
        survive_dce_t *sd = context;
+       (void) irg;
 
        /* Create a new map before the dead node elimination is performed. */
        if (start) {
@@ -793,6 +816,7 @@ static void dead_node_hook(void *context, ir_graph *irg, int start) {
 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
        survive_dce_t *sd = context;
        survive_dce_list_t *list = pmap_get(sd->places, old);
+       (void) irg;
 
        /* If the node is to be patched back, write the new address to all registered locations. */
        if (list) {
@@ -882,13 +906,13 @@ copy_node_inline(ir_node *n, void *env) {
        ir_type *frame_tp = (ir_type *)env;
 
        copy_node(n, NULL);
-       if (get_irn_op(n) == op_Sel) {
+       if (is_Sel(n)) {
                nn = get_new_node (n);
                assert(is_Sel(nn));
                if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
                        set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
                }
-       } else if (get_irn_op(n) == op_Block) {
+       } else if (is_Block(n)) {
                nn = get_new_node (n);
                nn->attr.block.irg = current_ir_graph;
        }
@@ -899,9 +923,10 @@ copy_node_inline(ir_node *n, void *env) {
  */
 static void find_addr(ir_node *node, void *env) {
        int *allow_inline = env;
-       if (is_Proj(node) && get_irn_op(get_Proj_pred(node)) == op_Start) {
-               if (get_Proj_proj(node) == pn_Start_P_value_arg_base)
-                       *allow_inline = 0;
+       if (is_Proj(node) &&
+                       is_Start(get_Proj_pred(node)) &&
+                       get_Proj_proj(node) == pn_Start_P_value_arg_base) {
+               *allow_inline = 0;
        }
 }
 
@@ -943,6 +968,12 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
        return res;
 }
 
+enum exc_mode {
+       exc_handler    = 0, /**< There is a handler. */
+       exc_to_end     = 1, /**< Branches to End. */
+       exc_no_handler = 2  /**< Exception handling not represented. */
+};
+
 /* Inlines a method at the given call site. */
 int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node *pre_call;
@@ -953,7 +984,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node **cf_pred;
        ir_node *ret, *phi;
        int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
-       int exc_handling;
+       enum exc_mode exc_handling;
        ir_type *called_frame;
        irg_inline_property prop = get_irg_inline_property(called_graph);
 
@@ -1013,13 +1044,13 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        {
                ir_node *proj, *Mproj = NULL, *Xproj = NULL;
                for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
-                       assert(is_Proj(proj));
-                       if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
-                       if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
+                       long proj_nr = get_Proj_proj(proj);
+                       if (proj_nr == pn_Call_X_except) Xproj = proj;
+                       if (proj_nr == pn_Call_M_except) Mproj = proj;
                }
-               if      (Mproj) { assert(Xproj); exc_handling = 0; } /*  Mproj           */
-               else if (Xproj) {                exc_handling = 1; } /* !Mproj &&  Xproj   */
-               else            {                exc_handling = 2; } /* !Mproj && !Xproj   */
+               if      (Mproj) { assert(Xproj); exc_handling = exc_handler;    } /*  Mproj           */
+               else if (Xproj) {                exc_handling = exc_to_end;     } /* !Mproj &&  Xproj */
+               else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
        }
 
        /* --
@@ -1085,7 +1116,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* -- Performing dead node elimination inlines the graph -- */
        /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
           entities. */
-       /* @@@ endless loops are not copied!! -- they should be, I think... */
        irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
                 get_irg_frame_type(called_graph));
 
@@ -1121,8 +1151,11 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        /* -- archive keepalives -- */
        irn_arity = get_irn_arity(end);
-       for (i = 0; i < irn_arity; i++)
-               add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+       for (i = 0; i < irn_arity; i++) {
+               ir_node *ka = get_End_keepalive(end, i);
+               if (! is_Bad(ka))
+                       add_End_keepalive(get_irg_end(current_ir_graph), ka);
+       }
 
        /* The new end node will die.  We need not free as the in array is on the obstack:
           copy_node() only generated 'D' arrays. */
@@ -1141,7 +1174,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        /* -- Build a Tuple for all results of the method.
           Add Phi node if there was more than one Return.  -- */
-       turn_into_tuple(post_call, 4); /* FIXME: is th 4 corrct here ? */
+       turn_into_tuple(post_call, pn_Call_max);
        /* First the Memory-Phi */
        n_ret = 0;
        for (i = 0; i < arity; i++) {
@@ -1164,7 +1197,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        n_ret = 0;
                        for (i = 0; i < arity; i++) {
                                ret = get_irn_n(end_bl, i);
-                               if (get_irn_op(ret) == op_Return) {
+                               if (is_Return(ret)) {
                                        cf_pred[n_ret] = get_Return_res(ret, j);
                                        n_ret++;
                                }
@@ -1184,6 +1217,12 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        } else {
                set_Tuple_pred(call, pn_Call_T_result, new_Bad());
        }
+       /* handle the regular call */
+       set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
+
+       /* For now, we cannot inline calls with value_base */
+       set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
+
        /* Finally the exception control flow.
           We have two (three) possible situations:
           First if the Call branches to an exception handler: We need to add a Phi node to
@@ -1194,14 +1233,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           Second the Call branches to End, the exception is not handled.  Just
           add all inlined exception branches to the End node.
           Third: there is no Exception edge at all. Handle as case two. */
-       if (exc_handling == 0) {
+       if (exc_handling == exc_handler) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
-                       ir_node *ret;
+                       ir_node *ret, *irn;
                        ret = get_irn_n(end_bl, i);
-                       if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+                       irn = skip_Proj(ret);
+                       if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
-                               n_exc++;
+                               ++n_exc;
                        }
                }
                if (n_exc > 0) {
@@ -1219,7 +1259,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                                        /* We rely that all cfops have the memory output at the same position. */
                                        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
                                        n_exc++;
-                               } else if (get_irn_op(ret) == op_Raise) {
+                               } else if (is_Raise(ret)) {
                                        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
                                        n_exc++;
                                }
@@ -1238,23 +1278,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
                        ir_node *ret = get_irn_n(end_bl, i);
+                       ir_node *irn = skip_Proj(ret);
 
-                       if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+                       if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
                                n_exc++;
                        }
                }
                main_end_bl = get_irg_end_block(current_ir_graph);
                main_end_bl_arity = get_irn_arity(main_end_bl);
-               end_preds =  xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+               end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
 
                for (i = 0; i < main_end_bl_arity; ++i)
                        end_preds[i] = get_irn_n(main_end_bl, i);
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-               set_Tuple_pred(call, pn_Call_M_except, new_Bad());
+               set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
+               set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
                free(end_preds);
        }
        free(res_pred);
@@ -1718,7 +1759,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
 
        /* Place floating nodes. */
        if (get_irn_pinned(n) == op_pin_state_floats) {
-               ir_node *curr_block = get_irn_n(n, -1);
+               ir_node *curr_block = get_nodes_block(n);
                int in_dead_block   = is_Block_unreachable(curr_block);
                int depth           = 0;
                ir_node *b          = NULL;   /* The block to place this node in */
@@ -1750,7 +1791,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                                 */
                                if (! in_dead_block) {
                                        if (get_irn_pinned(pred) == op_pin_state_floats &&
-                                               is_Block_unreachable(get_irn_n(pred, -1)))
+                                               is_Block_unreachable(get_nodes_block(pred)))
                                                set_nodes_block(pred, curr_block);
                                }
                                place_floats_early(pred, worklist);
@@ -1766,15 +1807,16 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        /* Because all loops contain at least one op_pin_state_pinned node, now all
                           our inputs are either op_pin_state_pinned or place_early() has already
                           been finished on them.  We do not have any unfinished inputs!  */
-                       pred_block = get_irn_n(pred, -1);
+                       pred_block = get_nodes_block(pred);
                        if ((!is_Block_dead(pred_block)) &&
                                (get_Block_dom_depth(pred_block) > depth)) {
                                b = pred_block;
                                depth = get_Block_dom_depth(pred_block);
                        }
                        /* Avoid that the node is placed in the Start block */
-                       if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
-                               && get_irg_phase_state(current_ir_graph) != phase_backend) {
+                       if (depth == 1 &&
+                                       get_Block_dom_depth(get_nodes_block(n)) > 1 &&
+                                       get_irg_phase_state(current_ir_graph) != phase_backend) {
                                b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
                                assert(b != get_irg_start_block(current_ir_graph));
                                depth = 2;
@@ -1790,7 +1832,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
         */
        irn_arity = get_irn_arity(n);
 
-       if (get_irn_op(n) == op_End) {
+       if (is_End(n)) {
                /*
                 * Simplest case: End node. Predecessors are keep-alives,
                 * no need to move out of dead block.
@@ -1812,14 +1854,14 @@ place_floats_early(ir_node *n, waitq *worklist) {
                }
        } else if (is_Phi(n)) {
                ir_node *pred;
-               ir_node *curr_block = get_irn_n(n, -1);
+               ir_node *curr_block = get_nodes_block(n);
                int in_dead_block   = is_Block_unreachable(curr_block);
 
                /*
                 * Phi nodes: move nodes from dead blocks into the effective use
                 * of the Phi-input if the Phi is not in a bad block.
                 */
-               pred = get_irn_n(n, -1);
+               pred = get_nodes_block(n);
                if (irn_not_visited(pred))
                        waitq_put(worklist, pred);
 
@@ -1829,7 +1871,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        if (irn_not_visited(pred)) {
                                if (! in_dead_block &&
                                        get_irn_pinned(pred) == op_pin_state_floats &&
-                                       is_Block_unreachable(get_irn_n(pred, -1))) {
+                                       is_Block_unreachable(get_nodes_block(pred))) {
                                        set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
                                }
                                waitq_put(worklist, pred);
@@ -1837,13 +1879,13 @@ place_floats_early(ir_node *n, waitq *worklist) {
                }
        } else {
                ir_node *pred;
-               ir_node *curr_block = get_irn_n(n, -1);
+               ir_node *curr_block = get_nodes_block(n);
                int in_dead_block   = is_Block_unreachable(curr_block);
 
                /*
                 * All other nodes: move nodes from dead blocks into the same block.
                 */
-               pred = get_irn_n(n, -1);
+               pred = get_nodes_block(n);
                if (irn_not_visited(pred))
                        waitq_put(worklist, pred);
 
@@ -1853,7 +1895,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        if (irn_not_visited(pred)) {
                                if (! in_dead_block &&
                                        get_irn_pinned(pred) == op_pin_state_floats &&
-                                       is_Block_unreachable(get_irn_n(pred, -1))) {
+                                       is_Block_unreachable(get_nodes_block(pred))) {
                                        set_nodes_block(pred, curr_block);
                                }
                                waitq_put(worklist, pred);
@@ -1920,37 +1962,30 @@ static ir_node *calc_dca(ir_node *dca, ir_node *block) {
  * I.e., DCA is the block where we might place PRODUCER.
  * A data flow edge points from producer to consumer.
  */
-static ir_node *
-consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer) {
-       ir_node *block = NULL;
-
-       /* Compute the latest block into which we can place a node so that it is
+static ir_node *consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer)
+{
+       /* Compute the last block into which we can place a node so that it is
           before consumer. */
-       if (get_irn_op(consumer) == op_Phi) {
+       if (is_Phi(consumer)) {
                /* our consumer is a Phi-node, the effective use is in all those
                   blocks through which the Phi-node reaches producer */
-               int i, irn_arity;
                ir_node *phi_block = get_nodes_block(consumer);
-               irn_arity = get_irn_arity(consumer);
+               int      arity     = get_irn_arity(consumer);
+               int      i;
 
-               for (i = 0;  i < irn_arity; i++) {
-                       if (get_irn_n(consumer, i) == producer) {
-                               ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+               for (i = 0;  i < arity; i++) {
+                       if (get_Phi_pred(consumer, i) == producer) {
+                               ir_node *new_block = get_Block_cfgpred_block(phi_block, i);
 
-                               if (! is_Block_unreachable(new_block))
-                                       block = calc_dca(block, new_block);
+                               if (!is_Block_unreachable(new_block))
+                                       dca = calc_dca(dca, new_block);
                        }
                }
-
-               if (! block)
-                       block = get_irn_n(producer, -1);
        } else {
-               assert(is_no_Block(consumer));
-               block = get_nodes_block(consumer);
+               dca = calc_dca(dca, get_nodes_block(consumer));
        }
 
-       /* Compute the deepest common ancestor of block and dca. */
-       return calc_dca(dca, block);
+       return dca;
 }
 
 /* FIXME: the name clashes here with the function from ana/field_temperature.c
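
consumer_dom_dca() folds each effective-use block into the running dca via
calc_dca(), whose body is not part of this diff. For reference, a sketch of
what such a helper presumably does -- the usual lockstep walk up the dominator
tree, via libfirm's get_Block_dom_depth()/get_Block_idom():

    static ir_node *calc_dca(ir_node *dca, ir_node *block) {
        if (dca == NULL)
            return block;               /* first block seen is the DCA so far */
        /* lift the deeper of the two until both are at equal depth ... */
        while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
            block = get_Block_idom(block);
        while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
            dca = get_Block_idom(dca);
        /* ... then climb in lockstep until the two paths meet */
        while (block != dca) {
            block = get_Block_idom(block);
            dca   = get_Block_idom(dca);
        }
        return dca;
    }
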
@@ -1995,6 +2030,53 @@ static void move_out_of_loops(ir_node *n, ir_node *early) {
        }
 }
 
+/* Deepest common dominator-tree ancestor of the blocks of all nodes that
+   depend on us; our final placement has to dominate this DCA. */
+static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
+{
+       int i;
+
+       for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+               ir_node *succ = get_irn_out(node, i);
+
+               if (is_End(succ)) {
+                       /*
+                        * This consumer is the End node, a keep alive edge.
+                        * This is not a real consumer, so we ignore it
+                        */
+                       continue;
+               }
+
+               if (is_Proj(succ)) {
+                       dca = get_deepest_common_ancestor(succ, dca);
+               } else {
+                       /* ignore if succ is in dead code */
+                       ir_node *succ_blk = get_nodes_block(succ);
+                       if (is_Block_unreachable(succ_blk))
+                               continue;
+                       dca = consumer_dom_dca(dca, succ, node);
+               }
+       }
+
+       return dca;
+}
+
+static void set_projs_block(ir_node *node, ir_node *block)
+{
+       int i;
+
+       for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+               ir_node *succ = get_irn_out(node, i);
+
+               assert(is_Proj(succ));
+
+               if (get_irn_mode(succ) == mode_T) {
+                       set_projs_block(succ, block);
+               }
+               set_nodes_block(succ, block);
+       }
+}
+
 /**
  * Find the latest legal block for N and place N into the
  * `optimal' Block between the latest and earliest legal block.
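
The recursion in set_projs_block() is needed because Proj chains nest: a
mode_T node's results hang off a Proj that is itself mode_T. Illustratively,
with call standing for any freshly built Call node (new_Proj() as in the
ircons API):

    /* Call (mode_T) --> Proj T_result (mode_T) --> Proj #0 (scalar) */
    ir_node *ress = new_Proj(call, mode_T, pn_Call_T_result);
    ir_node *res0 = new_Proj(ress, mode_Is, 0);
    /* moving call to another block must drag along ress and res0 --
       exactly the chain set_projs_block() walks */
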
@@ -2016,12 +2098,12 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
        mark_irn_visited(n);
 
        /* no need to place block nodes, control nodes are already placed. */
-       if ((get_irn_op(n) != op_Block) &&
+       if (!is_Block(n) &&
            (!is_cfop(n)) &&
            (get_irn_mode(n) != mode_X)) {
                /* Remember the early_blk placement of this block to move it
                   out of loop no further than the early_blk placement. */
-               early_blk = get_irn_n(n, -1);
+               early_blk = get_nodes_block(n);
 
                /*
                 * BEWARE: Here we also get code, that is live, but
@@ -2039,7 +2121,7 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
                    producer of one of their inputs in the same block anyway. */
                for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
                        ir_node *succ = get_irn_out(n, i);
-                       if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+                       if (irn_not_visited(succ) && !is_Phi(succ))
                                place_floats_late(succ, worklist);
                }
 
@@ -2054,31 +2136,16 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
                            (op != op_SymConst) &&
                            (op != op_Proj))
                        {
-                               ir_node *dca = NULL;  /* deepest common ancestor in the
-                                                                                dominator tree of all nodes'
-                                                                                blocks depending on us; our final
-                                                                                placement has to dominate DCA. */
-                               for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
-                                       ir_node *succ = get_irn_out(n, i);
-                                       ir_node *succ_blk;
-
-                                       if (get_irn_op(succ) == op_End) {
-                                               /*
-                                                * This consumer is the End node, a keep alive edge.
-                                                * This is not a real consumer, so we ignore it
-                                                */
-                                               continue;
-                                       }
-
-                                       /* ignore if succ is in dead code */
-                                       succ_blk = get_irn_n(succ, -1);
-                                       if (is_Block_unreachable(succ_blk))
-                                               continue;
-                                       dca = consumer_dom_dca(dca, succ, n);
-                               }
-                               if (dca) {
+                               /* deepest common ancestor in the dominator tree of all nodes'
+                                  blocks depending on us; our final placement has to dominate
+                                  DCA. */
+                               ir_node *dca = get_deepest_common_ancestor(n, NULL);
+                               if (dca != NULL) {
                                        set_nodes_block(n, dca);
                                        move_out_of_loops(n, early_blk);
+                                       if (get_irn_mode(n) == mode_T) {
+                                               set_projs_block(n, get_nodes_block(n));
+                                       }
                                }
                        }
                }
@@ -2128,7 +2195,7 @@ void place_code(ir_graph *irg) {
 
        if (1 || get_irg_loopinfo_state(irg) != loopinfo_consistent) {
                free_loop_information(irg);
-               construct_backedges(irg);
+               construct_cf_backedges(irg);
        }
 
        /* Place all floating nodes as early as possible. This guarantees
@@ -2149,6 +2216,10 @@ void place_code(ir_graph *irg) {
        current_ir_graph = rem;
 }
 
+typedef struct cf_env {
+       char changed;       /**< flag indicating that the cf graph has changed. */
+} cf_env;
+
 /**
  * Called by walker of remove_critical_cf_edges().
  *
@@ -2156,12 +2227,12 @@ void place_code(ir_graph *irg) {
  * predecessors and a block of multiple successors.
  *
  * @param n   IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker.
  */
 static void walk_critical_cf_edges(ir_node *n, void *env) {
        int arity, i;
        ir_node *pre, *block, *jmp;
-       int *changed = env;
+       cf_env *cenv = env;
        ir_graph *irg = get_irn_irg(n);
 
        /* Block has multiple predecessors */
@@ -2175,26 +2246,34 @@ static void walk_critical_cf_edges(ir_node *n, void *env) {
 
                        pre = get_irn_n(n, i);
                        cfop = get_irn_op(skip_Proj(pre));
-                       /* Predecessor has multiple successors. Insert new control flow edge but
-                          ignore exception edges. */
-                       if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
+                       if (is_op_fragile(cfop)) {
+                               if (cfop != op_Raise)
+                                       goto insert;
+                               continue;
+                       }
+                       if (is_op_forking(cfop)) {
+                               /* Predecessor has multiple successors. Insert new control flow edges. */
+insert:
                                /* set predecessor of new block */
                                block = new_r_Block(irg, 1, &pre);
                                /* insert new jmp node to new block */
                                jmp = new_r_Jmp(irg, block);
                                /* set successor of new block */
                                set_irn_n(n, i, jmp);
-                               *changed = 1;
+                               cenv->changed = 1;
                        } /* predecessor has multiple successors */
                } /* for all predecessors */
        } /* n is a multi-entry block */
 }
 
 void remove_critical_cf_edges(ir_graph *irg) {
-       int changed = 0;
+       cf_env env;
+
+       env.changed = 0;
 
-       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
-       if (changed) {
+       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+       if (env.changed) {
                /* control flow changed */
                set_irg_outs_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);
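
For reference, the edge split at the insert label is "critical" because its
source forks (several successors) and its target merges (several
predecessors), so no code can live on the edge itself. The fix interposes a
block holding nothing but a Jmp, with pre, block, jmp and n as in the walker
above:

    block = new_r_Block(irg, 1, &pre);   /* fresh block on the critical edge */
    jmp   = new_r_Jmp(irg, block);       /* its only content                 */
    set_irn_n(n, i, jmp);                /* reroute n's i-th cf predecessor  */

New in this commit: edges leaving fragile (possibly-throwing) ops are split as
well, except when the op is a Raise.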