typo fixed
[libfirm] / ir / ir / irgopt.c
index 4f5ee50..1e719fa 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe.  All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
  *
  * This file is part of libFirm.
  *
@@ -52,7 +52,7 @@
 #include "irbackedge_t.h"
 #include "cgana.h"
 #include "trouts.h"
-
+#include "error.h"
 
 #include "irflag_t.h"
 #include "irhooks.h"
@@ -182,15 +182,13 @@ static void opt_walker(ir_node *n, void *env) {
 /* Applies local optimizations to all nodes in the graph until fixpoint. */
 void optimize_graph_df(ir_graph *irg) {
        pdeq     *waitq = new_pdeq();
-       int      state = edges_activated(irg);
        ir_graph *rem = current_ir_graph;
        ir_node  *end;
-       int      i;
+       int      i, state;
 
        current_ir_graph = irg;
 
-       if (! state)
-               edges_activate(irg);
+       state = edges_assure(irg);
 
        if (get_opt_global_cse())
                set_irg_pinned(current_ir_graph, op_pin_state_floats);
@@ -288,7 +286,7 @@ compute_new_arity(ir_node *b) {
                /* compute the number of good predecessors */
                res = irn_arity = get_irn_arity(b);
                for (i = 0; i < irn_arity; i++)
-                       if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
+                       if (is_Bad(get_irn_n(b, i))) res--;
                        /* save it in the flag. */
                        set_Block_block_visited(b, irg_v + res);
                        return res;
@@ -312,6 +310,7 @@ static void copy_node(ir_node *n, void *env) {
        ir_node *nn, *block;
        int new_arity;
        ir_op *op = get_irn_op(n);
+       (void) env;
 
        /* The end node looses it's flexible in array.  This doesn't matter,
           as dead node elimination builds End by hand, inlineing doesn't use
@@ -340,9 +339,13 @@ static void copy_node(ir_node *n, void *env) {
                get_irn_mode(n),
                new_arity,
                get_irn_in(n) + 1);
-               /* Copy the attributes.  These might point to additional data.  If this
-               was allocated on the old obstack the pointers now are dangling.  This
-       frees e.g. the memory of the graph_arr allocated in new_immBlock. */
+       /* Copy the attributes.  These might point to additional data.  If this
+          was allocated on the old obstack the pointers now are dangling.  This
+          frees e.g. the memory of the graph_arr allocated in new_immBlock. */
+       if (op == op_Block) {
+               /* we cannot allow blocks WITHOUT macroblock input */
+               set_irn_n(nn, -1, get_irn_n(n, -1));
+       }
        copy_node_attr(n, nn);
 
 #ifdef DEBUG_libfirm
@@ -374,8 +377,15 @@ static void copy_preds(ir_node *n, void *env) {
                /* copy the macro block header */
                ir_node *mbh = get_Block_MacroBlock(n);
 
-               if (! is_Bad(mbh))
-                       set_irn_n(nn, -1, get_new_node(mbh));
+               if (mbh == n) {
+                       /* this block is a macroblock header */
+                       set_irn_n(nn, -1, nn);
+               } else {
+                       /* get the macro block header */
+                       ir_node *nmbh = get_new_node(mbh);
+                       assert(nmbh != NULL);
+                       set_irn_n(nn, -1, nmbh);
+               }
 
                /* Don't copy Bad nodes. */
                j = 0;
@@ -397,7 +407,7 @@ static void copy_preds(ir_node *n, void *env) {
                   that the fields in ir_graph are set properly. */
                if ((get_opt_control_flow_straightening()) &&
                        (get_Block_n_cfgpreds(nn) == 1) &&
-                       (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+                       is_Jmp(get_Block_cfgpred(nn, 0))) {
                        ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
                        if (nn == old) {
                                /* Jmp jumps into the block it is in -- deal self cycle. */
@@ -407,7 +417,7 @@ static void copy_preds(ir_node *n, void *env) {
                                exchange(nn, old);
                        }
                }
-       } else if (is_Phi(n)) {
+       } else if (is_Phi(n) && get_irn_arity(n) > 0) {
                /* Don't copy node if corresponding predecessor in block is Bad.
                   The Block itself should not be Bad. */
                block = get_nodes_block(n);
@@ -435,7 +445,7 @@ static void copy_preds(ir_node *n, void *env) {
        }
        /* Now the new node is complete.  We can add it to the hash table for CSE.
           @@@ inlining aborts if we identify End. Why? */
-       if (get_irn_op(nn) != op_End)
+       if (!is_End(nn))
                add_identities(current_ir_graph->value_table, nn);
 }
 
@@ -606,62 +616,65 @@ copy_graph_env(int copy_node_nr) {
  * Adds all new nodes to a new hash table for CSE.  Does not
  * perform CSE, so the hash table might contain common subexpressions.
  */
-void
-dead_node_elimination(ir_graph *irg) {
-       if (get_opt_optimize() && get_opt_dead_node_elimination()) {
-               ir_graph *rem;
-               int rem_ipview = get_interprocedural_view();
-               struct obstack *graveyard_obst = NULL;
-               struct obstack *rebirth_obst   = NULL;
-               assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
-
-               /* inform statistics that we started a dead-node elimination run */
-               hook_dead_node_elim(irg, 1);
-
-               /* Remember external state of current_ir_graph. */
-               rem = current_ir_graph;
-               current_ir_graph = irg;
-               set_interprocedural_view(0);
+void dead_node_elimination(ir_graph *irg) {
+       ir_graph *rem;
+#ifdef INTERPROCEDURAL_VIEW
+       int rem_ipview = get_interprocedural_view();
+#endif
+       struct obstack *graveyard_obst = NULL;
+       struct obstack *rebirth_obst   = NULL;
+       assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
 
-               assert(get_irg_phase_state(irg) != phase_building);
+       /* inform statistics that we started a dead-node elimination run */
+       hook_dead_node_elim(irg, 1);
 
-               /* Handle graph state */
-               free_callee_info(irg);
-               free_irg_outs(irg);
-               free_trouts();
+       /* Remember external state of current_ir_graph. */
+       rem = current_ir_graph;
+       current_ir_graph = irg;
+#ifdef INTERPROCEDURAL_VIEW
+       set_interprocedural_view(0);
+#endif
 
-               /* @@@ so far we loose loops when copying */
-               free_loop_information(irg);
+       assert(get_irg_phase_state(irg) != phase_building);
 
-               set_irg_doms_inconsistent(irg);
+       /* Handle graph state */
+       free_callee_info(irg);
+       free_irg_outs(irg);
+       free_trouts();
 
-               /* A quiet place, where the old obstack can rest in peace,
-                  until it will be cremated. */
-               graveyard_obst = irg->obst;
+       /* @@@ so far we loose loops when copying */
+       free_loop_information(irg);
 
-               /* A new obstack, where the reachable nodes will be copied to. */
-               rebirth_obst = xmalloc(sizeof(*rebirth_obst));
-               irg->obst = rebirth_obst;
-               obstack_init(irg->obst);
-               irg->last_node_idx = 0;
+       set_irg_doms_inconsistent(irg);
 
-               /* We also need a new value table for CSE */
-               del_identities(irg->value_table);
-               irg->value_table = new_identities();
+       /* A quiet place, where the old obstack can rest in peace,
+          until it will be cremated. */
+       graveyard_obst = irg->obst;
 
-               /* Copy the graph from the old to the new obstack */
-               copy_graph_env(/*copy_node_nr=*/1);
+       /* A new obstack, where the reachable nodes will be copied to. */
+       rebirth_obst = xmalloc(sizeof(*rebirth_obst));
+       irg->obst = rebirth_obst;
+       obstack_init(irg->obst);
+       irg->last_node_idx = 0;
 
-               /* Free memory from old unoptimized obstack */
-               obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
-               xfree (graveyard_obst);           /* ... then free it.           */
+       /* We also need a new value table for CSE */
+       del_identities(irg->value_table);
+       irg->value_table = new_identities();
 
-               /* inform statistics that the run is over */
-               hook_dead_node_elim(irg, 0);
+       /* Copy the graph from the old to the new obstack */
+       copy_graph_env(/*copy_node_nr=*/1);
 
-               current_ir_graph = rem;
-               set_interprocedural_view(rem_ipview);
-       }
+       /* Free memory from old unoptimized obstack */
+       obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
+       xfree(graveyard_obst);            /* ... then free it.           */
+
+       /* inform statistics that the run is over */
+       hook_dead_node_elim(irg, 0);
+
+       current_ir_graph = rem;
+#ifdef INTERPROCEDURAL_VIEW
+       set_interprocedural_view(rem_ipview);
+#endif
 }
 
 /**
@@ -678,9 +691,7 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) {
 
        /* if link field of block is NULL, look for bad predecessors otherwise
           this is already done */
-       if (get_irn_op(n) == op_Block &&
-               get_irn_link(n) == NULL) {
-
+       if (is_Block(n) && get_irn_link(n) == NULL) {
                /* save old predecessors in link field (position 0 is the block operand)*/
                set_irn_link(n, get_irn_in(n));
 
@@ -725,12 +736,11 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
        int i, old_irn_arity, new_irn_arity;
 
        /* relink bad predecessors of a block */
-       if (get_irn_op(n) == op_Block)
+       if (is_Block(n))
                relink_bad_block_predecessors(n, env);
 
        /* If Phi node relink its block and its predecessors */
-       if (get_irn_op(n) == op_Phi) {
-
+       if (is_Phi(n)) {
                /* Relink predecessors of phi's block */
                block = get_nodes_block(n);
                if (get_irn_link(block) == NULL)
@@ -752,7 +762,7 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
                                }
 
                                ARR_SETLEN(ir_node *, n->in, new_irn_arity);
-                               ARR_SETLEN(int, n->attr.phi_backedge, new_irn_arity);
+                               ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
                }
        } /* n is a Phi node */
 }
@@ -767,6 +777,7 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
  * changes).
  */
 void remove_bad_predecessors(ir_graph *irg) {
+       panic("Fix backedge handling first");
        irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
 }
 
@@ -904,13 +915,13 @@ copy_node_inline(ir_node *n, void *env) {
        ir_type *frame_tp = (ir_type *)env;
 
        copy_node(n, NULL);
-       if (get_irn_op(n) == op_Sel) {
+       if (is_Sel(n)) {
                nn = get_new_node (n);
                assert(is_Sel(nn));
                if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
                        set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
                }
-       } else if (get_irn_op(n) == op_Block) {
+       } else if (is_Block(n)) {
                nn = get_new_node (n);
                nn->attr.block.irg = current_ir_graph;
        }
@@ -921,9 +932,10 @@ copy_node_inline(ir_node *n, void *env) {
  */
 static void find_addr(ir_node *node, void *env) {
        int *allow_inline = env;
-       if (is_Proj(node) && get_irn_op(get_Proj_pred(node)) == op_Start) {
-               if (get_Proj_proj(node) == pn_Start_P_value_arg_base)
-                       *allow_inline = 0;
+       if (is_Proj(node) &&
+                       is_Start(get_Proj_pred(node)) &&
+                       get_Proj_proj(node) == pn_Start_P_value_arg_base) {
+               *allow_inline = 0;
        }
 }
 
@@ -966,9 +978,9 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
 }
 
 enum exc_mode {
-          exc_handler    = 0, /**< There is a handler. */
-          exc_to_end     = 1, /**< Branches to End. */
-          exc_no_handler = 2  /**< Exception handling not represented. */
+       exc_handler    = 0, /**< There is a handler. */
+       exc_to_end     = 1, /**< Branches to End. */
+       exc_no_handler = 2  /**< Exception handling not represented. */
 };
 
 /* Inlines a method at the given call site. */
@@ -980,13 +992,13 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node **res_pred;
        ir_node **cf_pred;
        ir_node *ret, *phi;
-       int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
+       int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity;
        enum exc_mode exc_handling;
        ir_type *called_frame;
        irg_inline_property prop = get_irg_inline_property(called_graph);
 
-       if ( (prop < irg_inline_forced) &&
-            (!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden))) return 0;
+       if (prop == irg_inline_forbidden)
+               return 0;
 
        /* Do not inline variadic functions. */
        if (get_method_variadicity(get_entity_type(get_irg_entity(called_graph))) == variadicity_variadic)
@@ -1098,7 +1110,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* -- Replicate local entities of the called_graph -- */
        /* copy the entities. */
        called_frame = get_irg_frame_type(called_graph);
-       for (i = 0; i < get_class_n_members(called_frame); i++) {
+       for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
                ir_entity *new_ent, *old_ent;
                old_ent = get_class_member(called_frame, i);
                new_ent = copy_entity_own(old_ent, get_cur_frame_type());
@@ -1194,7 +1206,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        n_ret = 0;
                        for (i = 0; i < arity; i++) {
                                ret = get_irn_n(end_bl, i);
-                               if (get_irn_op(ret) == op_Return) {
+                               if (is_Return(ret)) {
                                        cf_pred[n_ret] = get_Return_res(ret, j);
                                        n_ret++;
                                }
@@ -1206,14 +1218,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        res_pred[j] = phi;
                        /* Conserve Phi-list for further inlinings -- but might be optimized */
                        if (get_nodes_block(phi) == post_bl) {
-                               set_irn_link(phi, get_irn_link(post_bl));
-                               set_irn_link(post_bl, phi);
+                               set_Phi_next(phi, get_Block_phis(post_bl));
+                               set_Block_phis(post_bl, phi);
                        }
                }
                set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
        } else {
                set_Tuple_pred(call, pn_Call_T_result, new_Bad());
        }
+       /* handle the regular call */
+       set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
 
        /* For now, we cannot inline calls with value_base */
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
@@ -1234,7 +1248,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        ir_node *ret, *irn;
                        ret = get_irn_n(end_bl, i);
                        irn = skip_Proj(ret);
-                       if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+                       if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
                                ++n_exc;
                        }
@@ -1254,7 +1268,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                                        /* We rely that all cfops have the memory output at the same position. */
                                        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
                                        n_exc++;
-                               } else if (get_irn_op(ret) == op_Raise) {
+                               } else if (is_Raise(ret)) {
                                        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
                                        n_exc++;
                                }
@@ -1264,7 +1278,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
                        set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                }
-               set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
        } else {
                ir_node *main_end_bl;
                int main_end_bl_arity;
@@ -1276,7 +1289,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        ir_node *ret = get_irn_n(end_bl, i);
                        ir_node *irn = skip_Proj(ret);
 
-                       if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+                       if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
                                n_exc++;
                        }
@@ -1290,7 +1303,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
                set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
                set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
                free(end_preds);
@@ -1334,7 +1346,7 @@ static ir_graph *get_call_called_irg(ir_node *call) {
        ir_graph *called_irg = NULL;
 
        addr = get_Call_ptr(call);
-       if (is_SymConst(addr) && get_SymConst_kind(addr) == symconst_addr_ent) {
+       if (is_SymConst_addr_ent(addr)) {
                called_irg = get_entity_irg(get_SymConst_entity(addr));
        }
 
@@ -1378,8 +1390,6 @@ void inline_small_irgs(ir_graph *irg, int size) {
        call_entry *entry;
        DEBUG_ONLY(firm_dbg_module_t *dbg;)
 
-       if (!(get_opt_optimize() && get_opt_inline())) return;
-
        FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
 
        current_ir_graph = irg;
@@ -1505,7 +1515,7 @@ static void collect_calls2(ir_node *call, void *ctx) {
 }
 
 /**
- * Returns TRUE if the number of callers in 0 in the irg's environment,
+ * Returns TRUE if the number of callers is 0 in the irg's environment,
  * hence this irg is a leave.
  */
 INLINE static int is_leave(ir_graph *irg) {
@@ -1514,7 +1524,8 @@ INLINE static int is_leave(ir_graph *irg) {
 }
 
 /**
- * Returns TRUE if the number of callers is smaller size in the irg's environment.
+ * Returns TRUE if the number of nodes in the callee is
+ * smaller then size in the irg's environment.
  */
 INLINE static int is_smaller(ir_graph *callee, int size) {
        inline_irg_env *env = get_irg_link(callee);
@@ -1560,8 +1571,6 @@ void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_run
        struct obstack   obst;
        DEBUG_ONLY(firm_dbg_module_t *dbg;)
 
-       if (!(get_opt_optimize() && get_opt_inline())) return;
-
        FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
        rem = current_ir_graph;
        obstack_init(&obst);
@@ -1606,7 +1615,8 @@ void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_run
                                call   = entry->call;
                                callee = entry->callee;
 
-                               if (is_leave(callee) && is_smaller(callee, leavesize)) {
+                               if (is_leave(callee) && (
+                                   is_smaller(callee, leavesize) || (get_irg_inline_property(callee) >= irg_inline_forced))) {
                                        if (!phiproj_computed) {
                                                phiproj_computed = 1;
                                                collect_phiprojs(current_ir_graph);
@@ -1614,9 +1624,9 @@ void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_run
                                        did_inline = inline_method(call, callee);
 
                                        if (did_inline) {
-                                               /* Do some statistics */
                                                inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
 
+                                               /* Do some statistics */
                                                env->got_inline = 1;
                                                --env->n_call_nodes;
                                                env->n_nodes += callee_env->n_nodes;
@@ -1701,11 +1711,12 @@ void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_run
                        optimize_graph_df(irg);
                        optimize_cf(irg);
                }
-               if (env->got_inline || (env->n_callers_orig != env->n_callers))
+               if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
                        DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
                        env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
                        env->n_callers_orig, env->n_callers,
                        get_entity_name(get_irg_entity(irg))));
+               }
        }
 
        obstack_free(&obst, NULL);
@@ -1756,7 +1767,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
 
        /* Place floating nodes. */
        if (get_irn_pinned(n) == op_pin_state_floats) {
-               ir_node *curr_block = get_irn_n(n, -1);
+               ir_node *curr_block = get_nodes_block(n);
                int in_dead_block   = is_Block_unreachable(curr_block);
                int depth           = 0;
                ir_node *b          = NULL;   /* The block to place this node in */
@@ -1788,7 +1799,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                                 */
                                if (! in_dead_block) {
                                        if (get_irn_pinned(pred) == op_pin_state_floats &&
-                                               is_Block_unreachable(get_irn_n(pred, -1)))
+                                               is_Block_unreachable(get_nodes_block(pred)))
                                                set_nodes_block(pred, curr_block);
                                }
                                place_floats_early(pred, worklist);
@@ -1804,15 +1815,16 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        /* Because all loops contain at least one op_pin_state_pinned node, now all
                           our inputs are either op_pin_state_pinned or place_early() has already
                           been finished on them.  We do not have any unfinished inputs!  */
-                       pred_block = get_irn_n(pred, -1);
+                       pred_block = get_nodes_block(pred);
                        if ((!is_Block_dead(pred_block)) &&
                                (get_Block_dom_depth(pred_block) > depth)) {
                                b = pred_block;
                                depth = get_Block_dom_depth(pred_block);
                        }
                        /* Avoid that the node is placed in the Start block */
-                       if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
-                               && get_irg_phase_state(current_ir_graph) != phase_backend) {
+                       if (depth == 1 &&
+                                       get_Block_dom_depth(get_nodes_block(n)) > 1 &&
+                                       get_irg_phase_state(current_ir_graph) != phase_backend) {
                                b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
                                assert(b != get_irg_start_block(current_ir_graph));
                                depth = 2;
@@ -1828,7 +1840,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
         */
        irn_arity = get_irn_arity(n);
 
-       if (get_irn_op(n) == op_End) {
+       if (is_End(n)) {
                /*
                 * Simplest case: End node. Predecessors are keep-alives,
                 * no need to move out of dead block.
@@ -1850,14 +1862,14 @@ place_floats_early(ir_node *n, waitq *worklist) {
                }
        } else if (is_Phi(n)) {
                ir_node *pred;
-               ir_node *curr_block = get_irn_n(n, -1);
+               ir_node *curr_block = get_nodes_block(n);
                int in_dead_block   = is_Block_unreachable(curr_block);
 
                /*
                 * Phi nodes: move nodes from dead blocks into the effective use
                 * of the Phi-input if the Phi is not in a bad block.
                 */
-               pred = get_irn_n(n, -1);
+               pred = get_nodes_block(n);
                if (irn_not_visited(pred))
                        waitq_put(worklist, pred);
 
@@ -1867,7 +1879,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        if (irn_not_visited(pred)) {
                                if (! in_dead_block &&
                                        get_irn_pinned(pred) == op_pin_state_floats &&
-                                       is_Block_unreachable(get_irn_n(pred, -1))) {
+                                       is_Block_unreachable(get_nodes_block(pred))) {
                                        set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
                                }
                                waitq_put(worklist, pred);
@@ -1875,13 +1887,13 @@ place_floats_early(ir_node *n, waitq *worklist) {
                }
        } else {
                ir_node *pred;
-               ir_node *curr_block = get_irn_n(n, -1);
+               ir_node *curr_block = get_nodes_block(n);
                int in_dead_block   = is_Block_unreachable(curr_block);
 
                /*
                 * All other nodes: move nodes from dead blocks into the same block.
                 */
-               pred = get_irn_n(n, -1);
+               pred = get_nodes_block(n);
                if (irn_not_visited(pred))
                        waitq_put(worklist, pred);
 
@@ -1891,7 +1903,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        if (irn_not_visited(pred)) {
                                if (! in_dead_block &&
                                        get_irn_pinned(pred) == op_pin_state_floats &&
-                                       is_Block_unreachable(get_irn_n(pred, -1))) {
+                                       is_Block_unreachable(get_nodes_block(pred))) {
                                        set_nodes_block(pred, curr_block);
                                }
                                waitq_put(worklist, pred);
@@ -1958,37 +1970,30 @@ static ir_node *calc_dca(ir_node *dca, ir_node *block) {
  * I.e., DCA is the block where we might place PRODUCER.
  * A data flow edge points from producer to consumer.
  */
-static ir_node *
-consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer) {
-       ir_node *block = NULL;
-
-       /* Compute the latest block into which we can place a node so that it is
+static ir_node *consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer)
+{
+       /* Compute the last block into which we can place a node so that it is
           before consumer. */
-       if (get_irn_op(consumer) == op_Phi) {
+       if (is_Phi(consumer)) {
                /* our consumer is a Phi-node, the effective use is in all those
                   blocks through which the Phi-node reaches producer */
-               int i, irn_arity;
                ir_node *phi_block = get_nodes_block(consumer);
-               irn_arity = get_irn_arity(consumer);
+               int      arity     = get_irn_arity(consumer);
+               int      i;
 
-               for (i = 0;  i < irn_arity; i++) {
-                       if (get_irn_n(consumer, i) == producer) {
-                               ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+               for (i = 0;  i < arity; i++) {
+                       if (get_Phi_pred(consumer, i) == producer) {
+                               ir_node *new_block = get_Block_cfgpred_block(phi_block, i);
 
-                               if (! is_Block_unreachable(new_block))
-                                       block = calc_dca(block, new_block);
+                               if (!is_Block_unreachable(new_block))
+                                       dca = calc_dca(dca, new_block);
                        }
                }
-
-               if (! block)
-                       block = get_irn_n(producer, -1);
        } else {
-               assert(is_no_Block(consumer));
-               block = get_nodes_block(consumer);
+               dca = calc_dca(dca, get_nodes_block(consumer));
        }
 
-       /* Compute the deepest common ancestor of block and dca. */
-       return calc_dca(dca, block);
+       return dca;
 }
 
 /* FIXME: the name clashes here with the function from ana/field_temperature.c
@@ -2041,7 +2046,6 @@ static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
 
        for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
                ir_node *succ = get_irn_out(node, i);
-               ir_node *succ_blk;
 
                if (is_End(succ)) {
                        /*
@@ -2051,11 +2055,11 @@ static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
                        continue;
                }
 
-               if(is_Proj(succ)) {
+               if (is_Proj(succ)) {
                        dca = get_deepest_common_ancestor(succ, dca);
                } else {
                        /* ignore if succ is in dead code */
-                       succ_blk = get_irn_n(succ, -1);
+                       ir_node *succ_blk = get_nodes_block(succ);
                        if (is_Block_unreachable(succ_blk))
                                continue;
                        dca = consumer_dom_dca(dca, succ, node);
@@ -2102,12 +2106,12 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
        mark_irn_visited(n);
 
        /* no need to place block nodes, control nodes are already placed. */
-       if ((get_irn_op(n) != op_Block) &&
+       if (!is_Block(n) &&
            (!is_cfop(n)) &&
            (get_irn_mode(n) != mode_X)) {
                /* Remember the early_blk placement of this block to move it
                   out of loop no further than the early_blk placement. */
-               early_blk = get_irn_n(n, -1);
+               early_blk = get_nodes_block(n);
 
                /*
                 * BEWARE: Here we also get code, that is live, but
@@ -2125,7 +2129,7 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
                    producer of one of their inputs in the same block anyway. */
                for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
                        ir_node *succ = get_irn_out(n, i);
-                       if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+                       if (irn_not_visited(succ) && !is_Phi(succ))
                                place_floats_late(succ, worklist);
                }
 
@@ -2199,7 +2203,7 @@ void place_code(ir_graph *irg) {
 
        if (1 || get_irg_loopinfo_state(irg) != loopinfo_consistent) {
                free_loop_information(irg);
-               construct_backedges(irg);
+               construct_cf_backedges(irg);
        }
 
        /* Place all floating nodes as early as possible. This guarantees
@@ -2220,6 +2224,11 @@ void place_code(ir_graph *irg) {
        current_ir_graph = rem;
 }
 
+typedef struct cf_env {
+       char ignore_exc_edges; /**< set if exception edges should be ignored. */
+       char changed;          /**< flag indicates that the cf graphs has changed. */
+} cf_env;
+
 /**
  * Called by walker of remove_critical_cf_edges().
  *
@@ -2227,12 +2236,12 @@ void place_code(ir_graph *irg) {
  * predecessors and a block of multiple successors.
  *
  * @param n   IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker.
  */
 static void walk_critical_cf_edges(ir_node *n, void *env) {
        int arity, i;
        ir_node *pre, *block, *jmp;
-       int *changed = env;
+       cf_env *cenv = env;
        ir_graph *irg = get_irn_irg(n);
 
        /* Block has multiple predecessors */
@@ -2245,27 +2254,40 @@ static void walk_critical_cf_edges(ir_node *n, void *env) {
                        const ir_op *cfop;
 
                        pre = get_irn_n(n, i);
+                       /* don't count Bad's */
+                       if (is_Bad(pre))
+                               continue;
+
                        cfop = get_irn_op(skip_Proj(pre));
-                       /* Predecessor has multiple successors. Insert new control flow edge but
-                          ignore exception edges. */
-                       if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+                       if (is_op_fragile(cfop)) {
+                               if (cenv->ignore_exc_edges && get_Proj_proj(pre) == pn_Generic_X_except)
+                                       continue;
+                               goto insert;
+                       }
+                       /* we don't want to place nodes in the start block, so handle it like forking */
+                       if (is_op_forking(cfop) || cfop == op_Start) {
+                               /* Predecessor has multiple successors. Insert new control flow edges. */
+insert:
                                /* set predecessor of new block */
                                block = new_r_Block(irg, 1, &pre);
                                /* insert new jmp node to new block */
                                jmp = new_r_Jmp(irg, block);
                                /* set successor of new block */
                                set_irn_n(n, i, jmp);
-                               *changed = 1;
+                               cenv->changed = 1;
                        } /* predecessor has multiple successors */
                } /* for all predecessors */
        } /* n is a multi-entry block */
 }
 
 void remove_critical_cf_edges(ir_graph *irg) {
-       int changed = 0;
+       cf_env env;
+
+       env.ignore_exc_edges = 1;
+       env.changed          = 0;
 
-       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
-       if (changed) {
+       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+       if (env.changed) {
                /* control flow changed */
                set_irg_outs_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);