Group high-level exception support ops together
diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c
index 208b8f2..48a25f6 100644
--- a/ir/ir/irgopt.c
+++ b/ir/ir/irgopt.c
@@ -92,15 +92,14 @@ optimize_in_place_wrapper (ir_node *n, void *env) {
 static INLINE void do_local_optimize(ir_node *n) {
   /* Handle graph state */
   assert(get_irg_phase_state(current_ir_graph) != phase_building);
+
   if (get_opt_global_cse())
     set_irg_pinned(current_ir_graph, op_pin_state_floats);
   if (get_irg_outs_state(current_ir_graph) == outs_consistent)
     set_irg_outs_inconsistent(current_ir_graph);
-  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
-    set_irg_dom_inconsistent(current_ir_graph);
+  set_irg_doms_inconsistent(current_ir_graph);
   set_irg_loopinfo_inconsistent(current_ir_graph);
 
-
   /* Clean the value_table in irg for the CSE. */
   del_identities(current_ir_graph->value_table);
   current_ir_graph->value_table = new_identities();
@@ -118,11 +117,23 @@ void local_optimize_node(ir_node *n) {
   current_ir_graph = rem;
 }
 
+/**
+ * Block-Walker: uses dominance depth to mark dead blocks.
+ */
+static void kill_dead_blocks(ir_node *block, void *env)
+{
+  if (get_Block_dom_depth(block) < 0)
+    set_Block_dead(block);
+}
+
 void
 local_optimize_graph (ir_graph *irg) {
   ir_graph *rem = current_ir_graph;
   current_ir_graph = irg;
 
+  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
+    irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
+
   do_local_optimize(irg->end);
 
   current_ir_graph = rem;
@@ -181,23 +192,6 @@ compute_new_arity(ir_node *b) {
   }
 }
 
-/* TODO: add an ir_op operation */
-static INLINE void new_backedge_info(ir_node *n) {
-  switch(get_irn_opcode(n)) {
-  case iro_Block:
-    n->attr.block.cg_backedge = NULL;
-    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
-    break;
-  case iro_Phi:
-    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
-    break;
-  case iro_Filter:
-    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
-    break;
-  default: ;
-  }
-}
-
 /**
  * Copies the node to the new obstack. The Ins of the new node point to
  * the predecessors on the old obstack.  For block/phi nodes not all
@@ -439,6 +433,7 @@ copy_graph_env (int copy_node_nr) {
   set_irn_link(get_irg_globals    (current_ir_graph), NULL);
   set_irn_link(get_irg_args       (current_ir_graph), NULL);
   set_irn_link(get_irg_initial_mem(current_ir_graph), NULL);
+  set_irn_link(get_irg_bad        (current_ir_graph), NULL);
   set_irn_link(get_irg_no_mem     (current_ir_graph), NULL);
 
   /* we use the block walk flag for removing Bads from Blocks ins. */
@@ -454,6 +449,7 @@ copy_graph_env (int copy_node_nr) {
   set_irg_end_reg    (current_ir_graph, get_irg_end(current_ir_graph));
   free_End(old_end);
   set_irg_end_block  (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
+
   if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
     copy_node (get_irg_frame(current_ir_graph), INT_TO_PTR(copy_node_nr));
     copy_preds(get_irg_frame(current_ir_graph), NULL);
@@ -470,26 +466,22 @@ copy_graph_env (int copy_node_nr) {
     copy_node (get_irg_args(current_ir_graph), INT_TO_PTR(copy_node_nr));
     copy_preds(get_irg_args(current_ir_graph), NULL);
   }
-  set_irg_start      (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
-
-  set_irg_start_block(current_ir_graph,
-              get_new_node(get_irg_start_block(current_ir_graph)));
-  set_irg_frame      (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
-  set_irg_globals    (current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
-  set_irg_initial_mem(current_ir_graph, get_new_node(get_irg_initial_mem(current_ir_graph)));
-  set_irg_args       (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
-
   if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
     copy_node(get_irg_bad(current_ir_graph), INT_TO_PTR(copy_node_nr));
     copy_preds(get_irg_bad(current_ir_graph), NULL);
   }
-  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
-
   if (get_irn_link(get_irg_no_mem(current_ir_graph)) == NULL) {
     copy_node(get_irg_no_mem(current_ir_graph), INT_TO_PTR(copy_node_nr));
     copy_preds(get_irg_no_mem(current_ir_graph), NULL);
   }
-  set_irg_no_mem(current_ir_graph, get_new_node(get_irg_no_mem(current_ir_graph)));
+  set_irg_start      (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
+  set_irg_start_block(current_ir_graph, get_new_node(get_irg_start_block(current_ir_graph)));
+  set_irg_frame      (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+  set_irg_globals    (current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+  set_irg_initial_mem(current_ir_graph, get_new_node(get_irg_initial_mem(current_ir_graph)));
+  set_irg_args       (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+  set_irg_bad        (current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
+  set_irg_no_mem     (current_ir_graph, get_new_node(get_irg_no_mem(current_ir_graph)));
 }
 
 /**
@@ -507,25 +499,28 @@ dead_node_elimination(ir_graph *irg) {
   struct obstack *graveyard_obst = NULL;
   struct obstack *rebirth_obst   = NULL;
 
-       edges_init_graph(irg);
+  if (get_opt_optimize() && get_opt_dead_node_elimination()) {
+    assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
 
-  /* inform statistics that we started a dead-node elimination run */
-  hook_dead_node_elim_start(irg);
+    /* inform statistics that we started a dead-node elimination run */
+    hook_dead_node_elim(irg, 1);
 
-  /* Remember external state of current_ir_graph. */
-  rem = current_ir_graph;
-  current_ir_graph = irg;
-  set_interprocedural_view(false);
+    /* Remember external state of current_ir_graph. */
+    rem = current_ir_graph;
+    current_ir_graph = irg;
+    set_interprocedural_view(0);
 
-  /* Handle graph state */
-  assert(get_irg_phase_state(current_ir_graph) != phase_building);
-  free_callee_info(current_ir_graph);
-  free_irg_outs(current_ir_graph);
-  free_trouts();
-  /* @@@ so far we loose loops when copying */
-  free_loop_information(current_ir_graph);
+    assert(get_irg_phase_state(current_ir_graph) != phase_building);
 
-  if (get_opt_optimize() && get_opt_dead_node_elimination()) {
+    /* Handle graph state */
+    free_callee_info(current_ir_graph);
+    free_irg_outs(current_ir_graph);
+    free_trouts();
+
+    /* @@@ so far we lose loops when copying */
+    free_loop_information(current_ir_graph);
+
+    set_irg_doms_inconsistent(irg);
 
     /* A quiet place, where the old obstack can rest in peace,
        until it will be cremated. */
@@ -546,13 +541,13 @@ dead_node_elimination(ir_graph *irg) {
     /* Free memory from old unoptimized obstack */
     obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
     xfree (graveyard_obst);           /* ... then free it.           */
-  }
 
-  /* inform statistics that the run is over */
-  hook_dead_node_elim_stop(irg);
+    /* inform statistics that the run is over */
+    hook_dead_node_elim(irg, 0);
 
-  current_ir_graph = rem;
-  set_interprocedural_view(rem_ipview);
+    current_ir_graph = rem;
+    set_interprocedural_view(rem_ipview);
+  }
 }
 
 /**
@@ -591,9 +586,9 @@ static void relink_bad_block_predecessors(ir_node *n, void *env) {
       for (i = 0; i < old_irn_arity; i++) {
         irn = get_irn_n(n, i);
         if (!is_Bad(irn)) {
-               new_in[new_irn_n] = irn;
-               is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
-               new_irn_n++;
+          new_in[new_irn_n] = irn;
+          is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
+          ++new_irn_n;
         }
       }
       //ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity);
@@ -638,9 +633,9 @@ static void relink_bad_predecessors(ir_node *n, void *env) {
       new_irn_arity = 1;
       for(i = 1; i < old_irn_arity; i++)
         if (!is_Bad((ir_node *)old_in[i])) {
-               n->in[new_irn_arity] = n->in[i];
-               is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
-               new_irn_arity++;
+          n->in[new_irn_arity] = n->in[i];
+          is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
+          ++new_irn_arity;
         }
 
       ARR_SETLEN(ir_node *, n->in, new_irn_arity);
@@ -680,7 +675,7 @@ void remove_bad_predecessors(ir_graph *irg) {
 static INLINE void
 copy_node_inline (ir_node *n, void *env) {
   ir_node *new;
-  type *frame_tp = (type *)env;
+  ir_type *frame_tp = (ir_type *)env;
 
   copy_node(n, NULL);
   if (get_irn_op(n) == op_Sel) {
@@ -712,7 +707,7 @@ static void find_addr(ir_node *node, void *env)
  */
 static int can_inline(ir_node *call, ir_graph *called_graph)
 {
-  type *call_type = get_Call_type(call);
+  ir_type *call_type = get_Call_type(call);
   int params, ress, i, res;
   assert(is_Method_type(call_type));
 
@@ -721,7 +716,7 @@ static int can_inline(ir_node *call, ir_graph *called_graph)
 
   /* check params */
   for (i = 0; i < params; ++i) {
-    type *p_type = get_method_param_type(call_type, i);
+    ir_type *p_type = get_method_param_type(call_type, i);
 
     if (is_compound_type(p_type))
       return 0;
@@ -729,7 +724,7 @@ static int can_inline(ir_node *call, ir_graph *called_graph)
 
   /* check res */
   for (i = 0; i < ress; ++i) {
-    type *r_type = get_method_res_type(call_type, i);
+    ir_type *r_type = get_method_res_type(call_type, i);
 
     if (is_compound_type(r_type))
       return 0;
@@ -751,7 +746,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
   ir_node *ret, *phi;
   int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
   int exc_handling;
-  type *called_frame;
+  ir_type *called_frame;
   irg_inline_property prop = get_irg_inline_property(called_graph);
 
   if ( (prop != irg_inline_forced) &&
@@ -1627,7 +1622,7 @@ static INLINE void place_early(pdeq *worklist) {
   }
 
   set_irg_outs_inconsistent(current_ir_graph);
-  current_ir_graph->op_pin_state_pinned = op_pin_state_pinned;
+  set_irg_pinned(current_ir_graph, op_pin_state_pinned);
 }
 
 /**