* Note that the new dominance code correctly handles
* the End block, i.e. it is always reachable from Start
*/
- set_Block_dead(block);
+ ir_graph *irg = get_irn_irg(block);
+ exchange(block, get_irg_bad(irg));
}
}
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- if (get_irg_dom_state(irg) == dom_consistent)
- irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
-
do_local_optimize(get_irg_end(irg));
current_ir_graph = rem;
/**
* Data flow optimization walker.
- * Optimizes all nodes and enqueue it's users
+ * Optimizes all nodes and enqueues its users
* if done.
*/
static void opt_walker(ir_node *n, void *env)
current_ir_graph = irg;
state = edges_assure(irg);
-
- if (get_opt_global_cse())
- set_irg_pinned(irg, op_pin_state_floats);
+ assure_doms(irg);
/* Clean the value_table in irg for the CSE. */
new_identities(irg);
- if (get_irg_dom_state(irg) == dom_consistent)
- irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
+ if (get_opt_global_cse()) {
+ set_irg_pinned(irg, op_pin_state_floats);
+ }
+
+ /* The following enables unreachable code elimination (=Blocks may be
+ * Bad). We cannot enable it in global_cse mode since we can't
+ * determine a node's block there and therefore can't remove all code
+ * in unreachable blocks */
+ set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
+ irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
/* invalidate info */
set_irg_outs_inconsistent(irg);
/* finish the wait queue */
while (! pdeq_empty(waitq)) {
ir_node *n = (ir_node*)pdeq_getl(waitq);
- if (! is_Bad(n))
- opt_walker(n, waitq);
+ opt_walker(n, waitq);
}
del_pdeq(waitq);
end = get_irg_end(irg);
remove_End_Bads_and_doublets(end);
+ clear_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
+
current_ir_graph = rem;
return changed;
}