X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fcfopt.c;h=f6817652bf12c581618ec32e6c153c6ca8144711;hb=b451899be294500fe974c6e97febc59cabdda114;hp=99b9116f918666f210f94a911a77be06157be4c7;hpb=c9665fa5c7f6a7ebef2e44ee48a8b4c5f102cf92;p=libfirm diff --git a/ir/opt/cfopt.c b/ir/opt/cfopt.c index 99b9116f9..f6817652b 100644 --- a/ir/opt/cfopt.c +++ b/ir/opt/cfopt.c @@ -21,7 +21,6 @@ * @file * @brief Control flow optimizations. * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack - * @version $Id$ * * Removes Bad control flow predecessors and empty blocks. A block is empty * if it contains only a Jmp node. Blocks can only be removed if they are not @@ -56,7 +55,8 @@ #include "irflag_t.h" #include "firmstat.h" #include "irpass.h" -#include "irphase_t.h" +#include "irnodehashmap.h" +#include "irtools.h" #include "iropt_dbg.h" @@ -73,13 +73,14 @@ static void set_Block_removable(ir_node *block, bool removable) } /** check if a block has the removable property set. */ -static bool is_Block_removable(ir_node *block) +static bool is_Block_removable(const ir_node *block) { return get_Block_mark(block); } /** checks if a given Cond node is a switch Cond. */ -static bool is_switch_Cond(ir_node *cond) { +static bool is_switch_Cond(const ir_node *cond) +{ ir_node *sel = get_Cond_selector(cond); return get_irn_mode(sel) != mode_b; } @@ -89,8 +90,12 @@ static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx) { (void) ctx; set_irn_link(node, NULL); - if (is_Block(node)) + if (is_Block(node)) { set_Block_removable(node, true); + set_Block_phis(node, NULL); + } else if (is_Phi(node)) { + set_Phi_next(node, NULL); + } } /** @@ -102,21 +107,23 @@ static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx) */ static void collect_nodes(ir_node *n, void *ctx) { - ir_node ***switch_conds = (ir_node***)ctx; - + (void) ctx; if (is_Phi(n)) { /* Collect Phi nodes to compact ins along with block's ins. */ ir_node *block = get_nodes_block(n); - set_irn_link(n, get_irn_link(block)); - set_irn_link(block, n); + add_Block_phi(block, n); } else if (is_Block(n)) { - if (has_Block_entity(n)) { + if (get_Block_entity(n) != NULL) { /* block with a jump label attached cannot be removed. */ set_Block_removable(n, false); } + } else if (is_Bad(n) || is_Jmp(n)) { + /* ignore these */ return; - } else if (!is_Jmp(n)) { /* Check for non-empty block. */ + } else { + /* Check for non-empty block. */ ir_node *block = get_nodes_block(n); + set_Block_removable(block, false); if (is_Proj(n)) { @@ -124,15 +131,12 @@ static void collect_nodes(ir_node *n, void *ctx) ir_node *pred = get_Proj_pred(n); set_irn_link(n, get_irn_link(pred)); set_irn_link(pred, n); - } else if (is_Cond(n) && is_switch_Cond(n)) { - /* found a switch-Cond, collect */ - ARR_APP1(ir_node*, *switch_conds, n); } } } /** Returns true if pred is predecessor of block b. */ -static bool is_pred_of(ir_node *pred, ir_node *b) +static bool is_pred_of(const ir_node *pred, const ir_node *b) { int i; @@ -171,7 +175,7 @@ static bool is_pred_of(ir_node *pred, ir_node *b) * To perform the test for pos, we must regard predecessors before pos * as already removed. **/ -static unsigned test_whether_dispensable(ir_node *b, int pos) +static unsigned test_whether_dispensable(const ir_node *b, int pos) { ir_node *pred = get_Block_cfgpred(b, pos); ir_node *predb = get_nodes_block(pred); @@ -186,7 +190,7 @@ static unsigned test_whether_dispensable(ir_node *b, int pos) goto non_dispensable; /* Seems to be empty. 
At least we detected this in collect_nodes. */ - if (get_irn_link(b) != NULL) { + if (get_Block_phis(b) != NULL) { int n_cfgpreds = get_Block_n_cfgpreds(b); int i; /* there are Phi nodes */ @@ -229,6 +233,26 @@ non_dispensable: return 1; } +/** + * This method merges blocks. A block is applicable to be merged, if it + * has only one predecessor with an unconditional jump to this block; + * and if this block does not contain any phis. + */ +static void merge_blocks(ir_node *b, void *env) +{ + (void) env; + + if (get_Block_n_cfgpreds(b) == 1) { + ir_node* pred = get_Block_cfgpred(b, 0); + if (is_Jmp(pred)) { + ir_node* pred_block = get_nodes_block(pred); + if (get_Block_phis(b) == NULL) { + exchange(b, pred_block); + } + } + } +} + /** * This method removes empty blocks. A block is empty if it only contains Phi * and Jmp nodes. @@ -279,7 +303,8 @@ non_dispensable: static void optimize_blocks(ir_node *b, void *ctx) { int i, j, k, n, max_preds, n_preds, p_preds = -1; - ir_node *pred, *phi, *next; + ir_node *phi; + ir_node *next; ir_node **in; merge_env *env = (merge_env*)ctx; @@ -297,21 +322,27 @@ static void optimize_blocks(ir_node *b, void *ctx) in = XMALLOCN(ir_node*, max_preds); /*- Fix the Phi nodes of the current block -*/ - for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) { - assert(is_Phi(phi)); - next = (ir_node*)get_irn_link(phi); + for (phi = get_Block_phis(b); phi != NULL; phi = next) { + next = get_Phi_next(phi); /* Find the new predecessors for the Phi */ p_preds = 0; for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) { ir_graph *irg = get_irn_irg(b); - pred = get_Block_cfgpred_block(b, i); + ir_node *predx = get_Block_cfgpred(b, i); + ir_node *pred; - if (is_Bad(pred)) { - /* case Phi 1: maintain Bads, as somebody else is responsible to remove them */ + /* case Phi 1: maintain Bads, as somebody else is responsible to + * remove them */ + if (is_Bad(predx)) { in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi)); - } else if (is_Block_removable(pred) && !Block_block_visited(pred)) { - /* case Phi 2: It's an empty block and not yet visited. */ + continue; + } + + pred = get_nodes_block(predx); + + /* case Phi 2: It's an empty block and not yet visited. */ + if (is_Block_removable(pred) && !Block_block_visited(pred)) { ir_node *phi_pred = get_Phi_pred(phi, i); for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) { @@ -340,10 +371,11 @@ static void optimize_blocks(ir_node *b, void *ctx) assert(p_preds == max_preds); /* Fix the node */ - if (p_preds == 1) + if (p_preds == 1) { exchange(phi, in[0]); - else + } else { set_irn_in(phi, p_preds, in); + } env->changed = true; } @@ -360,37 +392,43 @@ static void optimize_blocks(ir_node *b, void *ctx) ir_node *next_phi; /* we found a predecessor block at position k that will be removed */ - for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) { + for (phi = get_Block_phis(predb); phi != NULL; phi = next_phi) { int q_preds = 0; - next_phi = (ir_node*)get_irn_link(phi); - - assert(is_Phi(phi)); + next_phi = get_Phi_next(phi); if (get_Block_idom(b) != predb) { - /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/ + /* predb is not the dominator. There can't be uses of + * pred's Phi nodes, kill them .*/ ir_graph *irg = get_irn_irg(b); ir_mode *mode = get_irn_mode(phi); exchange(phi, new_r_Bad(irg, mode)); } else { - /* predb is the direct dominator of b. 
There might be uses of the Phi nodes from - predb in further block, so move this phi from the predecessor into the block b */ + /* predb is the direct dominator of b. There might be uses + * of the Phi nodes from predb in further block, so move + * this phi from the predecessor into the block b */ set_nodes_block(phi, b); - set_irn_link(phi, get_irn_link(b)); - set_irn_link(b, phi); + set_Phi_next(phi, get_Block_phis(b)); + set_Block_phis(b, phi); env->phis_moved = true; /* first, copy all 0..k-1 predecessors */ for (i = 0; i < k; i++) { - pred = get_Block_cfgpred_block(b, i); + ir_node *predx = get_Block_cfgpred(b, i); + ir_node *pred_block; - if (is_Bad(pred)) { + if (is_Bad(predx)) { ir_graph *irg = get_irn_irg(b); ir_mode *mode = get_irn_mode(phi); in[q_preds++] = new_r_Bad(irg, mode); - } else if (is_Block_removable(pred) && !Block_block_visited(pred)) { + continue; + } + pred_block = get_nodes_block(predx); + if (is_Block_removable(pred_block) + && !Block_block_visited(pred_block)) { + int n_cfgpreds = get_Block_n_cfgpreds(pred_block); /* It's an empty block and not yet visited. */ - for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - if (! is_Bad(get_Block_cfgpred(pred, j))) { + for (j = 0; j < n_cfgpreds; j++) { + if (!is_Bad(get_Block_cfgpred(pred_block, j))) { in[q_preds++] = phi; } else { ir_graph *irg = get_irn_irg(b); @@ -485,81 +523,10 @@ static void optimize_blocks(ir_node *b, void *ctx) env->changed = true; /* see if phi-fix was correct */ - assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds)); + assert(get_Block_phis(b) == NULL || p_preds == -1 || (n_preds == p_preds)); xfree(in); } -/** - * Optimize table-switch Conds. - * - * @param cond the switch-Cond - * @return true if the switch-Cond was optimized - */ -static bool handle_switch_cond(ir_node *cond) -{ - ir_node *sel = get_Cond_selector(cond); - ir_node *proj1 = (ir_node*)get_irn_link(cond); - ir_node *proj2 = (ir_node*)get_irn_link(proj1); - ir_node *blk = get_nodes_block(cond); - - /* exactly 1 Proj on the Cond node: must be the defaultProj */ - if (proj2 == NULL) { - ir_node *jmp = new_r_Jmp(blk); - assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1)); - /* convert it into a Jmp */ - exchange(proj1, jmp); - return true; - } - - /* handle Cond nodes with constant argument. In this case the localopt rules - * should have killed all obviously impossible cases. 
- * So the only case left to handle here is 1 defaultProj + 1 case - * (this one case should be the one taken) */ - if (get_irn_link(proj2) == NULL) { - ir_tarval *tv = value_of(sel); - - if (tv != tarval_bad) { - /* we have a constant switch */ - long num = get_tarval_long(tv); - long def_num = get_Cond_default_proj(cond); - ir_graph *irg = get_irn_irg(cond); - ir_node *bad = new_r_Bad(irg, mode_X); - - if (def_num == get_Proj_proj(proj1)) { - /* first one is the defProj */ - if (num == get_Proj_proj(proj2)) { - ir_node *jmp = new_r_Jmp(blk); - exchange(proj2, jmp); - exchange(proj1, bad); - return true; - } - } else if (def_num == get_Proj_proj(proj2)) { - /* second one is the defProj */ - if (num == get_Proj_proj(proj1)) { - ir_node *jmp = new_r_Jmp(blk); - exchange(proj1, jmp); - exchange(proj2, bad); - return true; - } - } else { - /* neither: strange, Cond was not optimized so far */ - if (num == get_Proj_proj(proj1)) { - ir_node *jmp = new_r_Jmp(blk); - exchange(proj1, jmp); - exchange(proj2, bad); - return true; - } else if (num == get_Proj_proj(proj2)) { - ir_node *jmp = new_r_Jmp(blk); - exchange(proj2, jmp); - exchange(proj1, bad); - return true; - } - } - } - } - return false; -} - /** * Optimize boolean Conds, where true and false jump to the same block into a Jmp * Block must contain no Phi nodes. @@ -602,71 +569,95 @@ typedef enum block_flags_t { BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2, } block_flags_t; -static bool get_phase_flag(ir_phase *block_info, ir_node *block, int flag) { - return ((int)phase_get_irn_data(block_info, block)) & flag; +static bool get_block_flag(const ir_nodehashmap_t *infos, const ir_node *block, + int flag) +{ + return PTR_TO_INT(ir_nodehashmap_get(infos, block)) & flag; } -static void set_phase_flag(ir_phase *block_info, ir_node *block, block_flags_t flag) { - int data = (int)phase_get_irn_data(block_info, block); + +static void set_block_flag(ir_nodehashmap_t *infos, ir_node *block, + block_flags_t flag) +{ + int data = PTR_TO_INT(ir_nodehashmap_get(infos, block)); data |= flag; - phase_set_irn_data(block_info, block, (void*)data); + ir_nodehashmap_insert(infos, block, INT_TO_PTR(data)); } -static bool has_operations(ir_phase *block_info, ir_node *block) { - return get_phase_flag(block_info, block, BF_HAS_OPERATIONS); +static void clear_block_flag(ir_nodehashmap_t *infos, const ir_node *block) +{ + ir_nodehashmap_remove(infos, block); } -static void set_has_operations(ir_phase *block_info, ir_node *block) { - set_phase_flag(block_info, block, BF_HAS_OPERATIONS); + +static bool has_operations(ir_nodehashmap_t *infos, const ir_node *block) +{ + return get_block_flag(infos, block, BF_HAS_OPERATIONS); } -static bool has_phis(ir_phase *block_info, ir_node *block) { - return get_phase_flag(block_info, block, BF_HAS_PHIS); +static void set_has_operations(ir_nodehashmap_t *infos, ir_node *block) +{ + set_block_flag(infos, block, BF_HAS_OPERATIONS); } -static void set_has_phis(ir_phase *block_info, ir_node *block) { - set_phase_flag(block_info, block, BF_HAS_PHIS); + +static bool has_phis(ir_nodehashmap_t *infos, const ir_node *block) +{ + return get_block_flag(infos, block, BF_HAS_PHIS); } -static bool is_unknown_jump_target(ir_phase *block_info, ir_node *block) { - return get_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET); +static void set_has_phis(ir_nodehashmap_t *infos, ir_node *block) +{ + set_block_flag(infos, block, BF_HAS_PHIS); +} + +static bool is_unknown_jump_target(ir_nodehashmap_t *infos, const ir_node *block) +{ + return 
get_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET); } -static void set_is_unknown_jump_target(ir_phase *block_info, ir_node *block) { - set_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET); + +static void set_is_unknown_jump_target(ir_nodehashmap_t *infos, ir_node *block) +{ + set_block_flag(infos, block, BF_IS_UNKNOWN_JUMP_TARGET); } /** - * Walker: fill block info information. + * Pre-Walker: fill block info information. */ static void compute_block_info(ir_node *n, void *x) { - ir_phase *block_info = (ir_phase *)x; + ir_nodehashmap_t *infos = (ir_nodehashmap_t*)x; if (is_Block(n)) { int i, max = get_Block_n_cfgpreds(n); for (i=0; iphase, block)) + if (has_phis(&env->block_infos, block)) return; /* optimize Cond predecessors (might produce Bad predecessors) */ @@ -686,8 +677,8 @@ static void optimize_ifs(ir_node *block, void *x) } /** - * Pre-Block walker: remove empty blocks that are - * predecessors of the current block. + * Pre-Block walker: remove empty blocks (only contain a Jmp) + * that are control flow predecessors of the current block. */ static void remove_empty_blocks(ir_node *block, void *x) { @@ -696,57 +687,139 @@ static void remove_empty_blocks(ir_node *block, void *x) int n_preds = get_Block_n_cfgpreds(block); for (i = 0; i < n_preds; ++i) { - ir_node *jmp, *jmp_block, *pred, *pred_block; + ir_node *jmp, *jmp_block; + int n_jpreds = 0; jmp = get_Block_cfgpred(block, i); if (!is_Jmp(jmp)) continue; jmp_block = get_nodes_block(jmp); - if (is_unknown_jump_target(env->phase, jmp_block)) - continue; - if (has_operations(env->phase,jmp_block)) + if (jmp_block == block) + continue; /* this infinite loop cannot be optimized any further */ + if (is_unknown_jump_target(&env->block_infos, jmp_block)) + continue; /* unknown jump target must not be optimized */ + if (has_phis(&env->block_infos,jmp_block)) + continue; /* this block contains Phis and is not skipped */ + if (Block_block_visited(jmp_block)) { continue; - /* jmp_block is an empty block! */ + /* otherwise we could break the walker, + * if block was reached via + * KeepAlive edge -> jmp_block -> A ---> block, + * because the walker cannot handle Id nodes. + * + * A B + * \ / + * jmp_block + * / \ + * block End + */ + } - if (get_Block_n_cfgpreds(jmp_block) != 1) - continue; - pred = get_Block_cfgpred(jmp_block, 0); - exchange(jmp, pred); - env->changed = true; + /* jmp_block is an empty block and can be optimized! */ + + n_jpreds = get_Block_n_cfgpreds(jmp_block); + /** + * If the jmp block has only one predecessor this is straightforward. + * However, if there are more predecessors, we only handle this, + * if block has no Phis. + */ + if (n_jpreds == 1) { + ir_node *pred = get_Block_cfgpred(jmp_block, 0); + ir_node *pred_block = get_nodes_block(pred); + if (has_operations(&env->block_infos,jmp_block)) { + if (get_irg_start_block(get_irn_irg(pred_block)) == pred_block) + continue; /* must not merge operations into start block */ + if (!is_Jmp(pred)) + continue; /* must not create partially dead code, especially when it is mode_M */ + } - /* cleanup: jmp_block might have a Keep edge! */ - pred_block = get_nodes_block(pred); - exchange(jmp_block, pred_block); + /* skip jmp block by rerouting its predecessor to block + * + * A A + * | | + * jmp_block => | + * | | + * block block + */ + exchange(jmp, pred); + + /* cleanup: jmp_block might have a Keep edge! 
*/ + exchange(jmp_block, pred_block); + env->changed = true; + } else if ( !has_phis(&env->block_infos, block) && + !has_operations(&env->block_infos,jmp_block)) + { + /* all predecessors can skip the jmp block, so block gets some new + * predecessors + * + * A B A B + * \ / | | + * jmp_block C => Bad C | | + * \ / \ | | / + * block block + */ + ir_node **ins = ALLOCAN(ir_node*, n_preds+n_jpreds); + int j; + /* first copy the old predecessors, because the outer loop (i) + * still walks over them */ + for (j = 0; j < n_preds; ++j) { + ins[j] = get_Block_cfgpred(block, j); + } + /* now append the new predecessors */ + for (j = 0; j < n_jpreds; ++j) { + ir_node *pred = get_Block_cfgpred(jmp_block, j); + ins[n_preds+j] = pred; + } + set_irn_in(block, n_preds+n_jpreds, ins); + /* convert the jmp_block to Bad */ + ir_graph *irg = get_irn_irg(block); + exchange(jmp_block, new_r_Bad(irg, mode_BB)); + exchange(jmp, new_r_Bad(irg, mode_X)); + /* let the outer loop walk over the new predecessors as well */ + n_preds += n_jpreds; + env->changed = true; + // TODO What if jmp_block had a KeepAlive edge? + } else { + /* This would involve Phis ... */ + } } } /* - * Some cfg optimizations, which do not touch Phi nodes + * All cfg optimizations, which do not touch Phi nodes. + * + * Note that this might create critical edges. */ -static void cfgopt_ignoring_phis(ir_graph *irg) { - ir_phase *block_info = new_phase(irg, NULL); - skip_env env = { false, block_info }; +static void cfgopt_ignoring_phis(ir_graph *irg) +{ + skip_env env; - irg_walk_graph(irg, compute_block_info, NULL, block_info); + env.changed = true; + ir_nodehashmap_init(&env.block_infos); - for (;;) { + while (env.changed) { + irg_walk_graph(irg, compute_block_info, NULL, &env.block_infos); env.changed = false; - /* optimize useless ifs: will not touch empty blocks */ + /* Remove blocks, which only consist of a Jmp */ + irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env); + + /* Optimize Cond->Jmp, where then- and else-block are the same. */ irg_block_walk_graph(irg, NULL, optimize_ifs, &env); - /* Remove empty blocks */ - irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env); if (env.changed) { - set_irg_doms_inconsistent(irg); - /* Removing blocks might enable more useless-if optimizations */ + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE); + /* clear block info, because it must be recomputed */ + irg_block_walk_graph(irg, clear_block_info, NULL, &env.block_infos); + /* Removing blocks and Conds might enable more optimizations */ continue; } else { + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL); break; } } - phase_free(block_info); + ir_nodehashmap_destroy(&env.block_infos); } /* Optimizations of the control flow that also require changes of Phi nodes. */ @@ -767,47 +840,22 @@ void optimize_cf(ir_graph *irg) assert(get_irg_pinned(irg) != op_pin_state_floats && "Control flow optimization need a pinned graph"); - /* FIXME: control flow opt destroys block edges. So edges are deactivated - * here. Fix the edges! 
*/ - edges_deactivate(irg); + assure_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE); + /* First the "simple" optimizations, which do not touch Phis */ cfgopt_ignoring_phis(irg); /* we use the mark flag to mark removable blocks */ - ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK); - - /* The switch Cond optimization might expose unreachable code, so we loop */ - for (;;) { - int length; - ir_node **switch_conds = NULL; - bool changed = false; - - assure_doms(irg); - - /* - * This pass collects all Phi nodes in a link list in the block - * nodes. Further it performs simple control flow optimizations. - * Finally it marks all blocks that do not contain useful - * computations, i.e., these blocks might be removed. - */ - switch_conds = NEW_ARR_F(ir_node*, 0); - irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, &switch_conds); - - /* handle all collected switch-Conds */ - length = ARR_LEN(switch_conds); - for (i = 0; i < length; ++i) { - ir_node *cond = switch_conds[i]; - changed |= handle_switch_cond(cond); - } - DEL_ARR_F(switch_conds); - - if (!changed) - break; - - set_irg_doms_inconsistent(irg); - set_irg_extblk_inconsistent(irg); - set_irg_entity_usage_state(irg, ir_entity_usage_not_computed); - } + ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK + | IR_RESOURCE_PHI_LIST); + + /* + * This pass collects all Phi nodes in a link list in the block + * nodes. Further it performs simple control flow optimizations. + * Finally it marks all blocks that do not contain useful + * computations, i.e., these blocks might be removed. + */ + irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, NULL); /* assert due to collect_nodes: * 1. removable blocks are now marked as such @@ -815,11 +863,11 @@ void optimize_cf(ir_graph *irg) */ /* Optimize the standard code. - * It walks only over block nodes and adapts these and the Phi nodes in these - * blocks, which it finds in a linked list computed before. - * */ - assure_doms(irg); - irg_block_walk_graph(irg, optimize_blocks, NULL, &env); + * It walks only over block nodes and adapts these and the Phi nodes in + * these blocks, which it finds in a linked list computed before. + */ + assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE); + irg_block_walk_graph(irg, optimize_blocks, merge_blocks, &env); new_end = optimize_in_place(end); if (new_end != end) { @@ -828,7 +876,8 @@ void optimize_cf(ir_graph *irg) } remove_End_Bads_and_doublets(end); - ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK); + ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK + | IR_RESOURCE_PHI_LIST); if (env.phis_moved) { /* Bad: when we moved Phi's, we might produce dead Phi nodes @@ -866,12 +915,8 @@ void optimize_cf(ir_graph *irg) } } - if (env.changed) { - /* Handle graph state if was changed. */ - set_irg_doms_inconsistent(irg); - set_irg_extblk_inconsistent(irg); - set_irg_entity_usage_state(irg, ir_entity_usage_not_computed); - } + confirm_irg_properties(irg, + env.changed ? IR_GRAPH_PROPERTIES_NONE : IR_GRAPH_PROPERTIES_ALL); } /* Creates an ir_graph pass for optimize_cf. */
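
Note (illustrative, not part of the patch): the multi-predecessor branch of remove_empty_blocks above appends the predecessors of the skipped jmp_block to block's in-array and only then invalidates the old edge. A minimal standalone sketch of that rewiring is shown below; the block_t type, skip_empty_pred() and the array-based CFG are hypothetical stand-ins for libfirm's ir_node predecessor API, and unlike the real code no Bad nodes are created for the skipped edge.

	#include <stdio.h>
	#include <stdlib.h>

	typedef struct block_t {
		const char      *name;
		int              n_preds;
		struct block_t **preds;  /* control flow predecessor blocks */
		int              empty;  /* contains only an unconditional jump */
	} block_t;

	/* Reroute control flow around an empty predecessor: the predecessors of
	 * block->preds[pos] are appended to block's own predecessor list,
	 * mirroring the "A B / jmp_block C  =>  Bad C / A B" diagram in
	 * remove_empty_blocks.  (The real patch then turns the skipped edge and
	 * jmp_block into Bad nodes; here the old edge is simply left in place.) */
	static void skip_empty_pred(block_t *block, int pos)
	{
		block_t  *jmp_block = block->preds[pos];
		block_t **in;
		int       total, k = 0, i;

		if (!jmp_block->empty)
			return;

		total = block->n_preds + jmp_block->n_preds;
		in    = malloc(sizeof(block_t*) * total);

		/* copy the old predecessors first, so a loop that is still walking
		 * over them (as in remove_empty_blocks) keeps seeing them */
		for (i = 0; i < block->n_preds; ++i)
			in[k++] = block->preds[i];
		/* now append the rerouted edges from the skipped block */
		for (i = 0; i < jmp_block->n_preds; ++i)
			in[k++] = jmp_block->preds[i];

		free(block->preds);
		block->preds   = in;
		block->n_preds = k;
	}

	int main(void)
	{
		block_t  A = { "A", 0, NULL, 1 };
		block_t  B = { "B", 0, NULL, 1 };
		block_t  C = { "C", 0, NULL, 0 };
		block_t *jmp_in[2]  = { &A, &B };
		block_t  jmp_block  = { "jmp_block", 2, jmp_in, 1 };
		block_t  block      = { "block", 0, NULL, 0 };
		int      i;

		block.n_preds  = 2;
		block.preds    = malloc(2 * sizeof(block_t*));
		block.preds[0] = &jmp_block;
		block.preds[1] = &C;

		skip_empty_pred(&block, 0);

		/* prints jmp_block, C, A, B: the new edges are appended at the end */
		for (i = 0; i < block.n_preds; ++i)
			printf("pred %d of %s: %s\n", i, block.name, block.preds[i]->name);

		free(block.preds);
		return 0;
	}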