X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fcfopt.c;h=d6f3c0ea715ec200c2023b9ecfce677639c05939;hb=762b472fc81c73cf7a1b0041b8cd286b7206d79d;hp=a68d286746bc4c36d43217628df20a4f613428fd;hpb=8ea5207a45f6de59e29ab2cff0968e4139f79e4e;p=libfirm diff --git a/ir/opt/cfopt.c b/ir/opt/cfopt.c index a68d28674..d6f3c0ea7 100644 --- a/ir/opt/cfopt.c +++ b/ir/opt/cfopt.c @@ -56,6 +56,7 @@ #include "irflag_t.h" #include "firmstat.h" #include "irpass.h" +#include "irphase_t.h" #include "iropt_dbg.h" @@ -63,20 +64,29 @@ typedef struct merge_env { bool changed; /**< Set if the graph was changed. */ bool phis_moved; /**< Set if Phi nodes were moved. */ - ir_node **switch_conds; /**< Helper list for all found Switch Conds. */ } merge_env; +/** set or reset the removable property of a block. */ static void set_Block_removable(ir_node *block, bool removable) { set_Block_mark(block, removable); } +/** check if a block has the removable property set. */ static bool is_Block_removable(ir_node *block) { return get_Block_mark(block); } -static void clear_link(ir_node *node, void *ctx) +/** checks if a given Cond node is a switch Cond. */ +static bool is_switch_Cond(ir_node *cond) +{ + ir_node *sel = get_Cond_selector(cond); + return get_irn_mode(sel) != mode_b; +} + +/** Walker: clear link fields and mark all blocks as removable. */ +static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx) { (void) ctx; set_irn_link(node, NULL); @@ -93,7 +103,7 @@ static void clear_link(ir_node *node, void *ctx) */ static void collect_nodes(ir_node *n, void *ctx) { - merge_env *env = (merge_env*)ctx; + ir_node ***switch_conds = (ir_node***)ctx; if (is_Phi(n)) { /* Collect Phi nodes to compact ins along with block's ins. */ @@ -101,8 +111,10 @@ static void collect_nodes(ir_node *n, void *ctx) set_irn_link(n, get_irn_link(block)); set_irn_link(block, n); } else if (is_Block(n)) { - if (has_Block_entity(n)) + if (has_Block_entity(n)) { + /* block with a jump label attached cannot be removed. */ set_Block_removable(n, false); + } return; } else if (!is_Jmp(n)) { /* Check for non-empty block. */ ir_node *block = get_nodes_block(n); @@ -113,17 +125,14 @@ static void collect_nodes(ir_node *n, void *ctx) ir_node *pred = get_Proj_pred(n); set_irn_link(n, get_irn_link(pred)); set_irn_link(pred, n); - } else if (is_Cond(n)) { - ir_node *sel = get_Cond_selector(n); - if (get_irn_mode(sel) != mode_b) { - /* found a switch-Cond, collect */ - ARR_APP1(ir_node*, env->switch_conds, n); - } + } else if (is_Cond(n) && is_switch_Cond(n)) { + /* found a switch-Cond, collect */ + ARR_APP1(ir_node*, *switch_conds, n); } } } -/** Returns true if pred is predecessor of block. */ +/** Returns true if pred is predecessor of block b. */ static bool is_pred_of(ir_node *pred, ir_node *b) { int i; @@ -136,20 +145,6 @@ static bool is_pred_of(ir_node *pred, ir_node *b) return false; } -static unsigned count_non_bad_inputs(const ir_node *node) -{ - int arity = get_irn_arity(node); - unsigned result = 0; - int i; - - for (i = 0; i < arity; ++i) { - ir_node *in = get_irn_n(node, i); - if (!is_Bad(in)) - ++result; - } - return result; -} - /** Test whether we can optimize away pred block pos of b. * * @param b A block node. 
@@ -182,11 +177,9 @@ static unsigned test_whether_dispensable(ir_node *b, int pos) ir_node *pred = get_Block_cfgpred(b, pos); ir_node *predb = get_nodes_block(pred); - /* Bad blocks will be optimized away, so we don't need space for them */ - if (is_Bad(pred)) - return 0; - if (!is_Block_removable(predb)) + if (is_Bad(pred) || !is_Block_removable(predb)) return 1; + /* can't remove self-loops */ if (predb == b) goto non_dispensable; @@ -230,7 +223,7 @@ static unsigned test_whether_dispensable(ir_node *b, int pos) if (Block_block_visited(predb)) return 1; /* if we get here, the block is dispensable, count useful preds */ - return count_non_bad_inputs(predb); + return get_irn_arity(predb); non_dispensable: set_Block_removable(predb, false); @@ -238,8 +231,8 @@ non_dispensable: } /** - * This method removed Bad cf predecessors from Blocks and Phis, and removes - * empty blocks. A block is empty if it only contains Phi and Jmp nodes. + * This method removes empty blocks. A block is empty if it only contains Phi + * and Jmp nodes. * * We first adapt Phi nodes, then Block nodes, as we need the old ins * of the Block to adapt the Phi nodes. We do this by computing new @@ -291,6 +284,11 @@ static void optimize_blocks(ir_node *b, void *ctx) ir_node **in; merge_env *env = (merge_env*)ctx; + if (get_Block_dom_depth(b) < 0) { + /* ignore unreachable blocks */ + return; + } + /* Count the number of predecessor if this block is merged with pred blocks that are empty. */ max_preds = 0; @@ -307,20 +305,23 @@ static void optimize_blocks(ir_node *b, void *ctx) /* Find the new predecessors for the Phi */ p_preds = 0; for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) { + ir_graph *irg = get_irn_irg(b); pred = get_Block_cfgpred_block(b, i); if (is_Bad(pred)) { - /* case Phi 1: Do nothing */ + /* case Phi 1: maintain Bads, as somebody else is responsible to remove them */ + in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi)); } else if (is_Block_removable(pred) && !Block_block_visited(pred)) { /* case Phi 2: It's an empty block and not yet visited. */ ir_node *phi_pred = get_Phi_pred(phi, i); for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) { ir_node *pred_pred = get_Block_cfgpred(pred, j); - /* because of breaking loops, not all predecessors are - * Bad-clean, so we must check this here again */ - if (is_Bad(pred_pred)) + + if (is_Bad(pred_pred)) { + in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi)); continue; + } if (get_nodes_block(phi_pred) == pred) { /* case Phi 2a: */ @@ -341,7 +342,6 @@ static void optimize_blocks(ir_node *b, void *ctx) /* Fix the node */ if (p_preds == 1) - /* By removal of Bad ins the Phi might be degenerated. */ exchange(phi, in[0]); else set_irn_in(phi, p_preds, in); @@ -369,8 +369,9 @@ static void optimize_blocks(ir_node *b, void *ctx) if (get_Block_idom(b) != predb) { /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/ - ir_graph *irg = get_irn_irg(b); - exchange(phi, get_irg_bad(irg)); + ir_graph *irg = get_irn_irg(b); + ir_mode *mode = get_irn_mode(phi); + exchange(phi, new_r_Bad(irg, mode)); } else { /* predb is the direct dominator of b. 
There might be uses of the Phi nodes from predb in further block, so move this phi from the predecessor into the block b */ @@ -384,12 +385,19 @@ static void optimize_blocks(ir_node *b, void *ctx) pred = get_Block_cfgpred_block(b, i); if (is_Bad(pred)) { - /* Do nothing */ + ir_graph *irg = get_irn_irg(b); + ir_mode *mode = get_irn_mode(phi); + in[q_preds++] = new_r_Bad(irg, mode); } else if (is_Block_removable(pred) && !Block_block_visited(pred)) { /* It's an empty block and not yet visited. */ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - if (! is_Bad(get_Block_cfgpred(pred, j))) + if (! is_Bad(get_Block_cfgpred(pred, j))) { in[q_preds++] = phi; + } else { + ir_graph *irg = get_irn_irg(b); + ir_mode *mode = get_irn_mode(phi); + in[q_preds++] = new_r_Bad(irg, mode); + } } } else { in[q_preds++] = phi; @@ -399,8 +407,7 @@ static void optimize_blocks(ir_node *b, void *ctx) /* now we are at k, copy the phi predecessors */ pred = get_nodes_block(get_Block_cfgpred(b, k)); for (i = 0; i < get_Phi_n_preds(phi); i++) { - if (! is_Bad(get_Block_cfgpred(pred, i))) - in[q_preds++] = get_Phi_pred(phi, i); + in[q_preds++] = get_Phi_pred(phi, i); } /* and now all the rest */ @@ -408,12 +415,19 @@ static void optimize_blocks(ir_node *b, void *ctx) pred = get_Block_cfgpred_block(b, i); if (is_Bad(pred)) { - /* Do nothing */ + ir_graph *irg = get_irn_irg(b); + ir_mode *mode = get_irn_mode(phi); + in[q_preds++] = new_r_Bad(irg, mode); } else if (is_Block_removable(pred) && !Block_block_visited(pred)) { /* It's an empty block and not yet visited. */ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) { - if (! is_Bad(get_Block_cfgpred(pred, j))) + if (! is_Bad(get_Block_cfgpred(pred, j))) { in[q_preds++] = phi; + } else { + ir_graph *irg = get_irn_irg(b); + ir_mode *mode = get_irn_mode(phi); + in[q_preds++] = new_r_Bad(irg, mode); + } } } else { in[q_preds++] = phi; @@ -439,24 +453,28 @@ static void optimize_blocks(ir_node *b, void *ctx) for (i = 0; i < get_Block_n_cfgpreds(b); i++) { ir_node *pred = get_Block_cfgpred(b, i); ir_node *predb = get_nodes_block(pred); + ir_graph *irg = get_irn_irg(pred); - /* case 1: Do nothing */ - if (is_Bad(pred)) + /* case 1: Bad predecessor */ + if (is_Bad(pred)) { + in[n_preds++] = new_r_Bad(irg, mode_X); continue; + } if (is_Block_removable(predb) && !Block_block_visited(predb)) { /* case 2: It's an empty block and not yet visited. */ for (j = 0; j < get_Block_n_cfgpreds(predb); j++) { ir_node *predpred = get_Block_cfgpred(predb, j); - /* because of breaking loops, not all predecessors are - * Bad-clean, so we must check this here again */ - if (is_Bad(predpred)) + if (is_Bad(predpred)) { + in[n_preds++] = new_r_Bad(irg, mode_X); continue; + } + in[n_preds++] = predpred; } /* Remove block+jump as it might be kept alive. */ - exchange(pred, get_irg_bad(get_irn_irg(b))); - exchange(predb, get_irg_bad(get_irn_irg(b))); + exchange(pred, new_r_Bad(get_irn_irg(b), mode_X)); + exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB)); } else { /* case 3: */ in[n_preds++] = pred; @@ -472,21 +490,6 @@ static void optimize_blocks(ir_node *b, void *ctx) xfree(in); } -/** - * Block walker: optimize all blocks using the default optimizations. - * This removes Blocks with only a Jmp predecessor. - */ -static void remove_simple_blocks(ir_node *block, void *ctx) -{ - merge_env *env = (merge_env*)ctx; - ir_node *new_blk = equivalent_node(block); - - if (new_blk != block) { - exchange(block, new_blk); - env->changed = true; - } -} - /** * Optimize table-switch Conds. 
* @@ -511,7 +514,7 @@ static bool handle_switch_cond(ir_node *cond) /* handle Cond nodes with constant argument. In this case the localopt rules * should have killed all obviously impossible cases. - * So the only case left to handle here is 1 defaultProj + 1case + * So the only case left to handle here is 1 defaultProj + 1 case * (this one case should be the one taken) */ if (get_irn_link(proj2) == NULL) { ir_tarval *tv = value_of(sel); @@ -521,7 +524,7 @@ static bool handle_switch_cond(ir_node *cond) long num = get_tarval_long(tv); long def_num = get_Cond_default_proj(cond); ir_graph *irg = get_irn_irg(cond); - ir_node *bad = get_irg_bad(irg); + ir_node *bad = new_r_Bad(irg, mode_X); if (def_num == get_Proj_proj(proj1)) { /* first one is the defProj */ @@ -558,21 +561,210 @@ static bool handle_switch_cond(ir_node *cond) return false; } -/* Optimizations of the control flow that also require changes of Phi nodes. - * - * This optimization performs two passes over the graph. - * - * The first pass collects all Phi nodes in a link list in the block - * nodes. Further it performs simple control flow optimizations. - * Finally it marks all blocks that do not contain useful - * computations, i.e., these blocks might be removed. - * - * The second pass performs the optimizations intended by this algorithm. - * It walks only over block nodes and adapts these and the Phi nodes in these - * blocks, which it finds in a linked list computed by the first pass. +/** + * Optimize boolean Conds, where true and false jump to the same block into a Jmp + * Block must contain no Phi nodes. * - * We use the mark flag to mark removable blocks in the first phase. + * Cond + * / \ + * projA projB => Jmp Bad + * \ / \ / + * block block */ +static bool optimize_pred_cond(ir_node *block, int i, int j) +{ + ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad; + assert(i != j); + + projA = get_Block_cfgpred(block, i); + if (!is_Proj(projA)) return false; + projB = get_Block_cfgpred(block, j); + if (!is_Proj(projB)) return false; + cond = get_Proj_pred(projA); + if (!is_Cond(cond)) return false; + + if (cond != get_Proj_pred(projB)) return false; + if (is_switch_Cond(cond)) return false; + + /* cond should actually be a Jmp */ + pred_block = get_nodes_block(cond); + jmp = new_r_Jmp(pred_block); + bad = new_r_Bad(get_irn_irg(block), mode_X); + + assert(projA != projB); + exchange(projA, jmp); + exchange(projB, bad); + return true; +} + +typedef enum block_flags_t { + BF_HAS_OPERATIONS = 1 << 0, + BF_HAS_PHIS = 1 << 1, + BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2, +} block_flags_t; + +static bool get_phase_flag(ir_phase *block_info, ir_node *block, int flag) +{ + return PTR_TO_INT(phase_get_irn_data(block_info, block)) & flag; +} + +static void set_phase_flag(ir_phase *block_info, ir_node *block, + block_flags_t flag) +{ + int data = PTR_TO_INT(phase_get_irn_data(block_info, block)); + data |= flag; + phase_set_irn_data(block_info, block, INT_TO_PTR(data)); +} + +static bool has_operations(ir_phase *block_info, ir_node *block) +{ + return get_phase_flag(block_info, block, BF_HAS_OPERATIONS); +} + +static void set_has_operations(ir_phase *block_info, ir_node *block) +{ + set_phase_flag(block_info, block, BF_HAS_OPERATIONS); +} + +static bool has_phis(ir_phase *block_info, ir_node *block) +{ + return get_phase_flag(block_info, block, BF_HAS_PHIS); +} + +static void set_has_phis(ir_phase *block_info, ir_node *block) +{ + set_phase_flag(block_info, block, BF_HAS_PHIS); +} + +static bool is_unknown_jump_target(ir_phase 
*block_info, ir_node *block)
+{
+	return get_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
+}
+
+static void set_is_unknown_jump_target(ir_phase *block_info, ir_node *block)
+{
+	set_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
+}
+
+/**
+ * Walker: fill block info information.
+ */
+static void compute_block_info(ir_node *n, void *x)
+{
+	ir_phase *block_info = (ir_phase *)x;
+
+	if (is_Block(n)) {
+		int i, max = get_Block_n_cfgpreds(n);
+		for (i = 0; i < max; ++i) {
+			ir_node *pred = get_Block_cfgpred(n, i);
+			/* predecessors that are neither Jmp nor Proj are jumps we
+			 * cannot predict (e.g. an IJmp) */
+			if (!is_Jmp(pred) && !is_Proj(pred))
+				set_is_unknown_jump_target(block_info, n);
+		}
+	} else if (is_Phi(n)) {
+		ir_node *block = get_nodes_block(n);
+		set_has_phis(block_info, block);
+	} else if (is_Jmp(n) || is_Cond(n) || is_Proj(n)) {
+		/* ignore pure control flow nodes */
+	} else {
+		ir_node *block = get_nodes_block(n);
+		set_has_operations(block_info, block);
+	}
+}
+
+typedef struct skip_env {
+	bool changed;     /**< Set if the graph was changed. */
+	ir_phase *phase;  /**< Phase holding the block info flags. */
+} skip_env;
+
+/**
+ * Post-Block walker: optimize useless ifs, i.e. boolean Conds
+ * where both Projs jump into the same block.
+ */
+static void optimize_ifs(ir_node *block, void *x)
+{
+	skip_env *env = (skip_env*)x;
+	int i, j;
+	int n_preds = get_Block_n_cfgpreds(block);
+
+	if (has_phis(env->phase, block))
+		return;
+
+	/* optimize Cond predecessors (might produce Bad predecessors) */
+	for (i = 0; i < n_preds; ++i) {
+		for (j = i+1; j < n_preds; ++j) {
+			optimize_pred_cond(block, i, j);
+		}
+	}
+}
+
+/**
+ * Pre-Block walker: remove empty blocks that are
+ * predecessors of the current block.
+ */
+static void remove_empty_blocks(ir_node *block, void *x)
+{
+	skip_env *env = (skip_env*)x;
+	int i;
+	int n_preds = get_Block_n_cfgpreds(block);
+
+	for (i = 0; i < n_preds; ++i) {
+		ir_node *jmp, *jmp_block, *pred, *pred_block;
+
+		jmp = get_Block_cfgpred(block, i);
+		if (!is_Jmp(jmp))
+			continue;
+		jmp_block = get_nodes_block(jmp);
+		if (is_unknown_jump_target(env->phase, jmp_block))
+			continue;
+		if (has_operations(env->phase,jmp_block))
+			continue;
+		/* jmp_block is an empty block! */
+
+		if (get_Block_n_cfgpreds(jmp_block) != 1)
+			continue;
+		pred = get_Block_cfgpred(jmp_block, 0);
+		exchange(jmp, pred);
+		env->changed = true;
+
+		/* cleanup: jmp_block might have a Keep edge! */
+		pred_block = get_nodes_block(pred);
+		exchange(jmp_block, pred_block);
+	}
+}
+
+/*
+ * Some cfg optimizations, which do not touch Phi nodes
+ */
+static void cfgopt_ignoring_phis(ir_graph *irg)
+{
+	ir_phase *block_info = new_phase(irg, NULL);
+	skip_env env = { false, block_info };
+
+	irg_walk_graph(irg, compute_block_info, NULL, block_info);
+
+	for (;;) {
+		env.changed = false;
+
+		/* optimize useless ifs: will not touch empty blocks */
+		irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
+
+		/* Remove empty blocks */
+		irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
+		if (env.changed) {
+			set_irg_doms_inconsistent(irg);
+			/* Removing blocks might enable more useless-if optimizations */
+			continue;
+		} else {
+			break;
+		}
+	}
+
+	phase_free(block_info);
+}
+
+/* Optimizations of the control flow that also require changes of Phi nodes. */
 void optimize_cf(ir_graph *irg)
 {
 	int i, j, n;
@@ -581,6 +773,9 @@ void optimize_cf(ir_graph *irg)
 	ir_node *new_end;
 	merge_env env;
 
+	env.changed = false;
+	env.phis_moved = false;
+
 	assert(get_irg_phase_state(irg) != phase_building);
 
 	/* if the graph is not pinned, we cannot determine empty blocks */
@@ -591,41 +786,55 @@ void optimize_cf(ir_graph *irg)
 	 * here. Fix the edges!
*/
 	edges_deactivate(irg);
 
+	cfgopt_ignoring_phis(irg);
+
 	/* we use the mark flag to mark removable blocks */
 	ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
 
-restart:
-	env.changed = false;
-	env.phis_moved = false;
-	assure_doms(irg);
+	/* The switch Cond optimization might expose unreachable code, so we loop */
+	for (;;) {
+		int length;
+		ir_node **switch_conds = NULL;
+		bool changed = false;
 
-	env.switch_conds = NEW_ARR_F(ir_node*, 0);
-	irg_walk(end, clear_link, collect_nodes, &env);
+		assure_doms(irg);
 
-	/* handle all collected switch-Conds */
-	n = ARR_LEN(env.switch_conds);
-	for (i = 0; i < n; ++i) {
-		ir_node *cond = env.switch_conds[i];
-		env.changed |= handle_switch_cond(cond);
-	}
-	DEL_ARR_F(env.switch_conds);
+		/*
+		 * This pass collects all Phi nodes in a link list in the block
+		 * nodes. Further it performs simple control flow optimizations.
+		 * Finally it marks all blocks that do not contain useful
+		 * computations, i.e., these blocks might be removed.
+		 */
+		switch_conds = NEW_ARR_F(ir_node*, 0);
+		irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, &switch_conds);
+
+		/* handle all collected switch-Conds */
+		length = ARR_LEN(switch_conds);
+		for (i = 0; i < length; ++i) {
+			ir_node *cond = switch_conds[i];
+			changed |= handle_switch_cond(cond);
+		}
+		DEL_ARR_F(switch_conds);
+
+		if (!changed)
+			break;
 
-	if (env.changed) {
-		/* Handle graph state if was changed. */
-		set_irg_outs_inconsistent(irg);
 		set_irg_doms_inconsistent(irg);
 		set_irg_extblk_inconsistent(irg);
-		set_irg_loopinfo_inconsistent(irg);
 		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-
-		/* The Cond optimization might generate unreachable code, so restart if
-		   it happens. */
-		goto restart;
 	}
 
-	/* Optimize the standard code. */
+	/* collect_nodes has ensured that:
+	 * 1. removable blocks are now marked as such
+	 * 2. phi lists are up to date
+	 */
+
+	/* Optimize the standard code.
+	 * It walks only over block nodes and adapts these and the Phi nodes in these
+	 * blocks, which it finds in a linked list computed before.
+	 */
 	assure_doms(irg);
-	irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
+	irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
 
 	new_end = optimize_in_place(end);
 	if (new_end != end) {
@@ -639,15 +848,11 @@ restart:
 	if (env.phis_moved) {
 		/* Bad: when we moved Phi's, we might produce dead Phi nodes
 		   that are kept-alive.
-		   Some other phases cannot copy with this, so will them.
+		   Some other phases cannot cope with this, so kill them.
 		 */
 		n = get_End_n_keepalives(end);
 		if (n > 0) {
 			NEW_ARR_A(ir_node *, in, n);
-			if (env.changed) {
-				/* Handle graph state if was changed. */
-				set_irg_outs_inconsistent(irg);
-			}
 			assure_irg_outs(irg);
 
 			for (i = j = 0; i < n; ++i) {
@@ -678,21 +883,10 @@ restart:
 
 	if (env.changed) {
 		/* Handle graph state if was changed. */
-		set_irg_outs_inconsistent(irg);
 		set_irg_doms_inconsistent(irg);
 		set_irg_extblk_inconsistent(irg);
-		set_irg_loopinfo_inconsistent(irg);
 		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
 	}
-
-	/* the verifier doesn't work yet with floating nodes */
-	if (get_irg_pinned(irg) == op_pin_state_pinned) {
-		/* after optimize_cf(), only Bad data flow may remain. */
-		if (irg_verify_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
-			dump_ir_graph(irg, "-verify-cf");
-			fprintf(stderr, "VERIFY_BAD in optimize_cf()\n");
-		}
-	}
 }
 
 /* Creates an ir_graph pass for optimize_cf. */