X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fjumpthreading.c;h=7561ded9623914b76023a2694517c120d631e3a3;hb=b27ae245166bb695bc4e418ff416d91bc37d0f28;hp=135ede9dd9827db039165cf9da85231fcf48edde;hpb=4f8e80463195b1fee6ebbeefb59722faf76134cc;p=libfirm

diff --git a/ir/opt/jumpthreading.c b/ir/opt/jumpthreading.c
index 135ede9dd..7561ded96 100644
--- a/ir/opt/jumpthreading.c
+++ b/ir/opt/jumpthreading.c
@@ -83,10 +83,10 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
 	ir_node **in;
 	ir_node *dummy;
 
-	/* This is needed because we create bads sometimes */
-	if (is_Bad(block) || is_Block_dead(block)) {
+	/* In case of a bad input to a block we need to return the bad value */
+	if (is_Bad(block)) {
 		ir_graph *irg = get_irn_irg(block);
-		return new_r_Bad(irg);
+		return new_r_Bad(irg, mode);
 	}
 
 	/* the other defs can't be marked for cases where a user of the original
@@ -275,6 +275,13 @@ static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
 		ir_node *copy;
 		ir_mode *mode;
 
+		if (is_End(node)) {
+			/* edge is a Keep edge. If the end block is unreachable via normal control flow,
+			 * we must maintain end's reachability with Keeps.
+			 */
+			keep_alive(copy_block);
+			continue;
+		}
 		/* ignore control flow */
 		mode = get_irn_mode(node);
 		if (mode == mode_X || is_Cond(node))
@@ -624,7 +631,7 @@ static void thread_jumps(ir_node* block, void* data)
 	int selector_evaluated;
 	const ir_edge_t *edge, *next;
 	ir_graph *irg;
-	ir_node *bad;
+	ir_node *badX;
 	int cnst_pos;
 
 	if (get_Block_n_cfgpreds(block) != 1)
@@ -686,7 +693,7 @@
 
 	if (selector_evaluated == 0) {
 		ir_graph *irg = get_irn_irg(block);
-		bad = new_r_Bad(irg);
+		ir_node *bad = new_r_Bad(irg, mode_X);
 		exchange(projx, bad);
 		*changed = 1;
 		return;
@@ -709,21 +716,27 @@
 	if (copy_block == NULL)
 		return;
 
+	/* We might thread the condition block of an infinite loop,
+	 * such that there is no path to End anymore. */
+	keep_alive(block);
+
 	/* we have to remove the edge towards the pred as the pred now
 	 * jumps into the true_block. We also have to shorten Phis
 	 * in our block because of this */
-	bad = new_r_Bad(irg);
+	badX = new_r_Bad(irg, mode_X);
 	cnst_pos = env.cnst_pos;
 
 	/* shorten Phis */
 	foreach_out_edge_safe(env.cnst_pred, edge, next) {
 		ir_node *node = get_edge_src_irn(edge);
 
-		if (is_Phi(node))
+		if (is_Phi(node)) {
+			ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
 			set_Phi_pred(node, cnst_pos, bad);
+		}
 	}
 
-	set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
+	set_Block_cfgpred(env.cnst_pred, cnst_pos, badX);
 
 	/* the graph is changed now */
 	*changed = 1;
@@ -739,6 +752,10 @@ void opt_jumpthreading(ir_graph* irg)
 
 	remove_critical_cf_edges(irg);
 
+	/* ugly: jump threading might get confused by garbage nodes
+	 * of mode_X in copy_and_fix_node(), so remove all garbage edges. */
+	edges_deactivate(irg);
+
 	edges_assure(irg);
 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
 
@@ -753,15 +770,9 @@
 
 	if (changed) {
 		/* control flow changed, some blocks may become dead */
-		set_irg_outs_inconsistent(irg);
 		set_irg_doms_inconsistent(irg);
 		set_irg_extblk_inconsistent(irg);
-		set_irg_loopinfo_inconsistent(irg);
 		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-
-		/* Dead code might be created. Optimize it away as it is dangerous
-		 * to call optimize_df() an dead code. */
-		optimize_cf(irg);
 	}
 }
 
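
The recurring pattern in the thread_jumps hunks above is that a Bad placeholder now has to carry a mode: when a control-flow predecessor is cut off, every Phi of the block gets a Bad of its own mode, while the block input itself gets a mode_X Bad. Below is a minimal standalone sketch of that pattern, using only calls that appear in the diff (new_r_Bad with a mode argument, foreach_out_edge_safe, set_Phi_pred, set_Block_cfgpred); the helper name detach_cfg_pred is hypothetical and not part of the patch, and it assumes the same libfirm headers that jumpthreading.c already includes.

/* Hypothetical helper, not part of the patch: detach control-flow
 * predecessor `pos` of `block` the way the hunks above do it. */
static void detach_cfg_pred(ir_node *block, int pos)
{
	ir_graph        *irg = get_irn_irg(block);
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* shorten the Phis: the placeholder must match each Phi's mode,
	 * since Bad nodes are mode-specific now */
	foreach_out_edge_safe(block, edge, next) {
		ir_node *node = get_edge_src_irn(edge);
		if (is_Phi(node)) {
			ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
			set_Phi_pred(node, pos, bad);
		}
	}

	/* the control-flow input itself becomes a Bad of mode_X */
	set_Block_cfgpred(block, pos, new_r_Bad(irg, mode_X));
}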