2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE.
22 * @brief Path-Sensitive Jump Threading
24 * @author Christoph Mallon, Matthias Braun
29 #include "iroptimize.h"
41 #include "iredges_t.h"
45 #include "opt_confirms.h"
46 #include "iropt_dbg.h"
51 DEBUG_ONLY(static firm_dbg_module_t *dbg);
54 * Add the new predecessor x to node node, which is either a Block or a Phi
56 static void add_pred(ir_node* node, ir_node* x)
62 assert(is_Block(node) || is_Phi(node));
64 n = get_irn_arity(node);
65 NEW_ARR_A(ir_node*, ins, n + 1);
66 for (i = 0; i < n; i++)
67 ins[i] = get_irn_n(node, i);
69 set_irn_in(node, n + 1, ins);
/* State for the on-the-fly SSA reconstruction below: an alternative
 * definition of the value and the block that holds it.  Consulted by
 * search_def_and_create_phis() once it has walked at least one block away
 * from the start of the search. */
72 static ir_node *ssa_second_def;
73 static ir_node *ssa_second_def_block;
/**
 * Walks the CFG backwards from @p block looking for the reaching definition
 * of a value, creating Phi nodes (of mode @p mode) where control flow merges.
 * Results are cached in each block's link field, guarded by the visited flag.
 * NOTE(review): the `first` parameter and parts of the body are missing from
 * this extraction — confirm against the full source.
 */
75 static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
84 /* This is needed because we create bads sometimes */
88 /* the other defs can't be marked for cases where a user of the original
89 * value is in the same block as the alternative definition.
90 * In this case we mustn't use the alternative definition.
91 * So we keep a flag that indicates whether we walked at least 1 block
92 * away and may use the alternative definition */
93 if (block == ssa_second_def_block && !first) {
94 return ssa_second_def;
97 /* already processed this block? */
98 if (irn_visited(block)) {
99 ir_node *value = (ir_node*) get_irn_link(block);
/* reaching the start block would mean the value has no definition at all */
103 irg = get_irn_irg(block);
104 assert(block != get_irg_start_block(irg));
106 /* a Block with only 1 predecessor needs no Phi */
107 n_cfgpreds = get_Block_n_cfgpreds(block);
108 if (n_cfgpreds == 1) {
109 ir_node *pred_block = get_Block_cfgpred_block(block, 0);
110 ir_node *value = search_def_and_create_phis(pred_block, mode, 0);
/* cache the result on the block so revisits are O(1) */
112 set_irn_link(block, value);
113 mark_irn_visited(block);
117 /* create a new Phi */
/* seed the Phi with Unknown placeholders and mark the block visited
 * BEFORE recursing, so cyclic control flow terminates by finding the
 * (cached) Phi itself */
118 NEW_ARR_A(ir_node*, in, n_cfgpreds);
119 for(i = 0; i < n_cfgpreds; ++i)
120 in[i] = new_Unknown(mode);
122 phi = new_r_Phi(block, n_cfgpreds, in, mode);
123 set_irn_link(block, phi);
124 mark_irn_visited(block);
126 /* set Phi predecessors */
127 for(i = 0; i < n_cfgpreds; ++i) {
128 ir_node *pred_block = get_Block_cfgpred_block(block, i);
129 ir_node *pred_val = search_def_and_create_phis(pred_block, mode, 0);
131 set_irn_n(phi, i, pred_val);
138 * Given a set of values this function constructs SSA-form for the users of the
139 * first value (the users are determined through the out-edges of the value).
140 * Uses the irn_visited flags. Works without using the dominance tree.
142 static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
143 ir_node *second_block, ir_node *second_val)
147 const ir_edge_t *edge;
148 const ir_edge_t *next;
150 /* no need to do anything */
151 if (orig_val == second_val)
/* fresh visited generation so cached lookups from earlier runs are stale */
154 irg = get_irn_irg(orig_val);
155 inc_irg_visited(irg);
157 mode = get_irn_mode(orig_val);
158 set_irn_link(orig_block, orig_val);
159 mark_irn_visited(orig_block);
/* publish the alternative definition for search_def_and_create_phis() */
161 ssa_second_def_block = second_block;
162 ssa_second_def = second_val;
164 /* Only fix the users of the first, i.e. the original node */
165 foreach_out_edge_safe(orig_val, edge, next) {
166 ir_node *user = get_edge_src_irn(edge);
167 int j = get_edge_src_pos(edge);
168 ir_node *user_block = get_nodes_block(user);
175 DB((dbg, LEVEL_3, ">>> Fixing user %+F (pred %d == %+F)\n", user, j, get_irn_n(user, j)));
/* NOTE(review): the guard selecting between the two lookups below is
 * missing from this extraction — presumably is_Phi(user): a Phi must look
 * the value up in the predecessor block of its input, everything else in
 * its own block.  Confirm against the full source. */
178 ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
179 newval = search_def_and_create_phis(pred_block, mode, 1);
181 newval = search_def_and_create_phis(user_block, mode, 1);
184 /* don't fix newly created Phis from the SSA construction */
185 if (newval != user) {
186 DB((dbg, LEVEL_4, ">>>> Setting input %d of %+F to %+F\n", j, user, newval));
187 set_irn_n(user, j, newval);
192 static void split_critical_edge(ir_node *block, int pos) {
193 ir_graph *irg = get_irn_irg(block);
198 in[0] = get_Block_cfgpred(block, pos);
199 new_block = new_r_Block(irg, 1, in);
200 new_jmp = new_r_Jmp(new_block);
201 set_Block_cfgpred(block, pos, new_jmp);
/**
 * Environment of one jump-threading attempt started in thread_jumps().
 * NOTE(review): several fields (true_block, cnst, tv) are referenced by the
 * functions below but missing from this extraction — confirm the full
 * struct against the original source.
 */
204 typedef struct jumpthreading_env_t {
206 ir_node *cmp; /**< The Compare node that might be partial evaluated */
207 pn_Cmp pnc; /**< The Compare mode of the Compare node. */
210 ir_visited_t visited_nr; /**< visited counter at the start of this run;
                                  nodes whose visited nr is >= this have
                                  already been copied (see copy_and_fix_node) */
212 ir_node *cnst_pred; /**< the block before the constant */
213 int cnst_pos; /**< the pos to the constant block (needed to
214 kill that edge later) */
215 } jumpthreading_env_t;
/**
 * Copy @p node (which lives in @p block) into @p copy_block, recursively
 * copying any of its predecessors that also live in @p block.  Phis are not
 * copied but evaluated: their j-th input (the one belonging to the edge
 * being threaded) is taken directly.  The copy is remembered in the node's
 * link field and stamped with env->visited_nr.
 */
217 static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
218 ir_node *block, ir_node *copy_block, int j,
224 /* we can evaluate Phis right now, all other nodes get copied */
226 copy = get_Phi_pred(node, j);
227 /* we might have to evaluate a Phi-cascade */
228 if (get_irn_visited(copy) >= env->visited_nr) {
229 copy = get_irn_link(copy);
232 copy = exact_copy(node);
233 set_nodes_block(copy, copy_block);
/* control-flow nodes must have been filtered out by the caller */
235 assert(get_irn_mode(copy) != mode_X);
237 arity = get_irn_arity(copy);
238 for(i = 0; i < arity; ++i) {
239 ir_node *pred = get_irn_n(copy, i);
/* predecessors outside the copied block keep their original value */
242 if (get_nodes_block(pred) != block)
/* already copied in this run? reuse the cached copy from the link field */
245 if (get_irn_visited(pred) >= env->visited_nr) {
246 new_pred = get_irn_link(pred);
248 new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
250 DB((dbg, LEVEL_2, ">> Set Pred of %+F to %+F\n", copy, new_pred));
251 set_irn_n(copy, i, new_pred);
/* remember the copy and mark the original as processed in this run */
255 set_irn_link(node, copy);
256 set_irn_visited(node, env->visited_nr);
/**
 * Copy all (non-control-flow) nodes of @p block into @p copy_block for the
 * threaded edge @p j, then reconstruct SSA form for every value that now has
 * two definitions (the original and its copy).  First pass copies, second
 * pass fixes data flow via construct_ssa().
 */
261 static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
262 ir_node *copy_block, int j)
264 const ir_edge_t *edge;
266 /* Look at all nodes in the cond_block and copy them into pred */
267 foreach_out_edge(block, edge) {
268 ir_node *node = get_edge_src_irn(edge);
272 if (is_Block(node)) {
273 /* Block->Block edge, should be the MacroBlock edge */
274 assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
278 /* ignore control flow */
279 mode = get_irn_mode(node);
280 if (mode == mode_X || is_Cond(node))
283 /* we may not copy mode_b nodes, because this could produce Phi with
284 * mode_bs which can't be handled in all backends. Instead we duplicate
285 * the node and move it to its users */
286 if (mode == mode_b) {
287 const ir_edge_t *edge, *next;
/* mode_b values here are expected to be Projs of a Cmp */
291 assert(is_Proj(node));
293 pred = get_Proj_pred(node);
294 pn = get_Proj_proj(node);
296 foreach_out_edge_safe(node, edge, next) {
298 ir_node *user = get_edge_src_irn(edge);
299 int pos = get_edge_src_pos(edge);
300 ir_node *user_block = get_nodes_block(user);
302 if (user_block == block)
/* duplicate the Cmp+Proj pair directly into the user's block */
305 cmp_copy = exact_copy(pred);
306 set_nodes_block(cmp_copy, user_block);
307 copy = new_r_Proj(current_ir_graph, user_block, cmp_copy, mode_b, pn);
308 set_irn_n(user, pos, copy);
314 copy = copy_and_fix_node(env, block, copy_block, j, node);
316 /* we might hit values in blocks that have already been processed by a
317 * recursive find_phi_with_const() call */
318 assert(get_irn_visited(copy) <= env->visited_nr);
319 if (get_irn_visited(copy) >= env->visited_nr) {
320 ir_node *prev_copy = get_irn_link(copy);
321 if (prev_copy != NULL)
322 set_irn_link(node, prev_copy);
326 /* fix data-flow (and reconstruct SSA if needed) */
327 foreach_out_edge(block, edge) {
328 ir_node *node = get_edge_src_irn(edge);
332 if (is_Block(node)) {
333 /* Block->Block edge, should be the MacroBlock edge */
334 assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
/* same filtering as in the copy pass above: skip control flow */
338 mode = get_irn_mode(node);
339 if (mode == mode_X || is_Cond(node))
346 DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));
/* the copy was stored in the link field by copy_and_fix_node() */
348 copy_node = get_irn_link(node);
349 construct_ssa(block, node, copy_block, copy_node);
354 * returns whether the cmp evaluates to true or false, or can't be evaluated!
355 * 1: true, 0: false, -1: can't evaluate
357 * @param pnc the compare mode of the Compare
358 * @param tv_left the left tarval
359 * @param tv_right the right tarval
361 static int eval_cmp_tv(pn_Cmp pnc, tarval *tv_left, tarval *tv_right)
363 pn_Cmp cmp_result = tarval_cmp(tv_left, tv_right);
365 /* does the compare evaluate to true? */
366 if (cmp_result == pn_Cmp_False)
368 if ((cmp_result & pnc) != cmp_result)
375 * returns whether the cmp evaluates to true or false, or can't be evaluated!
376 * 1: true, 0: false, -1: can't evaluate
378 * @param env the environment
379 * @param cand the candidate node, either a Const or a Confirm
381 static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
383 if (is_Const(cand)) {
384 tarval *tv_cand = get_Const_tarval(cand);
385 tarval *tv_cmp = get_Const_tarval(env->cnst);
387 return eval_cmp_tv(env->pnc, tv_cand, tv_cmp);
388 } else { /* a Confirm */
389 tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->pnc);
391 if (res == tarval_bad)
393 return res == tarval_b_true;
398 * Check for Const or Confirm with Const.
400 static int is_Const_or_Confirm(const ir_node *node)
402 if (is_Confirm(node))
403 node = get_Confirm_bound(node);
404 return is_Const(node);
408 * get the tarval of a Const or Confirm with
410 static tarval *get_Const_or_Confirm_tarval(const ir_node *node)
412 if (is_Confirm(node)) {
413 if (get_Confirm_bound(node))
414 node = get_Confirm_bound(node);
416 return get_Const_tarval(node);
/**
 * Recursively look backwards through Phi inputs whether @p value (the Cond
 * selector operand reached via @p jump) is a Const or Confirm that makes the
 * compare of the environment evaluable.  On success the true_block is
 * re-wired to jump directly from the found predecessor and the block whose
 * nodes must be duplicated is returned; NULL if nothing was found.
 */
419 static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
422 ir_node *block = get_nodes_block(jump);
/* guard against revisiting values (e.g. in cyclic Phi chains) */
424 if (irn_visited_else_mark(value))
427 if (is_Const_or_Confirm(value)) {
/* <= 0: compare is false or not evaluable — no candidate here */
428 if (eval_cmp(env, value) <= 0) {
434 "> Found jump threading candidate %+F->%+F\n",
435 env->true_block, block
438 /* adjust true_block to point directly towards our jump */
439 add_pred(env->true_block, jump);
441 split_critical_edge(env->true_block, 0);
443 /* we need a bigger visited nr when going back */
452 /* the Phi has to be in the same Block as the Jmp */
453 if (get_nodes_block(value) != block) {
457 arity = get_irn_arity(value);
458 for(i = 0; i < arity; ++i) {
460 ir_node *phi_pred = get_Phi_pred(value, i);
461 ir_node *cfgpred = get_Block_cfgpred(block, i);
463 copy_block = find_const_or_confirm(env, cfgpred, phi_pred);
464 if (copy_block == NULL)
467 /* copy duplicated nodes in copy_block and fix SSA */
468 copy_and_fix(env, block, copy_block, i);
/* remember the edge towards the constant's block so thread_jumps()
 * can kill it afterwards */
470 if (copy_block == get_nodes_block(cfgpred)) {
471 env->cnst_pred = block;
475 /* return now as we can't process more possibilities in 1 run */
/**
 * Recursively look backwards through Phi inputs whether @p value is a
 * constant boolean (Const/Confirm) or a Proj of a Cmp against a constant.
 * In the Cmp case the search is delegated to find_const_or_confirm() on the
 * non-constant operand.  Returns the block whose nodes must be duplicated,
 * or NULL if no jump-threading opportunity was found.
 */
483 static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
486 ir_node *block = get_nodes_block(jump);
/* guard against revisiting values (e.g. in cyclic Phi chains) */
488 if (irn_visited_else_mark(value)) {
492 if (is_Const_or_Confirm(value)) {
493 tarval *tv = get_Const_or_Confirm_tarval(value);
500 "> Found jump threading candidate %+F->%+F\n",
501 env->true_block, block
504 /* adjust true_block to point directly towards our jump */
505 add_pred(env->true_block, jump);
507 split_critical_edge(env->true_block, 0);
509 /* we need a bigger visited nr when going back */
517 /* the Phi has to be in the same Block as the Jmp */
518 if (get_nodes_block(value) != block)
521 arity = get_irn_arity(value);
522 for(i = 0; i < arity; ++i) {
524 ir_node *phi_pred = get_Phi_pred(value, i);
525 ir_node *cfgpred = get_Block_cfgpred(block, i);
527 copy_block = find_candidate(env, cfgpred, phi_pred);
528 if (copy_block == NULL)
531 /* copy duplicated nodes in copy_block and fix SSA */
532 copy_and_fix(env, block, copy_block, i);
/* remember the edge towards the constant's block so thread_jumps()
 * can kill it afterwards */
534 if (copy_block == get_nodes_block(cfgpred)) {
535 env->cnst_pred = block;
539 /* return now as we can't process more possibilities in 1 run */
543 if (is_Proj(value)) {
547 ir_node *cmp = get_Proj_pred(value);
551 left = get_Cmp_left(cmp);
552 right = get_Cmp_right(cmp);
553 pnc = get_Proj_proj(value);
555 /* we assume that the constant is on the right side, swap left/right
557 if (is_Const(left)) {
/* swapping operands requires inverting the compare relation */
562 pnc = get_inversed_pnc(pnc);
565 if (!is_Const(right))
568 if (get_nodes_block(left) != block) {
572 /* negate condition when we're looking for the false block */
573 if (env->tv == tarval_b_false) {
574 pnc = get_negated_pnc(pnc, get_irn_mode(right));
577 /* (recursively) look if a pred of a Phi is a constant or a Confirm */
582 return find_const_or_confirm(env, jump, left);
/**
589 * Block-walker: searches for the following construct
591 * Const or Phi with constants
 *
 * For each block with a single Proj-X/Cond predecessor: first try to
 * evaluate the Cond selector immediately (both Cmp operands constant, or a
 * constant boolean); otherwise start the recursive Phi search via
 * find_candidate().  On success the edge towards the constant predecessor
 * is killed with Bad.  @p data is the walker's changed-flag.
 */
601 static void thread_jumps(ir_node* block, void* data)
603 jumpthreading_env_t env;
609 int selector_evaluated;
610 const ir_edge_t *edge, *next;
/* only consider blocks with exactly one control-flow predecessor */
614 if (get_Block_n_cfgpreds(block) != 1)
617 projx = get_Block_cfgpred(block, 0);
620 assert(get_irn_mode(projx) == mode_X);
622 cond = get_Proj_pred(projx);
626 selector = get_Cond_selector(cond);
627 /* TODO handle switch Conds */
628 if (get_irn_mode(selector) != mode_b)
631 /* handle cases that can be immediately evaluated */
632 selector_evaluated = -1;
633 if (is_Proj(selector)) {
634 ir_node *cmp = get_Proj_pred(selector);
636 ir_node *left = get_Cmp_left(cmp);
637 ir_node *right = get_Cmp_right(cmp);
638 if (is_Const(left) && is_Const(right)) {
639 int pnc = get_Proj_proj(selector);
640 tarval *tv_left = get_Const_tarval(left);
641 tarval *tv_right = get_Const_tarval(right);
643 selector_evaluated = eval_cmp_tv(pnc, tv_left, tv_right);
644 if (selector_evaluated < 0)
648 } else if (is_Const_or_Confirm(selector)) {
649 tarval *tv = get_Const_or_Confirm_tarval(selector);
650 if (tv == tarval_b_true) {
651 selector_evaluated = 1;
653 assert(tv == tarval_b_false);
654 selector_evaluated = 0;
658 env.cnst_pred = NULL;
/* on the false Proj the meaning of the evaluation is inverted */
659 if (get_Proj_proj(projx) == pn_Cond_false) {
660 env.tv = tarval_b_false;
661 if (selector_evaluated >= 0)
662 selector_evaluated = !selector_evaluated;
664 env.tv = tarval_b_true;
/* statically decided: replace the Proj by Bad (branch never taken)
 * or by a plain Jmp (branch always taken) */
667 if (selector_evaluated == 0) {
669 exchange(projx, bad);
672 } else if (selector_evaluated == 1) {
673 dbg_info *dbgi = get_irn_dbg_info(selector);
674 ir_node *jmp = new_rd_Jmp(dbgi, get_nodes_block(projx));
675 DBG_OPT_JUMPTHREADING(projx, jmp);
676 exchange(projx, jmp);
681 /* (recursively) look if a pred of a Phi is a constant or a Confirm */
682 env.true_block = block;
683 inc_irg_visited(current_ir_graph);
684 env.visited_nr = get_irg_visited(current_ir_graph);
686 copy_block = find_candidate(&env, projx, selector);
687 if (copy_block == NULL)
690 /* we have to remove the edge towards the pred as the pred now
691 * jumps into the true_block. We also have to shorten Phis
692 * in our block because of this */
694 cnst_pos = env.cnst_pos;
/* shorten Phis: disconnect the input that belonged to the killed edge */
697 foreach_out_edge_safe(env.cnst_pred, edge, next) {
698 ir_node *node = get_edge_src_irn(edge);
701 set_Phi_pred(node, cnst_pos, bad);
704 set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
706 /* the graph is changed now */
/**
 * Public entry point: perform path-sensitive jump threading on @p irg.
 * Splits critical edges first, then repeatedly walks all blocks with
 * thread_jumps() and finally marks the analysis info of the graph as
 * inconsistent, since the control flow may have changed.
 */
710 void opt_jumpthreading(ir_graph* irg)
714 FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");
716 DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));
/* the algorithm relies on edges not being critical */
718 remove_critical_cf_edges(irg);
/* the walker uses both the link and the visited fields of nodes */
721 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
726 irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
730 ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
733 /* control flow changed, some blocks may become dead */
734 set_irg_outs_inconsistent(irg);
735 set_irg_doms_inconsistent(irg);
736 set_irg_extblk_inconsistent(irg);
737 set_irg_loopinfo_inconsistent(irg);
738 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
740 /* Dead code might be created. Optimize it away as it is dangerous
741 * to call optimize_df() on dead code. */
746 /* Creates an ir_graph pass for opt_jumpthreading. */
747 ir_graph_pass_t *opt_jumpthreading_pass(const char *name, int verify, int dump)
749 return def_graph_pass(name ? name : "jumpthreading", verify, dump, opt_jumpthreading);
750 } /* opt_jumpthreading_pass */