 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 * This file is part of libFirm.
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * @brief   Path-Sensitive Jump Threading
 * @author  Christoph Mallon, Matthias Braun
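 *
 * Illustrative example of what this pass does: in code like
 *
 *     int x = a ? 23 : 42;
 *     if (x == 42) { ... }
 *
 * the outcome of the second condition is already known on every incoming
 * path, so the conditional jump can be threaded: the path where a is false
 * may branch directly into the then-part, and the path where a is true may
 * branch directly past it.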
#include "iroptimize.h"
#include "iredges_t.h"
#include "opt_confirms.h"
#include "iropt_dbg.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg);
 * Add the new predecessor x to the given node, which is either a Block or a Phi
static void add_pred(ir_node* node, ir_node* x)
	assert(is_Block(node) || is_Phi(node));
	n = get_irn_arity(node);
	NEW_ARR_A(ir_node*, ins, n + 1);
	for (i = 0; i < n; i++)
		ins[i] = get_irn_n(node, i);
	set_irn_in(node, n + 1, ins);

static ir_node *ssa_second_def;
static ir_node *ssa_second_def_block;
static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
	/* This is needed because we create bads sometimes */
	/* the other defs can't be marked for cases where a user of the original
	 * value is in the same block as the alternative definition.
	 * In this case we mustn't use the alternative definition.
	 * So we keep a flag that indicates whether we walked at least 1 block
	 * away and may use the alternative definition */
	if (block == ssa_second_def_block && !first) {
		return ssa_second_def;
	/* already processed this block? */
	if (irn_visited(block)) {
		ir_node *value = (ir_node*) get_irn_link(block);
	irg = get_irn_irg(block);
	assert(block != get_irg_start_block(irg));
	/* a Block with only 1 predecessor needs no Phi */
	n_cfgpreds = get_Block_n_cfgpreds(block);
	if (n_cfgpreds == 1) {
		ir_node *pred_block = get_Block_cfgpred_block(block, 0);
		ir_node *value = search_def_and_create_phis(pred_block, mode, 0);
		set_irn_link(block, value);
		mark_irn_visited(block);
	/* create a new Phi */
	NEW_ARR_A(ir_node*, in, n_cfgpreds);
	for (i = 0; i < n_cfgpreds; ++i)
		in[i] = new_Unknown(mode);
	phi = new_r_Phi(block, n_cfgpreds, in, mode);
	set_irn_link(block, phi);
	mark_irn_visited(block);
	/* set Phi predecessors */
	for (i = 0; i < n_cfgpreds; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		ir_node *pred_val = search_def_and_create_phis(pred_block, mode, 0);
		set_irn_n(phi, i, pred_val);
 * Given a set of values this function constructs SSA-form for the users of the
 * first value (the users are determined through the out-edges of the value).
 * Uses the irn_visited flags. Works without using the dominance tree.
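 *
 * Illustrative sketch: if a value v defined in orig_block has been duplicated
 * as v' in second_block, then construct_ssa(orig_block, v, second_block, v')
 * rewires every user of v to the definition reaching it, creating Phis where
 * the two definitions meet.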
static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
                          ir_node *second_block, ir_node *second_val)
	const ir_edge_t *edge;
	const ir_edge_t *next;
	/* no need to do anything */
	if (orig_val == second_val)
	irg = get_irn_irg(orig_val);
	inc_irg_visited(irg);
	mode = get_irn_mode(orig_val);
	set_irn_link(orig_block, orig_val);
	mark_irn_visited(orig_block);
	ssa_second_def_block = second_block;
	ssa_second_def = second_val;
	/* Only fix the users of the first, i.e. the original node */
	foreach_out_edge_safe(orig_val, edge, next) {
		ir_node *user = get_edge_src_irn(edge);
		int j = get_edge_src_pos(edge);
		ir_node *user_block = get_nodes_block(user);
		DB((dbg, LEVEL_3, ">>> Fixing user %+F (pred %d == %+F)\n", user, j, get_irn_n(user, j)));
			ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
			newval = search_def_and_create_phis(pred_block, mode, 1);
			newval = search_def_and_create_phis(user_block, mode, 1);
		/* don't fix newly created Phis from the SSA construction */
		if (newval != user) {
			DB((dbg, LEVEL_4, ">>>> Setting input %d of %+F to %+F\n", j, user, newval));
			set_irn_n(user, j, newval);
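
/**
 * Split the critical edge at position pos of the given block by inserting a
 * new intermediate block that contains only a Jmp to the original block.
 */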
static void split_critical_edge(ir_node *block, int pos)
	ir_graph *irg = get_irn_irg(block);
	in[0] = get_Block_cfgpred(block, pos);
	new_block = new_r_Block(irg, 1, in);
	new_jmp = new_r_Jmp(new_block);
	set_Block_cfgpred(block, pos, new_jmp);
typedef struct jumpthreading_env_t {
	ir_node      *cmp;        /**< The Compare node that might be partially evaluated */
	pn_Cmp        pnc;        /**< The Compare mode of the Compare node. */
	ir_visited_t  visited_nr;
	ir_node      *cnst_pred;  /**< the block before the constant */
	int           cnst_pos;   /**< the pos to the constant block (needed to
	                               kill that edge later) */
} jumpthreading_env_t;
static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                                  ir_node *block, ir_node *copy_block, int j,
	/* we can evaluate Phis right now, all other nodes get copied */
		copy = get_Phi_pred(node, j);
		/* we might have to evaluate a Phi-cascade */
		if (get_irn_visited(copy) >= env->visited_nr) {
			copy = get_irn_link(copy);
		copy = exact_copy(node);
		set_nodes_block(copy, copy_block);
	assert(get_irn_mode(copy) != mode_X);
	arity = get_irn_arity(copy);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(copy, i);
		if (get_nodes_block(pred) != block)
		if (get_irn_visited(pred) >= env->visited_nr) {
			new_pred = get_irn_link(pred);
			new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
		DB((dbg, LEVEL_2, ">> Set Pred of %+F to %+F\n", copy, new_pred));
		set_irn_n(copy, i, new_pred);
	set_irn_link(node, copy);
	set_irn_visited(node, env->visited_nr);
static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                         ir_node *copy_block, int j)
	const ir_edge_t *edge;
	/* Look at all nodes in the cond_block and copy them into pred */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		if (is_Block(node)) {
			/* Block->Block edge, should be the MacroBlock edge */
			assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
		/* ignore control flow */
		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
		/* we may not copy mode_b nodes, because this could produce Phis with
		 * mode_b which can't be handled in all backends. Instead we duplicate
		 * the node and move it to its users */
		if (mode == mode_b) {
			const ir_edge_t *edge, *next;
			assert(is_Proj(node));
			pred = get_Proj_pred(node);
			pn = get_Proj_proj(node);
			foreach_out_edge_safe(node, edge, next) {
				ir_node *user = get_edge_src_irn(edge);
				int pos = get_edge_src_pos(edge);
				ir_node *user_block = get_nodes_block(user);
				if (user_block == block)
				cmp_copy = exact_copy(pred);
				set_nodes_block(cmp_copy, user_block);
				copy = new_r_Proj(current_ir_graph, user_block, cmp_copy, mode_b, pn);
				set_irn_n(user, pos, copy);
		copy = copy_and_fix_node(env, block, copy_block, j, node);
		/* we might hit values in blocks that have already been processed by a
		 * recursive find_candidate()/find_const_or_confirm() call */
		assert(get_irn_visited(copy) <= env->visited_nr);
		if (get_irn_visited(copy) >= env->visited_nr) {
			ir_node *prev_copy = get_irn_link(copy);
			if (prev_copy != NULL)
				set_irn_link(node, prev_copy);
	/* fix data-flow (and reconstruct SSA if needed) */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		if (is_Block(node)) {
			/* Block->Block edge, should be the MacroBlock edge */
			assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
		DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));
		copy_node = get_irn_link(node);
		construct_ssa(block, node, copy_block, copy_node);
 * Returns whether the Cmp evaluates to true or false, or cannot be evaluated.
 * 1: true, 0: false, -1: can't evaluate
 * @param pnc       the compare mode of the Compare
 * @param tv_left   the left tarval
 * @param tv_right  the right tarval
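 *
 * Illustrative example of the subset check below: for pnc == pn_Cmp_Le (the
 * Lt and Eq bits), a tarval_cmp() result of pn_Cmp_Gt is not contained in
 * pnc, so the Cmp is known to be false; a result of pn_Cmp_Lt is contained,
 * so it is known to be true.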
static int eval_cmp_tv(pn_Cmp pnc, tarval *tv_left, tarval *tv_right)
	pn_Cmp cmp_result = tarval_cmp(tv_left, tv_right);
	/* does the compare evaluate to true? */
	if (cmp_result == pn_Cmp_False)
	if ((cmp_result & pnc) != cmp_result)
 * Returns whether the Cmp evaluates to true or false according to VRP
 * information, or cannot be evaluated.
 * 1: true, 0: false, -1: can't evaluate
 * @param pnc    the compare mode of the Compare
 * @param left   the left node
 * @param right  the right node
static int eval_cmp_vrp(pn_Cmp pnc, ir_node *left, ir_node *right)
	pn_Cmp cmp_result = vrp_cmp(left, right);
	/* does the compare evaluate to true? */
	if (cmp_result == pn_Cmp_False)
	if ((cmp_result & pnc) != cmp_result)
 * Returns whether the Cmp evaluates to true or false, or cannot be evaluated.
 * 1: true, 0: false, -1: can't evaluate
 * @param env   the environment
 * @param cand  the candidate node, either a Const or a Confirm
static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
	if (is_Const(cand)) {
		tarval *tv_cand = get_Const_tarval(cand);
		tarval *tv_cmp = get_Const_tarval(env->cnst);
		return eval_cmp_tv(env->pnc, tv_cand, tv_cmp);
	} else { /* a Confirm */
		tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->pnc);
		if (res == tarval_bad)
		return res == tarval_b_true;
 * Check for Const or Confirm with Const.
static int is_Const_or_Confirm(const ir_node *node)
	if (is_Confirm(node))
		node = get_Confirm_bound(node);
	return is_Const(node);
 * Get the tarval of a Const, or of a Confirm whose bound is a Const.
static tarval *get_Const_or_Confirm_tarval(const ir_node *node)
	if (is_Confirm(node)) {
		if (get_Confirm_bound(node))
			node = get_Confirm_bound(node);
	return get_Const_tarval(node);
static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
	ir_node *block = get_nodes_block(jump);
	if (irn_visited_else_mark(value))
	if (is_Const_or_Confirm(value)) {
		if (eval_cmp(env, value) <= 0) {
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block
		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);
		split_critical_edge(env->true_block, 0);
		/* we need a bigger visited nr when going back */
	/* the Phi has to be in the same Block as the Jmp */
	if (get_nodes_block(value) != block) {
	arity = get_irn_arity(value);
	for (i = 0; i < arity; ++i) {
		ir_node *phi_pred = get_Phi_pred(value, i);
		ir_node *cfgpred = get_Block_cfgpred(block, i);
		copy_block = find_const_or_confirm(env, cfgpred, phi_pred);
		if (copy_block == NULL)
		/* copy duplicated nodes in copy_block and fix SSA */
		copy_and_fix(env, block, copy_block, i);
		if (copy_block == get_nodes_block(cfgpred)) {
			env->cnst_pred = block;
		/* return now as we can't process more possibilities in 1 run */
static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
	ir_node *block = get_nodes_block(jump);
	if (irn_visited_else_mark(value)) {
	if (is_Const_or_Confirm(value)) {
		tarval *tv = get_Const_or_Confirm_tarval(value);
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block
		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);
		split_critical_edge(env->true_block, 0);
		/* we need a bigger visited nr when going back */
	/* the Phi has to be in the same Block as the Jmp */
	if (get_nodes_block(value) != block)
	arity = get_irn_arity(value);
	for (i = 0; i < arity; ++i) {
		ir_node *phi_pred = get_Phi_pred(value, i);
		ir_node *cfgpred = get_Block_cfgpred(block, i);
		copy_block = find_candidate(env, cfgpred, phi_pred);
		if (copy_block == NULL)
		/* copy duplicated nodes in copy_block and fix SSA */
		copy_and_fix(env, block, copy_block, i);
		if (copy_block == get_nodes_block(cfgpred)) {
			env->cnst_pred = block;
		/* return now as we can't process more possibilities in 1 run */
	if (is_Proj(value)) {
		ir_node *cmp = get_Proj_pred(value);
		left = get_Cmp_left(cmp);
		right = get_Cmp_right(cmp);
		pnc = get_Proj_proj(value);
		/* we assume that the constant is on the right side, swap left/right
		if (is_Const(left)) {
			pnc = get_inversed_pnc(pnc);
		if (!is_Const(right))
		if (get_nodes_block(left) != block) {
		/* negate condition when we're looking for the false block */
		if (env->tv == tarval_b_false) {
			pnc = get_negated_pnc(pnc, get_irn_mode(right));
		/* (recursively) look if a pred of a Phi is a constant or a Confirm */
		return find_const_or_confirm(env, jump, left);
 * Block-walker: searches for the following construct:
 *
 *    Const or Phi with constants
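 *             |
 *            Cmp
 *             |
 *           Cond
 *             |
 *           ProjX
 *             |
 *       current block
 *
 * (Illustrative sketch: the block's single ProjX predecessor leads to a Cond
 * whose mode_b selector is a Cmp of a Phi or Const against a constant.)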
static void thread_jumps(ir_node* block, void* data)
	jumpthreading_env_t env;
	int selector_evaluated;
	const ir_edge_t *edge, *next;
	if (get_Block_n_cfgpreds(block) != 1)
	projx = get_Block_cfgpred(block, 0);
	assert(get_irn_mode(projx) == mode_X);
	cond = get_Proj_pred(projx);
	selector = get_Cond_selector(cond);
	/* TODO handle switch Conds */
	if (get_irn_mode(selector) != mode_b)
	/* handle cases that can be immediately evaluated */
	selector_evaluated = -1;
	if (is_Proj(selector)) {
		ir_node *cmp = get_Proj_pred(selector);
		ir_node *left = get_Cmp_left(cmp);
		ir_node *right = get_Cmp_right(cmp);
		if (is_Const(left) && is_Const(right)) {
			int pnc = get_Proj_proj(selector);
			tarval *tv_left = get_Const_tarval(left);
			tarval *tv_right = get_Const_tarval(right);
			selector_evaluated = eval_cmp_tv(pnc, tv_left, tv_right);
		if (selector_evaluated < 0) {
			/* This is only the case if the predecessor nodes are not
			 * constant or the comparison could not be evaluated.
			 * Try with VRP information now.
			int pnc = get_Proj_proj(selector);
			selector_evaluated = eval_cmp_vrp(pnc, left, right);
	} else if (is_Const_or_Confirm(selector)) {
		tarval *tv = get_Const_or_Confirm_tarval(selector);
		if (tv == tarval_b_true) {
			selector_evaluated = 1;
			assert(tv == tarval_b_false);
			selector_evaluated = 0;
	env.cnst_pred = NULL;
	if (get_Proj_proj(projx) == pn_Cond_false) {
		env.tv = tarval_b_false;
		if (selector_evaluated >= 0)
			selector_evaluated = !selector_evaluated;
		env.tv = tarval_b_true;
	if (selector_evaluated == 0) {
		exchange(projx, bad);
	} else if (selector_evaluated == 1) {
		dbg_info *dbgi = get_irn_dbg_info(selector);
		ir_node *jmp = new_rd_Jmp(dbgi, get_nodes_block(projx));
		DBG_OPT_JUMPTHREADING(projx, jmp);
		exchange(projx, jmp);
	/* (recursively) look if a pred of a Phi is a constant or a Confirm */
	env.true_block = block;
	inc_irg_visited(current_ir_graph);
	env.visited_nr = get_irg_visited(current_ir_graph);
	copy_block = find_candidate(&env, projx, selector);
	if (copy_block == NULL)
	/* we have to remove the edge towards the pred as the pred now
	 * jumps into the true_block. We also have to shorten Phis
	 * in our block because of this */
	cnst_pos = env.cnst_pos;
	foreach_out_edge_safe(env.cnst_pred, edge, next) {
		ir_node *node = get_edge_src_irn(edge);
		set_Phi_pred(node, cnst_pos, bad);
	set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);
	/* the graph is changed now */
void opt_jumpthreading(ir_graph* irg)
	FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");
	DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));
	remove_critical_cf_edges(irg);
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
	irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
	/* control flow changed, some blocks may become dead */
	set_irg_outs_inconsistent(irg);
	set_irg_doms_inconsistent(irg);
	set_irg_extblk_inconsistent(irg);
	set_irg_loopinfo_inconsistent(irg);
	set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
	/* Dead code might be created. Optimize it away as it is dangerous
	 * to call optimize_df() on dead code. */
/* Creates an ir_graph pass for opt_jumpthreading. */
ir_graph_pass_t *opt_jumpthreading_pass(const char *name)
	return def_graph_pass(name ? name : "jumpthreading", opt_jumpthreading);
}  /* opt_jumpthreading_pass */
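
/*
 * Minimal usage sketch (illustrative only):
 *
 *     ir_graph *irg = ...;     // some graph to be optimized
 *     opt_jumpthreading(irg);  // run the optimization directly
 *
 * Alternatively, the ir_graph_pass_t returned by opt_jumpthreading_pass(NULL)
 * can be scheduled in a pass manager.
 */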