/*
 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License Agreement
 * provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Path-Sensitive Jump Threading
 * @author  Christoph Mallon, Matthias Braun
 */
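
/*
 * A minimal illustration of the transformation (not from the original
 * source): in
 *
 *     if (x > 3) { ... }
 *     if (x > 3) { ... }
 *
 * the second condition is already decided on every path leaving the first
 * one, so the jump at the end of each branch can be redirected ("threaded")
 * past the second test, duplicating the nodes in between where necessary.
 */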
#include "iroptimize.h"

/* headers needed by the code below (reconstructed; the original include
 * list is not fully shown in this excerpt) */
#include "array_t.h"
#include "debug.h"
#include "ircons.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irnode_t.h"
#include "tv.h"
#include "vrp.h"
#include "iropt_dbg.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg);

/**
 * Add the new predecessor x to the node, which is either a Block or a Phi.
 */
static void add_pred(ir_node* node, ir_node* x)
{
	ir_node **ins;
	int i, n;

	assert(is_Block(node));

	n = get_irn_arity(node);
	NEW_ARR_A(ir_node*, ins, n + 1);
	for (i = 0; i < n; i++)
		ins[i] = get_irn_n(node, i);
	ins[n] = x;
	set_irn_in(node, n + 1, ins);
}
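
/* The SSA reconstruction below only ever deals with two definitions of a
 * value: the original one and the copy created by jump threading. The copy
 * is stashed in these globals so search_def_and_create_phis() can hand it
 * out once the walk reaches its block. */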
static ir_node *ssa_second_def;
static ir_node *ssa_second_def_block;

static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
                                           int first)
{
	int i;
	int n_cfgpreds;
	ir_graph *irg;
	ir_node *phi;
	ir_node **in;
	ir_node *dummy;

	/* In case of a bad input to a block we need to return the bad value */
	if (is_Bad(block)) {
		ir_graph *irg = get_irn_irg(block);
		return new_r_Bad(irg, mode);
	}

	/* The other defs can't simply be marked: a user of the original value
	 * may sit in the same block as the alternative definition, and in that
	 * case we must not use the alternative definition. So we keep a flag
	 * that indicates whether we have walked at least one block away and may
	 * therefore use the alternative definition. */
	if (block == ssa_second_def_block && !first) {
		return ssa_second_def;
	}

	/* already processed this block? */
	if (irn_visited(block)) {
		ir_node *value = (ir_node*) get_irn_link(block);
		return value;
	}

	irg = get_irn_irg(block);
	assert(block != get_irg_start_block(irg));

	/* a Block with only 1 predecessor needs no Phi */
	n_cfgpreds = get_Block_n_cfgpreds(block);
	if (n_cfgpreds == 1) {
		ir_node *pred_block = get_Block_cfgpred_block(block, 0);
		ir_node *value      = search_def_and_create_phis(pred_block, mode, 0);

		set_irn_link(block, value);
		mark_irn_visited(block);
		return value;
	}

	/* create a new Phi */
	NEW_ARR_A(ir_node*, in, n_cfgpreds);
	dummy = new_r_Dummy(irg, mode);
	for (i = 0; i < n_cfgpreds; ++i)
		in[i] = dummy;

	phi = new_r_Phi(block, n_cfgpreds, in, mode);
	set_irn_link(block, phi);
	mark_irn_visited(block);

	/* set Phi predecessors */
	for (i = 0; i < n_cfgpreds; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		ir_node *pred_val   = search_def_and_create_phis(pred_block, mode, 0);

		set_irn_n(phi, i, pred_val);
	}

	return phi;
}

/**
 * Given the original value and its copy, constructs SSA form for the users
 * of the original value (the users are determined through the out-edges of
 * the value). Uses the irn_visited flags. Works without using the dominance
 * tree.
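 *
 * Illustrative sketch (not part of the original comment): if a value v is
 * defined in block B and jump threading created a copy v' in block B', then
 * a user of v that is reachable from both definitions gets a new Phi(v, v')
 * at the join point, while users reached by only one definition keep using
 * it directly.
 */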
static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
                          ir_node *second_block, ir_node *second_val)
{
	ir_graph *irg;
	ir_mode *mode;
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* no need to do anything */
	if (orig_val == second_val)
		return;

	irg = get_irn_irg(orig_val);
	inc_irg_visited(irg);

	mode = get_irn_mode(orig_val);
	set_irn_link(orig_block, orig_val);
	mark_irn_visited(orig_block);

	ssa_second_def_block = second_block;
	ssa_second_def       = second_val;

	/* Only fix the users of the first, i.e. the original node */
	foreach_out_edge_safe(orig_val, edge, next) {
		ir_node *user       = get_edge_src_irn(edge);
		int      j          = get_edge_src_pos(edge);
		ir_node *user_block = get_nodes_block(user);
		ir_node *newval;

		DB((dbg, LEVEL_3, ">>> Fixing user %+F (pred %d == %+F)\n", user, j, get_irn_n(user, j)));

		if (is_Phi(user)) {
			ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
			newval = search_def_and_create_phis(pred_block, mode, 1);
		} else {
			newval = search_def_and_create_phis(user_block, mode, 1);
		}

		/* don't fix newly created Phis from the SSA construction */
		if (newval != user) {
			DB((dbg, LEVEL_4, ">>>> Setting input %d of %+F to %+F\n", j, user, newval));
			set_irn_n(user, j, newval);
		}
	}
}

/**
 * Jump threading produces critical edges, e.g. an edge B->C where B has
 * several successors and C several predecessors.
 * By splitting such a critical edge, further threadings may become possible.
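 *
 * (A critical edge leads from a block with more than one successor to a
 * block with more than one predecessor; it is split by inserting a fresh
 * block that contains only a Jmp, which is exactly what the function below
 * constructs.)
 */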
static void split_critical_edge(ir_node *block, int pos)
{
	ir_graph *irg = get_irn_irg(block);
	ir_node *in[1];
	ir_node *new_block, *new_jmp;

	in[0] = get_Block_cfgpred(block, pos);
	new_block = new_r_Block(irg, 1, in);
	new_jmp = new_r_Jmp(new_block);
	set_Block_cfgpred(block, pos, new_jmp);
}

typedef struct jumpthreading_env_t {
	ir_node     *true_block;  /**< The block towards which we thread */
	ir_node     *cmp;         /**< The Compare node that might be partially evaluated */
	ir_relation  relation;    /**< The relation of the Compare node. */
	ir_node     *cnst;        /**< The Const we compare against */
	ir_tarval   *tv;          /**< tarval of the Cond projection we follow */
	ir_visited_t visited_nr;  /**< visited number used to recognize copies */

	ir_node     *cnst_pred;   /**< the block before the constant */
	int          cnst_pos;    /**< the pos to the constant block (needed to
	                               kill that edge later) */
} jumpthreading_env_t;

static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                                  ir_node *block, ir_node *copy_block, int j,
                                  ir_node *node)
{
	int      i, arity;
	ir_node *copy;

	/* we can evaluate Phis right now, all other nodes get copied */
	if (is_Phi(node)) {
		copy = get_Phi_pred(node, j);
		/* we might have to evaluate a Phi-cascade */
		if (get_irn_visited(copy) >= env->visited_nr) {
			copy = (ir_node*)get_irn_link(copy);
		}
	} else {
		copy = exact_copy(node);
		set_nodes_block(copy, copy_block);
	}

	assert(get_irn_mode(copy) != mode_X);

	arity = get_irn_arity(copy);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(copy, i);
		ir_node *new_pred;

		if (get_nodes_block(pred) != block)
			continue;

		if (get_irn_visited(pred) >= env->visited_nr) {
			new_pred = (ir_node*)get_irn_link(pred);
		} else {
			new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
		}
		DB((dbg, LEVEL_2, ">> Set Pred of %+F to %+F\n", copy, new_pred));
		set_irn_n(copy, i, new_pred);
	}
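
	/* memoize the copy on the original node: the link field holds the copy
	 * and the visited number stamps it as belonging to the current pass */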
	set_irn_link(node, copy);
	set_irn_visited(node, env->visited_nr);

	return copy;
}

static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                         ir_node *copy_block, int j)
{
	const ir_edge_t *edge;

	/* Look at all nodes in the cond_block and copy them into pred */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		ir_node *copy;
		ir_mode *mode;

		if (is_End(node)) {
			/* the edge is a Keep edge: if the End block is unreachable via
			 * normal control flow, we must maintain End's reachability with
			 * Keeps. */
			keep_alive(copy_block);
			continue;
		}

		/* ignore control flow */
		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
			continue;

		/* we may not copy mode_b nodes, because this could produce Phis with
		 * mode_b, which can't be handled in all backends. Instead we
		 * duplicate the node and move it to its users */
		if (mode == mode_b) {
			const ir_edge_t *edge, *next;
			ir_node *pred;
			long     pn;

			assert(is_Proj(node));

			pred = get_Proj_pred(node);
			pn   = get_Proj_proj(node);

			foreach_out_edge_safe(node, edge, next) {
				ir_node *cmp_copy;
				ir_node *user       = get_edge_src_irn(edge);
				int      pos        = get_edge_src_pos(edge);
				ir_node *user_block = get_nodes_block(user);

				if (user_block == block)
					continue;

				cmp_copy = exact_copy(pred);
				set_nodes_block(cmp_copy, user_block);
				copy = new_r_Proj(cmp_copy, mode_b, pn);
				set_irn_n(user, pos, copy);
			}
			continue;
		}

		copy = copy_and_fix_node(env, block, copy_block, j, node);

		/* we might hit values in blocks that have already been processed by
		 * a recursive find_const_or_confirm()/find_candidate() call */
		assert(get_irn_visited(copy) <= env->visited_nr);
		if (get_irn_visited(copy) >= env->visited_nr) {
			ir_node *prev_copy = (ir_node*)get_irn_link(copy);
			if (prev_copy != NULL)
				set_irn_link(node, prev_copy);
		}
	}

	/* fix data-flow (and reconstruct SSA if needed) */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		ir_node *copy_node;
		ir_mode *mode;

		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
			continue;

		DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));

		copy_node = (ir_node*)get_irn_link(node);
		construct_ssa(block, node, copy_block, copy_node);
	}
}

/**
 * Returns whether the Cmp evaluates to true or false, or can't be evaluated:
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param relation  the relation of the Compare
 * @param tv_left   the left tarval
 * @param tv_right  the right tarval
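 *
 * Worked example (illustrative): with relation = ir_relation_less,
 * tv_left = 2 and tv_right = 3, tarval_cmp() yields ir_relation_less and
 * (ir_relation_less & relation) != 0, so the function returns 1 (true).
 */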
static int eval_cmp_tv(ir_relation relation, ir_tarval *tv_left,
                       ir_tarval *tv_right)
{
	ir_relation cmp_result = tarval_cmp(tv_left, tv_right);

	/* does the compare evaluate to true? */
	if (cmp_result == ir_relation_false)
		return -1;
	if ((cmp_result & relation) != 0)
		return 1;
	return 0;
}

/* Matze: disabled, check first whether the compare is still correct */

/**
 * Returns whether the Cmp evaluates to true or false according to VRP
 * information, or can't be evaluated:
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param relation  the relation of the Compare
 * @param left      the left node
 * @param right     the right node
 */
static int eval_cmp_vrp(ir_relation relation, ir_node *left, ir_node *right)
{
	ir_relation cmp_result = vrp_cmp(left, right);
	/* does the compare evaluate to true? */
	if (cmp_result == ir_relation_false)
		return -1;
	if ((cmp_result & relation) != cmp_result) {
		if ((cmp_result & relation) != 0)
			return -1;
		return 0;
	}
	return 1;
}

/**
 * Returns whether the Cmp evaluates to true or false, or can't be evaluated:
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param env   the environment
 * @param cand  the candidate node, either a Const or a Confirm
 */
static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
{
	if (is_Const(cand)) {
		ir_tarval *tv_cand = get_Const_tarval(cand);
		ir_tarval *tv_cmp  = get_Const_tarval(env->cnst);

		return eval_cmp_tv(env->relation, tv_cand, tv_cmp);
	} else { /* a Confirm */
		ir_tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->relation);

		if (res == tarval_bad)
			return -1;
		return res == tarval_b_true;
	}
}

/**
 * Check for a Const, or a Confirm whose bound is a Const.
 */
static int is_Const_or_Confirm(const ir_node *node)
{
	if (is_Confirm(node))
		node = get_Confirm_bound(node);
	return is_Const(node);
}

/**
 * Get the tarval of a Const, or of the Const bound of a Confirm.
 */
static ir_tarval *get_Const_or_Confirm_tarval(const ir_node *node)
{
	if (is_Confirm(node)) {
		if (get_Confirm_bound(node))
			node = get_Confirm_bound(node);
	}
	return get_Const_tarval(node);
}

static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
                                      ir_node *value)
{
	ir_node *block = get_nodes_block(jump);

	if (irn_visited_else_mark(value))
		return NULL;

	if (is_Const_or_Confirm(value)) {
		if (eval_cmp(env, value) <= 0)
			return NULL;

		DB((dbg, LEVEL_1,
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block));

		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);
		split_critical_edge(env->true_block, 0);

		/* we need a bigger visited nr when going back */
		env->visited_nr++;
		return block;
	}

	if (is_Phi(value)) {
		int i, arity;

		/* the Phi has to be in the same Block as the Jmp */
		if (get_nodes_block(value) != block)
			return NULL;

		arity = get_irn_arity(value);
		for (i = 0; i < arity; ++i) {
			ir_node *copy_block;
			ir_node *phi_pred = get_Phi_pred(value, i);
			ir_node *cfgpred  = get_Block_cfgpred(block, i);

			copy_block = find_const_or_confirm(env, cfgpred, phi_pred);
			if (copy_block == NULL)
				continue;

			/* copy duplicated nodes in copy_block and fix SSA */
			copy_and_fix(env, block, copy_block, i);

			if (copy_block == get_nodes_block(cfgpred)) {
				env->cnst_pred = block;
				env->cnst_pos  = i;
			}

			/* return now as we can't process more possibilities in 1 run */
			return copy_block;
		}
	}

	return NULL;
}

static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                               ir_node *value)
{
	ir_node *block = get_nodes_block(jump);

	if (irn_visited_else_mark(value)) {
		return NULL;
	}

	if (is_Const_or_Confirm(value)) {
		ir_tarval *tv = get_Const_or_Confirm_tarval(value);

		if (tv != env->tv)
			return NULL;

		DB((dbg, LEVEL_1,
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block));

		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);
		split_critical_edge(env->true_block, 0);

		/* we need a bigger visited nr when going back */
		env->visited_nr++;
		return block;
	}

	if (is_Phi(value)) {
		int i, arity;

		/* the Phi has to be in the same Block as the Jmp */
		if (get_nodes_block(value) != block)
			return NULL;

		arity = get_irn_arity(value);
		for (i = 0; i < arity; ++i) {
			ir_node *copy_block;
			ir_node *phi_pred = get_Phi_pred(value, i);
			ir_node *cfgpred  = get_Block_cfgpred(block, i);

			copy_block = find_candidate(env, cfgpred, phi_pred);
			if (copy_block == NULL)
				continue;

			/* copy duplicated nodes in copy_block and fix SSA */
			copy_and_fix(env, block, copy_block, i);

			if (copy_block == get_nodes_block(cfgpred)) {
				env->cnst_pred = block;
				env->cnst_pos  = i;
			}

			/* return now as we can't process more possibilities in 1 run */
			return copy_block;
		}
	}

	if (is_Cmp(value)) {
		ir_node    *cmp      = value;
		ir_node    *left     = get_Cmp_left(cmp);
		ir_node    *right    = get_Cmp_right(cmp);
		ir_relation relation = get_Cmp_relation(cmp);

		/* we assume that the constant is on the right side, swap left/right
		 * if needed */
		if (is_Const(left)) {
			ir_node *t = left;
			left  = right;
			right = t;

			relation = get_inversed_relation(relation);
		}

		if (!is_Const(right))
			return NULL;

		if (get_nodes_block(left) != block)
			return NULL;

		/* negate the condition when we're looking for the false block */
		if (env->tv == tarval_b_false) {
			relation = get_negated_relation(relation);
		}
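
		/* Illustrative walk-through (not from the original comments): for
		 * "if (3 < x)" threading along the false Proj, the Const first
		 * moves to the right ("x > 3", the inversed relation), which is
		 * then negated to "x <= 3"; below, the Phi predecessors of x are
		 * searched for Consts/Confirms that decide this relation. */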

		/* (recursively) look if a pred of a Phi is a constant or a Confirm */
		env->cmp      = cmp;
		env->relation = relation;
		env->cnst     = right;

		return find_const_or_confirm(env, jump, left);
	}

	return NULL;
}

/**
 * Block-walker: searches for the following construct: a block whose only
 * control-flow predecessor is the ProjX of a Cond whose mode_b selector is
 * a Const, or a Cmp/Phi with constants that becomes decidable on at least
 * one path.
 */
static void thread_jumps(ir_node* block, void* data)
{
	jumpthreading_env_t env;
	int *changed = (int*)data;
	ir_node *selector;
	ir_node *projx;
	ir_node *cond;
	ir_node *copy_block;
	int      selector_evaluated;
	const ir_edge_t *edge, *next;
	ir_graph *irg;
	ir_node  *badX;
	int       cnst_pos;

	/* we do not deal with Phis, so restrict this to exactly one cfgpred */
	if (get_Block_n_cfgpreds(block) != 1)
		return;

	projx = get_Block_cfgpred(block, 0);
	if (!is_Proj(projx))
		return;
	assert(get_irn_mode(projx) == mode_X);

	cond = get_Proj_pred(projx);
	if (!is_Cond(cond))
		return;

	selector = get_Cond_selector(cond);
	/* TODO handle switch Conds */
	if (get_irn_mode(selector) != mode_b)
		return;

	/* handle cases that can be immediately evaluated */
	selector_evaluated = -1;
	if (is_Cmp(selector)) {
		ir_node *left  = get_Cmp_left(selector);
		ir_node *right = get_Cmp_right(selector);
		if (is_Const(left) && is_Const(right)) {
			ir_relation relation = get_Cmp_relation(selector);
			ir_tarval  *tv_left  = get_Const_tarval(left);
			ir_tarval  *tv_right = get_Const_tarval(right);

			selector_evaluated = eval_cmp_tv(relation, tv_left, tv_right);
		}
		if (selector_evaluated < 0) {
			/* This is only the case if the predecessor nodes are not
			 * constant or the comparison could not be evaluated.
			 * Try with VRP information now. */
			ir_relation relation = get_Cmp_relation(selector);

			selector_evaluated = eval_cmp_vrp(relation, left, right);
		}
	} else if (is_Const_or_Confirm(selector)) {
		ir_tarval *tv = get_Const_or_Confirm_tarval(selector);
		if (tv == tarval_b_true) {
			selector_evaluated = 1;
		} else {
			assert(tv == tarval_b_false);
			selector_evaluated = 0;
		}
	}

	env.cnst_pred = NULL;
	if (get_Proj_proj(projx) == pn_Cond_false) {
		env.tv = tarval_b_false;
		if (selector_evaluated >= 0)
			selector_evaluated = !selector_evaluated;
	} else {
		env.tv = tarval_b_true;
	}
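
	/* Example (illustrative): on the false Proj of "x == 0" the jump is
	 * taken exactly when the selector evaluates to false, hence the
	 * inversion of selector_evaluated above. */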

	if (selector_evaluated == 0) {
		ir_graph *irg = get_irn_irg(block);
		ir_node  *bad = new_r_Bad(irg, mode_X);
		exchange(projx, bad);
		*changed = 1;
		return;
	} else if (selector_evaluated == 1) {
		dbg_info *dbgi = get_irn_dbg_info(selector);
		ir_node  *jmp  = new_rd_Jmp(dbgi, get_nodes_block(projx));
		DBG_OPT_JUMPTHREADING(projx, jmp);
		exchange(projx, jmp);
		*changed = 1;
		return;
	}

	/* (recursively) look if a pred of a Phi is a constant or a Confirm */
	env.true_block = block;
	irg = get_irn_irg(block);
	inc_irg_visited(irg);
	env.visited_nr = get_irg_visited(irg);

	copy_block = find_candidate(&env, projx, selector);
	if (copy_block == NULL)
		return;

	/* We might thread the condition block of an infinite loop, such that
	 * there is no path to End anymore. */
	keep_alive(block);

	/* we have to remove the edge towards the pred as the pred now
	 * jumps into the true_block. We also have to shorten Phis
	 * in our block because of this */
	badX     = new_r_Bad(irg, mode_X);
	cnst_pos = env.cnst_pos;

	/* shorten Phis */
	foreach_out_edge_safe(env.cnst_pred, edge, next) {
		ir_node *node = get_edge_src_irn(edge);

		if (is_Phi(node)) {
			ir_node *bad = new_r_Bad(irg, get_irn_mode(node));
			set_Phi_pred(node, cnst_pos, bad);
		}
	}

	set_Block_cfgpred(env.cnst_pred, cnst_pos, badX);
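
	/* only the CFG edge is killed here; the dead predecessor block and the
	 * Bad Phi inputs are removed by later cleanup passes */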

	/* the graph is changed now */
	*changed = 1;
}

void opt_jumpthreading(ir_graph* irg)
{
	int changed;
	int rerun;

	FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");

	DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));

	remove_critical_cf_edges(irg);

	/* ugly: jump threading might get confused by garbage nodes
	 * of mode_X in copy_and_fix_node(), so remove all garbage edges. */
	edges_deactivate(irg);
	edges_activate(irg);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

	changed = 0;
	do {
		rerun = 0;
		irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
		changed |= rerun;
	} while (rerun);

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

	if (changed) {
		/* control flow changed, some blocks may become dead */
		set_irg_doms_inconsistent(irg);
		set_irg_extblk_inconsistent(irg);
		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
	}
}

/* Creates an ir_graph pass for opt_jumpthreading. */
ir_graph_pass_t *opt_jumpthreading_pass(const char *name)
{
	return def_graph_pass(name ? name : "jumpthreading", opt_jumpthreading);
}  /* opt_jumpthreading_pass */
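
/* Typical use (illustrative, not from this file): run the optimization
 * directly via opt_jumpthreading(irg), or create the pass with
 * opt_jumpthreading_pass(NULL) and schedule it in a graph pass manager. */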