/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Path-Sensitive Jump Threading
 * @author  Christoph Mallon, Matthias Braun
 */
#include "config.h"

#include "iroptimize.h"

#include <assert.h>
#include "array_t.h"
#include "debug.h"
#include "ircons.h"
#include "iredges.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgwalk.h"
#include "irnode_t.h"
#include "irtools.h"
#include "tv.h"
#include "vrp.h"
#include "opt_confirms.h"
#include "iropt_dbg.h"
#include "irpass.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg);
/**
 * Add the new predecessor x to node, which is either a Block or a Phi.
 */
static void add_pred(ir_node* node, ir_node* x)
{
	ir_node **ins;
	int       n;
	int       i;

	assert(is_Block(node) || is_Phi(node));

	n = get_irn_arity(node);
	NEW_ARR_A(ir_node*, ins, n + 1);
	for (i = 0; i < n; i++)
		ins[i] = get_irn_n(node, i);
	ins[n] = x;  /* append the new predecessor */
	set_irn_in(node, n + 1, ins);
}
static ir_node *ssa_second_def;
static ir_node *ssa_second_def_block;
static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
                                           int first)
{
	int        i;
	int        n_cfgpreds;
	ir_graph  *irg;
	ir_node   *phi;
	ir_node  **in;

	/* This is needed because we create bads sometimes */
	if (is_Bad(block)) {
		ir_graph *irg = get_irn_irg(block);
		return new_r_Bad(irg);
	}

	/* the other defs can't be marked for cases where a user of the original
	 * value is in the same block as the alternative definition.
	 * In this case we mustn't use the alternative definition.
	 * So we keep a flag that indicates whether we walked at least 1 block
	 * away and may use the alternative definition */
	if (block == ssa_second_def_block && !first) {
		return ssa_second_def;
	}

	/* already processed this block? */
	if (irn_visited(block)) {
		ir_node *value = (ir_node*) get_irn_link(block);
		return value;
	}

	irg = get_irn_irg(block);
	assert(block != get_irg_start_block(irg));

	/* a Block with only 1 predecessor needs no Phi */
	n_cfgpreds = get_Block_n_cfgpreds(block);
	if (n_cfgpreds == 1) {
		ir_node *pred_block = get_Block_cfgpred_block(block, 0);
		ir_node *value      = search_def_and_create_phis(pred_block, mode, 0);

		set_irn_link(block, value);
		mark_irn_visited(block);
		return value;
	}

	/* create a new Phi */
	NEW_ARR_A(ir_node*, in, n_cfgpreds);
	for (i = 0; i < n_cfgpreds; ++i)
		in[i] = new_r_Unknown(irg, mode);

	phi = new_r_Phi(block, n_cfgpreds, in, mode);
	set_irn_link(block, phi);
	mark_irn_visited(block);

	/* set Phi predecessors */
	for (i = 0; i < n_cfgpreds; ++i) {
		ir_node *pred_block = get_Block_cfgpred_block(block, i);
		ir_node *pred_val   = search_def_and_create_phis(pred_block, mode, 0);

		set_irn_n(phi, i, pred_val);
	}

	return phi;
}
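
/* Illustrative sketch (not part of the pass): for a diamond CFG
 *
 *         A  (orig_block, orig_val defined here)
 *        / \
 *       B   C  (C == ssa_second_def_block, defines second_val)
 *        \ /
 *         D  (a user of orig_val)
 *
 * search_def_and_create_phis(D, mode, 1) creates Phi(orig_val, second_val)
 * in D: the walk over B reaches the marked block A and yields orig_val,
 * while the walk over C hits ssa_second_def_block and yields second_val.
 */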
/**
 * Given a set of values this function constructs SSA-form for the users of the
 * first value (the users are determined through the out-edges of the value).
 * Uses the irn_visited flags. Works without using the dominance tree.
 */
static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
                          ir_node *second_block, ir_node *second_val)
{
	ir_graph        *irg;
	ir_mode         *mode;
	const ir_edge_t *edge;
	const ir_edge_t *next;

	/* no need to do anything */
	if (orig_val == second_val)
		return;

	irg = get_irn_irg(orig_val);
	inc_irg_visited(irg);

	mode = get_irn_mode(orig_val);
	set_irn_link(orig_block, orig_val);
	mark_irn_visited(orig_block);

	ssa_second_def_block = second_block;
	ssa_second_def       = second_val;

	/* Only fix the users of the first, i.e. the original node */
	foreach_out_edge_safe(orig_val, edge, next) {
		ir_node *user       = get_edge_src_irn(edge);
		int      j          = get_edge_src_pos(edge);
		ir_node *user_block = get_nodes_block(user);
		ir_node *newval;

		/* ignore keeps */
		if (is_End(user))
			continue;

		DB((dbg, LEVEL_3, ">>> Fixing user %+F (pred %d == %+F)\n", user, j, get_irn_n(user, j)));

		if (is_Phi(user)) {
			ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
			newval = search_def_and_create_phis(pred_block, mode, 1);
		} else {
			newval = search_def_and_create_phis(user_block, mode, 1);
		}

		/* don't fix newly created Phis from the SSA construction */
		if (newval != user) {
			DB((dbg, LEVEL_4, ">>>> Setting input %d of %+F to %+F\n", j, user, newval));
			set_irn_n(user, j, newval);
		}
	}
}
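
/* Typical use within this pass (see copy_and_fix() below): after a node of
 * the threaded block has been duplicated,
 *
 *     copy_node = get_irn_link(node);
 *     construct_ssa(block, node, copy_block, copy_node);
 *
 * rewires all users of the original node to Phis selecting between the
 * original and the copy, without needing up-to-date dominance information.
 */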
static void split_critical_edge(ir_node *block, int pos)
{
	ir_graph *irg = get_irn_irg(block);
	ir_node  *in[1];
	ir_node  *new_block;
	ir_node  *new_jmp;

	in[0]     = get_Block_cfgpred(block, pos);
	new_block = new_r_Block(irg, 1, in);
	new_jmp   = new_r_Jmp(new_block);
	set_Block_cfgpred(block, pos, new_jmp);
}
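
/* Sketch of the transformation (illustration only):
 *
 *     pred ==critical==> block   becomes   pred --> new_block --Jmp--> block
 *
 * The edge at position pos then ends in a block with a single predecessor,
 * so it is no longer critical and can be manipulated without disturbing
 * other control flow.
 */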
typedef struct jumpthreading_env_t {
	ir_node      *true_block;
	ir_node      *cmp;        /**< The Compare node that might be partially evaluated */
	pn_Cmp        pnc;        /**< The Compare mode of the Compare node. */
	ir_node      *cnst;
	tarval       *tv;
	ir_visited_t  visited_nr;

	ir_node      *cnst_pred;  /**< the block before the constant */
	int           cnst_pos;   /**< the pos to the constant block (needed to
	                               kill that edge later) */
} jumpthreading_env_t;
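
/* Rough lifecycle of the environment (summarizing the code below):
 * thread_jumps() seeds true_block, tv and visited_nr; find_candidate()
 * fills cmp/pnc/cnst when it walks through a Cmp; cnst_pred/cnst_pos are
 * recorded once the constant predecessor is found, so that thread_jumps()
 * can kill exactly that control flow edge afterwards. */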
static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                                  ir_node *block, ir_node *copy_block, int j,
                                  ir_node *node)
{
	int      i, arity;
	ir_node *copy;

	/* we can evaluate Phis right now, all other nodes get copied */
	if (is_Phi(node)) {
		copy = get_Phi_pred(node, j);
		/* we might have to evaluate a Phi-cascade */
		if (get_irn_visited(copy) >= env->visited_nr) {
			copy = get_irn_link(copy);
		}
	} else {
		copy = exact_copy(node);
		set_nodes_block(copy, copy_block);

		assert(get_irn_mode(copy) != mode_X);

		arity = get_irn_arity(copy);
		for (i = 0; i < arity; ++i) {
			ir_node *pred = get_irn_n(copy, i);
			ir_node *new_pred;

			if (get_nodes_block(pred) != block)
				continue;

			if (get_irn_visited(pred) >= env->visited_nr) {
				new_pred = get_irn_link(pred);
			} else {
				new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
			}
			DB((dbg, LEVEL_2, ">> Set Pred of %+F to %+F\n", copy, new_pred));
			set_irn_n(copy, i, new_pred);
		}
	}

	set_irn_link(node, copy);
	set_irn_visited(node, env->visited_nr);

	return copy;
}
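
/* Example of the Phi short-circuit above (illustration only): when copying
 * along predecessor path j == 0 of a block containing
 *
 *     x = Phi(a, b);
 *     y = Phi(x, c);
 *
 * y evaluates to x and, since x is itself a Phi already visited in this
 * round, the link is followed once more to reach a (a Phi-cascade).
 */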
static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                         ir_node *copy_block, int j)
{
	const ir_edge_t *edge;

	/* Look at all nodes in the cond_block and copy them into pred */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		ir_node *copy;
		ir_mode *mode;

		/* ignore control flow */
		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
			continue;

		/* we may not copy mode_b nodes, because this could produce Phis with
		 * mode_b which can't be handled in all backends. Instead we duplicate
		 * the node and move it to its users */
		if (mode == mode_b) {
			const ir_edge_t *edge, *next;
			ir_node *pred;
			int      pn;

			assert(is_Proj(node));

			pred = get_Proj_pred(node);
			pn   = get_Proj_proj(node);

			foreach_out_edge_safe(node, edge, next) {
				ir_node *cmp_copy;
				ir_node *user       = get_edge_src_irn(edge);
				int      pos        = get_edge_src_pos(edge);
				ir_node *user_block = get_nodes_block(user);

				if (user_block == block)
					continue;

				cmp_copy = exact_copy(pred);
				set_nodes_block(cmp_copy, user_block);
				copy = new_r_Proj(cmp_copy, mode_b, pn);
				set_irn_n(user, pos, copy);
			}
			continue;
		}

		copy = copy_and_fix_node(env, block, copy_block, j, node);

		/* we might hit values in blocks that have already been processed by a
		 * recursive find_candidate() call */
		assert(get_irn_visited(copy) <= env->visited_nr);
		if (get_irn_visited(copy) >= env->visited_nr) {
			ir_node *prev_copy = get_irn_link(copy);
			if (prev_copy != NULL)
				set_irn_link(node, prev_copy);
		}
	}

	/* fix data-flow (and reconstruct SSA if needed) */
	foreach_out_edge(block, edge) {
		ir_node *node = get_edge_src_irn(edge);
		ir_node *copy_node;
		ir_mode *mode;

		mode = get_irn_mode(node);
		if (mode == mode_X || is_Cond(node))
			continue;
		if (mode == mode_b)
			continue;

		DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));

		copy_node = get_irn_link(node);
		construct_ssa(block, node, copy_block, copy_node);
	}
}
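
/* Sketch of the mode_b special case above: instead of copying a Cmp/Proj
 * chain into copy_block (which could later force Phis of mode_b), the Cmp
 * is re-materialized next to each user outside the threaded block:
 *
 *     cmp_copy = exact_copy(pred);                  (a fresh Cmp in user_block)
 *     copy     = new_r_Proj(cmp_copy, mode_b, pn);  (a private mode_b value)
 *
 * so no mode_b value ever has to flow across a block boundary.
 */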
/**
 * Returns whether the cmp evaluates to true or false, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param pnc       the compare mode of the Compare
 * @param tv_left   the left tarval
 * @param tv_right  the right tarval
 */
static int eval_cmp_tv(pn_Cmp pnc, tarval *tv_left, tarval *tv_right)
{
	pn_Cmp cmp_result = tarval_cmp(tv_left, tv_right);

	/* does the compare evaluate to true? */
	if (cmp_result == pn_Cmp_False)
		return -1;
	if ((cmp_result & pnc) != cmp_result)
		return 0;

	return 1;
}
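
/* Worked example: eval_cmp_tv(pn_Cmp_Lt, 2, 3) gets
 * tarval_cmp(2, 3) == pn_Cmp_Lt and (pn_Cmp_Lt & pn_Cmp_Lt) == pn_Cmp_Lt,
 * hence 1 (true). For pnc == pn_Cmp_Gt the masked result (0) differs from
 * cmp_result, hence 0 (false). pn_Cmp_False from tarval_cmp() means the
 * tarvals are not comparable at all, hence -1 (unknown). */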
/**
 * Returns whether the cmp evaluates to true or false according to vrp
 * information, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param pnc    the compare mode of the Compare
 * @param left   the left node
 * @param right  the right node
 */
static int eval_cmp_vrp(pn_Cmp pnc, ir_node *left, ir_node *right)
{
	pn_Cmp cmp_result = vrp_cmp(left, right);
	/* does the compare evaluate to true? */
	if (cmp_result == pn_Cmp_False) {
		return -1;
	}
	if ((cmp_result & pnc) != cmp_result) {
		if ((cmp_result & pnc) != 0) {
			return -1;
		}
		return 0;
	}
	return 1;
}
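
/* Unlike the pure tarval variant, vrp_cmp() may return a combined relation
 * such as pn_Cmp_Le when the value ranges overlap. The inner check above
 * then yields -1: for pnc == pn_Cmp_Lt and cmp_result == pn_Cmp_Le the
 * masked value pn_Cmp_Lt differs from cmp_result but is non-zero, so the
 * comparison can be neither proven nor refuted. */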
/**
 * Returns whether the cmp evaluates to true or false, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param env   the environment
 * @param cand  the candidate node, either a Const or a Confirm
 */
static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
{
	if (is_Const(cand)) {
		tarval *tv_cand = get_Const_tarval(cand);
		tarval *tv_cmp  = get_Const_tarval(env->cnst);

		return eval_cmp_tv(env->pnc, tv_cand, tv_cmp);
	} else { /* a Confirm */
		tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->pnc);

		if (res == tarval_bad)
			return -1;
		return res == tarval_b_true;
	}
}
/**
 * Check for Const or Confirm with Const.
 */
static int is_Const_or_Confirm(const ir_node *node)
{
	if (is_Confirm(node))
		node = get_Confirm_bound(node);
	return is_Const(node);
}
/**
 * Get the tarval of a Const, or of the bound of a Confirm.
 */
static tarval *get_Const_or_Confirm_tarval(const ir_node *node)
{
	if (is_Confirm(node)) {
		if (get_Confirm_bound(node))
			node = get_Confirm_bound(node);
	}
	return get_Const_tarval(node);
}
static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
                                      ir_node *value)
{
	ir_node *block = get_nodes_block(jump);

	if (irn_visited_else_mark(value))
		return NULL;

	if (is_Const_or_Confirm(value)) {
		if (eval_cmp(env, value) <= 0) {
			return NULL;
		}

		DB((
			dbg, LEVEL_1,
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block
		));

		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);

		split_critical_edge(env->true_block, 0);

		/* we need a bigger visited nr when going back */
		env->visited_nr++;
		return block;
	}
	if (is_Phi(value)) {
		int i, arity;

		/* the Phi has to be in the same Block as the Jmp */
		if (get_nodes_block(value) != block) {
			return NULL;
		}

		arity = get_irn_arity(value);
		for (i = 0; i < arity; ++i) {
			ir_node *copy_block;
			ir_node *phi_pred = get_Phi_pred(value, i);
			ir_node *cfgpred  = get_Block_cfgpred(block, i);

			copy_block = find_const_or_confirm(env, cfgpred, phi_pred);
			if (copy_block == NULL)
				continue;

			/* copy duplicated nodes in copy_block and fix SSA */
			copy_and_fix(env, block, copy_block, i);

			if (copy_block == get_nodes_block(cfgpred)) {
				env->cnst_pred = block;
				env->cnst_pos  = i;
			}

			/* return now as we can't process more possibilities in 1 run */
			return copy_block;
		}
	}
	return NULL;
}
static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                               ir_node *value)
{
	ir_node *block = get_nodes_block(jump);

	if (irn_visited_else_mark(value)) {
		return NULL;
	}

	if (is_Const_or_Confirm(value)) {
		tarval *tv = get_Const_or_Confirm_tarval(value);

		if (tv != env->tv)
			return NULL;

		DB((
			dbg, LEVEL_1,
			"> Found jump threading candidate %+F->%+F\n",
			env->true_block, block
		));

		/* adjust true_block to point directly towards our jump */
		add_pred(env->true_block, jump);

		split_critical_edge(env->true_block, 0);

		/* we need a bigger visited nr when going back */
		env->visited_nr++;
		return block;
	}
	if (is_Phi(value)) {
		int i, arity;

		/* the Phi has to be in the same Block as the Jmp */
		if (get_nodes_block(value) != block)
			return NULL;

		arity = get_irn_arity(value);
		for (i = 0; i < arity; ++i) {
			ir_node *copy_block;
			ir_node *phi_pred = get_Phi_pred(value, i);
			ir_node *cfgpred  = get_Block_cfgpred(block, i);

			copy_block = find_candidate(env, cfgpred, phi_pred);
			if (copy_block == NULL)
				continue;

			/* copy duplicated nodes in copy_block and fix SSA */
			copy_and_fix(env, block, copy_block, i);

			if (copy_block == get_nodes_block(cfgpred)) {
				env->cnst_pred = block;
				env->cnst_pos  = i;
			}

			/* return now as we can't process more possibilities in 1 run */
			return copy_block;
		}
	}
	if (is_Proj(value)) {
		ir_node *left;
		ir_node *right;
		int      pnc;
		ir_node *cmp = get_Proj_pred(value);

		if (!is_Cmp(cmp))
			return NULL;

		left  = get_Cmp_left(cmp);
		right = get_Cmp_right(cmp);
		pnc   = get_Proj_proj(value);

		/* we assume that the constant is on the right side, swap left/right
		 * if needed */
		if (is_Const(left)) {
			ir_node *t = left;
			left  = right;
			right = t;
			pnc   = get_inversed_pnc(pnc);
		}

		if (!is_Const(right))
			return NULL;

		if (get_nodes_block(left) != block) {
			return NULL;
		}

		/* negate condition when we're looking for the false block */
		if (env->tv == tarval_b_false) {
			pnc = get_negated_pnc(pnc, get_irn_mode(right));
		}

		/* (recursively) look if a pred of a Phi is a constant or a Confirm */
		env->cmp  = cmp;
		env->pnc  = pnc;
		env->cnst = right;
		return find_const_or_confirm(env, jump, left);
	}
	return NULL;
}
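
/* Note on the operand swap above: get_inversed_pnc() mirrors the relation
 * for exchanged operands (Const < x becomes x > Const) and must not be
 * confused with get_negated_pnc() used for the false-block case, which
 * builds the logical complement (x < Const becomes x >= Const, taking the
 * mode into account for unordered float compares). */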
/**
 * Block-walker: searches for the following construct
 *
 *  Const or Phi with constants
 *           |
 *          Cmp
 *           |
 *         Cond
 *          /
 *       ProjX
 *        /
 *     Block
 */
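
/* Source-level illustration (sketch): for code like
 *
 *     int y = cond ? 1 : 0;
 *     if (y) { ... }
 *
 * y becomes a Phi of the constants 1 and 0; the walker below notices that
 * each Phi predecessor already decides the second condition and lets the
 * corresponding predecessor jump directly into the matching block. */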
static void thread_jumps(ir_node* block, void* data)
{
	jumpthreading_env_t env;
	int      *changed = (int*) data;
	ir_node  *selector;
	ir_node  *projx;
	ir_node  *cond;
	ir_node  *copy_block;
	int       selector_evaluated;
	const ir_edge_t *edge, *next;
	ir_graph *irg;
	ir_node  *bad;
	int       cnst_pos;

	if (get_Block_n_cfgpreds(block) != 1)
		return;

	projx = get_Block_cfgpred(block, 0);
	if (!is_Proj(projx))
		return;
	assert(get_irn_mode(projx) == mode_X);

	cond = get_Proj_pred(projx);
	if (!is_Cond(cond))
		return;

	selector = get_Cond_selector(cond);
	/* TODO handle switch Conds */
	if (get_irn_mode(selector) != mode_b)
		return;

	/* handle cases that can be immediately evaluated */
	selector_evaluated = -1;
	if (is_Proj(selector)) {
		ir_node *cmp = get_Proj_pred(selector);
		if (is_Cmp(cmp)) {
			ir_node *left  = get_Cmp_left(cmp);
			ir_node *right = get_Cmp_right(cmp);
			if (is_Const(left) && is_Const(right)) {
				int     pnc      = get_Proj_proj(selector);
				tarval *tv_left  = get_Const_tarval(left);
				tarval *tv_right = get_Const_tarval(right);

				selector_evaluated = eval_cmp_tv(pnc, tv_left, tv_right);
			}
			if (selector_evaluated < 0) {
				/* This is only the case if the predecessor nodes are not
				 * constant or the comparison could not be evaluated.
				 * Try with VRP information now.
				 */
				int pnc = get_Proj_proj(selector);

				selector_evaluated = eval_cmp_vrp(pnc, left, right);
			}
		}
	} else if (is_Const_or_Confirm(selector)) {
		tarval *tv = get_Const_or_Confirm_tarval(selector);
		if (tv == tarval_b_true) {
			selector_evaluated = 1;
		} else {
			assert(tv == tarval_b_false);
			selector_evaluated = 0;
		}
	}

	env.cnst_pred = NULL;
	if (get_Proj_proj(projx) == pn_Cond_false) {
		env.tv = tarval_b_false;
		if (selector_evaluated >= 0)
			selector_evaluated = !selector_evaluated;
	} else {
		env.tv = tarval_b_true;
	}

	if (selector_evaluated == 0) {
		ir_graph *irg = get_irn_irg(block);
		bad = new_r_Bad(irg);
		exchange(projx, bad);
		*changed = 1;
		return;
	} else if (selector_evaluated == 1) {
		dbg_info *dbgi = get_irn_dbg_info(selector);
		ir_node  *jmp  = new_rd_Jmp(dbgi, get_nodes_block(projx));
		DBG_OPT_JUMPTHREADING(projx, jmp);
		exchange(projx, jmp);
		*changed = 1;
		return;
	}

	/* (recursively) look if a pred of a Phi is a constant or a Confirm */
	env.true_block = block;
	irg = get_irn_irg(block);
	inc_irg_visited(irg);
	env.visited_nr = get_irg_visited(irg);

	copy_block = find_candidate(&env, projx, selector);
	if (copy_block == NULL)
		return;

	/* we have to remove the edge towards the pred as the pred now
	 * jumps into the true_block. We also have to shorten Phis
	 * in our block because of this */
	bad      = new_r_Bad(irg);
	cnst_pos = env.cnst_pos;

	/* shorten Phis */
	foreach_out_edge_safe(env.cnst_pred, edge, next) {
		ir_node *node = get_edge_src_irn(edge);

		if (is_Phi(node))
			set_Phi_pred(node, cnst_pos, bad);
	}

	set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);

	/* the graph is changed now */
	*changed = 1;
}
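
/* When the selector is already decided (selector_evaluated is 0 or 1 above),
 * no threading is needed: the ProjX is exchanged for a Bad or a Jmp. For
 * example "if (2 < 3)" turns the true ProjX into an unconditional Jmp and
 * the false ProjX into Bad, and the walk is re-run since control flow
 * changed. */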
void opt_jumpthreading(ir_graph* irg)
{
	int changed, rerun;

	FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");

	DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));

	remove_critical_cf_edges(irg);

	edges_assure(irg);
	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

	changed = 0;
	do {
		rerun = 0;
		irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
		changed |= rerun;
	} while (rerun);

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

	if (changed) {
		/* control flow changed, some blocks may become dead */
		set_irg_outs_inconsistent(irg);
		set_irg_doms_inconsistent(irg);
		set_irg_extblk_inconsistent(irg);
		set_irg_loopinfo_inconsistent(irg);
		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);

		/* Dead code might be created. Optimize it away as it is dangerous
		 * to call optimize_df() on dead code. */
		local_optimize_graph(irg);
	}
}
/* Creates an ir_graph pass for opt_jumpthreading. */
ir_graph_pass_t *opt_jumpthreading_pass(const char *name)
{
	return def_graph_pass(name ? name : "jumpthreading", opt_jumpthreading);
}  /* opt_jumpthreading_pass */