/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Path-Sensitive Jump Threading
 * @date    10. Sep. 2006
 * @author  Christoph Mallon, Matthias Braun
 * @version $Id$
 */
#include "config.h"

#include "iroptimize.h"

#include <assert.h>
#include "array_t.h"
#include "debug.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irgwalk.h"
#include "irnode.h"
#include "irnode_t.h"
#include "iredges.h"
#include "iredges_t.h"
#include "irtools.h"
#include "irgraph.h"
#include "tv.h"
#include "opt_confirms.h"
#include "iropt_dbg.h"
#include "irpass.h"
#include "vrp.h"

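/* If AVOID_PHIB is defined, mode_b nodes are not copied into the predecessor
 * block; instead the Cmp is duplicated into each user's block so that no Phis
 * with mode_b are created (see copy_and_fix()). The feature is disabled here. */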
#undef AVOID_PHIB

DEBUG_ONLY(static firm_dbg_module_t *dbg);

/**
 * Adds the new predecessor @p x to @p node, which must be either a Block or a Phi
 */
static void add_pred(ir_node* node, ir_node* x)
{
        ir_node** ins;
        int n;
        int i;

        assert(is_Block(node) || is_Phi(node));

        n = get_irn_arity(node);
        NEW_ARR_A(ir_node*, ins, n + 1);
        for (i = 0; i < n; i++)
                ins[i] = get_irn_n(node, i);
        ins[n] = x;
        set_irn_in(node, n + 1, ins);
}

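/** Alternative definition used during SSA reconstruction and the block it was
 *  placed in (set by construct_ssa(), read by search_def_and_create_phis()). */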
static ir_node *ssa_second_def;
static ir_node *ssa_second_def_block;

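/**
 * Walks the control flow backwards from @p block and returns the reaching
 * definition of the value in @p mode, creating new Phis where several
 * definitions meet. @p first is non-zero as long as we are still in the block
 * of the original user; there the alternative definition must not be used.
 */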
static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
                                           int first)
{
        int i;
        int n_cfgpreds;
        ir_graph *irg;
        ir_node *phi;
        ir_node **in;

        /* This is needed because we create bads sometimes */
        if (is_Bad(block))
                return new_Bad();

        /* the other defs can't be marked for cases where a user of the original
         * value is in the same block as the alternative definition.
         * In this case we mustn't use the alternative definition.
         * So we keep a flag that indicates whether we walked at least 1 block
         * away and may use the alternative definition */
        if (block == ssa_second_def_block && !first) {
                return ssa_second_def;
        }

        /* already processed this block? */
        if (irn_visited(block)) {
                ir_node *value = (ir_node*) get_irn_link(block);
                return value;
        }

        irg = get_irn_irg(block);
        assert(block != get_irg_start_block(irg));

        /* a Block with only 1 predecessor needs no Phi */
        n_cfgpreds = get_Block_n_cfgpreds(block);
        if (n_cfgpreds == 1) {
                ir_node *pred_block = get_Block_cfgpred_block(block, 0);
                ir_node *value      = search_def_and_create_phis(pred_block, mode, 0);

                set_irn_link(block, value);
                mark_irn_visited(block);
                return value;
        }

        /* create a new Phi */
        NEW_ARR_A(ir_node*, in, n_cfgpreds);
        for (i = 0; i < n_cfgpreds; ++i)
                in[i] = new_Unknown(mode);

        phi = new_r_Phi(block, n_cfgpreds, in, mode);
        set_irn_link(block, phi);
        mark_irn_visited(block);

        /* set Phi predecessors */
        for (i = 0; i < n_cfgpreds; ++i) {
                ir_node *pred_block = get_Block_cfgpred_block(block, i);
                ir_node *pred_val   = search_def_and_create_phis(pred_block, mode, 0);

                set_irn_n(phi, i, pred_val);
        }

        return phi;
}

/**
 * Given a set of values this function constructs SSA-form for the users of the
 * first value (the users are determined through the out-edges of the value).
 * Uses the irn_visited flags. Works without using the dominance tree.
 */
static void construct_ssa(ir_node *orig_block, ir_node *orig_val,
                          ir_node *second_block, ir_node *second_val)
{
        ir_graph *irg;
        ir_mode *mode;
        const ir_edge_t *edge;
        const ir_edge_t *next;

        /* no need to do anything */
        if (orig_val == second_val)
                return;

        irg = get_irn_irg(orig_val);
        inc_irg_visited(irg);

        mode = get_irn_mode(orig_val);
        set_irn_link(orig_block, orig_val);
        mark_irn_visited(orig_block);

        ssa_second_def_block = second_block;
        ssa_second_def       = second_val;

        /* Only fix the users of the first, i.e. the original node */
        foreach_out_edge_safe(orig_val, edge, next) {
                ir_node *user = get_edge_src_irn(edge);
                int j = get_edge_src_pos(edge);
                ir_node *user_block = get_nodes_block(user);
                ir_node *newval;

                /* ignore keeps */
                if (is_End(user))
                        continue;

                DB((dbg, LEVEL_3, ">>> Fixing user %+F (pred %d == %+F)\n", user, j, get_irn_n(user, j)));

                if (is_Phi(user)) {
                        ir_node *pred_block = get_Block_cfgpred_block(user_block, j);
                        newval = search_def_and_create_phis(pred_block, mode, 1);
                } else {
                        newval = search_def_and_create_phis(user_block, mode, 1);
                }

                /* don't fix newly created Phis from the SSA construction */
                if (newval != user) {
                        DB((dbg, LEVEL_4, ">>>> Setting input %d of %+F to %+F\n", j, user, newval));
                        set_irn_n(user, j, newval);
                }
        }
}

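/**
 * Splits the control flow edge entering @p block at predecessor position
 * @p pos by inserting a new block that only contains a Jmp to @p block.
 */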
static void split_critical_edge(ir_node *block, int pos)
{
        ir_graph *irg = get_irn_irg(block);
        ir_node *in[1];
        ir_node *new_block;
        ir_node *new_jmp;

        in[0] = get_Block_cfgpred(block, pos);
        new_block = new_r_Block(irg, 1, in);
        new_jmp = new_r_Jmp(new_block);
        set_Block_cfgpred(block, pos, new_jmp);
}

typedef struct jumpthreading_env_t {
        ir_node       *true_block;
        ir_node       *cmp;        /**< The Compare node that might be partially evaluated */
        pn_Cmp         pnc;        /**< The Compare mode of the Compare node. */
        ir_node       *cnst;
        tarval        *tv;
        ir_visited_t   visited_nr;

        ir_node       *cnst_pred;   /**< the block before the constant */
        int            cnst_pos;    /**< the pos to the constant block (needed to
                                          kill that edge later) */
} jumpthreading_env_t;

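/**
 * Copies @p node from @p block into @p copy_block, recursively copying all of
 * its predecessors that also live in @p block. Phis are not copied; instead
 * their operand for predecessor @p j is used. The copy is remembered in the
 * link field of the original node.
 */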
static ir_node *copy_and_fix_node(const jumpthreading_env_t *env,
                                  ir_node *block, ir_node *copy_block, int j,
                                  ir_node *node)
{
        int      i, arity;
        ir_node *copy;

        /* we can evaluate Phis right now, all other nodes get copied */
        if (is_Phi(node)) {
                copy = get_Phi_pred(node, j);
                /* we might have to evaluate a Phi-cascade */
                if (get_irn_visited(copy) >= env->visited_nr) {
                        copy = get_irn_link(copy);
                }
        } else {
                copy = exact_copy(node);
                set_nodes_block(copy, copy_block);

                assert(get_irn_mode(copy) != mode_X);

                arity = get_irn_arity(copy);
                for (i = 0; i < arity; ++i) {
                        ir_node *pred     = get_irn_n(copy, i);
                        ir_node *new_pred;

                        if (get_nodes_block(pred) != block)
                                continue;

                        if (get_irn_visited(pred) >= env->visited_nr) {
                                new_pred = get_irn_link(pred);
                        } else {
                                new_pred = copy_and_fix_node(env, block, copy_block, j, pred);
                        }
                        DB((dbg, LEVEL_2, ">> Set Pred of %+F to %+F\n", copy, new_pred));
                        set_irn_n(copy, i, new_pred);
                }
        }

        set_irn_link(node, copy);
        set_irn_visited(node, env->visited_nr);

        return copy;
}

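/**
 * Copies the data nodes of @p block into @p copy_block (Phis are evaluated
 * for predecessor @p j) and reconstructs SSA form for the users of the
 * original nodes.
 */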
static void copy_and_fix(const jumpthreading_env_t *env, ir_node *block,
                         ir_node *copy_block, int j)
{
        const ir_edge_t *edge;

        /* Look at all nodes in the cond_block and copy them into pred */
        foreach_out_edge(block, edge) {
                ir_node *node = get_edge_src_irn(edge);
                ir_node *copy;
                ir_mode *mode;

                if (is_Block(node)) {
                        /* Block->Block edge, should be the MacroBlock edge */
                        assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
                        continue;
                }

                /* ignore control flow */
                mode = get_irn_mode(node);
                if (mode == mode_X || is_Cond(node))
                        continue;
#ifdef AVOID_PHIB
                /* we may not copy mode_b nodes, because this could produce Phis with
                 * mode_b which can't be handled in all backends. Instead we duplicate
                 * the node and move it to its users */
                if (mode == mode_b) {
                        const ir_edge_t *edge, *next;
                        ir_node *pred;
                        int      pn;

                        assert(is_Proj(node));

                        pred = get_Proj_pred(node);
                        pn   = get_Proj_proj(node);

                        foreach_out_edge_safe(node, edge, next) {
                                ir_node *cmp_copy;
                                ir_node *user       = get_edge_src_irn(edge);
                                int pos             = get_edge_src_pos(edge);
                                ir_node *user_block = get_nodes_block(user);

                                if (user_block == block)
                                        continue;

                                cmp_copy = exact_copy(pred);
                                set_nodes_block(cmp_copy, user_block);
                                copy = new_r_Proj(current_ir_graph, user_block, cmp_copy, mode_b, pn);
                                set_irn_n(user, pos, copy);
                        }
                        continue;
                }
#endif

                copy = copy_and_fix_node(env, block, copy_block, j, node);

                /* we might hit values in blocks that have already been processed by a
                 * recursive find_const_or_confirm() call */
                assert(get_irn_visited(copy) <= env->visited_nr);
                if (get_irn_visited(copy) >= env->visited_nr) {
                        ir_node *prev_copy = get_irn_link(copy);
                        if (prev_copy != NULL)
                                set_irn_link(node, prev_copy);
                }
        }

        /* fix data-flow (and reconstruct SSA if needed) */
        foreach_out_edge(block, edge) {
                ir_node *node = get_edge_src_irn(edge);
                ir_node *copy_node;
                ir_mode *mode;

                if (is_Block(node)) {
                        /* Block->Block edge, should be the MacroBlock edge */
                        assert(get_Block_MacroBlock(node) == block && "Block->Block edge found");
                        continue;
                }

                mode = get_irn_mode(node);
                if (mode == mode_X || is_Cond(node))
                        continue;
#ifdef AVOID_PHIB
                if (mode == mode_b)
                        continue;
#endif

                DB((dbg, LEVEL_2, ">> Fixing users of %+F\n", node));

                copy_node = get_irn_link(node);
                construct_ssa(block, node, copy_block, copy_node);
        }
}

/**
 * returns whether the cmp evaluates to true or false, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param pnc       the compare mode of the Compare
 * @param tv_left   the left tarval
 * @param tv_right  the right tarval
 */
static int eval_cmp_tv(pn_Cmp pnc, tarval *tv_left, tarval *tv_right)
{
        pn_Cmp cmp_result = tarval_cmp(tv_left, tv_right);

        /* does the compare evaluate to true? */
        if (cmp_result == pn_Cmp_False)
                return -1;
        if ((cmp_result & pnc) != cmp_result)
                return 0;

        return 1;
}

/**
 * returns whether the cmp evaluates to true or false according to vrp
 * information, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param pnc       the compare mode of the Compare
 * @param left      the left node
 * @param right     the right node
 */
static int eval_cmp_vrp(pn_Cmp pnc, ir_node *left, ir_node *right)
{
        pn_Cmp cmp_result = vrp_cmp(left, right);

        /* does the compare evaluate to true? */
        if (cmp_result == pn_Cmp_False)
                return -1;
        if ((cmp_result & pnc) != cmp_result)
                return 0;

        return 1;
}

/**
 * returns whether the cmp evaluates to true or false, or can't be evaluated!
 * 1: true, 0: false, -1: can't evaluate
 *
 * @param env      the environment
 * @param cand     the candidate node, either a Const or a Confirm
 */
static int eval_cmp(jumpthreading_env_t *env, ir_node *cand)
{
        if (is_Const(cand)) {
                tarval *tv_cand   = get_Const_tarval(cand);
                tarval *tv_cmp    = get_Const_tarval(env->cnst);

                return eval_cmp_tv(env->pnc, tv_cand, tv_cmp);
        } else { /* a Confirm */
                tarval *res = computed_value_Cmp_Confirm(env->cmp, cand, env->cnst, env->pnc);

                if (res == tarval_bad)
                        return -1;
                return res == tarval_b_true;
        }
}

/**
 * Check for Const or Confirm with Const.
 */
static int is_Const_or_Confirm(const ir_node *node)
{
        if (is_Confirm(node))
                node = get_Confirm_bound(node);
        return is_Const(node);
}

/**
 * get the tarval of a Const or Confirm with a Const bound
 */
static tarval *get_Const_or_Confirm_tarval(const ir_node *node)
{
        if (is_Confirm(node)) {
                if (get_Confirm_bound(node))
                        node = get_Confirm_bound(node);
        }
        return get_Const_tarval(node);
}

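/**
 * Recursively follows Phi predecessors of @p value, looking for a Const or
 * Confirm that makes the comparison described in @p env evaluate to true.
 * On success the matching control flow is redirected into env->true_block
 * and the block that duplicated nodes get copied into is returned; otherwise
 * NULL is returned.
 */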
static ir_node *find_const_or_confirm(jumpthreading_env_t *env, ir_node *jump,
                                      ir_node *value)
{
        ir_node *block = get_nodes_block(jump);

        if (irn_visited_else_mark(value))
                return NULL;

        if (is_Const_or_Confirm(value)) {
                if (eval_cmp(env, value) <= 0) {
                        return NULL;
                }

                DB((
                        dbg, LEVEL_1,
                        "> Found jump threading candidate %+F->%+F\n",
                        env->true_block, block
                ));

                /* adjust true_block to point directly towards our jump */
                add_pred(env->true_block, jump);

                split_critical_edge(env->true_block, 0);

                /* we need a bigger visited nr when going back */
                env->visited_nr++;

                return block;
        }

        if (is_Phi(value)) {
                int i, arity;

                /* the Phi has to be in the same Block as the Jmp */
                if (get_nodes_block(value) != block) {
                        return NULL;
                }

                arity = get_irn_arity(value);
                for (i = 0; i < arity; ++i) {
                        ir_node *copy_block;
                        ir_node *phi_pred = get_Phi_pred(value, i);
                        ir_node *cfgpred  = get_Block_cfgpred(block, i);

                        copy_block = find_const_or_confirm(env, cfgpred, phi_pred);
                        if (copy_block == NULL)
                                continue;

                        /* copy duplicated nodes in copy_block and fix SSA */
                        copy_and_fix(env, block, copy_block, i);

                        if (copy_block == get_nodes_block(cfgpred)) {
                                env->cnst_pred = block;
                                env->cnst_pos  = i;
                        }

                        /* return now as we can't process more possibilities in 1 run */
                        return copy_block;
                }
        }

        return NULL;
}

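/**
 * Like find_const_or_confirm(), but starts from the mode_b selector @p value:
 * looks through Phis for a constant evaluating to env->tv and through Projs
 * of a Cmp, in which case the comparison data is stored in @p env and the
 * search continues on the non-constant Cmp operand. Returns the block that
 * duplicated nodes get copied into, or NULL.
 */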
static ir_node *find_candidate(jumpthreading_env_t *env, ir_node *jump,
                               ir_node *value)
{
        ir_node *block = get_nodes_block(jump);

        if (irn_visited_else_mark(value)) {
                return NULL;
        }

        if (is_Const_or_Confirm(value)) {
                tarval *tv = get_Const_or_Confirm_tarval(value);

                if (tv != env->tv)
                        return NULL;

                DB((
                        dbg, LEVEL_1,
                        "> Found jump threading candidate %+F->%+F\n",
                        env->true_block, block
                ));

                /* adjust true_block to point directly towards our jump */
                add_pred(env->true_block, jump);

                split_critical_edge(env->true_block, 0);

                /* we need a bigger visited nr when going back */
                env->visited_nr++;

                return block;
        }
        if (is_Phi(value)) {
                int i, arity;

                /* the Phi has to be in the same Block as the Jmp */
                if (get_nodes_block(value) != block)
                        return NULL;

                arity = get_irn_arity(value);
                for (i = 0; i < arity; ++i) {
                        ir_node *copy_block;
                        ir_node *phi_pred = get_Phi_pred(value, i);
                        ir_node *cfgpred  = get_Block_cfgpred(block, i);

                        copy_block = find_candidate(env, cfgpred, phi_pred);
                        if (copy_block == NULL)
                                continue;

                        /* copy duplicated nodes in copy_block and fix SSA */
                        copy_and_fix(env, block, copy_block, i);

                        if (copy_block == get_nodes_block(cfgpred)) {
                                env->cnst_pred = block;
                                env->cnst_pos  = i;
                        }

                        /* return now as we can't process more possibilities in 1 run */
                        return copy_block;
                }
        }
        if (is_Proj(value)) {
                ir_node *left;
                ir_node *right;
                int      pnc;
                ir_node *cmp = get_Proj_pred(value);
                if (!is_Cmp(cmp))
                        return NULL;

                left  = get_Cmp_left(cmp);
                right = get_Cmp_right(cmp);
                pnc   = get_Proj_proj(value);

                /* we assume that the constant is on the right side, swap left/right
                 * if needed */
                if (is_Const(left)) {
                        ir_node *t = left;
                        left       = right;
                        right      = t;

                        pnc        = get_inversed_pnc(pnc);
                }

                if (!is_Const(right))
                        return NULL;

                if (get_nodes_block(left) != block) {
                        return NULL;
                }

                /* negate condition when we're looking for the false block */
                if (env->tv == tarval_b_false) {
                        pnc = get_negated_pnc(pnc, get_irn_mode(right));
                }

                /* (recursively) look if a pred of a Phi is a constant or a Confirm */
                env->cmp  = cmp;
                env->pnc  = pnc;
                env->cnst = right;

                return find_const_or_confirm(env, jump, left);
        }

        return NULL;
}

/**
 * Block-walker: searches for the following construct
 *
 *  Const or Phi with constants
 *           |
 *          Cmp
 *           |
 *         Cond
 *          /
 *       ProjX
 *        /
 *     Block
 */
static void thread_jumps(ir_node* block, void* data)
{
        jumpthreading_env_t env;
        int *changed = data;
        ir_node *selector;
        ir_node *projx;
        ir_node *cond;
        ir_node *copy_block;
        int      selector_evaluated;
        const ir_edge_t *edge, *next;
        ir_node *bad;
        size_t   cnst_pos;

        if (get_Block_n_cfgpreds(block) != 1)
                return;

        projx = get_Block_cfgpred(block, 0);
        if (!is_Proj(projx))
                return;
        assert(get_irn_mode(projx) == mode_X);

        cond = get_Proj_pred(projx);
        if (!is_Cond(cond))
                return;

        selector = get_Cond_selector(cond);
        /* TODO handle switch Conds */
        if (get_irn_mode(selector) != mode_b)
                return;

        /* handle cases that can be immediately evaluated */
        selector_evaluated = -1;
        if (is_Proj(selector)) {
                ir_node *cmp = get_Proj_pred(selector);
                if (is_Cmp(cmp)) {
                        ir_node *left  = get_Cmp_left(cmp);
                        ir_node *right = get_Cmp_right(cmp);
                        if (is_Const(left) && is_Const(right)) {
                                int     pnc      = get_Proj_proj(selector);
                                tarval *tv_left  = get_Const_tarval(left);
                                tarval *tv_right = get_Const_tarval(right);

                                selector_evaluated = eval_cmp_tv(pnc, tv_left, tv_right);
                        }
                        if (selector_evaluated < 0) {
                                /* This is only the case if the predecessor nodes are not
                                 * constant or the comparison could not be evaluated.
                                 * Try with VRP information now.
                                 */
                                int pnc = get_Proj_proj(selector);

                                selector_evaluated = eval_cmp_vrp(pnc, left, right);
                        }
                }
        } else if (is_Const_or_Confirm(selector)) {
                tarval *tv = get_Const_or_Confirm_tarval(selector);
                if (tv == tarval_b_true) {
                        selector_evaluated = 1;
                } else {
                        assert(tv == tarval_b_false);
                        selector_evaluated = 0;
                }
        }

        env.cnst_pred = NULL;
        if (get_Proj_proj(projx) == pn_Cond_false) {
                env.tv = tarval_b_false;
                if (selector_evaluated >= 0)
                        selector_evaluated = !selector_evaluated;
        } else {
                env.tv = tarval_b_true;
        }

        if (selector_evaluated == 0) {
                bad = new_Bad();
                exchange(projx, bad);
                *changed = 1;
                return;
        } else if (selector_evaluated == 1) {
                dbg_info *dbgi = get_irn_dbg_info(selector);
                ir_node  *jmp  = new_rd_Jmp(dbgi, get_nodes_block(projx));
                DBG_OPT_JUMPTHREADING(projx, jmp);
                exchange(projx, jmp);
                *changed = 1;
                return;
        }

        /* (recursively) look if a pred of a Phi is a constant or a Confirm */
        env.true_block = block;
        inc_irg_visited(current_ir_graph);
        env.visited_nr = get_irg_visited(current_ir_graph);

        copy_block = find_candidate(&env, projx, selector);
        if (copy_block == NULL)
                return;

        /* we have to remove the edge towards the pred as the pred now
         * jumps into the true_block. We also have to shorten Phis
         * in our block because of this */
        bad      = new_Bad();
        cnst_pos = env.cnst_pos;

        /* shorten Phis */
        foreach_out_edge_safe(env.cnst_pred, edge, next) {
                ir_node *node = get_edge_src_irn(edge);

                if (is_Phi(node))
                        set_Phi_pred(node, cnst_pos, bad);
        }

        set_Block_cfgpred(env.cnst_pred, cnst_pos, bad);

        /* the graph is changed now */
        *changed = 1;
}

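/* Repeatedly walks all blocks of irg and threads jumps over blocks whose
 * condition can be (partially) evaluated, until no further changes occur.
 * If anything changed, the affected analysis info is invalidated and
 * optimize_cf() is run to get rid of dead code. */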
void opt_jumpthreading(ir_graph* irg)
{
        int changed, rerun;

        FIRM_DBG_REGISTER(dbg, "firm.opt.jumpthreading");

        DB((dbg, LEVEL_1, "===> Performing jumpthreading on %+F\n", irg));

        remove_critical_cf_edges(irg);

        edges_assure(irg);
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

        changed = 0;
        do {
                rerun = 0;
                irg_block_walk_graph(irg, thread_jumps, NULL, &rerun);
                changed |= rerun;
        } while (rerun);

        ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);

        if (changed) {
                /* control flow changed, some blocks may become dead */
                set_irg_outs_inconsistent(irg);
                set_irg_doms_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);

                /* Dead code might be created. Optimize it away as it is dangerous
                 * to call optimize_df() on dead code. */
                optimize_cf(irg);
        }
}

/* Creates an ir_graph pass for opt_jumpthreading. */
ir_graph_pass_t *opt_jumpthreading_pass(const char *name)
{
        return def_graph_pass(name ? name : "jumpthreading", opt_jumpthreading);
}  /* opt_jumpthreading_pass */