A workaround to avoid condeval creating Phibs, which not all backends like
[libfirm] / ir / opt / cfopt.c
1 /*
2  * Project:     libFIRM
3  * File name:   ir/opt/cfopt.c
4  * Purpose:     control flow optimizations
5  * Author:
6  * Created:
7  * CVS-ID:      $Id$
8  * Copyright:   (c) 1998-2004 Universität Karlsruhe
9  * Licence:     This file is protected by GPL -  GNU GENERAL PUBLIC LICENSE.
10  */
11
12 #ifdef HAVE_CONFIG_H
13 # include "config.h"
14 #endif
15
16 #include <assert.h>
17
18 #include "plist.h"
19 #include "xmalloc.h"
20 #include "irnode_t.h"
21 #include "irgraph_t.h"
22 #include "irprog_t.h"
23
24 #include "ircons.h"
25 #include "iropt_t.h"
26 #include "irgwalk.h"
27 #include "irgmod.h"
28 #include "irdump.h"
29 #include "irvrfy.h"
30 #include "iredges.h"
31
32 #include "array.h"
33
34 #include "irouts.h"
35 #include "irbackedge_t.h"
36
37 #include "irflag_t.h"
38 #include "firmstat.h"
39
40 #include "cfopt.h"
41 #include "iropt_dbg.h"
42
43 /*------------------------------------------------------------------*/
44 /* Control flow optimization.                                       */
45 /*                                                                  */
46 /* Removes Bad control flow predecessors and empty blocks.  A block */
47 /* is empty if it contains only a Jmp node.                         */
48 /* Blocks can only be removed if they are not needed for the        */
49 /* semantics of Phi nodes.                                          */
50 /*------------------------------------------------------------------*/
51
52 /**
53  * Replace binary Conds that jumps twice into the same block
54  * by a simple Jmp.
55  * E.g.
56  * @verbatim
57  *               Cond                     Jmp  Bad
58  *             /       \                   |   /
59  *       ProjX True   ProjX False  ==>     |  /
60  *             \       /                   | /
61  *               Block                    Block
62  * @endverbatim
63  *
64  * Such pattern are the result of if-conversion.
65  *
66  * Note that the simple case that Block has only these two
67  * predecessors are already handled in equivalent_node_Block().
68  */
69 static void remove_senseless_conds(ir_node *bl, void *data) {
70         int i, j;
71         int n = get_Block_n_cfgpreds(bl);
72
73         assert(is_Block(bl));
74
75         for (i = 0; i < n; ++i) {
76                 ir_node *pred_i = get_Block_cfgpred(bl, i);
77                 ir_node *cond_i = skip_Proj(pred_i);
78
79                 /* binary Cond */
80                 if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
81
82                         for (j = i + 1; j < n; ++j) {
83                                 ir_node *pred_j = get_Block_cfgpred(bl, j);
84                                 ir_node *cond_j = skip_Proj(pred_j);
85
86                                 if (cond_j == cond_i) {
87                                         ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
88                                         set_irn_n(bl, i, jmp);
89                                         set_irn_n(bl, j, new_Bad());
90
91                                         DBG_OPT_IFSIM2(cond_i, jmp);
92                                         break;
93                                 }
94                         }
95                 }
96         }
97 }
98
/**
 * Walker: removes Tuples from Block control flow predecessors and
 * optimizes blocks with equivalent_node().  This is tricky,
 * as we want to avoid nodes that have as block predecessor Bads.
 * Therefore we also optimize at control flow operations, depending
 * how we first reach the Block.
 *
 * Side effect: clears the link field of EVERY visited node (the link
 * field is reused by the collect/optimize phases later in this file).
 */
static void merge_blocks(ir_node *node, void *env) {
	int i, n;
	ir_node *new_block;

	/* clear the link field for ALL nodes first */
	set_irn_link(node, NULL);

	if (is_Block(node)) {
		/* Remove Tuples */

		/* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
		   A different order of optimizations might cause problems. */
		if (get_opt_normalize()) {
			for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i)
				set_Block_cfgpred(node, i, skip_Tuple(get_Block_cfgpred(node, i)));
		}

		/* see below */
		new_block = equivalent_node(node);
		if (new_block != node && ! is_Block_dead(new_block))
			exchange(node, new_block);

	} else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
		/* We will soon visit a block.  Optimize it before visiting! */
		ir_node *b = get_nodes_block(skip_Proj(node));

		if (!is_Block_dead(b)) {
			new_block = equivalent_node(b);

			/* iterate: the replacement block may itself be replaceable again */
			while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
				/* We would have to run gigo() if new is bad, so we
				   promote it directly below. Nevertheless, we sometimes reach a block
				   the first time through a dataflow node.  In this case we optimized the
				   block as such and have to promote the Bad here. */
				assert((get_opt_control_flow_straightening() ||
					get_opt_control_flow_weak_simplification()) &&
					("strange flag setting"));
				exchange (b, new_block);
				b = new_block;
				new_block = equivalent_node(b);
			}

			/* normally, we would create a Bad block here, but this must be
			 * prevented, so just set it's cf to Bad.
			 */
			if (is_Block_dead(new_block))
				exchange(node, new_Bad());
		}
	}
}
156
157
158 /**
159  * Remove cf from dead block by inspecting dominance info
160  * Do not replace blocks by Bad.  This optimization shall
161  * ensure, that all Bad control flow predecessors are
162  * removed, and no new other Bads are introduced.
163  *
164  * Must be run in the post walker.
165  */
166 static void remove_dead_block_cf(ir_node *block, void *env) {
167         int i;
168
169         /* check block predecessors and turn control flow into bad */
170         for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
171                 ir_node *pred_X = get_Block_cfgpred(block, i);
172
173                 if (! is_Bad(pred_X)) {
174                         ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
175
176                         if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
177                                 set_Block_dead(pred_bl);
178                                 exchange(pred_X, new_Bad());
179                         }
180                 }
181         }
182 }
183
184 /**
185  * Collects all Phi nodes in link list of Block.
186  * Marks all blocks "block_visited" if they contain a node other
187  * than Jmp (and Proj).
188  * Links all Proj nodes to their predecessors.
189  * Collects all switch-Conds in a list.
190  */
191 static void collect_nodes(ir_node *n, void *env) {
192         ir_op *op = get_irn_op(n);
193         plist_t *list = env;
194
195         if (op != op_Block) {
196                 ir_node *b  = get_nodes_block(n);
197
198                 if (op == op_Phi) {
199                         /* Collect Phi nodes to compact ins along with block's ins. */
200                         set_irn_link(n, get_irn_link(b));
201                         set_irn_link(b, n);
202                 } else if (op != op_Jmp && !is_Bad(b)) {  /* Check for non empty block. */
203                         mark_Block_block_visited(b);
204
205                         if (op == op_Proj) {               /* link Proj nodes */
206                                 ir_node *pred = get_Proj_pred(n);
207
208                                 set_irn_link(n, get_irn_link(pred));
209                                 set_irn_link(pred, n);
210                         } else if (op == op_Cond) {
211                                 ir_node *sel = get_Cond_selector(n);
212                                 if (mode_is_int(get_irn_mode(sel))) {
213                                         /* found a switch-Cond, collect */
214                                         plist_insert_back(list, n);
215                                 }
216                         }
217                 }
218         }
219 }
220
221 /** Returns true if pred is predecessor of block. */
222 static int is_pred_of(ir_node *pred, ir_node *b) {
223         int i, n;
224
225         for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
226                 ir_node *b_pred = get_Block_cfgpred_block(b, i);
227                 if (b_pred == pred) return 1;
228         }
229         return 0;
230 }
231
/** Test whether we can optimize away pred block pos of b.
 *
 *  @param  b    A block node.
 *  @param  pos  The position of the predecessor block to judge about.
 *
 *  @returns     The number of predecessors b will gain from this input:
 *               0 if the pred block is dead, the pred's own predecessor
 *               count if it can be merged away, 1 if it must be kept.
 *
 *  The test is rather tricky.
 *
 *  The situation is something like the following:
 *  @verbatim
 *                 if-block
 *                  /   \
 *              then-b  else-b
 *                  \   /
 *                    b
 *  @endverbatim
 *
 *  b merges the control flow of an if-then-else.  We may not remove
 *  the 'then' _and_ the 'else' block of an 'if' if there is a Phi
 *  node in b, even if both are empty.  The destruction of this Phi
 *  requires that a copy is added before the merge.  We have to
 *  keep one of the case blocks to place the copies in.
 *
 *  To perform the test for pos, we must regard predecessors before pos
 *  as already removed.
 **/
static int test_whether_dispensable(ir_node *b, int pos) {
	int i, j, n_preds = 1;
	ir_node *pred = get_Block_cfgpred_block(b, pos);

	/* Bad blocks will be optimized away, so we don't need space for them */
	if (is_Block_dead(pred))
		return 0;

	/* an "unvisited" block (block_visited counter lags behind the graph's)
	   was classified empty by collect_nodes() */
	if (get_Block_block_visited(pred) + 1
		< get_irg_block_visited(current_ir_graph)) {

		if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
			/* Mark block so that it will not be removed: optimization is turned off. */
			set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
			return 1;
		}

		/* Seems to be empty. At least we detected this in collect_nodes. */
		if (!get_irn_link(b)) {
			/* There are no Phi nodes ==> all predecessors are dispensable. */
			n_preds = get_Block_n_cfgpreds(pred);
		} else {
			/* b's pred blocks and pred's pred blocks must be pairwise disjunct.
			   Handle all pred blocks with preds < pos as if they were already removed. */
			for (i = 0; i < pos; i++) {
				ir_node *b_pred = get_Block_cfgpred_block(b, i);
				if (! is_Block_dead(b_pred) &&
					get_Block_block_visited(b_pred) + 1
					< get_irg_block_visited(current_ir_graph)) {
					/* b_pred will itself be merged away: its preds become
					   preds of b, so compare against those instead */
					for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
						ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
						if (is_pred_of(b_pred_pred, pred))
							goto non_dispensable;
					}
				} else {
					if (is_pred_of(b_pred, pred))
						goto non_dispensable;
				}
			}
			for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
				ir_node *b_pred = get_Block_cfgpred_block(b, i);
				if (is_pred_of(b_pred, pred))
					goto non_dispensable;
			}
			/* if we get here, the block is dispensable */
			n_preds = get_Block_n_cfgpreds(pred);
		}
	}

	return n_preds;

non_dispensable:
	/* keep pred: mark it "visited" so the later phases leave it in place */
	set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
	return 1;
}
314
/**
 * Record used to defer the exchange of Phi nodes: a Phi of a removed
 * block may not be exchanged while other Phis still reference it, so
 * the replacement is queued and performed after all Phis are fixed.
 */
typedef struct _defer_ex_phi {
	ir_node *phi_pred;    /**< the previous Phi node that will be replaced */
	ir_node *phi;         /**< the new Phi node that replaces phi_pred */
} defer_ex_phi;
322
323 /**
324  * This method removed Bad cf predecessors from Blocks and Phis, and removes
325  * empty blocks.  A block is empty if it only contains Phi and Jmp nodes.
326  *
327  * We first adapt Phi nodes, then Block nodes, as we need the old ins
328  * of the Block to adapt the Phi nodes.  We do this by computing new
329  * in arrays, and then replacing the old ones.  So far we compute new in arrays
330  * for all nodes, not regarding whether there is a possibility for optimization.
331  *
332  * For each predecessor p of a Block b there are three cases:
333  *  -1. The predecessor p is a Bad node:  just skip it.  The in array of b shrinks by one.
334  *  -2. The predecessor p is empty.  Remove p.  All predecessors of p are now
335  *      predecessors of b.
336  *  -3. The predecessor p is a block containing useful code.  Just keep p as is.
337  *
338  * For Phi nodes f we have to check the conditions at the Block of f.
339  * For cases 1 and 3 we proceed as for Blocks.  For case 2 we can have two
340  * cases:
341  *  -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.  In this
342  *      case we proceed as for blocks. We remove pred_f.  All
343  *      predecessors of pred_f now are predecessors of f.
344  *  -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
345  *      We have to replicate f for each predecessor of the removed block. Or, with
346  *      other words, the removed predecessor block has exactly one predecessor.
347  *
348  * Further there is a special case for self referencing blocks:
349  * @verbatim
350  *
351  *    then_b     else_b                              then_b  else_b
352  *       \      /                                      \      /
353  *        \    /                                        |    /
354  *        pred_b                                        |   /
355  *         |   ____                                     |  /  ____
356  *         |  |    |                                    |  | |    |
357  *         |  |    |       === optimized to ===>        \  | |    |
358  *        loop_b   |                                     loop_b   |
359  *         |  |    |                                      |  |    |
360  *         |  |____|                                      |  |____|
361  *         |                                              |
362  * @endverbatim
363  *
364  * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
365  * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
366  * backedge.
367  * @@@ It is negotiable whether we should do this ... there might end up a copy
368  * from the Phi in the loop when removing the Phis.
369  */
static void optimize_blocks(ir_node *b, void *env) {
	int i, j, k, n, max_preds, n_preds, p_preds = -1;
	ir_node *pred, *phi;
	ir_node **in;
	defer_ex_phi *defers;

	/* Count the number of predecessor if this block is merged with pred blocks
	   that are empty. */
	max_preds = 0;
	for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
		max_preds += test_whether_dispensable(b, i);
	}
	in = xmalloc(max_preds * sizeof(*in));

	defers = NEW_ARR_F(defer_ex_phi, 0);

	/*-
	printf(" working on "); DDMN(b);
	for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
		pred = get_nodes_block(get_Block_cfgpred(b, i));
		if (is_Bad(get_Block_cfgpred(b, i))) {
			printf("  removing Bad %i\n ", i);
		} else if (get_Block_block_visited(pred) +1
			< get_irg_block_visited(current_ir_graph)) {
			printf("  removing pred %i ", i); DDMN(pred);
		} else { printf("  Nothing to do for "); DDMN(pred); }
	}
	* end Debug output -*/

	/*- Fix the Phi nodes of the current block -*/
	/* the Phis were chained into the block's link field by collect_nodes() */
	for (phi = get_irn_link(b); phi; ) {
		assert(get_irn_op(phi) == op_Phi);

		/* Find the new predecessors for the Phi */
		p_preds = 0;
		for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
			pred = get_Block_cfgpred_block(b, i);

			if (is_Bad(get_Block_cfgpred(b, i))) {
				/* case Phi 1: Do nothing */
			}
			else if (get_Block_block_visited(pred) + 1
				 < get_irg_block_visited(current_ir_graph)) {
				/* case Phi 2: It's an empty block and not yet visited. */
				ir_node *phi_pred = get_Phi_pred(phi, i);

				for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
					/* because of breaking loops, not all predecessors are Bad-clean,
					 * so we must check this here again */
					if (! is_Bad(get_Block_cfgpred(pred, j))) {
						if (get_nodes_block(phi_pred) == pred) {
							/* case Phi 2a: */
							assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */

							in[p_preds++] = get_Phi_pred(phi_pred, j);
						} else {
							/* case Phi 2b: */
							in[p_preds++] = phi_pred;
						}
					}
				}

				/* The Phi_pred node is replaced now if it is a Phi.

				   Somehow the removed Phi node can be used legally in loops.
				   Therefore we replace the old phi by the new one.
				   This must be done _AFTER_ all Phis are optimized, or
				   it will fail if two Phis use the same pred_Phi.

				   FIXME: Is the following true? We ALWAYS replace it by the new one.

				   Further we have to remove the old Phi node by replacing it
				   by Bad.  Else it will remain in the keep alive array of End
				   and cause illegal situations.  So if there is no loop, we should
				   replace it by Bad.
				 */
				if (get_nodes_block(phi_pred) == pred) {
					int i;
					/* remove the Phi as it might be kept alive. Further there
					   might be other users. */
					for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
						if (defers[i].phi_pred == phi_pred)
							break;
					}
					if (i < 0) {
						/* we have a new replacement: queue it for later */
						defer_ex_phi elem;

						elem.phi_pred = phi_pred;
						elem.phi      = phi;
						ARR_APP1(defer_ex_phi, defers, elem);
					}
				}
			} else {
				/* case Phi 3: */
				in[p_preds++] = get_Phi_pred(phi, i);
			}
		}
		assert(p_preds <= max_preds);

		/* Fix the node */
		if (p_preds == 1)
			/* By removal of Bad ins the Phi might be degenerated. */
			exchange(phi, in[0]);
		else
			set_irn_in(phi, p_preds, in);

		phi = get_irn_link(phi);
	}

	/* now, exchange all deferred Phis */
	for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
		exchange(defers[i].phi_pred, defers[i].phi);
	}
	DEL_ARR_F(defers);

	/*- This happens only if merge between loop backedge and single loop entry.
	    See special case above. -*/
	for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
		pred = get_nodes_block(get_Block_cfgpred(b, k));

		if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
			/* we found a predecessor block at position k that will be removed */
			for (phi = get_irn_link(pred); phi;) {
				/*
				 * the previous phase may already changed the phi, and even
				 * removed it at all, so check here if this node is still a phi
				 */
				if (get_irn_op(phi) == op_Phi) {
					int q_preds = 0;

					/* move this phi from the predecessor into the block b */
					set_nodes_block(phi, b);

					/* first, copy all 0..k-1 predecessors */
					for (i = 0; i < k; i++) {
						pred = get_Block_cfgpred_block(b, i);

						if (is_Bad(pred)) {
							/* Do nothing */
						} else if (get_Block_block_visited(pred) + 1
							< get_irg_block_visited(current_ir_graph)) {
							/* It's an empty block and not yet visited. */
							for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
								/* @@@ Loop information would be needed here: the control
								   flow edge must be a backedge! (At all four in[q_preds] = phi
								   assignments.)  Nevertheless it has worked so far! */
								if (! is_Bad(get_Block_cfgpred(pred, j)))
									in[q_preds++] = phi;
							}
						} else {
							in[q_preds++] = phi;
						}
					}

					/* now we are at k, copy the phi predecessors */
					pred = get_nodes_block(get_Block_cfgpred(b, k));
					for (i = 0; i < get_Phi_n_preds(phi); i++) {
						if (! is_Bad(get_Block_cfgpred(pred, i)))
							in[q_preds++] = get_Phi_pred(phi, i);
					}

					/* and now all the rest */
					for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
						pred = get_nodes_block(get_Block_cfgpred(b, i));

						if (is_Bad(get_Block_cfgpred(b, i))) {
							/* Do nothing */
						} else if (get_Block_block_visited(pred) +1
							< get_irg_block_visited(current_ir_graph)) {
							/* It's an empty block and not yet visited. */
							for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
								if (! is_Bad(get_Block_cfgpred(pred, j)))
									in[q_preds++] = phi;
							}
						} else {
							in[q_preds++] = phi;
						}
					}

					/* Fix the node */
					if (q_preds == 1)
						exchange(phi, in[0]);
					else
						set_irn_in(phi, q_preds, in);

					assert(q_preds <= max_preds);
					// assert(p_preds == q_preds && "Wrong Phi Fix");
				}
				phi = get_irn_link(phi);
			}
		}
	}

	/*- Fix the block -*/
	n_preds = 0;
	for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
		pred = get_Block_cfgpred_block(b, i);

		if (is_Bad(pred)) {
			/* case 1: Do nothing */
		} else if (get_Block_block_visited(pred) +1
			< get_irg_block_visited(current_ir_graph)) {
			/* case 2: It's an empty block and not yet visited. */
			assert(get_Block_n_cfgpreds(b) > 1);
			/* Else it should be optimized by equivalent_node. */
			for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
				ir_node *pred_block = get_Block_cfgpred(pred, j);

				/* because of breaking loops, not all predecessors are Bad-clean,
				 * so we must check this here again */
				if (! is_Bad(pred_block))
					in[n_preds++] = pred_block;
			}
			/* Remove block as it might be kept alive. */
			exchange(pred, b/*new_Bad()*/);
		} else {
			/* case 3: */
			in[n_preds++] = get_Block_cfgpred(b, i);
		}
	}
	assert(n_preds <= max_preds);

	set_irn_in(b, n_preds, in);

	assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
	xfree(in);
}
598
599 /**
600  * Walker: optimize all blocks using the default optimizations.
601  * This removes Blocks that with only a Jmp predecessor.
602  */
603 static void remove_simple_blocks(ir_node *block, void *env) {
604         ir_node *new_blk = equivalent_node(block);
605         if (new_blk != block)
606                 exchange(block, new_blk);
607 }
608
/**
 * Handle pre-optimized table switch Cond's.
 * During iropt, all Projs from a switch-Cond are already removed except
 * the defProj and maybe the taken one.
 * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
 *
 * @param cond the switch-Cond
 *
 * @return non-zero if a switch-Cond was optimized
 *
 * Expects all Proj's linked to the cond node
 */
static int handle_switch_cond(ir_node *cond) {
	ir_node *sel = get_Cond_selector(cond);

	/* the surviving Projs were chained into the Cond's link field */
	ir_node *proj1 = get_irn_link(cond);
	ir_node *proj2 = get_irn_link(proj1);
	ir_node *jmp, *blk;

	blk = get_nodes_block(cond);

	if (proj2 == NULL) {
		/* this Cond has only one Proj: must be the defProj */
		assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
		/* convert it into a Jmp */
		jmp = new_r_Jmp(current_ir_graph, blk);
		exchange(proj1, jmp);
		return 1;
	} else if (get_irn_link(proj2) == NULL) {
		/* We have two Proj's here. Check if the Cond has
		   a constant argument */
		tarval *tv = value_of(sel);

		if (tv != tarval_bad) {
			/* we have a constant switch: exactly one Proj is taken,
			   the other one becomes Bad */
			long num     = get_tarval_long(tv);
			long def_num = get_Cond_defaultProj(cond);

			if (def_num == get_Proj_proj(proj1)) {
				/* first one is the defProj */
				if (num == get_Proj_proj(proj2)) {
					jmp = new_r_Jmp(current_ir_graph, blk);
					exchange(proj2, jmp);
					exchange(proj1, new_Bad());
					return 1;
				}
			} else if (def_num == get_Proj_proj(proj2)) {
				/* second one is the defProj */
				if (num == get_Proj_proj(proj1)) {
					jmp = new_r_Jmp(current_ir_graph, blk);
					exchange(proj1, jmp);
					exchange(proj2, new_Bad());
					return 1;
				}
			} else {
				/* neither: strange, Cond was not optimized so far */
				if (num == get_Proj_proj(proj1)) {
					jmp = new_r_Jmp(current_ir_graph, blk);
					exchange(proj1, jmp);
					exchange(proj2, new_Bad());
					return 1;
				} else if (num == get_Proj_proj(proj2)) {
					jmp = new_r_Jmp(current_ir_graph, blk);
					exchange(proj2, jmp);
					exchange(proj1, new_Bad());
					return 1;
				}
			}
		}
	}
	return 0;
}
681
/* Optimizations of the control flow that also require changes of Phi nodes.
 *
 * This optimization performs two passes over the graph.
 *
 * The first pass collects all Phi nodes in a link list in the block
 * nodes.  Further it performs simple control flow optimizations.
 * Finally it marks all blocks that do not contain useful
 * computations, i.e., these blocks might be removed.
 *
 * The second pass performs the optimizations intended by this algorithm.
 * It walks only over block nodes and adapts these and the Phi nodes in
 * these blocks, which it finds in a linked list computed by the first pass.
 *
 * We use the block_visited flag to mark empty blocks in the first
 * phase.
 * @@@ It would be better to add a struct in the link field
 * that keeps the Phi list and the mark.  Place it on an obstack, as
 * we will lose blocks and thereby generate memory leaks.
 */
void optimize_cf(ir_graph *irg) {
        int i, j, n;
        ir_node **in = NULL;
        ir_node *cond, *end = get_irg_end(irg);
        ir_graph *rem = current_ir_graph;
        irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
        plist_t *list;
        plist_element_t *el;

        assert(get_irg_phase_state(irg) != phase_building);

        /* if the graph is not pinned, we cannot determine empty blocks */
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
               "Control flow optimization need a pinned graph");

        current_ir_graph = irg;

        /* this pass rewires control flow, so out-edges would go stale */
        edges_deactivate(irg);

        /* Handle graph state: everything derived from the CFG becomes invalid */
        set_irg_outs_inconsistent(current_ir_graph);
        set_irg_extblk_inconsistent(current_ir_graph);
        set_irg_loopinfo_inconsistent(current_ir_graph);
        set_irg_doms_inconsistent(current_ir_graph);

        if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
                /* NOTE(review): this local `end` shadows the outer `end`
                   declared above (both are get_irg_end(irg)); consider
                   dropping the inner declaration — confirm no subtle reason */
                ir_node *end;

                /* we have dominance info, we can kill dead block */
                irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);

                /* fix the keep-alives */
                end = get_irg_end(irg);
                for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
                        ir_node *ka = get_End_keepalive(end, i);

                        if (is_Block(ka)) {
                                /* do NOT keep  dead blocks */
                                if (get_Block_dom_depth(ka) < 0)
                                        set_End_keepalive(end, i, new_Bad());
                        } else if (is_Block_dead(get_nodes_block(ka)) ||
                                   get_Block_dom_depth(get_nodes_block(ka)) < 0)
                                /* do NOT keep nodes in dead blocks */
                                set_End_keepalive(end, i, new_Bad());
                }
        }
        irg_block_walk_graph(irg, NULL, remove_senseless_conds, NULL);

        /* Use block visited flag to mark non-empty blocks. */
        inc_irg_block_visited(irg);

        /* first pass: merge blocks, collect Phis/switch-Conds (into `list`) */
        list = plist_new();
        irg_walk(end, merge_blocks, collect_nodes, list);

        /* handle all collected switch-Conds */
        foreach_plist(list, el) {
                cond = plist_element_get_value(el);
                handle_switch_cond(cond);
        }
        plist_free(list);

        /* Optimize the standard code. */
        irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, NULL);

        /* Walk all keep alives, optimize them if block, add to new in-array
           for end if useful. */
        n  = get_End_n_keepalives(end);
        if (n > 0)
                NEW_ARR_A(ir_node *, in, n);
        inc_irg_visited(irg);

        /* fix the keep alive: keep each node at most once, drop those in
           dead blocks; `j` counts the keep-alives that survive */
        for (i = j = 0; i < n; i++) {
                ir_node *ka = get_End_keepalive(end, i);

                if (irn_not_visited(ka)) {
                        ir_op *op = get_irn_op(ka);

                        if ((op == op_Block) && Block_not_block_visited(ka)) {
                                set_irg_block_visited(irg,  /* Don't walk all the way to Start. */
                                        get_irg_block_visited(irg)-1);
                                irg_block_walk(ka, optimize_blocks, remove_simple_blocks, NULL);
                                mark_irn_visited(ka);
                                in[j++] = ka;
                        } else if (op == op_Phi) {
                                mark_irn_visited(ka);
                                if (! is_Block_dead(get_nodes_block(ka)))
                                        in[j++] = ka;
                        } else if (is_op_keep(op)) {
                                mark_irn_visited(ka);
                                if (! is_Block_dead(get_nodes_block(ka)))
                                        in[j++] = ka;
                        }
                }
        }
        /* only rewrite the keep-alive array if something was dropped */
        if (j != n)
                set_End_keepalives(end, j, in);
        /* the verifier doesn't work yet with floating nodes */
        if (get_irg_pinned(irg) == op_pin_state_pinned) {
                /* after optimize_cf(), only Bad data flow may remain. */
                if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
                        dump_ir_block_graph(irg, "-vrfy-cf");
                        dump_ir_graph(irg, "-vrfy-cf");
                        fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
                }
        }

        /* restore the previously current graph */
        current_ir_graph = rem;
}