Let collect_nodes cope with Bad nodes
[libfirm] / ir / opt / cfopt.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief   Control flow optimizations.
23  * @author  Goetz Lindenmaier, Michael Beck, Sebastian Hack
24  * @version $Id$
25  *
26  * Removes Bad control flow predecessors and empty blocks.  A block is empty
27  * if it contains only a Jmp node. Blocks can only be removed if they are not
28  * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
29  * blocks (even if we could move the label).
30  */
31 #include "config.h"
32
33 #include "iroptimize.h"
34
35 #include <assert.h>
36 #include <stdbool.h>
37
38 #include "xmalloc.h"
39 #include "irnode_t.h"
40 #include "irgraph_t.h"
41 #include "irprog_t.h"
42
43 #include "ircons.h"
44 #include "iropt_t.h"
45 #include "irgwalk.h"
46 #include "irgmod.h"
47 #include "irdump.h"
48 #include "irverify.h"
49 #include "iredges.h"
50
51 #include "array_t.h"
52
53 #include "irouts.h"
54 #include "irbackedge_t.h"
55
56 #include "irflag_t.h"
57 #include "firmstat.h"
58 #include "irpass.h"
59 #include "irphase_t.h"
60
61 #include "iropt_dbg.h"
62
63 /** An environment for optimize_blocks and optimize_cf. */
64 typedef struct merge_env {
65         bool      changed;      /**< Set if the graph was changed. */
66         bool      phis_moved;   /**< Set if Phi nodes were moved. */
67 } merge_env;
68
69 /** set or reset the removable property of a block. */
70 static void set_Block_removable(ir_node *block, bool removable)
71 {
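        /* the removable property is stored in the Block mark flag, which is
         * reserved via IR_RESOURCE_BLOCK_MARK in optimize_cf */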
72         set_Block_mark(block, removable);
73 }
74
75 /** check if a block has the removable property set. */
76 static bool is_Block_removable(ir_node *block)
77 {
78         return get_Block_mark(block);
79 }
80
81 /** checks if a given Cond node is a switch Cond. */
82 static bool is_switch_Cond(ir_node *cond)
83 {
84         ir_node *sel = get_Cond_selector(cond);
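        /* a boolean Cond has a mode_b selector; everything else is a switch */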
85         return get_irn_mode(sel) != mode_b;
86 }
87
88 /** Walker: clear link fields and mark all blocks as removable. */
89 static void clear_link_and_mark_blocks_removable(ir_node *node, void *ctx)
90 {
91         (void) ctx;
92         set_irn_link(node, NULL);
93         if (is_Block(node))
94                 set_Block_removable(node, true);
95 }
96
97 /**
98  * Collects all Phi nodes in the link list of their Block.
99  * Marks a block as non-removable if it contains a node other
100  * than Phi, Jmp, Bad or Proj.
101  * Links all Proj nodes to their predecessors.
102  * Collects all switch-Conds in a list.
103  */
104 static void collect_nodes(ir_node *n, void *ctx)
105 {
106         ir_node ***switch_conds = (ir_node***)ctx;
107
108         if (is_Phi(n)) {
109                 /* Collect Phi nodes to compact ins along with block's ins. */
110                 ir_node *block = get_nodes_block(n);
111                 set_irn_link(n, get_irn_link(block));
112                 set_irn_link(block, n);
113         } else if (is_Block(n)) {
114                 if (has_Block_entity(n)) {
115                         /* block with a jump label attached cannot be removed. */
116                         set_Block_removable(n, false);
117                 }
118         } else if (is_Bad(n) || is_Jmp(n)) {
119                 /* ignore these */
120                 return;
121         } else {
122                 /* Check for non-empty block. */
123                 ir_node *block = get_nodes_block(n);
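                /* the block may already have been turned into a Bad node by a
                 * previous optimization round; ignore such dangling nodes */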
124                 if (is_Bad(block))
125                         return;
126
127                 set_Block_removable(block, false);
128
129                 if (is_Proj(n)) {
130                         /* link Proj nodes */
131                         ir_node *pred = get_Proj_pred(n);
132                         set_irn_link(n, get_irn_link(pred));
133                         set_irn_link(pred, n);
134                 } else if (is_Cond(n) && is_switch_Cond(n)) {
135                         /* found a switch-Cond, collect */
136                         ARR_APP1(ir_node*, *switch_conds, n);
137                 }
138         }
139 }
140
141 /** Returns true if pred is a predecessor of block b. */
142 static bool is_pred_of(ir_node *pred, ir_node *b)
143 {
144         int i;
145
146         for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
147                 ir_node *b_pred = get_Block_cfgpred_block(b, i);
148                 if (b_pred == pred)
149                         return true;
150         }
151         return false;
152 }
153
154 /** Test whether we can optimize away pred block pos of b.
155  *
156  *  @param  b    A block node.
157  *  @param  pos  The position of the predecessor block to judge about.
158  *
159  *  @returns     1 if the predecessor block must be kept, otherwise the number of its own predecessors (it will be merged into b)
160  *
161  *  The test is rather tricky.
162  *
163  *  The situation is something like the following:
164  *  @verbatim
165  *                 if-block
166  *                  /   \
167  *              then-b  else-b
168  *                  \   /
169  *                    b
170  *  @endverbatim
171  *
172  *  b merges the control flow of an if-then-else.  We may not remove
173  *  the 'then' _and_ the 'else' block of an 'if' if there is a Phi
174  *  node in b, even if both are empty.  The destruction of this Phi
175  *  requires that a copy is added before the merge.  We have to
176  *  keep one of the case blocks to place the copies in.
177  *
178  *  To perform the test for pos, we must regard predecessors before pos
179  *  as already removed.
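 *
 *  For example, if both then-b and else-b are empty but b contains a Phi,
 *  at most one of the two may be merged into b; the other one is marked
 *  non-removable by this function.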
180  **/
181 static unsigned test_whether_dispensable(ir_node *b, int pos)
182 {
183         ir_node *pred  = get_Block_cfgpred(b, pos);
184         ir_node *predb = get_nodes_block(pred);
185
186         if (is_Bad(pred) || !is_Block_removable(predb))
187                 return 1;
188
189         /* can't remove self-loops */
190         if (predb == b)
191                 goto non_dispensable;
192         if (is_unknown_jump(pred))
193                 goto non_dispensable;
194
195         /* Seems to be empty. At least we detected this in collect_nodes. */
196         if (get_irn_link(b) != NULL) {
197                 int n_cfgpreds = get_Block_n_cfgpreds(b);
198                 int i;
199                 /* there are Phi nodes */
200
201                  * b's pred blocks and pred's pred blocks must be pairwise disjoint.
202                  * Handle all pred blocks with preds < pos as if they were already
203                  * removed. */
204                 for (i = 0; i < pos; i++) {
205                         ir_node *other_pred  = get_Block_cfgpred(b, i);
206                         ir_node *other_predb = get_nodes_block(other_pred);
207                         if (is_Bad(other_pred))
208                                 continue;
209                         if (is_Block_removable(other_predb)
210                             && !Block_block_visited(other_predb)) {
211                                 int j;
212                                 for (j = get_Block_n_cfgpreds(other_predb) - 1; j >= 0; --j) {
213                                         ir_node *other_predpred
214                                                 = get_Block_cfgpred_block(other_predb, j);
215                                         if (is_pred_of(other_predpred, predb))
216                                                 goto non_dispensable;
217                                 }
218                         } else if (is_pred_of(other_predb, predb)) {
219                                 goto non_dispensable;
220                         }
221                 }
222                 for (i = pos+1; i < n_cfgpreds; i++) {
223                         ir_node *other_predb = get_Block_cfgpred_block(b, i);
224                         if (is_pred_of(other_predb, predb))
225                                 goto non_dispensable;
226                 }
227         }
228         /* we will not dispense with already visited blocks */
229         if (Block_block_visited(predb))
230                 return 1;
231         /* if we get here, the block is dispensable; it contributes all of its preds */
232         return get_irn_arity(predb);
233
234 non_dispensable:
235         set_Block_removable(predb, false);
236         return 1;
237 }
238
239 /**
240  * This method removes empty blocks.  A block is empty if it only contains Phi
241  * and Jmp nodes.
242  *
243  * We first adapt Phi nodes, then Block nodes, as we need the old ins
244  * of the Block to adapt the Phi nodes.  We do this by computing new
245  * in arrays, and then replacing the old ones.  So far we compute new in arrays
246  * for all nodes, not regarding whether there is a possibility for optimization.
247  *
248  * For each predecessor p of a Block b there are three cases:
249  *  - The predecessor p is a Bad node: keep it.  A fresh Bad entry is placed
250  *    in the in array; somebody else is responsible for removing it.
251  *  - The predecessor p is empty. Remove p. All predecessors of p are now
252  *    predecessors of b.
253  *  - The predecessor p is a block containing useful code. Just keep p as is.
254  *
255  * For Phi nodes f we have to check the conditions at the Block of f.
256  * For cases 1 and 3 we proceed as for Blocks.  For case 2 we can have two
257  * cases:
258  *  -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
259  *       In this case we proceed as for blocks. We remove pred_f.  All
260  *       predecessors of pred_f now are predecessors of f.
261  *  -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
262  *       too. We have to replicate f for each predecessor of the removed block.
263  *       Or, in other words, the removed predecessor block has exactly one
264  *       predecessor.
265  *
266  * Further there is a special case for self referencing blocks:
267  * @verbatim
268  *
269  *    then_b     else_b                              then_b  else_b
270  *       \      /                                      \      /
271  *        \    /                                        |    /
272  *        pred_b                                        |   /
273  *         |   ____                                     |  /  ____
274  *         |  |    |                                    |  | |    |
275  *         |  |    |       === optimized to ===>        \  | |    |
276  *        loop_b   |                                     loop_b   |
277  *         |  |    |                                      |  |    |
278  *         |  |____|                                      |  |____|
279  *         |                                              |
280  * @endverbatim
281  *
282  * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
283  * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
284  * backedge.
285  */
286 static void optimize_blocks(ir_node *b, void *ctx)
287 {
288         int i, j, k, n, max_preds, n_preds, p_preds = -1;
289         ir_node *pred, *phi, *next;
290         ir_node **in;
291         merge_env *env = (merge_env*)ctx;
292
293         if (get_Block_dom_depth(b) < 0) {
294                 /* ignore unreachable blocks */
295                 return;
296         }
297
298         /* Count the number of predecessors this block will have after it has been
299            merged with its empty predecessor blocks. */
300         max_preds = 0;
301         for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
302                 max_preds += test_whether_dispensable(b, i);
303         }
304         in = XMALLOCN(ir_node*, max_preds);
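        /* scratch array used for the new in-arrays of the Phis and of b itself */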
305
306         /*- Fix the Phi nodes of the current block -*/
307         for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) {
308                 assert(is_Phi(phi));
309                 next = (ir_node*)get_irn_link(phi);
310
311                 /* Find the new predecessors for the Phi */
312                 p_preds = 0;
313                 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
314                         ir_graph *irg = get_irn_irg(b);
315                         pred = get_Block_cfgpred_block(b, i);
316
317                         if (is_Bad(pred)) {
318                                 /* case Phi 1: keep the Bads, somebody else is responsible for removing them */
319                                 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
320                         } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
321                                 /* case Phi 2: It's an empty block and not yet visited. */
322                                 ir_node *phi_pred = get_Phi_pred(phi, i);
323
324                                 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
325                                         ir_node *pred_pred = get_Block_cfgpred(pred, j);
326
327                                         if (is_Bad(pred_pred)) {
328                                                 in[p_preds++] = new_r_Bad(irg, get_irn_mode(phi));
329                                                 continue;
330                                         }
331
332                                         if (get_nodes_block(phi_pred) == pred) {
333                                                 /* case Phi 2a: */
334                                                 assert(is_Phi(phi_pred));  /* Block is empty!! */
335
336                                                 in[p_preds++] = get_Phi_pred(phi_pred, j);
337                                         } else {
338                                                 /* case Phi 2b: */
339                                                 in[p_preds++] = phi_pred;
340                                         }
341                                 }
342                         } else {
343                                 /* case Phi 3: */
344                                 in[p_preds++] = get_Phi_pred(phi, i);
345                         }
346                 }
347                 assert(p_preds == max_preds);
348
349                 /* Fix the node */
350                 if (p_preds == 1)
351                         exchange(phi, in[0]);
352                 else
353                         set_irn_in(phi, p_preds, in);
354                 env->changed = true;
355         }
356
357         /*- This happens only when a loop backedge is merged with a single loop entry.
358             Moreover, it is only needed if predb is the direct dominator of b,
359             otherwise there can be no uses of the Phis in predb ... -*/
360         for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
361                 ir_node *pred  = get_Block_cfgpred(b, k);
362                 ir_node *predb = get_nodes_block(pred);
363                 if (is_Bad(pred))
364                         continue;
365
366                 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
367                         ir_node *next_phi;
368
369                         /* we found a predecessor block at position k that will be removed */
370                         for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) {
371                                 int q_preds = 0;
372                                 next_phi = (ir_node*)get_irn_link(phi);
373
374                                 assert(is_Phi(phi));
375
376                                 if (get_Block_idom(b) != predb) {
377                                         /* predb is not the dominator. There can't be uses of predb's Phi nodes, so kill them. */
378                                         ir_graph *irg  = get_irn_irg(b);
379                                         ir_mode  *mode = get_irn_mode(phi);
380                                         exchange(phi, new_r_Bad(irg, mode));
381                                 } else {
382                                         /* predb is the direct dominator of b. There might be uses of the Phi nodes from
383                                            predb in further blocks, so move this Phi from the predecessor into block b */
384                                         set_nodes_block(phi, b);
385                                         set_irn_link(phi, get_irn_link(b));
386                                         set_irn_link(b, phi);
387                                         env->phis_moved = true;
388
389                                         /* first, copy all 0..k-1 predecessors */
390                                         for (i = 0; i < k; i++) {
391                                                 pred = get_Block_cfgpred_block(b, i);
392
393                                                 if (is_Bad(pred)) {
394                                                         ir_graph *irg  = get_irn_irg(b);
395                                                         ir_mode  *mode = get_irn_mode(phi);
396                                                         in[q_preds++] = new_r_Bad(irg, mode);
397                                                 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
398                                                         /* It's an empty block and not yet visited. */
399                                                         for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
400                                                                 if (! is_Bad(get_Block_cfgpred(pred, j))) {
401                                                                         in[q_preds++] = phi;
402                                                                 } else {
403                                                                         ir_graph *irg  = get_irn_irg(b);
404                                                                         ir_mode  *mode = get_irn_mode(phi);
405                                                                         in[q_preds++] = new_r_Bad(irg, mode);
406                                                                 }
407                                                         }
408                                                 } else {
409                                                         in[q_preds++] = phi;
410                                                 }
411                                         }
412
413                                         /* now we are at k, copy the phi predecessors */
414                                         pred = get_nodes_block(get_Block_cfgpred(b, k));
415                                         for (i = 0; i < get_Phi_n_preds(phi); i++) {
416                                                 in[q_preds++] = get_Phi_pred(phi, i);
417                                         }
418
419                                         /* and now all the rest */
420                                         for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
421                                                 pred = get_Block_cfgpred_block(b, i);
422
423                                                 if (is_Bad(pred)) {
424                                                         ir_graph *irg  = get_irn_irg(b);
425                                                         ir_mode  *mode = get_irn_mode(phi);
426                                                         in[q_preds++] = new_r_Bad(irg, mode);
427                                                 } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
428                                                         /* It's an empty block and not yet visited. */
429                                                         for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
430                                                                 if (! is_Bad(get_Block_cfgpred(pred, j))) {
431                                                                         in[q_preds++] = phi;
432                                                                 } else {
433                                                                         ir_graph *irg  = get_irn_irg(b);
434                                                                         ir_mode  *mode = get_irn_mode(phi);
435                                                                         in[q_preds++] = new_r_Bad(irg, mode);
436                                                                 }
437                                                         }
438                                                 } else {
439                                                         in[q_preds++] = phi;
440                                                 }
441                                         }
442
443                                         /* Fix the node */
444                                         if (q_preds == 1)
445                                                 exchange(phi, in[0]);
446                                         else
447                                                 set_irn_in(phi, q_preds, in);
448                                         env->changed = true;
449
450                                         assert(q_preds <= max_preds);
451                                         // assert(p_preds == q_preds && "Wrong Phi Fix");
452                                 }
453                         }
454                 }
455         }
456
457         /*- Fix the block -*/
458         n_preds = 0;
459         for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
460                 ir_node *pred  = get_Block_cfgpred(b, i);
461                 ir_node *predb = get_nodes_block(pred);
462                 ir_graph *irg  = get_irn_irg(pred);
463
464                 /* case 1: Bad predecessor */
465                 if (is_Bad(pred)) {
466                         in[n_preds++] = new_r_Bad(irg, mode_X);
467                         continue;
468                 }
469                 if (is_Block_removable(predb) && !Block_block_visited(predb)) {
470                         /* case 2: It's an empty block and not yet visited. */
471                         for (j = 0; j < get_Block_n_cfgpreds(predb); j++) {
472                                 ir_node *predpred = get_Block_cfgpred(predb, j);
473
474                                 if (is_Bad(predpred)) {
475                                         in[n_preds++] = new_r_Bad(irg, mode_X);
476                                         continue;
477                                 }
478
479                                 in[n_preds++] = predpred;
480                         }
481                         /* Exchange the removed block and its jump for Bad, as they might be kept alive. */
482                         exchange(pred, new_r_Bad(get_irn_irg(b), mode_X));
483                         exchange(predb, new_r_Bad(get_irn_irg(b), mode_BB));
484                 } else {
485                         /* case 3: */
486                         in[n_preds++] = pred;
487                 }
488         }
489         assert(n_preds == max_preds);
490
491         set_irn_in(b, n_preds, in);
492         env->changed = true;
493
494         /* see if phi-fix was correct */
495         assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds));
496         xfree(in);
497 }
498
499 /**
500  * Optimize table-switch Conds.
501  *
502  * @param cond the switch-Cond
503  * @return true if the switch-Cond was optimized
504  */
505 static bool handle_switch_cond(ir_node *cond)
506 {
507         ir_node *sel   = get_Cond_selector(cond);
508         ir_node *proj1 = (ir_node*)get_irn_link(cond);
509         ir_node *proj2 = (ir_node*)get_irn_link(proj1);
510         ir_node *blk   = get_nodes_block(cond);
511
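        /* proj1 and proj2 are the first two Projs of this Cond, taken from the
         * link list that collect_nodes built on the Cond node */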
512         /* exactly 1 Proj on the Cond node: must be the defaultProj */
513         if (proj2 == NULL) {
514                 ir_node *jmp = new_r_Jmp(blk);
515                 assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
516                 /* convert it into a Jmp */
517                 exchange(proj1, jmp);
518                 return true;
519         }
520
521         /* handle Cond nodes with a constant argument. In this case the localopt rules
522          * should have killed all obviously impossible cases.
523          * So the only case left to handle here is 1 defaultProj + 1 case
524          * (this one case should be the one taken) */
525         if (get_irn_link(proj2) == NULL) {
526                 ir_tarval *tv = value_of(sel);
527
528                 if (tv != tarval_bad) {
529                         /* we have a constant switch */
530                         long      num     = get_tarval_long(tv);
531                         long      def_num = get_Cond_default_proj(cond);
532                         ir_graph *irg     = get_irn_irg(cond);
533                         ir_node  *bad     = new_r_Bad(irg, mode_X);
534
535                         if (def_num == get_Proj_proj(proj1)) {
536                                 /* first one is the defProj */
537                                 if (num == get_Proj_proj(proj2)) {
538                                         ir_node *jmp = new_r_Jmp(blk);
539                                         exchange(proj2, jmp);
540                                         exchange(proj1, bad);
541                                         return true;
542                                 }
543                         } else if (def_num == get_Proj_proj(proj2)) {
544                                 /* second one is the defProj */
545                                 if (num == get_Proj_proj(proj1)) {
546                                         ir_node *jmp = new_r_Jmp(blk);
547                                         exchange(proj1, jmp);
548                                         exchange(proj2, bad);
549                                         return true;
550                                 }
551                         } else {
552                                 /* neither: strange, Cond was not optimized so far */
553                                 if (num == get_Proj_proj(proj1)) {
554                                         ir_node *jmp = new_r_Jmp(blk);
555                                         exchange(proj1, jmp);
556                                         exchange(proj2, bad);
557                                         return true;
558                                 } else if (num == get_Proj_proj(proj2)) {
559                                         ir_node *jmp = new_r_Jmp(blk);
560                                         exchange(proj2, jmp);
561                                         exchange(proj1, bad);
562                                         return true;
563                                 }
564                         }
565                 }
566         }
567         return false;
568 }
569
570 /**
571  * Optimize boolean Conds whose true and false projections jump to the same
572  * block into a single Jmp.  The block must not contain any Phi nodes.
573  *
574  *        Cond
575  *       /    \
576  *  projA      projB   =>   Jmp     Bad
577  *       \    /                \   /
578  *       block                 block
579  */
580 static bool optimize_pred_cond(ir_node *block, int i, int j)
581 {
582         ir_node *projA, *projB, *cond, *pred_block, *jmp, *bad;
583         assert(i != j);
584
585         projA = get_Block_cfgpred(block, i);
586         if (!is_Proj(projA)) return false;
587         projB = get_Block_cfgpred(block, j);
588         if (!is_Proj(projB)) return false;
589         cond  = get_Proj_pred(projA);
590         if (!is_Cond(cond))  return false;
591
592         if (cond != get_Proj_pred(projB)) return false;
593         if (is_switch_Cond(cond)) return false;
594
595         /* the Cond is superfluous and can be replaced by a Jmp */
596         pred_block = get_nodes_block(cond);
597         jmp = new_r_Jmp(pred_block);
598         bad = new_r_Bad(get_irn_irg(block), mode_X);
599
600         assert(projA != projB);
601         exchange(projA, jmp);
602         exchange(projB, bad);
603         return true;
604 }
605
606 typedef enum block_flags_t {
607         BF_HAS_OPERATIONS         = 1 << 0,
608         BF_HAS_PHIS               = 1 << 1,
609         BF_IS_UNKNOWN_JUMP_TARGET = 1 << 2,
610 } block_flags_t;
611
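/** The block flags are kept as a small bit set in the per-block phase data. */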
612 static bool get_phase_flag(ir_phase *block_info, ir_node *block, int flag)
613 {
614         return PTR_TO_INT(phase_get_irn_data(block_info, block)) & flag;
615 }
616
617 static void set_phase_flag(ir_phase *block_info, ir_node *block,
618                            block_flags_t flag)
619 {
620         int data = PTR_TO_INT(phase_get_irn_data(block_info, block));
621         data |= flag;
622         phase_set_irn_data(block_info, block, INT_TO_PTR(data));
623 }
624
625 static void clear_phase_flag(ir_phase *block_info, ir_node *block)
626 {
627         phase_set_irn_data(block_info, block, NULL);
628 }
629
630 static bool has_operations(ir_phase *block_info, ir_node *block)
631 {
632         return get_phase_flag(block_info, block, BF_HAS_OPERATIONS);
633 }
634
635 static void set_has_operations(ir_phase *block_info, ir_node *block)
636 {
637         set_phase_flag(block_info, block, BF_HAS_OPERATIONS);
638 }
639
640 static bool has_phis(ir_phase *block_info, ir_node *block)
641 {
642         return get_phase_flag(block_info, block, BF_HAS_PHIS);
643 }
644
645 static void set_has_phis(ir_phase *block_info, ir_node *block)
646 {
647         set_phase_flag(block_info, block, BF_HAS_PHIS);
648 }
649
650 static bool is_unknown_jump_target(ir_phase *block_info, ir_node *block)
651 {
652         return get_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
653 }
654
655 static void set_is_unknown_jump_target(ir_phase *block_info, ir_node *block)
656 {
657         set_phase_flag(block_info, block, BF_IS_UNKNOWN_JUMP_TARGET);
658 }
659
660 /**
661  * Pre-Walker: fill block info information.
662  */
663 static void compute_block_info(ir_node *n, void *x)
664 {
665         ir_phase *block_info = (ir_phase *)x;
666
667         if (is_Block(n)) {
668                 int i, max = get_Block_n_cfgpreds(n);
669                 for (i=0; i<max; i++) {
670                         ir_node *pred = get_Block_cfgpred(n,i);
671                         if (is_unknown_jump(pred)) {
672                                 set_is_unknown_jump_target(block_info, n);
673                         }
674                 }
675         } else if (is_Phi(n)) {
676                 ir_node *block = get_nodes_block(n);
677                 set_has_phis(block_info, block);
678         } else if (is_Jmp(n) || is_Cond(n) || is_Cmp(n) || is_Proj(n)) {
679                 /* ignore */
680         } else {
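                /* every other node is a real operation, so its block cannot be skipped */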
681                 ir_node *block = get_nodes_block(n);
682                 set_has_operations(block_info, block);
683         }
684 }
685
686 static void clear_block_info(ir_node *block, void *x)
687 {
688         ir_phase *block_info = (ir_phase *)x;
689         clear_phase_flag(block_info, block);
690 }
691
692 typedef struct skip_env {
693         bool changed;
694         ir_phase *phase;
695 } skip_env;
696
697 /**
698  * Post-Block-walker: Optimize away useless ifs, i.e. boolean Cond nodes
699  * whose true and false projections
700  * target the same block.
701  */
702 static void optimize_ifs(ir_node *block, void *x)
703 {
704         skip_env *env = (skip_env*)x;
705         int i, j;
706         int n_preds = get_Block_n_cfgpreds(block);
707
708         if (has_phis(env->phase, block))
709                 return;
710
711         /* optimize Cond predecessors (might produce Bad predecessors) */
712         for (i = 0; i < n_preds; ++i) {
713                 for (j = i+1; j < n_preds; ++j) {
714                         optimize_pred_cond(block, i, j);
715                 }
716         }
717 }
718
719 /**
720  * Pre-Block walker: remove empty blocks (containing only a Jmp)
721  * that are control flow predecessors of the current block.
722  */
723 static void remove_empty_blocks(ir_node *block, void *x)
724 {
725         skip_env *env = (skip_env*)x;
726         int i;
727         int n_preds = get_Block_n_cfgpreds(block);
728
729         for (i = 0; i < n_preds; ++i) {
730                 ir_node *jmp, *jmp_block, *pred, *pred_block;
731                 int n_jpreds = 0;
732
733                 jmp = get_Block_cfgpred(block, i);
734                 if (!is_Jmp(jmp))
735                         continue;
736                 jmp_block = get_nodes_block(jmp);
737                 if (jmp_block == block)
738                         continue; /* this infinite loop cannot be optimized any further */
739                 if (is_unknown_jump_target(env->phase, jmp_block))
740                         continue; /* unknown jump target must not be optimized */
741                 if (has_operations(env->phase,jmp_block))
742                         continue; /* this block contains operations and cannot be skipped */
743                 if (has_phis(env->phase,jmp_block))
744                         continue; /* this block contains Phis and cannot be skipped */
745
746                 /* jmp_block is an empty block and can be optimized! */
747
748                 n_jpreds = get_Block_n_cfgpreds(jmp_block);
749                 /**
750                  * If the jmp block has only one predecessor this is straightforward.
751                  * However, if there are more predecessors, we only handle this
752                  * if block has no Phis.
753                  */
754                 if (n_jpreds == 1) {
755                         /* skip jmp block by rerouting its predecessor to block
756                          *
757                          *     A              A
758                          *     |              |
759                          *  jmp_block   =>    |
760                          *     |              |
761                          *   block          block
762                          */
763                         pred = get_Block_cfgpred(jmp_block, 0);
764                         exchange(jmp, pred);
765
766                         /* cleanup: jmp_block might have a Keep edge! */
767                         pred_block = get_nodes_block(pred);
768                         exchange(jmp_block, pred_block);
769                         env->changed = true;
770                 } else if (! has_phis(env->phase, block)) {
771                         /* all predecessors can skip the jmp block, so block gets some new predecessors
772                          *
773                          *  A     B                 A  B
774                          *   \   /                  |  |
775                          * jmp_block  C  =>  Bad  C |  |
776                          *      \    /          \ | | /
777                          *      block            block
778                          */
779                         ir_node **ins = NULL;
780                         int j;
781                         NEW_ARR_A(ir_node *, ins, n_preds+n_jpreds);
782                         /* first copy the old predecessors, because the outer loop (i) still walks over them */
783                         for (j = 0; j < n_preds; ++j) {
784                                 ins[j] = get_Block_cfgpred(block, j);
785                         }
786                         /* now append the new predecessors */
787                         for (j = 0; j < n_jpreds; ++j) {
788                                 pred = get_Block_cfgpred(jmp_block, j);
789                                 ins[n_preds+j] = pred;
790                         }
791                         set_irn_in(block, n_preds+n_jpreds, ins);
792                         /* convert the jmp_block to Bad */
793                         ir_graph *irg = get_irn_irg(block);
794                         exchange(jmp_block, new_r_Bad(irg, mode_BB));
795                         exchange(jmp, new_r_Bad(irg, mode_X));
796                         /* let the outer loop walk over the new predecessors as well */
797                         n_preds += n_jpreds;
798                         env->changed = true;
799                         // TODO What if jmp_block had a KeepAlive edge?
800                 } else {
801                         /* This would involve Phis ... */
802                 }
803         }
804 }
805
806 /*
807  * All CFG optimizations that do not touch Phi nodes.
808  *
809  * Note that this might create critical edges.
810  */
811 static void cfgopt_ignoring_phis(ir_graph *irg)
812 {
813         ir_phase *block_info = new_phase(irg, NULL);
814         skip_env env = { true, block_info };
815
816         while (env.changed) {
817                 irg_walk_graph(irg, compute_block_info, NULL, block_info);
818                 env.changed = false;
819
820                 /* Remove blocks, which only consist of a Jmp */
821                 irg_block_walk_graph(irg, remove_empty_blocks, NULL, &env);
822
823                 /* Optimize Cond->Jmp, where then- and else-block are the same. */
824                 irg_block_walk_graph(irg, NULL, optimize_ifs, &env);
825
826                 if (env.changed) {
827                         set_irg_doms_inconsistent(irg);
828                         /* clear block info, because it must be recomputed */
829                         irg_block_walk_graph(irg, clear_block_info, NULL, block_info);
830                         /* Removing blocks and Conds might enable more optimizations */
831                         continue;
832                 } else {
833                         break;
834                 }
835         }
836
837         phase_free(block_info);
838 }
839
840 /* Optimizations of the control flow that also require changes of Phi nodes.  */
841 void optimize_cf(ir_graph *irg)
842 {
843         int i, j, n;
844         ir_node **in = NULL;
845         ir_node *end = get_irg_end(irg);
846         ir_node *new_end;
847         merge_env env;
848
849         env.changed    = false;
850         env.phis_moved = false;
851
852         assert(get_irg_phase_state(irg) != phase_building);
853
854         /* if the graph is not pinned, we cannot determine empty blocks */
855         assert(get_irg_pinned(irg) != op_pin_state_floats &&
856                "Control flow optimization needs a pinned graph");
857
858         edges_deactivate(irg);
859
860         /* First the "simple" optimizations, which do not touch Phis */
861         cfgopt_ignoring_phis(irg);
862
863         /* we use the mark flag to mark removable blocks */
864         ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
865
866         /* The switch Cond optimization might expose unreachable code, so we loop */
867         for (;;) {
868                 int length;
869                 ir_node **switch_conds = NULL;
870                 bool changed = false;
871
872                 assure_doms(irg);
873
874                 /*
875                  * This pass collects all Phi nodes in a linked list in the block
876                  * nodes.  Further it performs simple control flow optimizations.
877                  * Finally it marks all blocks that do not contain useful
878                  * computations, i.e., these blocks might be removed.
879                  */
880                 switch_conds = NEW_ARR_F(ir_node*, 0);
881                 irg_walk(end, clear_link_and_mark_blocks_removable, collect_nodes, &switch_conds);
882
883                 /* handle all collected switch-Conds */
884                 length = ARR_LEN(switch_conds);
885                 for (i = 0; i < length; ++i) {
886                         ir_node *cond = switch_conds[i];
887                         changed |= handle_switch_cond(cond);
888                 }
889                 DEL_ARR_F(switch_conds);
890
891                 if (!changed)
892                         break;
893
894                 set_irg_doms_inconsistent(irg);
895                 set_irg_extblk_inconsistent(irg);
896                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
897         }
898
899         /* invariants established by collect_nodes:
900          * 1. removable blocks are now marked as such
901          * 2. phi lists are up to date
902          */
903
904         /* Optimize the standard code.
905          * It walks only over block nodes and adapts these and the Phi nodes in these
906          * blocks, which it finds in a linked list computed before.
907          */
908         assure_doms(irg);
909         irg_block_walk_graph(irg, optimize_blocks, NULL, &env);
910
911         new_end = optimize_in_place(end);
912         if (new_end != end) {
913                 set_irg_end(irg, new_end);
914                 end = new_end;
915         }
916         remove_End_Bads_and_doublets(end);
917
918         ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
919
920         if (env.phis_moved) {
921                 /* Bad: when we moved Phis, we might have produced dead Phi nodes
922                    that are kept alive.
923                    Some other phases cannot cope with this, so kill them.
924                  */
925                 n = get_End_n_keepalives(end);
926                 if (n > 0) {
927                         NEW_ARR_A(ir_node *, in, n);
928                         assure_irg_outs(irg);
929
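                        /* keep only keep-alive targets that are not dead Phis; a Phi is
                         * dead if its only users are the Phi itself and the End node */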
930                         for (i = j = 0; i < n; ++i) {
931                                 ir_node *ka = get_End_keepalive(end, i);
932
933                                 if (is_Phi(ka)) {
934                                         int k;
935
936                                         for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
937                                                 ir_node *user = get_irn_out(ka, k);
938
939                                                 if (user != ka && user != end) {
940                                                         /* Is it a real user or just a self loop? */
941                                                         break;
942                                                 }
943                                         }
944                                         if (k >= 0)
945                                                 in[j++] = ka;
946                                 } else
947                                         in[j++] = ka;
948                         }
949                         if (j != n) {
950                                 set_End_keepalives(end, j, in);
951                                 env.changed = true;
952                         }
953                 }
954         }
955
956         if (env.changed) {
957                 /* Handle the graph state if it was changed. */
958                 set_irg_doms_inconsistent(irg);
959                 set_irg_extblk_inconsistent(irg);
960                 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
961         }
962 }
963
964 /* Creates an ir_graph pass for optimize_cf. */
965 ir_graph_pass_t *optimize_cf_pass(const char *name)
966 {
967         return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
968 }
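
/* Typical usage (sketch): run the optimization directly on a pinned graph with
 *     optimize_cf(irg);
 * or register the pass returned by optimize_cf_pass() with a pass manager. */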