3 * File name: ir/opt/cfopt.c
4 * Purpose: control flow optimizations
8 * Copyright: (c) 1998-2004 Universität Karlsruhe
9 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
20 #include "irgraph_t.h"
32 #include "irbackedge_t.h"
38 #include "iropt_dbg.h"
40 /*------------------------------------------------------------------*/
41 /* Control flow optimization. */
43 /* Removes Bad control flow predecessors and empty blocks. A block */
44 /* is empty if it contains only a Jmp node. */
45 /* Blocks can only be removed if they are not needed for the */
46 /* semantics of Phi nodes. */
47 /*------------------------------------------------------------------*/
50 * Replace binary Conds that jump twice into the same block
56 * ProjX True ProjX False ==> | /
61 * Such patterns are the result of if-conversion.
63 * Note that the simple case that Block has only these two
64 * predecessors are already handled in equivalent_node_Block().
66 static void remove_senseless_conds(ir_node *bl, void *data)
69 int n = get_Block_n_cfgpreds(bl);
73 for (i = 0; i < n; ++i) {
74 ir_node *pred_i = get_Block_cfgpred(bl, i);
75 ir_node *cond_i = skip_Proj(pred_i);
77 for (j = i + 1; j < n; ++j) {
78 ir_node *pred_j = get_Block_cfgpred(bl, j);
79 ir_node *cond_j = skip_Proj(pred_j);
82 && get_irn_op(cond_i) == op_Cond
83 && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
85 ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
86 set_irn_n(bl, i, jmp);
87 set_irn_n(bl, j, new_Bad());
89 DBG_OPT_IFSIM2(cond_i, jmp);
98 * Removes Tuples from Block control flow predecessors.
99 * Optimizes blocks with equivalent_node(). This is tricky,
100 * as we want to avoid nodes that have as block predecessor Bads.
101 * Therefore we also optimize at control flow operations, depending
102 * how we first reach the Block.
104 static void merge_blocks(ir_node *n, void *env) {
108 /* clear the link field for ALL nodes first */
109 set_irn_link(n, NULL);
111 if (get_irn_op(n) == op_Block) {
113 for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
114 /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
115 A different order of optimizations might cause problems. */
116 if (get_opt_normalize())
117 set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
121 new_block = equivalent_node(n);
122 if (new_block != n && ! is_Block_dead(new_block))
123 exchange (n, new_block);
125 } else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) {
126 /* We will soon visit a block. Optimize it before visiting! */
127 ir_node *b = get_nodes_block(skip_Proj(n));
129 if (!is_Block_dead(b)) {
130 new_block = equivalent_node(b);
132 while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
133 /* We would have to run gigo() if new is bad, so we
134 promote it directly below. Nevertheless, we sometimes reach a block
135 the first time through a dataflow node. In this case we optimized the
136 block as such and have to promote the Bad here. */
137 assert((get_opt_control_flow_straightening() ||
138 get_opt_control_flow_weak_simplification()) &&
139 ("strange flag setting"));
140 exchange (b, new_block);
142 new_block = equivalent_node(b);
145 /* normally, we would create a Bad block here, but this must be
146 * prevented, so just set it's cf to Bad.
148 if (is_Block_dead(new_block))
149 exchange(n, new_Bad());
155 * Remove cf from dead block by inspecting dominance info
156 * Do not replace blocks by Bad. This optimization shall
157 * ensure, that all Bad control flow predecessors are
158 * removed, and no new other Bads are introduced.
160 * Must be run in the post walker.
162 static void remove_dead_block_cf(ir_node *block, void *env)
166 /* check block predecessors and turn control flow into bad */
167 for (i = 0, n = get_Block_n_cfgpreds(block); i < n; ++i) {
168 ir_node *pred_X = get_Block_cfgpred(block, i);
170 if (! is_Bad(pred_X)) {
171 ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
173 if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0))
174 exchange (pred_X, new_Bad());
180 * Collects all Phi nodes in link list of Block.
181 * Marks all blocks "block_visited" if they contain a node other
183 * Replaces n by Bad if n is unreachable control flow. We do that
184 * in the post walker, so we catch all blocks.
186 static void collect_nodes(ir_node *n, void *env) {
187 if (is_no_Block(n)) {
188 ir_node *b = get_nodes_block(n);
190 if ((get_irn_op(n) == op_Phi)) {
191 /* Collect Phi nodes to compact ins along with block's ins. */
192 set_irn_link(n, get_irn_link(b));
195 else if ((get_irn_op(n) != op_Jmp) && !is_Bad(b)) { /* Check for non empty block. */
196 mark_Block_block_visited(b);
201 /** Returns true if pred is predecessor of block. */
202 static int is_pred_of(ir_node *pred, ir_node *b) {
205 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
206 ir_node *b_pred = get_nodes_block(get_Block_cfgpred(b, i));
207 if (b_pred == pred) return 1;
213 /** Test whether we can optimize away pred block pos of b.
215 * @param b A block node.
216 * @param pos The position of the predecessor block to judge about.
218 * @returns The number of predecessors
220 * The test is rather tricky.
222 * The situation is something like the following:
231 * b merges the control flow of an if-then-else. We may not remove
232 * the 'then' _and_ the 'else' block of an 'if' if there is a Phi
233 * node in b, even if both are empty. The destruction of this Phi
234 * requires that a copy is added before the merge. We have to
235 * keep one of the case blocks to place the copies in.
237 * To perform the test for pos, we must regard predecessors before pos
238 * as already removed.
240 static int test_whether_dispensable(ir_node *b, int pos) {
241 int i, j, n_preds = 1;
243 ir_node *cfop = get_Block_cfgpred(b, pos);
244 ir_node *pred = get_nodes_block(cfop);
246 /* Bad blocks will be optimized away, so we don't need space for them */
250 if (get_Block_block_visited(pred) + 1
251 < get_irg_block_visited(current_ir_graph)) {
253 if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
254 /* Mark block so that is will not be removed: optimization is turned off. */
255 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
259 /* Seems to be empty. At least we detected this in collect_nodes. */
260 if (!get_irn_link(b)) {
261 /* There are no Phi nodes ==> all predecessors are dispensable. */
262 n_preds = get_Block_n_cfgpreds(pred);
264 /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
265 Work preds < pos as if they were already removed. */
266 for (i = 0; i < pos; i++) {
267 ir_node *b_pred = get_nodes_block(get_Block_cfgpred(b, i));
268 if (get_Block_block_visited(b_pred) + 1
269 < get_irg_block_visited(current_ir_graph)) {
270 for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
271 ir_node *b_pred_pred = get_nodes_block(get_Block_cfgpred(b_pred, j));
272 if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
275 if (is_pred_of(b_pred, pred)) dispensable = 0;
278 for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
279 ir_node *b_pred = get_nodes_block(get_Block_cfgpred(b, i));
280 if (is_pred_of(b_pred, pred)) dispensable = 0;
283 set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
286 n_preds = get_Block_n_cfgpreds(pred);
296 * This method removes Bad cf predecessors from Blocks and Phis, and removes
297 * empty blocks. A block is empty if it only contains Phi and Jmp nodes.
299 * We first adapt Phi nodes, then Block nodes, as we need the old ins
300 * of the Block to adapt the Phi nodes. We do this by computing new
301 * in arrays, and then replacing the old ones. So far we compute new in arrays
302 * for all nodes, not regarding whether there is a possibility for optimization.
304 * For each predecessor p of a Block b there are three cases:
305 * -#. The predecessor p is a Bad node: just skip it. The in array of b shrinks by one.
306 * -#. The predecessor p is empty. Remove p. All predecessors of p are now
308 * -#. The predecessor p is a block containing useful code. Just keep p as is.
310 * For Phi nodes f we have to check the conditions at the Block of f.
311 * For cases 1 and 3 we proceed as for Blocks. For case 2 we can have two
313 * -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED. In this
314 * case we proceed as for blocks. We remove pred_f. All
315 * predecessors of pred_f now are predecessors of f.
316 * -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
317 * We have to replicate f for each predecessor of the removed block. Or, with
318 * other words, the removed predecessor block has exactly one predecessor.
320 * Further there is a special case for self referencing blocks:
323 * then_b else_b then_b else_b
329 * | | | === optimized to ===> \ | | |
336 * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
337 * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
339 * @@@ It is negotiable whether we should do this ... there might end up a copy
340 * from the Phi in the loop when removing the Phis.
342 static void optimize_blocks(ir_node *b, void *env) {
343 int i, j, k, n, max_preds, n_preds, p_preds;
347 /* Count the number of predecessor if this block is merged with pred blocks
350 for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
351 max_preds += test_whether_dispensable(b, i);
353 in = xmalloc(max_preds * sizeof(*in));
356 printf(" working on "); DDMN(b);
357 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
358 pred = get_nodes_block(get_Block_cfgpred(b, i));
359 if (is_Bad(get_Block_cfgpred(b, i))) {
360 printf(" removing Bad %i\n ", i);
361 } else if (get_Block_block_visited(pred) +1
362 < get_irg_block_visited(current_ir_graph)) {
363 printf(" removing pred %i ", i); DDMN(pred);
364 } else { printf(" Nothing to do for "); DDMN(pred); }
366 * end Debug output -*/
368 /*- Fix the Phi nodes of the current block -*/
369 for (phi = get_irn_link(b); phi; ) {
370 assert(get_irn_op(phi) == op_Phi);
372 /* Find the new predecessors for the Phi */
374 for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
375 pred = get_nodes_block(get_Block_cfgpred(b, i));
377 if (is_Bad(get_Block_cfgpred(b, i))) {
378 /* case Phi 1: Do nothing */
380 else if (get_Block_block_visited(pred) + 1
381 < get_irg_block_visited(current_ir_graph)) {
382 /* case Phi 2: It's an empty block and not yet visited. */
383 ir_node *phi_pred = get_Phi_pred(phi, i);
385 for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
386 /* because of breaking loops, not all predecessors are Bad-clean,
387 * so we must check this here again */
388 if (! is_Bad(get_Block_cfgpred(pred, j))) {
389 if (get_nodes_block(phi_pred) == pred) {
391 assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
393 in[p_preds++] = get_Phi_pred(phi_pred, j);
396 in[p_preds++] = phi_pred;
401 /* The Phi_pred node is replaced now if it is a Phi.
403 Somehow the removed Phi node can be used legally in loops.
404 Therefore we replace the old phi by the new one.
406 Further we have to remove the old Phi node by replacing it
407 by Bad. Else it will remain in the keep alive array of End
408 and cause illegal situations. So if there is no loop, we should
411 if (get_nodes_block(phi_pred) == pred) {
412 /* remove the Phi as it might be kept alive. Further there
413 might be other users. */
414 exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */
418 in[p_preds++] = get_Phi_pred(phi, i);
421 assert(p_preds <= max_preds);
425 /* By removal of Bad ins the Phi might be degenerated. */
426 exchange(phi, in[0]);
428 set_irn_in(phi, p_preds, in);
430 phi = get_irn_link(phi);
433 /*- This happens only if merge between loop backedge and single loop entry.
434 See special case above. -*/
435 for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
436 pred = get_nodes_block(get_Block_cfgpred(b, k));
438 if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
439 /* we found a predecessor block at position k that will be removed */
440 for (phi = get_irn_link(pred); phi;) {
442 * the previous phase may already changed the phi, and even
443 * removed it at all, so check here if this node is still a phi
445 if (get_irn_op(phi) == op_Phi) {
448 /* move this phi from the predecessor into the block b */
449 set_nodes_block(phi, b);
451 /* first, copy all 0..k-1 predecessors */
452 for (i = 0; i < k; i++) {
453 pred = get_nodes_block(get_Block_cfgpred(b, i));
455 if (is_Bad(get_Block_cfgpred(b, i))) {
457 } else if (get_Block_block_visited(pred) + 1
458 < get_irg_block_visited(current_ir_graph)) {
459 /* It's an empty block and not yet visited. */
460 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
461 /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
462 muss Rueckwaertskante sein! (An allen vier in[q_preds] = phi
463 Anweisungen.) Trotzdem tuts bisher!! */
464 if (! is_Bad(get_Block_cfgpred(pred, j)))
472 /* now we are at k, copy the phi predecessors */
473 pred = get_nodes_block(get_Block_cfgpred(b, k));
474 for (i = 0; i < get_Phi_n_preds(phi); i++) {
475 if (! is_Bad(get_Block_cfgpred(pred, i)))
476 in[q_preds++] = get_Phi_pred(phi, i);
479 /* and now all the rest */
480 for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
481 pred = get_nodes_block(get_Block_cfgpred(b, i));
483 if (is_Bad(get_Block_cfgpred(b, i))) {
485 } else if (get_Block_block_visited(pred) +1
486 < get_irg_block_visited(current_ir_graph)) {
487 /* It's an empty block and not yet visited. */
488 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
489 if (! is_Bad(get_Block_cfgpred(pred, j)))
499 exchange(phi, in[0]);
501 set_irn_in(phi, q_preds, in);
503 assert(q_preds <= max_preds);
504 // assert(p_preds == q_preds && "Wrong Phi Fix");
506 phi = get_irn_link(phi);
511 /*- Fix the block -*/
513 for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
514 pred = get_nodes_block(get_Block_cfgpred(b, i));
516 if (is_Bad(get_Block_cfgpred(b, i))) {
517 /* case 1: Do nothing */
518 } else if (get_Block_block_visited(pred) +1
519 < get_irg_block_visited(current_ir_graph)) {
520 /* case 2: It's an empty block and not yet visited. */
521 assert(get_Block_n_cfgpreds(b) > 1);
522 /* Else it should be optimized by equivalent_node. */
523 for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
524 ir_node *pred_block = get_Block_cfgpred(pred, j);
526 /* because of breaking loops, not all predecessors are Bad-clean,
527 * so we must check this here again */
528 if (! is_Bad(pred_block))
529 in[n_preds++] = pred_block;
531 /* Remove block as it might be kept alive. */
532 exchange(pred, b/*new_Bad()*/);
535 in[n_preds++] = get_Block_cfgpred(b, i);
538 assert(n_preds <= max_preds);
540 set_irn_in(b, n_preds, in);
542 assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
548 /* Optimizations of the control flow that also require changes of Phi nodes.
550 * This optimization performs two passes over the graph.
552 * The first pass collects all Phi nodes in a link list in the block
553 * nodes. Further it performs simple control flow optimizations.
554 * Finally it marks all blocks that do not contain useful
555 * computations, i.e., these blocks might be removed.
557 * The second pass performs the optimizations intended by this algorithm.
558 * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
559 * which it finds in a linked list computed by the first pass.
561 * We use the block_visited flag to mark empty blocks in the first
563 * @@@ It would be better to add a struct in the link field
564 * that keeps the Phi list and the mark. Place it on an obstack, as
565 * we will lose blocks and thereby generate memory leaks.
567 void optimize_cf(ir_graph *irg) {
570 ir_node *end = get_irg_end(irg);
571 ir_graph *rem = current_ir_graph;
572 irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
573 current_ir_graph = irg;
575 /* if the graph is not pinned, we cannot determine empty blocks */
576 assert(get_irg_pinned(irg) != op_pin_state_floats &&
577 "Control flow optimization need a pinned graph");
579 /* Handle graph state */
580 assert(get_irg_phase_state(irg) != phase_building);
581 if (get_irg_outs_state(current_ir_graph) == outs_consistent)
582 set_irg_outs_inconsistent(current_ir_graph);
583 if (get_irg_dom_state(current_ir_graph) == dom_consistent)
584 set_irg_dom_inconsistent(current_ir_graph);
586 if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
587 ir_node *end = get_irg_end(irg);
589 /* we have dominance info, we can kill dead block */
590 irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
592 /* fix the keep-alives */
593 for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
594 ir_node *ka = get_End_keepalive(end, i);
597 if (get_Block_dom_depth(ka) == -1)
598 set_End_keepalive(end, i, new_Bad());
600 else if (is_Block_dead(get_nodes_block(ka)) || get_Block_dom_depth(get_nodes_block(ka)) == -1)
601 set_End_keepalive(end, i, new_Bad());
605 irg_block_walk_graph(current_ir_graph, NULL, remove_senseless_conds, NULL);
607 /* Use block visited flag to mark non-empty blocks. */
608 inc_irg_block_visited(irg);
609 irg_walk(end, merge_blocks, collect_nodes, NULL);
611 /* Optimize the standard code. */
612 irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);
614 /* Walk all keep alives, optimize them if block, add to new in-array
615 for end if useful. */
616 in = NEW_ARR_F (ir_node *, 1);
617 in[0] = get_nodes_block(end);
618 inc_irg_visited(current_ir_graph);
620 for (i = 0; i < get_End_n_keepalives(end); i++) {
621 ir_node *ka = get_End_keepalive(end, i);
623 if (irn_not_visited(ka)) {
624 if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
625 set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
626 get_irg_block_visited(current_ir_graph)-1);
627 irg_block_walk(ka, optimize_blocks, NULL, NULL);
628 mark_irn_visited(ka);
629 ARR_APP1 (ir_node *, in, ka);
630 } else if (get_irn_op(ka) == op_Phi) {
631 mark_irn_visited(ka);
632 ARR_APP1 (ir_node *, in, ka);
633 } else if (get_irn_op(ka) == op_IJmp) {
634 mark_irn_visited(ka);
635 ARR_APP1 (ir_node *, in, ka);
639 /* DEL_ARR_F(end->in); GL @@@ tut nicht ! */
643 /* the verifier doesn't work yet with floating nodes */
644 if (get_irg_pinned(irg) == op_pin_state_pinned) {
645 /* after optimize_cf(), only Bad data flow may remain. */
646 if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
647 dump_ir_block_graph(irg, "-vrfy-cf");
648 dump_ir_graph(irg, "-vrfy-cf");
649 fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
653 current_ir_graph = rem;