/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
* PURPOSE.
*/
-/*
- * Project: libFIRM
- * File name: ir/opt/cfopt.c
- * Purpose: control flow optimizations
- * Author:
- * Created:
- * CVS-ID: $Id$
- * Copyright: (c) 1998-2004 Universität Karlsruhe
+/**
+ * @file
+ * @brief Control flow optimizations.
+ * @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
+ * @version $Id$
*/
+#include "config.h"
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "iroptimize.h"
#include <assert.h>
#include "irgwalk.h"
#include "irgmod.h"
#include "irdump.h"
-#include "irvrfy.h"
+#include "irverify.h"
#include "iredges.h"
-#include "array.h"
+#include "array_t.h"
#include "irouts.h"
#include "irbackedge_t.h"
#include "irflag_t.h"
#include "firmstat.h"
+#include "irpass.h"
-#include "cfopt.h"
#include "iropt_dbg.h"
/*------------------------------------------------------------------*/
/* is empty if it contains only a Jmp node. */
/* Blocks can only be removed if they are not needed for the */
/* semantics of Phi nodes. */
+/* Further, we NEVER remove labeled blocks (even if we could move */
+/* the label). */
/*------------------------------------------------------------------*/
+#define set_Block_removable(block) set_Block_mark(block, 1)
+#define set_Block_non_removable(block) set_Block_mark(block, 0)
+#define is_Block_removable(block) (get_Block_mark(block) != 0)
+
/**
* Replace binary Conds that jumps twice into the same block
* by a simple Jmp.
* Note that the simple case that Block has only these two
* predecessors are already handled in equivalent_node_Block().
*/
-static void remove_senseless_conds(ir_node *bl, void *data) {
+static int remove_senseless_conds(ir_node *bl)
+{
int i, j;
int n = get_Block_n_cfgpreds(bl);
-
- assert(is_Block(bl));
+ int changed = 0;
for (i = 0; i < n; ++i) {
ir_node *pred_i = get_Block_cfgpred(bl, i);
ir_node *cond_j = skip_Proj(pred_j);
if (cond_j == cond_i) {
- ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
set_irn_n(bl, i, jmp);
- set_irn_n(bl, j, new_Bad());
+ set_irn_n(bl, j, new_r_Bad(irg));
DBG_OPT_IFSIM2(cond_i, jmp);
+ changed = 1;
break;
}
}
}
}
+ return changed;
}
+/** An environment for merge_blocks and collect nodes. */
+typedef struct merge_env {
+ int changed; /**< Set if the graph was changed. */
+ int phis_moved; /**< Set if Phi nodes were moved. */
+ plist_t *list; /**< Helper list for all found Switch Conds. */
+} merge_env;
+
/**
* Removes Tuples from Block control flow predecessors.
* Optimizes blocks with equivalent_node(). This is tricky,
* Therefore we also optimize at control flow operations, depending
* how we first reach the Block.
*/
-static void merge_blocks(ir_node *node, void *env) {
- int i, n;
+static void merge_blocks(ir_node *node, void *ctx)
+{
+ int i;
ir_node *new_block;
+ merge_env *env = (merge_env*)ctx;
/* clear the link field for ALL nodes first */
set_irn_link(node, NULL);
if (is_Block(node)) {
/* Remove Tuples */
-
- /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
- A different order of optimizations might cause problems. */
- if (get_opt_normalize()) {
- for (i = 0, n = get_Block_n_cfgpreds(node); i < n; ++i)
- set_Block_cfgpred(node, i, skip_Tuple(get_Block_cfgpred(node, i)));
+ for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(node, i);
+ ir_node *skipped = skip_Tuple(pred);
+ if (pred != skipped) {
+ set_Block_cfgpred(node, i, skipped);
+ env->changed = 1;
+ }
}
/* see below */
new_block = equivalent_node(node);
- if (new_block != node && ! is_Block_dead(new_block))
+ if (new_block != node && ! is_Block_dead(new_block)) {
exchange(node, new_block);
+ env->changed = 1;
+ }
} else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
if (!is_Block_dead(b)) {
new_block = equivalent_node(b);
- while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
+ while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
/* We would have to run gigo() if new is bad, so we
promote it directly below. Nevertheless, we sometimes reach a block
the first time through a dataflow node. In this case we optimized the
block as such and have to promote the Bad here. */
- assert((get_opt_control_flow_straightening() ||
- get_opt_control_flow_weak_simplification()) &&
- ("strange flag setting"));
- exchange (b, new_block);
+ exchange(b, new_block);
+ env->changed = 1;
b = new_block;
new_block = equivalent_node(b);
}
/* normally, we would create a Bad block here, but this must be
- * prevented, so just set it's cf to Bad.
+ * prevented, so just set its cf to Bad.
*/
- if (is_Block_dead(new_block))
- exchange(node, new_Bad());
+ if (is_Block_dead(new_block)) {
+ ir_graph *irg = get_irn_irg(node);
+ exchange(node, new_r_Bad(irg));
+ env->changed = 1;
+ }
}
}
}
-
/**
- * Remove cf from dead block by inspecting dominance info
+ * Block walker removing control flow from dead block by
+ * inspecting dominance info.
* Do not replace blocks by Bad. This optimization shall
* ensure, that all Bad control flow predecessors are
* removed, and no new other Bads are introduced.
+ * Further removes useless Conds and clears the mark of all blocks.
*
* Must be run in the post walker.
*/
-static void remove_dead_block_cf(ir_node *block, void *env) {
+static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
+{
int i;
+ int *changed = (int*)env;
- /* check block predecessors and turn control flow into bad */
+ /* Check block predecessors and turn control flow into bad.
+ Beware of Tuple, kill them. */
for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
- ir_node *pred_X = get_Block_cfgpred(block, i);
+ ir_node *pred_X = get_Block_cfgpred(block, i);
+ ir_node *skipped = skip_Tuple(pred_X);
- if (! is_Bad(pred_X)) {
- ir_node *pred_bl = get_nodes_block(skip_Proj(pred_X));
+ if (! is_Bad(skipped)) {
+ ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
+ ir_graph *irg = get_irn_irg(block);
set_Block_dead(pred_bl);
- exchange(pred_X, new_Bad());
+ exchange(pred_X, new_r_Bad(irg));
+ *changed = 1;
+ } else if (skipped != pred_X) {
+ set_Block_cfgpred(block, i, skipped);
+ *changed = 1;
}
}
}
+
+ *changed |= remove_senseless_conds(block);
+
+ /* clear the block mark of all non labeled blocks */
+ if (has_Block_entity(block))
+ set_Block_non_removable(block);
+ else
+ set_Block_removable(block);
}
/**
* Collects all Phi nodes in link list of Block.
- * Marks all blocks "block_visited" if they contain a node other
+ * Marks all blocks "non_removable" if they contain a node other
* than Jmp (and Proj).
* Links all Proj nodes to their predecessors.
* Collects all switch-Conds in a list.
*/
-static void collect_nodes(ir_node *n, void *env) {
- ir_op *op = get_irn_op(n);
- plist_t *list = env;
-
- if (op != op_Block) {
- ir_node *b = get_nodes_block(n);
-
- if (op == op_Phi) {
+static void collect_nodes(ir_node *n, void *ctx)
+{
+ unsigned code = get_irn_opcode(n);
+ merge_env *env = (merge_env*)ctx;
+
+ if (code == iro_Block) {
+ /* mark the block as non-removable if it is labeled */
+ if (has_Block_entity(n))
+ set_Block_non_removable(n);
+ } else {
+ ir_node *b = get_nodes_block(n);
+
+ if (code == iro_Phi && get_irn_arity(n) > 0) {
/* Collect Phi nodes to compact ins along with block's ins. */
set_irn_link(n, get_irn_link(b));
set_irn_link(b, n);
- } else if (op != op_Jmp && !is_Bad(b)) { /* Check for non empty block. */
- mark_Block_block_visited(b);
+ } else if (code != iro_Jmp && !is_Bad(b)) { /* Check for non-empty block. */
+ set_Block_non_removable(b);
- if (op == op_Proj) { /* link Proj nodes */
+ if (code == iro_Proj) { /* link Proj nodes */
ir_node *pred = get_Proj_pred(n);
set_irn_link(n, get_irn_link(pred));
set_irn_link(pred, n);
- } else if (op == op_Cond) {
+ } else if (code == iro_Cond) {
ir_node *sel = get_Cond_selector(n);
if (mode_is_int(get_irn_mode(sel))) {
/* found a switch-Cond, collect */
- plist_insert_back(list, n);
+ plist_insert_back(env->list, n);
}
}
}
}
/** Returns true if pred is predecessor of block. */
-static int is_pred_of(ir_node *pred, ir_node *b) {
- int i, n;
+static int is_pred_of(ir_node *pred, ir_node *b)
+{
+ int i;
- for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
+ for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
ir_node *b_pred = get_Block_cfgpred_block(b, i);
- if (b_pred == pred) return 1;
+ if (b_pred == pred)
+ return 1;
}
return 0;
}
* To perform the test for pos, we must regard predecessors before pos
* as already removed.
**/
-static int test_whether_dispensable(ir_node *b, int pos) {
+static int test_whether_dispensable(ir_node *b, int pos)
+{
int i, j, n_preds = 1;
ir_node *pred = get_Block_cfgpred_block(b, pos);
if (is_Block_dead(pred))
return 0;
- if (get_Block_block_visited(pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
-
- if (!get_opt_optimize() || !get_opt_control_flow_strong_simplification()) {
- /* Mark block so that is will not be removed: optimization is turned off. */
- set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
- return 1;
- }
-
+ if (is_Block_removable(pred)) {
/* Seems to be empty. At least we detected this in collect_nodes. */
- if (!get_irn_link(b)) {
+ if (get_irn_link(b) == NULL) {
/* There are no Phi nodes ==> all predecessors are dispensable. */
n_preds = get_Block_n_cfgpreds(pred);
} else {
Handle all pred blocks with preds < pos as if they were already removed. */
for (i = 0; i < pos; i++) {
ir_node *b_pred = get_Block_cfgpred_block(b, i);
- if (! is_Block_dead(b_pred) &&
- get_Block_block_visited(b_pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
- for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
+ if (! is_Block_dead(b_pred) && is_Block_removable(b_pred)) {
+ for (j = get_Block_n_cfgpreds(b_pred) - 1; j >= 0; --j) {
ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
if (is_pred_of(b_pred_pred, pred))
goto non_dispensable;
return n_preds;
non_dispensable:
- set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
+ set_Block_non_removable(pred);
return 1;
}
-/**
- * Store to defer the exchanged of Phi nodes.
- */
-typedef struct _defer_ex_phi {
- ir_node *phi_pred; /**< the previous Phi node that will be replaced */
- ir_node *phi; /**< the new Phi node that replaces phi_pred */
-} defer_ex_phi;
-
/**
* This method removed Bad cf predecessors from Blocks and Phis, and removes
* empty blocks. A block is empty if it only contains Phi and Jmp nodes.
* @@@ It is negotiable whether we should do this ... there might end up a copy
* from the Phi in the loop when removing the Phis.
*/
-static void optimize_blocks(ir_node *b, void *env) {
+static void optimize_blocks(ir_node *b, void *ctx)
+{
int i, j, k, n, max_preds, n_preds, p_preds = -1;
- ir_node *pred, *phi;
+ ir_node *pred, *phi, *next;
ir_node **in;
- defer_ex_phi *defers;
+ merge_env *env = (merge_env*)ctx;
/* Count the number of predecessor if this block is merged with pred blocks
that are empty. */
for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
max_preds += test_whether_dispensable(b, i);
}
- in = xmalloc(max_preds * sizeof(*in));
-
- defers = NEW_ARR_F(defer_ex_phi, 0);
-
- /*-
- printf(" working on "); DDMN(b);
- for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
- pred = get_nodes_block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- printf(" removing Bad %i\n ", i);
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- printf(" removing pred %i ", i); DDMN(pred);
- } else { printf(" Nothing to do for "); DDMN(pred); }
- }
- * end Debug output -*/
+ in = XMALLOCN(ir_node*, max_preds);
/*- Fix the Phi nodes of the current block -*/
- for (phi = get_irn_link(b); phi; ) {
- assert(get_irn_op(phi) == op_Phi);
+ for (phi = (ir_node*)get_irn_link(b); phi != NULL; phi = (ir_node*)next) {
+ assert(is_Phi(phi));
+ next = (ir_node*)get_irn_link(phi);
/* Find the new predecessors for the Phi */
p_preds = 0;
for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
pred = get_Block_cfgpred_block(b, i);
- if (is_Bad(get_Block_cfgpred(b, i))) {
+ if (is_Block_dead(pred)) {
/* case Phi 1: Do nothing */
- }
- else if (get_Block_block_visited(pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
+ } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
/* case Phi 2: It's an empty block and not yet visited. */
ir_node *phi_pred = get_Phi_pred(phi, i);
if (! is_Bad(get_Block_cfgpred(pred, j))) {
if (get_nodes_block(phi_pred) == pred) {
/* case Phi 2a: */
- assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ assert(is_Phi(phi_pred)); /* Block is empty!! */
in[p_preds++] = get_Phi_pred(phi_pred, j);
} else {
}
}
}
-
- /* The Phi_pred node is replaced now if it is a Phi.
-
- Somehow the removed Phi node can be used legally in loops.
- Therefore we replace the old phi by the new one.
- This must be done _AFTER_ all Phis are optimized, or
- it will fail if two Phis use the same pred_Phi.
-
- FIXME: Is the following true? We ALWAYS replace it by the new one.
-
- Further we have to remove the old Phi node by replacing it
- by Bad. Else it will remain in the keep alive array of End
- and cause illegal situations. So if there is no loop, we should
- replace it by Bad.
- */
- if (get_nodes_block(phi_pred) == pred) {
- int i;
- /* remove the Phi as it might be kept alive. Further there
- might be other users. */
- for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
- if (defers[i].phi_pred == phi_pred)
- break;
- }
- if (i < 0) {
- /* we have a new replacement */
- defer_ex_phi elem;
-
- elem.phi_pred = phi_pred;
- elem.phi = phi;
- ARR_APP1(defer_ex_phi, defers, elem);
- }
- }
} else {
/* case Phi 3: */
in[p_preds++] = get_Phi_pred(phi, i);
exchange(phi, in[0]);
else
set_irn_in(phi, p_preds, in);
-
- phi = get_irn_link(phi);
- }
-
- /* now, exchange all Phis */
- for (i = ARR_LEN(defers) - 1; i >= 0; --i) {
- exchange(defers[i].phi_pred, defers[i].phi);
+ env->changed = 1;
}
- DEL_ARR_F(defers);
/*- This happens only if merge between loop backedge and single loop entry.
- See special case above. -*/
+ Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
+ of the Phi's in predb ... -*/
for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
- pred = get_nodes_block(get_Block_cfgpred(b, k));
+ ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
+
+ if (is_Block_removable(predb) && !Block_block_visited(predb)) {
+ ir_node *next_phi;
- if (get_Block_block_visited(pred) + 1 < get_irg_block_visited(current_ir_graph)) {
/* we found a predecessor block at position k that will be removed */
- for (phi = get_irn_link(pred); phi;) {
- /*
- * the previous phase may already changed the phi, and even
- * removed it at all, so check here if this node is still a phi
- */
- if (get_irn_op(phi) == op_Phi) {
- int q_preds = 0;
-
- /* move this phi from the predecessor into the block b */
+ for (phi = (ir_node*)get_irn_link(predb); phi; phi = next_phi) {
+ int q_preds = 0;
+ next_phi = (ir_node*)get_irn_link(phi);
+
+ assert(is_Phi(phi));
+
+ if (get_Block_idom(b) != predb) {
+ /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them. */
+ ir_graph *irg = get_irn_irg(b);
+ exchange(phi, new_r_Bad(irg));
+ } else {
+ /* predb is the direct dominator of b. There might be uses of the Phi nodes from
+ predb in further block, so move this phi from the predecessor into the block b */
set_nodes_block(phi, b);
+ set_irn_link(phi, get_irn_link(b));
+ set_irn_link(b, phi);
+ env->phis_moved = 1;
/* first, copy all 0..k-1 predecessors */
for (i = 0; i < k; i++) {
pred = get_Block_cfgpred_block(b, i);
- if (is_Bad(pred)) {
+ if (is_Block_dead(pred)) {
/* Do nothing */
- } else if (get_Block_block_visited(pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
+ } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
/* It's an empty block and not yet visited. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
- muss Rueckwaertskante sein! (An allen vier in[q_preds] = phi
- Anweisungen.) Trotzdem tuts bisher!! */
if (! is_Bad(get_Block_cfgpred(pred, j)))
in[q_preds++] = phi;
}
/* and now all the rest */
for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
- pred = get_nodes_block(get_Block_cfgpred(b, i));
+ pred = get_Block_cfgpred_block(b, i);
- if (is_Bad(get_Block_cfgpred(b, i))) {
+ if (is_Block_dead(pred)) {
/* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
+ } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
/* It's an empty block and not yet visited. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
if (! is_Bad(get_Block_cfgpred(pred, j)))
exchange(phi, in[0]);
else
set_irn_in(phi, q_preds, in);
+ env->changed = 1;
assert(q_preds <= max_preds);
// assert(p_preds == q_preds && "Wrong Phi Fix");
}
- phi = get_irn_link(phi);
}
}
}
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_Block_cfgpred_block(b, i);
- if (is_Bad(pred)) {
+ if (is_Block_dead(pred)) {
/* case 1: Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
+ } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
/* case 2: It's an empty block and not yet visited. */
- assert(get_Block_n_cfgpreds(b) > 1);
+ assert(get_Block_n_cfgpreds(b) > 1 || has_Block_entity(b));
/* Else it should be optimized by equivalent_node. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- ir_node *pred_block = get_Block_cfgpred(pred, j);
+ ir_node *pred_X = get_Block_cfgpred(pred, j);
/* because of breaking loops, not all predecessors are Bad-clean,
* so we must check this here again */
- if (! is_Bad(pred_block))
- in[n_preds++] = pred_block;
+ if (! is_Bad(pred_X))
+ in[n_preds++] = pred_X;
}
/* Remove block as it might be kept alive. */
- exchange(pred, b/*new_Bad()*/);
+ exchange(pred, b/*new_r_Bad(irg)*/);
} else {
/* case 3: */
in[n_preds++] = get_Block_cfgpred(b, i);
assert(n_preds <= max_preds);
set_irn_in(b, n_preds, in);
+ env->changed = 1;
- assert(get_irn_link(b) == NULL || (n_preds == p_preds && "Wrong Phi Fix"));
+ assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
xfree(in);
}
/**
- * Walker: optimize all blocks using the default optimizations.
+ * Block walker: optimize all blocks using the default optimizations.
* This removes Blocks that with only a Jmp predecessor.
*/
-static void remove_simple_blocks(ir_node *block, void *env) {
- ir_node *new_blk = equivalent_node(block);
- if (new_blk != block)
+static void remove_simple_blocks(ir_node *block, void *ctx)
+{
+ merge_env *env = (merge_env*)ctx;
+ ir_node *new_blk = equivalent_node(block);
+
+ if (new_blk != block) {
exchange(block, new_blk);
+ env->changed = 1;
+ }
}
/**
*
* Expects all Proj's linked to the cond node
*/
-static int handle_switch_cond(ir_node *cond) {
+static int handle_switch_cond(ir_node *cond)
+{
ir_node *sel = get_Cond_selector(cond);
- ir_node *proj1 = get_irn_link(cond);
- ir_node *proj2 = get_irn_link(proj1);
+ ir_node *proj1 = (ir_node*)get_irn_link(cond);
+ ir_node *proj2 = (ir_node*)get_irn_link(proj1);
ir_node *jmp, *blk;
blk = get_nodes_block(cond);
if (proj2 == NULL) {
/* this Cond has only one Proj: must be the defProj */
- assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
+ assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
/* convert it into a Jmp */
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
return 1;
} else if (get_irn_link(proj2) == NULL) {
/* We have two Proj's here. Check if the Cond has
a constant argument */
- tarval *tv = value_of(sel);
+ ir_tarval *tv = value_of(sel);
if (tv != tarval_bad) {
/* we have a constant switch */
- long num = get_tarval_long(tv);
- long def_num = get_Cond_defaultProj(cond);
+ long num = get_tarval_long(tv);
+ long def_num = get_Cond_default_proj(cond);
+ ir_graph *irg = get_irn_irg(cond);
if (def_num == get_Proj_proj(proj1)) {
/* first one is the defProj */
if (num == get_Proj_proj(proj2)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj2, jmp);
- exchange(proj1, new_Bad());
+ exchange(proj1, new_r_Bad(irg));
return 1;
}
} else if (def_num == get_Proj_proj(proj2)) {
/* second one is the defProj */
if (num == get_Proj_proj(proj1)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
- exchange(proj2, new_Bad());
+ exchange(proj2, new_r_Bad(irg));
return 1;
}
} else {
/* neither: strange, Cond was not optimized so far */
if (num == get_Proj_proj(proj1)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
- exchange(proj2, new_Bad());
+ exchange(proj2, new_r_Bad(irg));
return 1;
} else if (num == get_Proj_proj(proj2)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj2, jmp);
- exchange(proj1, new_Bad());
+ exchange(proj1, new_r_Bad(irg));
return 1;
}
}
* It walks only over block nodes and adapts these and the Phi nodes in these blocks,
* which it finds in a linked list computed by the first pass.
*
- * We use the block_visited flag to mark empty blocks in the first
+ * We use the mark flag to mark removable blocks in the first
* phase.
- * @@@ It would be better to add a struct in the link field
- * that keeps the Phi list and the mark. Place it on an obstack, as
- * we will lose blocks and thereby generate memory leaks.
*/
-void optimize_cf(ir_graph *irg) {
- int i, j, n;
+void optimize_cf(ir_graph *irg)
+{
+ int i, j, n, changed;
ir_node **in = NULL;
ir_node *cond, *end = get_irg_end(irg);
- ir_graph *rem = current_ir_graph;
- irg_dom_state dom_state = get_irg_dom_state(current_ir_graph);
- plist_t *list;
plist_element_t *el;
+ merge_env env;
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_pinned(irg) != op_pin_state_floats &&
"Control flow optimization need a pinned graph");
- current_ir_graph = irg;
-
+ /* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
edges_deactivate(irg);
- /* Handle graph state */
- set_irg_outs_inconsistent(current_ir_graph);
- set_irg_extblk_inconsistent(current_ir_graph);
- set_irg_loopinfo_inconsistent(current_ir_graph);
- set_irg_doms_inconsistent(current_ir_graph);
-
- if (dom_state == dom_consistent && get_opt_optimize() && get_opt_unreachable_code()) {
- ir_node *end;
-
- /* we have dominance info, we can kill dead block */
- irg_block_walk_graph(irg, NULL, remove_dead_block_cf, NULL);
-
- /* fix the keep-alives */
- end = get_irg_end(irg);
- for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
- ir_node *ka = get_End_keepalive(end, i);
-
- if (is_Block(ka)) {
- /* do NOT keep dead blocks */
- if (get_Block_dom_depth(ka) < 0)
- set_End_keepalive(end, i, new_Bad());
- } else if (is_Block_dead(get_nodes_block(ka)) ||
- get_Block_dom_depth(get_nodes_block(ka)) < 0)
+ /* we use the mark flag to mark removable blocks */
+ ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
+restart:
+ env.changed = 0;
+ env.phis_moved = 0;
+
+ /* ALWAYS kill unreachable control flow. Backend cannot handle it anyway.
+ Use dominator info to kill blocks. Also optimize useless Conds. */
+ assure_doms(irg);
+ irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
+
+ /* fix the keep-alives */
+ changed = 0;
+ for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
+ ir_node *ka = get_End_keepalive(end, i);
+
+ if (is_Block(ka)) {
+ /* do NOT keep dead blocks */
+ if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
+ set_End_keepalive(end, i, new_r_Bad(irg));
+ changed = 1;
+ }
+ } else {
+ ir_node *block = get_nodes_block(ka);
+
+ if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
/* do NOT keep nodes in dead blocks */
- set_End_keepalive(end, i, new_Bad());
+ set_End_keepalive(end, i, new_r_Bad(irg));
+ changed = 1;
+ }
}
}
- irg_block_walk_graph(irg, NULL, remove_senseless_conds, NULL);
+ env.changed |= changed;
- /* Use block visited flag to mark non-empty blocks. */
- inc_irg_block_visited(irg);
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
- list = plist_new();
- irg_walk(end, merge_blocks, collect_nodes, list);
+ env.list = plist_new();
+ irg_walk(end, merge_blocks, collect_nodes, &env);
+
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+
+ if (env.changed) {
+ /* Handle graph state if was changed. */
+ set_irg_outs_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
+ set_irg_extblk_inconsistent(irg);
+ set_irg_loopinfo_inconsistent(irg);
+ set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+ env.changed = 0;
+ }
/* handle all collected switch-Conds */
- foreach_plist(list, el) {
- cond = plist_element_get_value(el);
- handle_switch_cond(cond);
+ foreach_plist(env.list, el) {
+ cond = (ir_node*)plist_element_get_value(el);
+ env.changed |= handle_switch_cond(cond);
+ }
+ plist_free(env.list);
+
+ if (env.changed) {
+ /* The Cond optimization might generate unreachable code, so restart if
+ it happens. */
+ goto restart;
}
- plist_free(list);
/* Optimize the standard code. */
- irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, NULL);
+ env.changed = 0;
+ assure_doms(irg);
+ irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
- /* Walk all keep alives, optimize them if block, add to new in-array
- for end if useful. */
- n = get_End_n_keepalives(end);
- if (n > 0)
- NEW_ARR_A(ir_node *, in, n);
+ /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
- /* fix the keep alive */
- for (i = j = 0; i < n; i++) {
+ /* fix the keep-alives again */
+ changed = 0;
+ for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
ir_node *ka = get_End_keepalive(end, i);
- if (irn_not_visited(ka)) {
- ir_op *op = get_irn_op(ka);
-
- if ((op == op_Block) && Block_not_block_visited(ka)) {
- set_irg_block_visited(irg, /* Don't walk all the way to Start. */
- get_irg_block_visited(irg)-1);
- irg_block_walk(ka, optimize_blocks, remove_simple_blocks, NULL);
- mark_irn_visited(ka);
- in[j++] = ka;
- } else if (op == op_Phi) {
- mark_irn_visited(ka);
- if (! is_Block_dead(get_nodes_block(ka)))
- in[j++] = ka;
- } else if (is_op_keep(op)) {
- mark_irn_visited(ka);
- if (! is_Block_dead(get_nodes_block(ka)))
+ if (is_Block(ka)) {
+ /* do NOT keep dead blocks */
+ if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
+ set_End_keepalive(end, i, new_r_Bad(irg));
+ changed = 1;
+ }
+ } else {
+ ir_node *block = get_nodes_block(ka);
+
+ if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+ /* do NOT keep nodes in dead blocks */
+ set_End_keepalive(end, i, new_r_Bad(irg));
+ changed = 1;
+ }
+ }
+ }
+ env.changed |= changed;
+
+ remove_End_Bads_and_doublets(end);
+
+
+ ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
+
+ if (env.phis_moved) {
+ /* Bad: when we moved Phi's, we might produce dead Phi nodes
+ that are kept-alive.
+ Some other phases cannot cope with this, so kill them.
+ */
+ n = get_End_n_keepalives(end);
+ if (n > 0) {
+ NEW_ARR_A(ir_node *, in, n);
+ if (env.changed) {
+ /* Handle graph state if was changed. */
+ set_irg_outs_inconsistent(irg);
+ }
+ assure_irg_outs(irg);
+
+ for (i = j = 0; i < n; ++i) {
+ ir_node *ka = get_End_keepalive(end, i);
+
+ if (is_Phi(ka)) {
+ int k;
+
+ for (k = get_irn_n_outs(ka) - 1; k >= 0; --k) {
+ ir_node *user = get_irn_out(ka, k);
+
+ if (user != ka && user != end) {
+ /* Is it a real user or just a self loop ? */
+ break;
+ }
+ }
+ if (k >= 0)
+ in[j++] = ka;
+ } else
in[j++] = ka;
}
+ if (j != n) {
+ set_End_keepalives(end, j, in);
+ env.changed = 1;
+ }
}
}
- if (j != n)
- set_End_keepalives(end, j, in);
+
+ if (env.changed) {
+ /* Handle graph state if was changed. */
+ set_irg_outs_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
+ set_irg_extblk_inconsistent(irg);
+ set_irg_loopinfo_inconsistent(irg);
+ set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+ }
+
+
/* the verifier doesn't work yet with floating nodes */
if (get_irg_pinned(irg) == op_pin_state_pinned) {
/* after optimize_cf(), only Bad data flow may remain. */
- if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
- dump_ir_block_graph(irg, "-vrfy-cf");
- dump_ir_graph(irg, "-vrfy-cf");
- fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
+ if (irg_verify_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
+ dump_ir_graph(irg, "-verify-cf");
+ fprintf(stderr, "VERIFY_BAD in optimize_cf()\n");
}
}
-
- current_ir_graph = rem;
}
+
+/* Creates an ir_graph pass for optimize_cf. */
+ir_graph_pass_t *optimize_cf_pass(const char *name)
+{
+ return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
+} /* optimize_cf_pass */