* @author Goetz Lindenmaier, Michael Beck, Sebastian Hack
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include "iroptimize.h"
#include "irflag_t.h"
#include "firmstat.h"
+#include "irpass.h"
#include "iropt_dbg.h"
* Note that the simple case in which the Block has only these two
* predecessors is already handled in equivalent_node_Block().
*/
-static int remove_senseless_conds(ir_node *bl) {
+static int remove_senseless_conds(ir_node *bl)
+{
int i, j;
int n = get_Block_n_cfgpreds(bl);
int changed = 0;
ir_node *cond_j = skip_Proj(pred_j);
if (cond_j == cond_i) {
- ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
+ ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
set_irn_n(bl, i, jmp);
set_irn_n(bl, j, new_Bad());
* Therefore we also optimize at control flow operations, depending
* how we first reach the Block.
*/
-static void merge_blocks(ir_node *node, void *ctx) {
+static void merge_blocks(ir_node *node, void *ctx)
+{
int i;
ir_node *new_block;
merge_env *env = ctx;
*
* Must be run in the post walker.
*/
-static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
+static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
+{
int i;
int *changed = env;
*changed |= remove_senseless_conds(block);
- /* clear the block mark of all blocks */
- set_Block_removable(block);
+ /* clear the block mark of all non labeled blocks */
+ if (has_Block_entity(block))
+ set_Block_non_removable(block);
+ else
+ set_Block_removable(block);
}
/**
* Links all Proj nodes to their predecessors.
* Collects all switch-Conds in a list.
*/
-static void collect_nodes(ir_node *n, void *ctx) {
+static void collect_nodes(ir_node *n, void *ctx)
+{
ir_opcode code = get_irn_opcode(n);
merge_env *env = ctx;
if (code == iro_Block) {
/* mark the block as non-removable if it is labeled */
- if (has_Block_label(n))
+ if (has_Block_entity(n))
set_Block_non_removable(n);
} else {
ir_node *b = get_nodes_block(n);
}
/** Returns true if pred is predecessor of block. */
-static int is_pred_of(ir_node *pred, ir_node *b) {
+static int is_pred_of(ir_node *pred, ir_node *b)
+{
int i;
for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
* To perform the test for pos, we must regard predecessors before pos
* as already removed.
**/
-static int test_whether_dispensable(ir_node *b, int pos) {
+static int test_whether_dispensable(ir_node *b, int pos)
+{
int i, j, n_preds = 1;
ir_node *pred = get_Block_cfgpred_block(b, pos);
* @@@ It is negotiable whether we should do this ... there might end up a copy
* from the Phi in the loop when removing the Phis.
*/
-static void optimize_blocks(ir_node *b, void *ctx) {
+static void optimize_blocks(ir_node *b, void *ctx)
+{
int i, j, k, n, max_preds, n_preds, p_preds = -1;
- ir_node *pred, *phi;
+ ir_node *pred, *phi, *next;
ir_node **in;
merge_env *env = ctx;
in = XMALLOCN(ir_node*, max_preds);
/*- Fix the Phi nodes of the current block -*/
- for (phi = get_irn_link(b); phi; ) {
- assert(get_irn_op(phi) == op_Phi);
+ for (phi = get_irn_link(b); phi != NULL; phi = next) {
+ assert(is_Phi(phi));
+ next = get_irn_link(phi);
/* Find the new predecessors for the Phi */
p_preds = 0;
if (! is_Bad(get_Block_cfgpred(pred, j))) {
if (get_nodes_block(phi_pred) == pred) {
/* case Phi 2a: */
- assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ assert(is_Phi(phi_pred)); /* Block is empty!! */
in[p_preds++] = get_Phi_pred(phi_pred, j);
} else {
else
set_irn_in(phi, p_preds, in);
env->changed = 1;
-
- phi = get_irn_link(phi);
}
/*- This happens only on a merge between a loop backedge and a single loop entry.
/* case 1: Do nothing */
} else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
/* case 2: It's an empty block and not yet visited. */
- assert(get_Block_n_cfgpreds(b) > 1);
+ assert(get_Block_n_cfgpreds(b) > 1 || has_Block_entity(b));
/* Else it should be optimized by equivalent_node. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
ir_node *pred_X = get_Block_cfgpred(pred, j);
* Block walker: optimize all blocks using the default optimizations.
* This removes Blocks with only a Jmp predecessor.
*/
-static void remove_simple_blocks(ir_node *block, void *ctx) {
+static void remove_simple_blocks(ir_node *block, void *ctx)
+{
ir_node *new_blk = equivalent_node(block);
merge_env *env = ctx;
*
* Expects all Proj's linked to the cond node
*/
-static int handle_switch_cond(ir_node *cond) {
+static int handle_switch_cond(ir_node *cond)
+{
ir_node *sel = get_Cond_selector(cond);
ir_node *proj1 = get_irn_link(cond);
if (proj2 == NULL) {
/* this Cond has only one Proj: must be the defProj */
- assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
+ assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
/* convert it into a Jmp */
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
return 1;
} else if (get_irn_link(proj2) == NULL) {
if (tv != tarval_bad) {
/* we have a constant switch */
long num = get_tarval_long(tv);
- long def_num = get_Cond_defaultProj(cond);
+ long def_num = get_Cond_default_proj(cond);
if (def_num == get_Proj_proj(proj1)) {
/* first one is the defProj */
if (num == get_Proj_proj(proj2)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj2, jmp);
exchange(proj1, new_Bad());
return 1;
} else if (def_num == get_Proj_proj(proj2)) {
/* second one is the defProj */
if (num == get_Proj_proj(proj1)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
exchange(proj2, new_Bad());
return 1;
} else {
/* neither: strange, Cond was not optimized so far */
if (num == get_Proj_proj(proj1)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj1, jmp);
exchange(proj2, new_Bad());
return 1;
} else if (num == get_Proj_proj(proj2)) {
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
exchange(proj2, jmp);
exchange(proj1, new_Bad());
return 1;
* We use the mark flag to mark removable blocks in the first
* phase.
*/
-void optimize_cf(ir_graph *irg) {
- int i, j, n;
+void optimize_cf(ir_graph *irg)
+{
+ int i, j, n, changed;
ir_node **in = NULL;
ir_node *cond, *end = get_irg_end(irg);
ir_graph *rem = current_ir_graph;
irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
/* fix the keep-alives */
+ changed = 0;
for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
ir_node *ka = get_End_keepalive(end, i);
/* do NOT keep dead blocks */
if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
set_End_keepalive(end, i, new_Bad());
- env.changed = 1;
+ changed = 1;
+ }
+ } else {
+ ir_node *block = get_nodes_block(ka);
+
+ if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+ /* do NOT keep nodes in dead blocks */
+ set_End_keepalive(end, i, new_Bad());
+ changed = 1;
}
- } else if (is_Block_dead(get_nodes_block(ka)) ||
- get_Block_dom_depth(get_nodes_block(ka)) < 0) {
- /* do NOT keep nodes in dead blocks */
- set_End_keepalive(end, i, new_Bad());
- env.changed = 1;
}
}
+ env.changed |= changed;
ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
set_irg_doms_inconsistent(irg);
set_irg_extblk_inconsistent(irg);
set_irg_loopinfo_inconsistent(irg);
+ set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
env.changed = 0;
}
/* Optimize the standard code. */
env.changed = 0;
assure_doms(irg);
- irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
-
- /* Walk all keep alives, optimize them if block, add to new in-array
- for end if useful. */
- n = get_End_n_keepalives(end);
- if (n > 0)
- NEW_ARR_A(ir_node *, in, n);
+ irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
/* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
- inc_irg_visited(irg);
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
+ inc_irg_visited(irg);
- /* fix the keep alive */
- for (i = j = 0; i < n; i++) {
+ /* fix the keep-alives again */
+ changed = 0;
+ for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
ir_node *ka = get_End_keepalive(end, i);
- if (!irn_visited(ka)) {
- if (is_Block(ka) && !Block_block_visited(ka)) {
- /* irg_block_walk() will increase the block visited flag, but we must visit only
- these blocks that are not visited yet, so decrease it first. */
- set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
- irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
- mark_irn_visited(ka);
- in[j++] = ka;
- } else {
- mark_irn_visited(ka);
- /* don't keep alive dead blocks */
- if (!is_Bad(ka) && !is_Block_dead(get_nodes_block(ka)))
- in[j++] = ka;
+ if (is_Block(ka)) {
+ /* do NOT keep dead blocks */
+ if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
+ set_End_keepalive(end, i, new_Bad());
+ changed = 1;
+ }
+ } else {
+ ir_node *block = get_nodes_block(ka);
+
+ if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+ /* do NOT keep nodes in dead blocks */
+ set_End_keepalive(end, i, new_Bad());
+ changed = 1;
}
}
}
- if (j != n) {
- set_End_keepalives(end, j, in);
- env.changed = 1;
- }
+ env.changed |= changed;
+
+ remove_End_Bads_and_doublets(end);
+
ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
*/
n = get_End_n_keepalives(end);
if (n > 0) {
+ NEW_ARR_A(ir_node *, in, n);
if (env.changed) {
/* Handle graph state if was changed. */
set_irg_outs_inconsistent(irg);
set_irg_doms_inconsistent(irg);
set_irg_extblk_inconsistent(irg);
set_irg_loopinfo_inconsistent(irg);
+ set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
}
if (get_irg_pinned(irg) == op_pin_state_pinned) {
/* after optimize_cf(), only Bad data flow may remain. */
if (irg_vrfy_bads(irg, BAD_DF | BAD_BLOCK | TUPLE)) {
- dump_ir_block_graph(irg, "-vrfy-cf");
dump_ir_graph(irg, "-vrfy-cf");
fprintf(stderr, "VRFY_BAD in optimize_cf()\n");
}
current_ir_graph = rem;
}
+
+/* Creates an ir_graph pass for optimize_cf. */
+ir_graph_pass_t *optimize_cf_pass(const char *name)
+{
+ return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
+} /* optimize_cf_pass */