int n_preds;
ir_graph *irg;
- /* don't optimize dead or labeled blocks */
+ /* don't optimize labeled blocks */
if (has_Block_entity(n))
return n;
+ if (!get_Block_matured(n))
+ return n;
n_preds = get_Block_n_cfgpreds(n);
- /* The Block constructor does not call optimize, but mature_immBlock()
- calls the optimization. */
- assert(get_Block_matured(n));
-
irg = get_irn_irg(n);
/* Straightening: a single entry Block following a single exit Block
DBG_OPT_IFSIM1(oldn, a, b, n);
}
}
- } else if (is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
- int i;
- int n_cfgpreds = get_Block_n_cfgpreds(n);
-
- for (i = 0; i < n_cfgpreds; ++i) {
- ir_node *pred = get_Block_cfgpred(n, i);
- if (!is_Bad(pred))
- break;
- }
- /* only bad unreachable inputs? It's unreachable code (unless it is the
- * start or end block) */
- if (i >= n_cfgpreds && n != get_irg_start_block(irg)
- && n != get_irg_end_block(irg)) {
- return get_irg_bad(irg);
- }
}
return n;
}
}
- if (i >= n_preds) {
- ir_graph *irg = get_irn_irg(n);
- /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
- return get_irg_bad(irg);
- }
-
/* search for rest of inputs, determine if any of these
are non-self-referencing */
while (++i < n_preds) {
return n;
} /* equivalent_node_Phi */
-/**
- * Several optimizations:
- * - fold Sync-nodes, iff they have only one predecessor except
- * themselves.
- */
-static ir_node *equivalent_node_Sync(ir_node *n)
-{
- int arity = get_Sync_n_preds(n);
- int i;
-
- for (i = 0; i < arity;) {
- ir_node *pred = get_Sync_pred(n, i);
- int j;
-
- /* Remove Bad predecessors */
- if (is_Bad(pred)) {
- del_Sync_n(n, i);
- --arity;
- continue;
- }
-
- /* Remove duplicate predecessors */
- for (j = 0;; ++j) {
- if (j >= i) {
- ++i;
- break;
- }
- if (get_Sync_pred(n, j) == pred) {
- del_Sync_n(n, i);
- --arity;
- break;
- }
- }
- }
-
- if (arity == 0) {
- ir_graph *irg = get_irn_irg(n);
- return get_irg_bad(irg);
- }
- if (arity == 1) return get_Sync_pred(n, 0);
- return n;
-} /* equivalent_node_Sync */
-
/**
* Optimize Proj(Tuple).
*/
proj = get_CopyB_mem(copyb);
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
-
- case pn_CopyB_X_except: {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(irg);
- break;
- }
}
}
return proj;
DBG_OPT_EXC_REM(proj);
proj = get_Bound_mem(bound);
break;
- case pn_Bound_X_except: {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(irg);
- break;
- }
case pn_Bound_res:
proj = idx;
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
return proj;
} /* equivalent_node_Proj_Bound */
-/**
- * Optimize an Exception Proj(Load) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Load(ir_node *proj)
-{
- if (get_opt_ldst_only_null_ptr_exceptions()) {
- if (get_irn_mode(proj) == mode_X) {
- ir_node *load = get_Proj_pred(proj);
-
- /* get the Load address */
- const ir_node *addr = get_Load_ptr(load);
- const ir_node *confirm;
-
- if (value_not_null(addr, &confirm)) {
- if (get_Proj_proj(proj) == pn_Load_X_except) {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
- }
- }
- }
- }
- return proj;
-} /* equivalent_node_Proj_Load */
-
-/**
- * Optimize an Exception Proj(Store) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Store(ir_node *proj)
-{
- if (get_opt_ldst_only_null_ptr_exceptions()) {
- if (get_irn_mode(proj) == mode_X) {
- ir_node *store = get_Proj_pred(proj);
-
- /* get the load/store address */
- const ir_node *addr = get_Store_ptr(store);
- const ir_node *confirm;
-
- if (value_not_null(addr, &confirm)) {
- if (get_Proj_proj(proj) == pn_Store_X_except) {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
- }
- }
- }
- }
- return proj;
-} /* equivalent_node_Proj_Store */
-
/**
* Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
CASE(And);
CASE(Conv);
CASE(Phi);
- CASE(Sync);
CASE_PROJ(Tuple);
CASE_PROJ(Div);
CASE_PROJ(CopyB);
CASE_PROJ(Bound);
- CASE_PROJ(Load);
- CASE_PROJ(Store);
CASE(Proj);
CASE(Id);
CASE(Mux);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Div_X_except, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Div_res, value);
}
return n;
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
jmp = new_r_Jmp(blk);
turn_into_tuple(n, pn_Cond_max);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, pn_Cond_false, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
set_Tuple_pred(n, pn_Cond_false, jmp);
- set_Tuple_pred(n, pn_Cond_true, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(irg), blk);
if (get_Proj_proj(proj) == pn_Load_X_except) {
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
} else {
ir_node *blk = get_nodes_block(load);
return new_r_Jmp(blk);
if (get_Proj_proj(proj) == pn_Store_X_except) {
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
} else {
ir_node *blk = get_nodes_block(store);
return new_r_Jmp(blk);
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
case pn_Div_M: {
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
case pn_Mod_M: {
} else {
ir_graph *irg = get_irn_irg(proj);
/* this case will NEVER be taken, kill it */
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
}
} else {
ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
- if ((cmp_result & ir_relation_greater) == cmp_result && (cmp_result2
- & ir_relation_less) == cmp_result2) {
+ if ((cmp_result & ir_relation_greater) == cmp_result
+ && (cmp_result2 & ir_relation_less) == cmp_result2) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
} else if (b_vrp->range_type == VRP_ANTIRANGE) {
ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
- if ((cmp_result & ir_relation_less_equal) == cmp_result && (cmp_result2
- & ir_relation_greater_equal) == cmp_result2) {
+ if ((cmp_result & ir_relation_less_equal) == cmp_result
+ && (cmp_result2 & ir_relation_greater_equal) == cmp_result2) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
}
b_vrp->bits_set
) == ir_relation_equal)) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
if (!(tarval_cmp(
tarval_not(b_vrp->bits_not_set))
== ir_relation_equal)) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
-
-
}
}
}
/* the following reassociations work only for == and != */
if (relation == ir_relation_equal || relation == ir_relation_less_greater) {
-
-#if 0 /* Might be not that good in general */
- /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
- if (tarval_is_null(tv) && is_Sub(left)) {
- right = get_Sub_right(left);
- left = get_Sub_left(left);
-
- tv = value_of(right);
- changed = 1;
- DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
- }
-#endif
-
if (tv != tarval_bad) {
/* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
if (is_Sub(left)) {
DBG_OPT_EXC_REM(proj);
proj = new_r_Jmp(get_nodes_block(copyb));
break;
- case pn_CopyB_X_except:
+ case pn_CopyB_X_except: {
+ ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(get_irn_irg(proj));
+ proj = new_r_Bad(irg, mode_X);
break;
+ }
default:
break;
}
break;
case pn_Bound_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(get_irn_irg(proj));
+ proj = new_r_Bad(get_irn_irg(proj), mode_X);
break;
case pn_Bound_res:
proj = idx;
return proj;
} /* transform_node_Proj */
+/**
+ * Returns true if @p block is unreachable code.
+ *
+ * Only meaningful while the graph is allowed to contain Bad blocks
+ * (IR_GRAPH_STATE_BAD_BLOCK); otherwise every block is reported reachable.
+ * NOTE(review): assumes blocks not visited by the dominance computation
+ * have a dom depth < 0 — verify against the dominance walker.
+ */
+static bool is_block_unreachable(const ir_node *block)
+{
+	const ir_graph *irg = get_irn_irg(block);
+	if (!is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
+		return false;
+	return get_Block_dom_depth(block) < 0;
+}
+
+/**
+ * Transforms a Block node: replaces control-flow predecessors that are Bad
+ * or originate in an unreachable block by a Bad (mode_X) node.
+ *
+ * Only performed when the graph may contain Bad blocks
+ * (IR_GRAPH_STATE_BAD_BLOCK); the block node itself is always returned.
+ */
+static ir_node *transform_node_Block(ir_node *block)
+{
+	ir_graph *irg = get_irn_irg(block);
+	int arity = get_irn_arity(block);
+	ir_node *bad = NULL;
+	int i;
+
+	if (!is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
+		return block;
+
+	for (i = 0; i < arity; ++i) {
+		ir_node *pred = get_Block_cfgpred(block, i);
+		/* NOTE(review): pred_block is fetched even when pred is Bad —
+		 * assumes get_nodes_block() is safe on Bad nodes; verify. */
+		ir_node *pred_block = get_nodes_block(pred);
+		if (!is_Bad(pred) && !is_block_unreachable(pred_block))
+			continue;
+		/* lazily create a single Bad node shared by all dead inputs */
+		if (bad == NULL)
+			bad = new_r_Bad(irg, mode_X);
+		set_irn_n(block, i, bad);
+	}
+
+	return block;
+}
+
static ir_node *transform_node_Phi(ir_node *phi)
{
int n = get_irn_arity(phi);
ir_mode *mode = get_irn_mode(phi);
ir_node *block = get_nodes_block(phi);
ir_graph *irg = get_irn_irg(phi);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = NULL;
int i;
/* Set phi-operands for bad-block inputs to bad */
for (i = 0; i < n; ++i) {
ir_node *pred = get_Block_cfgpred(block, i);
- if (!is_Bad(pred))
- continue;
- set_irn_n(phi, i, bad);
+ if (is_Bad(pred) || is_block_unreachable(get_nodes_block(pred))) {
+ if (bad == NULL)
+ bad = new_r_Bad(irg, mode);
+ set_irn_n(phi, i, bad);
+ }
}
/* Move Confirms down through Phi nodes. */
for (i = j = 0; i < n_keepalives; ++i) {
ir_node *ka = get_End_keepalive(n, i);
- if (is_Bad(ka)) {
- /* no need to keep Bad */
+ ir_node *block;
+ /* no need to keep Bad */
+ if (is_Bad(ka))
+ continue;
+	/* don't keep unreachable code */
+ block = is_Block(ka) ? ka : get_nodes_block(ka);
+ if (is_block_unreachable(block))
continue;
- }
in[j++] = ka;
}
if (j != n_keepalives)
int pred_arity;
int j;
+ /* Remove Bad predecessors */
+ if (is_Bad(pred)) {
+ del_Sync_n(n, i);
+ --arity;
+ continue;
+ }
+
+ /* Remove duplicate predecessors */
+ for (j = 0; j < i; ++j) {
+ if (get_Sync_pred(n, j) == pred) {
+ del_Sync_n(n, i);
+ --arity;
+ break;
+ }
+ }
+ if (j < i)
+ continue;
+
if (!is_Sync(pred)) {
++i;
continue;
}
}
+ if (arity == 0) {
+ ir_graph *irg = get_irn_irg(n);
+ return new_r_Bad(irg, mode_M);
+ }
+ if (arity == 1) {
+ return get_Sync_pred(n, 0);
+ }
+
/* rehash the sync node */
add_identities(n);
-
return n;
-} /* transform_node_Sync */
+}
static ir_node *transform_node_Load(ir_node *n)
{
ir_node *block = get_nodes_block(n);
ir_node *jmp = new_r_Jmp(block);
ir_graph *irg = get_irn_irg(n);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
ir_mode *mode = get_Load_mode(n);
ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
ir_node *block = get_nodes_block(n);
ir_node *jmp = new_r_Jmp(block);
ir_graph *irg = get_irn_irg(n);
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
ir_node *res = value;
ir_node *in[pn_Load_max] = { mem, jmp, bad, res };
ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
switch (code) {
CASE(Add);
CASE(And);
+ CASE(Block);
CASE(Call);
CASE(Cmp);
CASE(Conv);
current_ir_graph = rem;
} /* visit_all_identities */
-/**
- * Garbage in, garbage out. If a node has a dead input, i.e., the
- * Bad node is input to the node, return the Bad node.
- */
-static ir_node *gigo(ir_node *node)
-{
- ir_op *op = get_irn_op(node);
-
- /* Code in "Bad" blocks is unreachable and can be replaced by Bad */
- if (op != op_Block && is_Bad(get_nodes_block(node))) {
- ir_graph *irg = get_irn_irg(node);
- return get_irg_bad(irg);
- }
-
- /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
- blocks predecessors is dead. */
- if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor
- && op != op_Sync && op != op_End) {
- ir_graph *irg = get_irn_irg(node);
- int irn_arity = get_irn_arity(node);
- int i;
-
- for (i = 0; i < irn_arity; i++) {
- ir_node *pred = get_irn_n(node, i);
-
- if (is_Bad(pred)) {
- /* be careful not to kill cfopts too early or we might violate
- * the 1 cfop per block property */
- if (!is_cfop(node)
- || is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
- return get_irg_bad(irg);
- }
- }
- }
-
- return node;
-}
-
/**
* These optimizations deallocate nodes from the obstack.
* It can only be called if it is guaranteed that no other nodes
/* Always optimize Phi nodes: part of the construction. */
if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
- /* Remove nodes with dead (Bad) input.
- Run always for transformation induced Bads. */
- n = gigo(n);
- if (n != oldn) {
- edges_node_deleted(oldn);
-
- /* We found an existing, better node, so we can deallocate the old node. */
- irg_kill_node(irg, oldn);
- return n;
- }
-
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (iro == iro_Deleted)
return n;
- /* Remove nodes with dead (Bad) input.
- Run always for transformation induced Bads. */
- n = gigo(n);
- if (is_Bad(n))
- return n;
-
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (get_opt_global_cse())
set_irg_pinned(irg, op_pin_state_floats);
- if (get_irg_outs_state(irg) == outs_consistent)
- set_irg_outs_inconsistent(irg);
/* FIXME: Maybe we could also test whether optimizing the node can
change the control graph. */