} /* computed_value_Not */
/**
- * Tests wether a shift shifts more bits than available in the mode
+ * Tests whether a shift shifts more bits than available in the mode
*/
static bool is_oversize_shift(const ir_node *n)
{
/**
- * Optimize an "self-inverse unary op", ie op(op(n)) = n.
+ * Optimize a "self-inverse unary op", i.e. op(op(n)) = n.
*
* @todo
* -(-a) == a, but might overflow two times.
* then we can use that to minimize the value of Add(x, const) or
* Sub(Const, x). In particular this often avoids 1 instruction in some
* backends for the Shift(x, Sub(Const, y)) case because it can be replaced
- * by Shift(x, Minus(y)) which doesnt't need an explicit Const constructed.
+ * by Shift(x, Minus(y)) which does not need an explicit Const constructed.
*/
static ir_node *transform_node_shift_modulo(ir_node *n,
new_shift_func new_shift)
/* no need to keep Bad */
if (is_Bad(ka))
continue;
- /* dont keep unreachable code */
+ /* do not keep unreachable code */
block = is_Block(ka) ? ka : get_nodes_block(ka);
if (is_block_unreachable(block))
continue;
/* for pinned nodes, the block inputs must be equal */
if (get_irn_n(a, -1) != get_irn_n(b, -1))
return 1;
- } else if (! get_opt_global_cse()) {
- /* for block-local CSE both nodes must be in the same Block */
- if (get_nodes_block(a) != get_nodes_block(b))
- return 1;
+ } else {
+ ir_node *block_a = get_nodes_block(a);
+ ir_node *block_b = get_nodes_block(b);
+ if (! get_opt_global_cse()) {
+ /* for block-local CSE both nodes must be in the same Block */
+ if (block_a != block_b)
+ return 1;
+ } else {
+ /* The optimistic approach would be to do nothing here.
+ * However, doing GCSE optimistically produces a lot of partially dead code, which appears
+ * to be worse in practice than the missed opportunities.
+ * So we use a very conservative variant here and only CSE if one value dominates the
+ * other. */
+ if (!block_dominates(block_a, block_b)
+ && !block_dominates(block_b, block_a))
+ return 1;
+ }
}
/* compare a->in[0..ins] with b->in[0..ins] */