/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
void **res;
ir_node *pred;
ir_graph *irg;
- int i, n = get_irn_arity(a);
+ int i, n;
+ if (get_nodes_block(a) != get_nodes_block(b))
+ return NULL;
+
+ n = get_irn_arity(a);
NEW_ARR_A(void *, res, n);
for (i = 0; i < n; ++i) {
ir_mode *mode = get_irn_mode(n);
if (mode_is_reference(mode)) {
- ir_node *left = get_binop_left(n);
- ir_node *right = get_binop_right(n);
- int ref_bits = get_mode_size_bits(mode);
+ ir_node *left = get_binop_left(n);
+ ir_node *right = get_binop_right(n);
+ unsigned ref_bits = get_mode_size_bits(mode);
if (is_Conv(left)) {
ir_mode *mode = get_irn_mode(left);
- int bits = get_mode_size_bits(mode);
+ unsigned bits = get_mode_size_bits(mode);
if (ref_bits == bits &&
mode_is_int(mode) &&
if (is_Conv(right)) {
ir_mode *mode = get_irn_mode(right);
- int bits = get_mode_size_bits(mode);
+ unsigned bits = get_mode_size_bits(mode);
if (ref_bits == bits &&
mode_is_int(mode) &&
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
return n;
}
- if (! is_reassoc_running()) {
- /* do NOT execute this code if reassociation is enabled, it does the inverse! */
- if (is_Mul(a)) {
- ir_node *ma = get_Mul_left(a);
- ir_node *mb = get_Mul_right(a);
-
- if (b == ma) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_rd_Add(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- return n;
- } else if (b == mb) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_rd_Add(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- return n;
- }
- }
- if (is_Mul(b)) {
- ir_node *ma = get_Mul_left(b);
- ir_node *mb = get_Mul_right(b);
-
- if (a == ma) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_rd_Add(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- return n;
- }
- if (a == mb) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_rd_Add(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- return n;
- }
- }
- }
if (get_mode_arithmetic(mode) == irma_twos_complement) {
/* Here we rely on constants be on the RIGHT side */
if (is_Not(a)) {
if (mode == get_irn_mode(b)) {
ir_mode *ma, *mb;
-
- a = get_Conv_op(a);
- b = get_Conv_op(b);
+ ir_node *op_a = get_Conv_op(a);
+ ir_node *op_b = get_Conv_op(b);
/* check if it's allowed to skip the conv */
- ma = get_irn_mode(a);
- mb = get_irn_mode(b);
+ ma = get_irn_mode(op_a);
+ mb = get_irn_mode(op_b);
if (mode_is_reference(ma) && mode_is_reference(mb)) {
/* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
+ a = op_a; b = op_b;
set_Sub_left(n, a);
set_Sub_right(n, b);
ir_mode *smode = get_irn_mode(a);
if (ta == get_mode_one(smode)) {
+ /* (L)1 * (L)b = (L)b */
ir_node *blk = get_irn_n(n, -1);
n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
}
else if (ta == get_mode_minus_one(smode)) {
+ /* (L)-1 * (L)b = (L)b */
ir_node *blk = get_irn_n(n, -1);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, b, smode);
n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
return n;
}
if (tb == get_mode_one(smode)) {
+ /* (L)a * (L)1 = (L)a */
ir_node *blk = get_irn_n(a, -1);
n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
}
else if (tb == get_mode_minus_one(smode)) {
+ /* (L)a * (L)-1 = (L)-a */
ir_node *blk = get_irn_n(n, -1);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, a, smode);
n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
if (is_Const(a)) {
tarval *tv = get_Const_tarval(a);
if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), b, b, mode);
+ /* 2.0 * b = b + b */
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), b, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
return n;
}
else if (is_Const(b)) {
tarval *tv = get_Const_tarval(b);
if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, a, mode);
+ /* a * 2.0 = a + a */
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
return n;
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for Div(Const, Phi) */
va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
- va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+ vb = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
/**
* Optimize Abs(x) into x if x is Confirmed >= 0
* Optimize Abs(x) into -x if x is Confirmed <= 0
+ * Optimize Abs(-x) into Abs(x)
*/
static ir_node *transform_node_Abs(ir_node *n) {
ir_node *c, *oldn = n;
* not run it in the equivalent_node() context.
*/
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- get_irn_n(n, -1), a, mode);
+ get_nodes_block(n), a, mode);
DBG_OPT_CONFIRM(oldn, n);
return n;
DBG_OPT_CONFIRM(oldn, n);
return n;
default:
+ break;
+ }
+ if (is_Minus(a)) {
+ /* Abs(-x) = Abs(x) */
+ mode = get_irn_mode(n);
+ n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph,
+ get_nodes_block(n), get_Minus_op(a), mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ABS_MINUS_X);
return n;
}
+ return n;
} /* transform_node_Abs */
/**
ir_node *b_left = get_binop_left(b);
ir_node *b_right = get_binop_right(b);
ir_node *c = NULL;
- ir_node *op1, *op2;
+ ir_node *op1 = NULL;
+ ir_node *op2 = NULL;
if (is_op_commutative(op)) {
if (a_left == b_left) {
if (is_Const(c)) {
tarval *tv = get_Const_tarval(c);
- if (tarval_is_long(tv) && get_tarval_long(tv) == get_mode_size_bits(mode) - 1) {
+ if (tarval_is_long(tv) && get_tarval_long(tv) == (int) get_mode_size_bits(mode) - 1) {
/* -(a >>u (size-1)) = a >>s (size-1) */
ir_node *v = get_Shr_left(a);
if (is_Const(c)) {
tarval *tv = get_Const_tarval(c);
- if (tarval_is_long(tv) && get_tarval_long(tv) == get_mode_size_bits(mode) - 1) {
+ if (tarval_is_long(tv) && get_tarval_long(tv) == (int) get_mode_size_bits(mode) - 1) {
/* -(a >>s (size-1)) = a >>u (size-1) */
ir_node *v = get_Shrs_left(a);
get_Const_tarval(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
} else if (is_SymConst(pred) && get_SymConst_value_type(pred) != tp) {
- n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred),
- get_SymConst_kind(pred), tp);
+ n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
+ get_SymConst_symbol(pred), get_SymConst_kind(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
}
ir_mode *mode_left = get_irn_mode(op_left);
ir_mode *mode_right = get_irn_mode(op_right);
- if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)) {
+ if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
+ && mode_left != mode_b && mode_right != mode_b) {
ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
}
}
- if (!get_opt_reassociation())
- return proj;
-
/*
* First step: normalize the compare op
* by placing the constant on the right side
return or;
if (get_tarval_long(tv1) + get_tarval_long(tv2)
- != get_mode_size_bits(mode))
+ != (int) get_mode_size_bits(mode))
return or;
/* yet, condition met */
if (! tarval_is_long(tv1))
return or;
- if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+ if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode))
return or;
/* yet, condition met */
if (! tarval_is_long(tv1))
return or;
- if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+ if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode))
return or;
/* yet, condition met */
return c;
}
}
+
+ if (is_Unknown(a)) { /* Conv_A(Unknown_B) -> Unknown_A */
+ ir_mode *mode = get_irn_mode(n);
+ return new_r_Unknown(current_ir_graph, mode);
+ }
+
return n;
} /* transform_node_Conv */
} /* firm_set_default_node_cmp_attr */
/*
- * Compare function for two nodes in the hash table. Gets two
- * nodes as parameters. Returns 0 if the nodes are a cse.
+ * Compare function for two nodes in the value table. Gets two
+ * nodes as parameters. Returns 0 if the nodes are a Common Sub Expression.
*/
int identities_cmp(const void *elt, const void *key) {
- ir_node *a, *b;
+ const ir_node *a = elt;
+ const ir_node *b = key;
int i, irn_arity_a;
- a = (void *)elt;
- b = (void *)key;
-
if (a == b) return 0;
if ((get_irn_op(a) != get_irn_op(b)) ||
(get_irn_mode(a) != get_irn_mode(b))) return 1;
/* compare if a's in and b's in are of equal length */
- irn_arity_a = get_irn_intra_arity (a);
+ irn_arity_a = get_irn_intra_arity(a);
if (irn_arity_a != get_irn_intra_arity(b))
return 1;
- /* for block-local cse and op_pin_state_pinned nodes: */
- if (!get_opt_global_cse() || (get_irn_pinned(a) == op_pin_state_pinned)) {
+ if (get_irn_pinned(a) == op_pin_state_pinned) {
+ /* for pinned nodes, the block inputs must be equal */
if (get_irn_intra_n(a, -1) != get_irn_intra_n(b, -1))
return 1;
+ } else if (! get_opt_global_cse()) {
+ /* for block-local CSE both nodes must be in the same MacroBlock */
+ if (get_irn_MacroBlock(a) != get_irn_MacroBlock(b))
+ return 1;
}
/* compare a->in[0..ins] with b->in[0..ins] */
/**
* Normalize a node by putting constants (and operands with larger
- * node index) on the right
+ * node index) on the right (operator side).
*
* @param n The node to normalize
*/
static void normalize_node(ir_node *n) {
- if (get_opt_reassociation()) {
- if (is_op_commutative(get_irn_op(n))) {
- ir_node *l = get_binop_left(n);
- ir_node *r = get_binop_right(n);
-
- /* For commutative operators perform a OP b == b OP a but keep
- * constants on the RIGHT side. This helps greatly in some
- * optimizations. Moreover we use the idx number to make the form
- * deterministic. */
- if (!operands_are_normalized(l, r)) {
- set_binop_left(n, r);
- set_binop_right(n, l);
- }
+ if (is_op_commutative(get_irn_op(n))) {
+ ir_node *l = get_binop_left(n);
+ ir_node *r = get_binop_right(n);
+
+ /* For commutative operators perform a OP b == b OP a but keep
+ * constants on the RIGHT side. This helps greatly in some
+ * optimizations. Moreover we use the idx number to make the form
+ * deterministic. */
+ if (!operands_are_normalized(l, r)) {
+ set_binop_left(n, r);
+ set_binop_right(n, l);
}
}
} /* normalize_node */
+/**
+ * Update the nodes after a match in the value table. If both nodes have
+ * the same MacroBlock but different Blocks, we must ensure that the node
+ * with the dominating Block (the node that is nearer to the MacroBlock
+ * header) is stored in the table.
+ * Because a MacroBlock has only one "non-exception" flow, we don't need
+ * dominance info here: we know that one block must dominate the other, and
+ * following the only block input will allow us to find it.
+ */
+static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node) {
+ ir_node *known_blk, *new_block, *block, *mbh;
+
+ if (get_opt_global_cse()) {
+ /* Block inputs are meaningless */
+ return;
+ }
+ known_blk = get_irn_n(known_irn, -1);
+ new_block = get_irn_n(new_ir_node, -1);
+ if (known_blk == new_block) {
+ /* already in the same block */
+ return;
+ }
+ /*
+ * We expect the typical case when we built the graph. In that case, the
+ * known_irn is already the upper one, so checking this should be faster.
+ */
+ block = new_block;
+ mbh = get_Block_MacroBlock(new_block);
+ for (;;) {
+ if (block == known_blk) {
+ /* ok, we have found it: known_block dominates new_block as expected */
+ return;
+ }
+ if (block == mbh) {
+ /*
+ * We have reached the MacroBlock header without finding
+ * the known_block. new_block must dominate known_block.
+ * Update known_irn.
+ */
+ set_irn_n(known_irn, -1, new_block);
+ return;
+ }
+ assert(get_Block_n_cfgpreds(block) == 1);
+ block = get_Block_cfgpred_block(block, 0);
+ }
+} /* update_known_irn */
+
/**
* Return the canonical node computing the same value as n.
*
normalize_node(n);
o = pset_find(value_table, n, ir_node_hash(n));
- if (!o) return n;
+ if (o == NULL)
+ return n;
+ update_known_irn(o, n);
DBG_OPT_CSE(n, o);
return o;
* During construction we set the op_pin_state_pinned flag in the graph right when the
* optimization is performed. The flag turning on procedure global cse could
* be changed between two allocations. This way we are safe.
+ *
+ * @param value_table The value table
+ * @param n The node to lookup
*/
static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify(value_table, n);
- if (get_irn_n(old, -1) != get_irn_n(n, -1))
+ if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
set_irg_pinned(current_ir_graph, op_pin_state_floats);
return n;
} /* identify_cons */
* Return the canonical node computing the same value as n.
* Looks up the node in a hash table, enters it in the table
* if it isn't there yet.
+ *
+ * @param value_table the HashSet containing all nodes in the
+ * current IR graph
+ * @param n the node to look up
+ *
+ * @return a node that computes the same value as n or n if no such
+ * node could be found
*/
ir_node *identify_remember(pset *value_table, ir_node *n) {
ir_node *o = NULL;
o = pset_insert(value_table, n, ir_node_hash(n));
if (o != n) {
+ update_known_irn(o, n);
DBG_OPT_CSE(n, o);
}