#include "firm_types.h"
#include "bitfiddle.h"
#include "be.h"
+#include "error.h"
+#include "firmstat_t.h"
#include "entity_t.h"
* We handle it anyway here but the better way would be a
* flag. This would be needed for Pascal for instance.
*/
-static ir_node *equivalent_node_idempotent_unop(ir_node *n)
+static ir_node *equivalent_node_involution(ir_node *n)
{
ir_node *oldn = n;
ir_node *pred = get_unop_op(n);
-
- /* optimize symmetric unop */
if (get_irn_op(pred) == get_irn_op(n)) {
n = get_unop_op(pred);
- DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_IDEM_UNARY);
+ DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_INVOLUTION);
}
return n;
}
ir_mode *n_mode = get_irn_mode(n);
ir_mode *a_mode = get_irn_mode(a);
-restart:
if (n_mode == a_mode) { /* No Conv necessary */
- if (get_Conv_strict(n)) {
- ir_node *p = a;
-
- /* neither Minus nor Confirm change the precision,
- so we can "look-through" */
- for (;;) {
- if (is_Minus(p)) {
- p = get_Minus_op(p);
- } else if (is_Confirm(p)) {
- p = get_Confirm_value(p);
- } else {
- /* stop here */
- break;
- }
- }
- if (is_Conv(p) && get_Conv_strict(p)) {
- /* we known already, that a_mode == n_mode, and neither
- Minus change the mode, so the second Conv
- can be kicked */
- assert(get_irn_mode(p) == n_mode);
- n = a;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
- return n;
- }
- if (is_Proj(p)) {
- ir_node *pred = get_Proj_pred(p);
- if (is_Load(pred)) {
- /* Loads always return with the exact precision of n_mode */
- assert(get_Load_mode(pred) == n_mode);
- n = a;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
- return n;
- }
- if (is_Proj(pred) && get_Proj_proj(pred) == pn_Start_T_args) {
- pred = get_Proj_pred(pred);
- if (is_Start(pred)) {
- /* Arguments always return with the exact precision,
- as strictConv's are place before Call -- if the
- caller was compiled with the same setting.
- Otherwise, the semantics is probably still right. */
- assert(get_irn_mode(p) == n_mode);
- n = a;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
- return n;
- }
- }
- }
- if (is_Conv(a)) {
- /* special case: the immediate predecessor is also a Conv */
- if (! get_Conv_strict(a)) {
- /* first one is not strict, kick it */
- a = get_Conv_op(a);
- a_mode = get_irn_mode(a);
- set_Conv_op(n, a);
- goto restart;
- }
- /* else both are strict conv, second is superfluous */
- n = a;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
- return n;
- }
- } else {
- n = a;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
- return n;
- }
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
+ return n;
} else if (is_Conv(a)) { /* Conv(Conv(b)) */
ir_node *b = get_Conv_op(a);
ir_mode *b_mode = get_irn_mode(b);
- if (get_Conv_strict(n) && get_Conv_strict(a)) {
- /* both are strict conv */
- if (smaller_mode(a_mode, n_mode)) {
- /* both are strict, but the first is smaller, so
- the second cannot remove more precision, remove the
- strict bit */
- set_Conv_strict(n, 0);
- }
- }
- if (n_mode == b_mode) {
- if (! get_Conv_strict(n) && ! get_Conv_strict(a)) {
- if (n_mode == mode_b) {
- n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- return n;
- } else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) {
- if (values_in_mode(b_mode, a_mode)) {
- n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- return n;
- }
- }
- }
- if (mode_is_int(n_mode) && get_mode_arithmetic(a_mode) == irma_ieee754) {
- /* ConvI(ConvF(I)) -> I, iff float mantissa >= int mode */
- unsigned int_mantissa = get_mode_size_bits(n_mode) - (mode_is_signed(n_mode) ? 1 : 0);
- unsigned float_mantissa = get_mode_mantissa_size(a_mode);
-
- if (float_mantissa >= int_mantissa) {
- n = b;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- return n;
- }
- }
- if (is_Conv(b)) {
- if (smaller_mode(b_mode, a_mode)) {
- if (get_Conv_strict(n))
- set_Conv_strict(b, 1);
- n = b; /* ConvA(ConvB(ConvA(...))) == ConvA(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- return n;
- }
- }
+ if (n_mode == b_mode && values_in_mode(b_mode, a_mode)) {
+ n = b;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ return n;
}
}
return n;
ir_node *first_val = NULL; /* to shutup gcc */
if (!get_opt_optimize() &&
- get_irg_phase_state(get_irn_irg(n)) != phase_building)
+ !irg_is_constrained(get_irn_irg(n), IR_GRAPH_CONSTRAINT_CONSTRUCTION))
return n;
n_preds = get_Phi_n_preds(n);
*/
static ir_node *apply_binop_on_phi(ir_node *phi, ir_tarval *other, eval_func eval, ir_mode *mode, int left)
{
- ir_tarval *tv;
- void **res;
- ir_node *pred;
- ir_graph *irg;
- int i, n = get_irn_arity(phi);
-
- NEW_ARR_A(void *, res, n);
+ int n = get_irn_arity(phi);
+ ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
if (left) {
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- tv = get_Const_tarval(pred);
- tv = do_eval(eval, other, tv, mode);
+ for (int i = 0; i < n; ++i) {
+ ir_node *pred = get_irn_n(phi, i);
+ ir_tarval *tv = get_Const_tarval(pred);
+ tv = do_eval(eval, other, tv, mode);
if (tv == tarval_bad) {
/* folding failed, bad */
return NULL;
}
- res[i] = tv;
+ tvs[i] = tv;
}
} else {
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- tv = get_Const_tarval(pred);
- tv = do_eval(eval, tv, other, mode);
+ for (int i = 0; i < n; ++i) {
+ ir_node *pred = get_irn_n(phi, i);
+ ir_tarval *tv = get_Const_tarval(pred);
+ tv = do_eval(eval, tv, other, mode);
if (tv == tarval_bad) {
/* folding failed, bad */
return 0;
}
- res[i] = tv;
+ tvs[i] = tv;
}
}
- irg = get_irn_irg(phi);
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+ ir_graph *irg = get_irn_irg(phi);
+ ir_node **res = ALLOCAN(ir_node*, n);
+ for (int i = 0; i < n; ++i) {
+ res[i] = new_r_Const(irg, tvs[i]);
}
- return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
+ ir_node *block = get_nodes_block(phi);
+ return new_r_Phi(block, n, res, mode);
}
/**
*/
static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode)
{
- ir_tarval *tv_l, *tv_r, *tv;
- void **res;
- ir_node *pred;
- ir_graph *irg;
- int i, n;
-
if (get_nodes_block(a) != get_nodes_block(b))
return NULL;
- n = get_irn_arity(a);
- NEW_ARR_A(void *, res, n);
-
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(a, i);
- tv_l = get_Const_tarval(pred);
- pred = get_irn_n(b, i);
- tv_r = get_Const_tarval(pred);
- tv = do_eval(eval, tv_l, tv_r, mode);
+ int n = get_irn_arity(a);
+ ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
+ for (int i = 0; i < n; ++i) {
+ ir_node *pred_a = get_irn_n(a, i);
+ ir_tarval *tv_l = get_Const_tarval(pred_a);
+ ir_node *pred_b = get_irn_n(b, i);
+ ir_tarval *tv_r = get_Const_tarval(pred_b);
+ ir_tarval *tv = do_eval(eval, tv_l, tv_r, mode);
if (tv == tarval_bad) {
/* folding failed, bad */
return NULL;
}
- res[i] = tv;
+ tvs[i] = tv;
}
- irg = get_irn_irg(a);
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(a, i);
- res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+ ir_graph *irg = get_irn_irg(a);
+ ir_node **res = ALLOCAN(ir_node*, n);
+ for (int i = 0; i < n; ++i) {
+ res[i] = new_r_Const(irg, tvs[i]);
}
- return new_r_Phi(get_nodes_block(a), n, (ir_node **)res, mode);
+ ir_node *block = get_nodes_block(a);
+ return new_r_Phi(block, n, res, mode);
}
/**
*/
static ir_node *apply_unop_on_phi(ir_node *phi, ir_tarval *(*eval)(ir_tarval *))
{
- ir_tarval *tv;
- void **res;
- ir_node *pred;
- ir_mode *mode;
- ir_graph *irg;
- int i, n = get_irn_arity(phi);
-
- NEW_ARR_A(void *, res, n);
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- tv = get_Const_tarval(pred);
- tv = eval(tv);
+ int n = get_irn_arity(phi);
+ ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
+ for (int i = 0; i < n; ++i) {
+ ir_node *pred = get_irn_n(phi, i);
+ ir_tarval *tv = get_Const_tarval(pred);
+ tv = eval(tv);
if (tv == tarval_bad) {
/* folding failed, bad */
return 0;
}
- res[i] = tv;
+ tvs[i] = tv;
}
- mode = get_irn_mode(phi);
- irg = get_irn_irg(phi);
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+ ir_graph *irg = get_irn_irg(phi);
+ ir_node **res = ALLOCAN(ir_node*, n);
+ for (int i = 0; i < n; ++i) {
+ res[i] = new_r_Const(irg, tvs[i]);
}
- return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
+ ir_node *block = get_nodes_block(phi);
+ ir_mode *mode = get_irn_mode(phi);
+ return new_r_Phi(block, n, res, mode);
}
/**
*/
static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode)
{
- ir_tarval *tv;
- void **res;
- ir_node *pred;
- ir_graph *irg;
- int i, n = get_irn_arity(phi);
-
- NEW_ARR_A(void *, res, n);
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- tv = get_Const_tarval(pred);
- tv = tarval_convert_to(tv, mode);
+ int n = get_irn_arity(phi);
+ ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
+ for (int i = 0; i < n; ++i) {
+ ir_node *pred = get_irn_n(phi, i);
+ ir_tarval *tv = get_Const_tarval(pred);
+ tv = tarval_convert_to(tv, mode);
if (tv == tarval_bad) {
/* folding failed, bad */
return 0;
}
- res[i] = tv;
+ tvs[i] = tv;
}
- irg = get_irn_irg(phi);
- for (i = 0; i < n; ++i) {
- pred = get_irn_n(phi, i);
- res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+ ir_graph *irg = get_irn_irg(phi);
+ ir_node **res = ALLOCAN(ir_node*, n);
+ for (int i = 0; i < n; ++i) {
+ res[i] = new_r_Const(irg, tvs[i]);
}
- return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
+ ir_node *block = get_nodes_block(phi);
+ return new_r_Phi(block, n, res, mode);
}
/**
}
/* ok, all conditions met */
- block = get_irn_n(irn_or, -1);
+ block = get_nodes_block(irn_or);
irg = get_irn_irg(block);
new_and = new_r_And(block, value, new_r_Const(irg, tarval_and(tv4, tv2)), mode);
}
if (tb == get_mode_one(smode)) {
/* (L)a * (L)1 = (L)a */
- ir_node *blk = get_irn_n(a, -1);
+ ir_node *blk = get_nodes_block(a);
n = new_rd_Conv(get_irn_dbg_info(n), blk, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
set_Tuple_pred(n, pn_Cond_false, jmp);
set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
}
- /* We might generate an endless loop, so keep it alive. */
- add_End_keepalive(get_irg_end(irg), blk);
clear_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
}
return n;
switch (proj_nr) {
case pn_Mod_X_regular:
- return new_r_Jmp(get_irn_n(mod, -1));
+ return new_r_Jmp(get_nodes_block(mod));
case pn_Mod_X_except: {
ir_graph *irg = get_irn_irg(proj);
}
/* Remove unnecessary conversions */
- if (is_Conv(left) && is_Conv(right)) {
- ir_node *op_left = get_Conv_op(left);
- ir_node *op_right = get_Conv_op(right);
- ir_mode *mode_left = get_irn_mode(op_left);
- ir_mode *mode_right = get_irn_mode(op_right);
-
- if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
- && mode_left != mode_b && mode_right != mode_b) {
- ir_node *block = get_nodes_block(n);
-
- if (mode_left == mode_right) {
- left = op_left;
- right = op_right;
- changed = true;
- DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
- } else if (smaller_mode(mode_left, mode_right)) {
- left = new_r_Conv(block, op_left, mode_right);
- right = op_right;
- changed = true;
- DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
- } else if (smaller_mode(mode_right, mode_left)) {
- left = op_left;
- right = new_r_Conv(block, op_right, mode_left);
- changed = true;
- DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ if (!mode_is_float(mode)
+ || be_get_backend_param()->mode_float_arithmetic == NULL) {
+ if (is_Conv(left) && is_Conv(right)) {
+ ir_node *op_left = get_Conv_op(left);
+ ir_node *op_right = get_Conv_op(right);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ ir_mode *mode_right = get_irn_mode(op_right);
+
+ if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
+ && mode_left != mode_b && mode_right != mode_b) {
+ ir_node *block = get_nodes_block(n);
+
+ if (mode_left == mode_right) {
+ left = op_left;
+ right = op_right;
+ changed = true;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
+ } else if (smaller_mode(mode_left, mode_right)) {
+ left = new_r_Conv(block, op_left, mode_right);
+ right = op_right;
+ changed = true;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ } else if (smaller_mode(mode_right, mode_left)) {
+ left = op_left;
+ right = new_r_Conv(block, op_right, mode_left);
+ changed = true;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ }
+ mode = get_irn_mode(left);
}
- mode = get_irn_mode(left);
- }
- }
- if (is_Conv(left) && is_Const(right)) {
- ir_node *op_left = get_Conv_op(left);
- ir_mode *mode_left = get_irn_mode(op_left);
- if (smaller_mode(mode_left, mode) && mode_left != mode_b) {
- ir_tarval *tv = get_Const_tarval(right);
- tarval_int_overflow_mode_t last_mode
- = tarval_get_integer_overflow_mode();
- ir_tarval *new_tv;
- tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD);
- new_tv = tarval_convert_to(tv, mode_left);
- tarval_set_integer_overflow_mode(last_mode);
- if (new_tv != tarval_bad) {
- ir_graph *irg = get_irn_irg(n);
- left = op_left;
- right = new_r_Const(irg, new_tv);
- mode = get_irn_mode(left);
- changed = true;
- DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ }
+ if (is_Conv(left) && is_Const(right)) {
+ ir_node *op_left = get_Conv_op(left);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ if (smaller_mode(mode_left, mode) && mode_left != mode_b) {
+ ir_tarval *tv = get_Const_tarval(right);
+ tarval_int_overflow_mode_t last_mode
+ = tarval_get_integer_overflow_mode();
+ ir_tarval *new_tv;
+ tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD);
+ new_tv = tarval_convert_to(tv, mode_left);
+ tarval_set_integer_overflow_mode(last_mode);
+ if (new_tv != tarval_bad) {
+ ir_graph *irg = get_irn_irg(n);
+ left = op_left;
+ right = new_r_Const(irg, new_tv);
+ mode = get_irn_mode(left);
+ changed = true;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ }
}
}
}
if (tarval_is_single_bit(tv)) {
/* special case: (x % 2^n) CMP 0 ==> x & (2^n-1) CMP 0 */
ir_node *v = get_binop_left(op);
- ir_node *blk = get_irn_n(op, -1);
+ ir_node *blk = get_nodes_block(op);
ir_graph *irg = get_irn_irg(op);
ir_mode *mode = get_irn_mode(v);
return phi;
/* Move the Pin nodes "behind" the Phi. */
- block = get_irn_n(phi, -1);
new_phi = new_r_Phi(block, n, in, mode_M);
return new_r_Pin(block, new_phi);
}
/* Beware of Phi0 */
if (n > 0) {
ir_node *pred = get_irn_n(phi, 0);
- ir_node *bound, *new_phi, *block, **in;
+ ir_node *bound, *new_phi, **in;
ir_relation relation;
bool has_confirm = false;
return phi;
/* move the Confirm nodes "behind" the Phi */
- block = get_irn_n(phi, -1);
new_phi = new_r_Phi(block, n, in, get_irn_mode(phi));
return new_r_Confirm(block, new_phi, bound, relation);
}
return n;
}
+/**
+ * Returns the number of significand bits a value of mode @p mode provides;
+ * helper for may_leave_out_middle_conv().
+ */
+static unsigned get_significand_size(ir_mode *mode)
+{
+ const ir_mode_arithmetic arithmetic = get_mode_arithmetic(mode);
+ switch (arithmetic) {
+ case irma_ieee754:
+ case irma_x86_extended_float:
+ /* mantissa plus one bit for the significand's integer part */
+ return get_mode_mantissa_size(mode) + 1;
+ case irma_twos_complement:
+ /* for integer modes every bit contributes to the value */
+ return get_mode_size_bits(mode);
+ case irma_none:
+ panic("Conv node with irma_none mode?");
+ }
+ panic("unexpected mode_arithmetic in get_significand_size");
+}
+
+/**
+ * Returns true if converting a value directly from mode @p m0 to @p m2 has
+ * the same effect as converting it from @p m0 to @p m1 and then to @p m2,
+ * i.e. the middle conversion may be left out.
+ * Classifying the 3 modes as the big(b), middle(m) and small(s) mode this
+ * gives the following truth table:
+ * s -> b -> m : true
+ * s -> m -> b : !signed(s) || signed(m)
+ * m -> b -> s : true
+ * m -> s -> b : false
+ * b -> s -> m : false
+ * b -> m -> s : true
+ *
+ * s -> b -> b : true
+ * s -> s -> b : false
+ *
+ * additional float constraints:
+ * F -> F -> F: fine
+ * F -> I -> I: signedness of Is must match
+ * I -> F -> I: signedness of Is must match
+ * I -> I -> F: signedness of Is must match
+ * F -> I -> F: bad
+ * I -> F -> F: fine
+ * F -> F -> I: fine
+ * at least 1 float involved: signedness must match
+ */
+bool may_leave_out_middle_conv(ir_mode *m0, ir_mode *m1, ir_mode *m2)
+{
+ int n_floats = mode_is_float(m0) + mode_is_float(m1) + mode_is_float(m2);
+ if (n_floats == 1) {
+#if 0
+ int n_signed = mode_is_signed(m0) + mode_is_signed(m1)
+ + mode_is_signed(m2);
+ /* we assume that float modes are always signed */
+ if ((n_signed & 1) != 1)
+ return false;
+#else
+ /* because overflow gives strange results we don't touch this case */
+ return false;
+#endif
+ } else if (n_floats == 2 && !mode_is_float(m1)) {
+ /* F -> I -> F: bad (see table above) */
+ return false;
+ }
+
+ unsigned size0 = get_significand_size(m0);
+ unsigned size1 = get_significand_size(m1);
+ unsigned size2 = get_significand_size(m2);
+ /* middle mode smaller than the result and no bigger than the source:
+ * the middle conversion may lose bits */
+ if (size1 < size2 && size0 >= size1)
+ return false;
+ /* middle mode at least as precise as the final mode: no extra loss */
+ if (size1 >= size2)
+ return true;
+ /* strictly widening chain (size0 < size1 < size2): fine unless the
+ * sign interpretation changes in the middle step */
+ return !mode_is_signed(m0) || mode_is_signed(m1);
+}
+
/**
* Transform a Conv.
*/
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Conv_op(n);
+ if (is_Conv(a)) {
+ ir_mode *a_mode = get_irn_mode(a);
+ ir_node *b = get_Conv_op(a);
+ ir_mode *b_mode = get_irn_mode(b);
+ if (may_leave_out_middle_conv(b_mode, a_mode, mode)) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ return new_rd_Conv(dbgi, block, b, mode);
+ }
+ }
+
if (mode != mode_b && is_const_Phi(a)) {
/* Do NOT optimize mode_b Conv's, this leads to remaining
* Phib nodes later, because the conv_b_lower operation
return n;
}
+/**
+ * Builds a Tuple node that replaces the Load @p n: its memory Proj is
+ * rerouted to @p mem and its result Proj to @p res.  When the Load can
+ * throw, the control-flow Projs are filled in as well (X_regular gets a
+ * Jmp, X_except becomes Bad).
+ */
+static ir_node *create_load_replacement_tuple(ir_node *n, ir_node *mem,
+ ir_node *res)
+{
+ ir_node *block = get_nodes_block(n);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *in[pn_Load_max+1];
+ size_t n_in = 2; /* just M and res unless the Load throws */
+ in[pn_Load_M] = mem;
+ in[pn_Load_res] = res;
+ if (ir_throws_exception(n)) {
+ in[pn_Load_X_regular] = new_r_Jmp(block);
+ in[pn_Load_X_except] = new_r_Bad(irg, mode_X);
+ n_in = 4;
+ assert(pn_Load_max == 4);
+ }
+ ir_node *tuple = new_r_Tuple(block, n_in, in);
+ return tuple;
+}
+
static ir_node *transform_node_Load(ir_node *n)
{
+ /* don't touch volatile loads */
+ if (get_Load_volatility(n) == volatility_is_volatile)
+ return n;
+
+ /* NOTE(review): a Load through a pointer proven non-null (without
+ * relying on a Confirm) is unpinned so it may float -- mirrors the
+ * handling in transform_node_Store; presumably such a Load cannot
+ * trap, confirm against exception semantics */
+ ir_node *ptr = get_Load_ptr(n);
+ const ir_node *confirm;
+ if (value_not_zero(ptr, &confirm) && confirm == NULL) {
+ set_irn_pinned(n, op_pin_state_floats);
+ }
+
/* if our memory predecessor is a load from the same address, then reuse the
* previous result */
ir_node *mem = get_Load_mem(n);
- ir_node *mem_pred;
-
if (!is_Proj(mem))
return n;
- /* don't touch volatile loads */
- if (get_Load_volatility(n) == volatility_is_volatile)
- return n;
- mem_pred = get_Proj_pred(mem);
+ ir_node *mem_pred = get_Proj_pred(mem);
if (is_Load(mem_pred)) {
ir_node *pred_load = mem_pred;
/* conservatively compare the 2 loads. TODO: This could be less strict
* with fixup code in some situations (like smaller/bigger modes) */
- if (get_Load_ptr(pred_load) != get_Load_ptr(n))
+ if (get_Load_ptr(pred_load) != ptr)
return n;
if (get_Load_mode(pred_load) != get_Load_mode(n))
return n;
/* all combinations of aligned/unaligned pred/n should be fine so we do
* not compare the unaligned attribute */
- {
- ir_node *block = get_nodes_block(n);
- ir_node *jmp = new_r_Jmp(block);
- ir_graph *irg = get_irn_irg(n);
- ir_node *bad = new_r_Bad(irg, mode_X);
- ir_mode *mode = get_Load_mode(n);
- ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
- ir_node *in[] = { mem, res, jmp, bad };
- ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
- return tuple;
- }
+ ir_mode *mode = get_Load_mode(n);
+ ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
+ return create_load_replacement_tuple(n, mem, res);
} else if (is_Store(mem_pred)) {
ir_node *pred_store = mem_pred;
ir_node *value = get_Store_value(pred_store);
- if (get_Store_ptr(pred_store) != get_Load_ptr(n))
+ if (get_Store_ptr(pred_store) != ptr)
return n;
if (get_irn_mode(value) != get_Load_mode(n))
return n;
/* all combinations of aligned/unaligned pred/n should be fine so we do
* not compare the unaligned attribute */
- {
- ir_node *block = get_nodes_block(n);
- ir_node *jmp = new_r_Jmp(block);
- ir_graph *irg = get_irn_irg(n);
- ir_node *bad = new_r_Bad(irg, mode_X);
- ir_node *res = value;
- ir_node *in[] = { mem, res, jmp, bad };
- ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
- return tuple;
- }
+ return create_load_replacement_tuple(n, mem, value);
}
return n;
}
+/**
+ * Transform a Store node: a Store through a pointer proven non-null
+ * (without relying on a Confirm) is unpinned so it may float.
+ * NOTE(review): presumably a non-faulting Store need not stay pinned --
+ * confirm against the matching handling in transform_node_Load.
+ */
+static ir_node *transform_node_Store(ir_node *n)
+{
+ /* don't touch volatile stores */
+ if (get_Store_volatility(n) == volatility_is_volatile)
+ return n;
+
+ ir_node *ptr = get_Store_ptr(n);
+ const ir_node *confirm;
+ if (value_not_zero(ptr, &confirm) && confirm == NULL) {
+ set_irn_pinned(n, op_pin_state_floats);
+ }
+ return n;
+}
+
/**
* optimize a trampoline Call into a direct Call
*/
register_equivalent_node_func(op_Conv, equivalent_node_Conv);
register_equivalent_node_func(op_Eor, equivalent_node_Eor);
register_equivalent_node_func(op_Id, equivalent_node_Id);
- register_equivalent_node_func(op_Minus, equivalent_node_idempotent_unop);
+ register_equivalent_node_func(op_Minus, equivalent_node_involution);
register_equivalent_node_func(op_Mul, equivalent_node_Mul);
register_equivalent_node_func(op_Mux, equivalent_node_Mux);
- register_equivalent_node_func(op_Not, equivalent_node_idempotent_unop);
+ register_equivalent_node_func(op_Not, equivalent_node_involution);
register_equivalent_node_func(op_Or, equivalent_node_Or);
register_equivalent_node_func(op_Phi, equivalent_node_Phi);
register_equivalent_node_func(op_Proj, equivalent_node_Proj);
register_transform_node_func(op_Shl, transform_node_Shl);
register_transform_node_func(op_Shrs, transform_node_Shrs);
register_transform_node_func(op_Shr, transform_node_Shr);
+ register_transform_node_func(op_Store, transform_node_Store);
register_transform_node_func(op_Sub, transform_node_Sub);
register_transform_node_func(op_Switch, transform_node_Switch);
register_transform_node_func(op_Sync, transform_node_Sync);
if (get_irn_pinned(a) == op_pin_state_pinned) {
/* for pinned nodes, the block inputs must be equal */
- if (get_irn_n(a, -1) != get_irn_n(b, -1))
+ if (get_nodes_block(a) != get_nodes_block(b))
return 1;
} else {
ir_node *block_a = get_nodes_block(a);
if (!block_dominates(block_a, block_b)
&& !block_dominates(block_b, block_a))
return 1;
+ /* respect the workaround rule: do not move nodes which are only
+ * held by keepalive edges */
+ if (only_used_by_keepalive(a) || only_used_by_keepalive(b))
+ return 1;
}
}
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- foreach_pset(irg->value_table, ir_node*, node) {
+ foreach_pset(irg->value_table, ir_node, node) {
visit(node, env);
}
current_ir_graph = rem;
ir_node *optimize_in_place(ir_node *n)
{
ir_graph *irg = get_irn_irg(n);
- /* Handle graph state */
- assert(get_irg_phase_state(irg) != phase_building);
if (get_opt_global_cse())
set_irg_pinned(irg, op_pin_state_floats);