- break;
- case iro_Eor: if (ta && tb) { res = tarval_eor (ta, tb); } break;
- case iro_Not: if(ta) { /*res = tarval_not (ta)*/; } break;
- case iro_Shl: if (ta && tb) { res = tarval_shl (ta, tb); } break;
- case iro_Shr: if (ta && tb) { res = tarval_shr (ta, tb); } break;
- case iro_Shrs: if(ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
- case iro_Rot: if(ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
- case iro_Conv: if (ta) { res = tarval_convert_to (ta, get_irn_mode (n)); }
- break;
- case iro_Proj:
- {
- ir_node *aa, *ab;
-
- a = get_Proj_pred(n);
- /* Optimize Cmp nodes.
- This performs a first step of unreachable code elimination.
- Proj can not be computed, but folding a Cmp above the Proj here is
- not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
- only 1 is used.
- There are several case where we can evaluate a Cmp node:
- 1. The nodes compared are both the same. If we compare for
- equal, this will return true, else it will return false.
- This step relies on cse.
- 2. The predecessors of Cmp are target values. We can evaluate
- the Cmp.
- 3. The predecessors are Allocs or void* constants. Allocs never
- return NULL, they raise an exception. Therefore we can predict
- the Cmp result. */
- if (get_irn_op(a) == op_Cmp) {
- aa = get_Cmp_left(a);
- ab = get_Cmp_right(a);
- if (aa == ab) { /* 1.: */
- /* This is a tric with the bits used for encoding the Cmp
- Proj numbers, the following statement is not the same:
- res = tarval_from_long (mode_b, (get_Proj_proj(n) == Eq)): */
- res = tarval_from_long (mode_b, (get_Proj_proj(n) & irpn_Eq));
- } else {
- tarval *taa = computed_value (aa);
- tarval *tab = computed_value (ab);
- if (taa && tab) { /* 2.: */
- /* strange checks... */
- ir_pncmp flags = tarval_comp (taa, tab);
- if (flags != irpn_False) {
- res = tarval_from_long (mode_b, get_Proj_proj(n) & flags);
- }
- } else { /* check for 3.: */
- ir_node *aaa = skip_nop(skip_Proj(aa));
- ir_node *aba = skip_nop(skip_Proj(ab));
- if ( ( (/* aa is ProjP and aaa is Alloc */
- (get_irn_op(aa) == op_Proj)
- && (get_irn_mode(aa) == mode_p)
- && (get_irn_op(aaa) == op_Alloc))
- && ( (/* ab is constant void */
- (get_irn_op(ab) == op_Const)
- && (get_irn_mode(ab) == mode_p)
- && (get_Const_tarval(ab) == tarval_p_void))
- || (/* ab is other Alloc */
- (get_irn_op(ab) == op_Proj)
- && (get_irn_mode(ab) == mode_p)
- && (get_irn_op(aba) == op_Alloc)
- && (aaa != aba))))
- || (/* aa is void and aba is Alloc */
- (get_irn_op(aa) == op_Const)
- && (get_irn_mode(aa) == mode_p)
- && (get_Const_tarval(aa) == tarval_p_void)
- && (get_irn_op(ab) == op_Proj)
- && (get_irn_mode(ab) == mode_p)
- && (get_irn_op(aba) == op_Alloc)))
- /* 3.: */
- res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
- }
+ }
+ return tarval_bad;
+}
+
+/**
+ * If the parameter n can be computed, return its value; otherwise return
+ * tarval_bad. Performs constant folding.
+ *
+ * GL: Only if n is an arithmetic operator?
+ */
+tarval *computed_value(ir_node *n)
+{
+ if (n->op->computed_value)
+ return n->op->computed_value(n);
+ return tarval_bad;
+}
+
+/**
+ * Set the default computed_value evaluator for an ir_op.
+ */
+static ir_op *firm_set_default_computed_value(ir_op *op)
+{
+#define CASE(a) \
+ case iro_##a: \
+ op->computed_value = computed_value_##a; \
+ break
+
+ switch (op->code) {
+ CASE(Const);
+ CASE(SymConst);
+ CASE(Add);
+ CASE(Sub);
+ CASE(Minus);
+ CASE(Mul);
+ CASE(Quot);
+ CASE(Div);
+ CASE(Mod);
+ CASE(Abs);
+ CASE(And);
+ CASE(Or);
+ CASE(Eor);
+ CASE(Not);
+ CASE(Shl);
+ CASE(Shr);
+ CASE(Shrs);
+ CASE(Rot);
+ CASE(Conv);
+ CASE(Proj);
+ default:
+ op->computed_value = NULL;
+ }
+
+ return op;
+#undef CASE
+}
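+
+/*
+ * Intended use (a sketch -- the exact registration point lives in irop.c
+ * and may differ): when an ir_op is constructed, the default evaluator is
+ * installed once, and computed_value() then dispatches through the
+ * function pointer:
+ *
+ *   ir_op *op = new_ir_op(iro_Add, "Add", ...);
+ *   firm_set_default_computed_value(op);
+ */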
+
+#if 0
+/* Returns 1 if a and b are pointers to different locations. */
+static bool
+different_identity (ir_node *a, ir_node *b)
+{
+ assert (mode_is_reference(get_irn_mode (a))
+ && mode_is_reference(get_irn_mode (b)));
+
+ if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
+ ir_node *a1 = get_Proj_pred (a);
+ ir_node *b1 = get_Proj_pred (b);
+ if (a1 != b1 && get_irn_op (a1) == op_Alloc
+ && get_irn_op (b1) == op_Alloc)
+ return 1;
+ }
+ return 0;
+}
+#endif
+
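+/**
+ * Simplify Block nodes: straighten a single entry Block following a
+ * single exit Block into its predecessor, merge the Block a Cond jumps
+ * into twice, and replace self cycles and Blocks whose predecessors are
+ * all Bad by Bad (unreachable code elimination).
+ */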
+static ir_node *equivalent_node_Block(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ /* The Block constructor does not call optimize, but mature_block
+ calls the optimization. */
+ assert(get_Block_matured(n));
+
+ /* Straightening: a single entry Block following a single exit Block
+ can be merged, if it is not the Start block. */
+ /* !!! Beware, all Phi-nodes of n must have been optimized away.
+ This should be true, as the block is matured before optimize is called.
+ But what about Phi-cycles with the Phi0/Id that could not be resolved?
+ Remaining Phi nodes are just Ids. */
+ if ((get_Block_n_cfgpreds(n) == 1) &&
+ (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
+ ir_node *predblock = get_nodes_Block(get_Block_cfgpred(n, 0));
+ if (predblock == oldn) {
+      /* Jmp jumps into the block it is in -- remove the self cycle. */
+ n = new_Bad(); DBG_OPT_DEAD;
+ } else if (get_opt_control_flow_straightening()) {
+ n = predblock; DBG_OPT_STG;
+ }
+ }
+ else if ((get_Block_n_cfgpreds(n) == 1) &&
+ (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
+ ir_node *predblock = get_nodes_Block(get_Block_cfgpred(n, 0));
+ if (predblock == oldn) {
+      /* The Proj of the Cond jumps into its own block -- remove the self cycle. */
+ n = new_Bad(); DBG_OPT_DEAD;
+ }
+ }
+ else if ((get_Block_n_cfgpreds(n) == 2) &&
+ (get_opt_control_flow_weak_simplification())) {
+    /* Test whether a Cond jumps twice into this block.
+       @@@ we could also do this with two loops that find two such preds among several. */
+ ir_node *a = get_Block_cfgpred(n, 0);
+ ir_node *b = get_Block_cfgpred(n, 1);
+
+ if ((get_irn_op(a) == op_Proj) &&
+ (get_irn_op(b) == op_Proj) &&
+ (get_Proj_pred(a) == get_Proj_pred(b)) &&
+ (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
+ (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
+      /* This is also a single entry Block following a single exit Block.
+         The Phis have the same operand twice and will be optimized away. */
+ n = get_nodes_Block(a); DBG_OPT_IFSIM;
+ }
+ } else if (get_opt_unreachable_code() &&
+ (n != current_ir_graph->start_block) &&
+ (n != current_ir_graph->end_block) ) {
+ int i;
+    /* If all inputs are dead, this block is dead too, unless it is the
+       start or the end block. This is a step of unreachable code
+       elimination. */
+ for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
+ if (!is_Bad(get_Block_cfgpred(n, i))) break;
+ }
+ if (i == get_Block_n_cfgpreds(n))
+ n = new_Bad();
+ }
+
+ return n;
+}
+
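+/**
+ * A Jmp in a Bad Block is dead itself and becomes Bad:
+ * a step of unreachable code elimination.
+ */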
+static ir_node *equivalent_node_Jmp(ir_node *n)
+{
+  /* GL: Why not do the same for op_Raise? */
+ /* unreachable code elimination */
+ if (is_Bad(get_nodes_Block(n)))
+ n = new_Bad();
+
+ return n;
+}
+
+static ir_node *equivalent_node_Cond(ir_node *n)
+{
+ /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
+ See cases for iro_Cond and iro_Proj in transform_node. */
+ return n;
+}
+
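+/**
+ * Or is idempotent: a | a == a.
+ */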
+static ir_node *equivalent_node_Or(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_Or_left(n);
+ ir_node *b = get_Or_right(n);
+
+  /* a | a == a: remove the redundant Or */
+ if (a == b) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
+
+ return n;
+}
+
+/**
+ * Optimize operations that are commutative and have a neutral element 0,
+ * i.e. a op 0 == 0 op a == a.
+ */
+static ir_node *equivalent_node_neutral_zero(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_binop_left(n);
+ ir_node *b = get_binop_right(n);
+
+ tarval *tv;
+ ir_node *on;
+
+  /* After running computed_value there is at most one constant predecessor.
+     Find this predecessor's value and remember the other node: */
+ if ((tv = computed_value (a)) != tarval_bad) {
+ on = b;
+ } else if ((tv = computed_value (b)) != tarval_bad) {
+ on = a;
+ } else
+ return n;
+
+  /* If this predecessor's constant value is zero, the operation is
+     unnecessary. Remove it: */
+ if (tarval_classify (tv) == TV_CLASSIFY_NULL) {
+ n = on; DBG_OPT_ALGSIM1;
+ }
+
+ return n;
+}
+
+static ir_node *equivalent_node_Add(ir_node *n)
+{
+ return equivalent_node_neutral_zero(n);
+}
+
+static ir_node *equivalent_node_Eor(ir_node *n)
+{
+ return equivalent_node_neutral_zero(n);
+}
+
+/**
+ * Optimize operations that are not commutative but have a neutral element
+ * 0 on the right, i.e. a op 0 == a. Tests only the right predecessor.
+ */
+static ir_node *equivalent_node_left_zero(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_binop_left(n);
+ ir_node *b = get_binop_right(n);
+
+ if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
+
+ return n;
+}
+
+static ir_node *equivalent_node_Sub(ir_node *n)
+{
+ return equivalent_node_left_zero(n);
+}
+
+static ir_node *equivalent_node_Shl(ir_node *n)
+{
+ return equivalent_node_left_zero(n);
+}
+
+static ir_node *equivalent_node_Shr(ir_node *n)
+{
+ return equivalent_node_left_zero(n);
+}
+
+static ir_node *equivalent_node_Shrs(ir_node *n)
+{
+ return equivalent_node_left_zero(n);
+}
+
+static ir_node *equivalent_node_Rot(ir_node *n)
+{
+ return equivalent_node_left_zero(n);
+}
+
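+/**
+ * Optimize unops that are their own inverse, i.e. op(op(x)) == x.
+ */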
+static ir_node *equivalent_node_symmetric_unop(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ /* optimize symmetric unop */
+ if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
+ n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
+ }
+ return n;
+}
+
+static ir_node *equivalent_node_Not(ir_node *n)
+{
+ /* NotNot x == x */
+ return equivalent_node_symmetric_unop(n);
+}
+
+static ir_node *equivalent_node_Minus(ir_node *n)
+{
+  /* --x == x */ /* ??? Is this possible or can --x raise an
+                    out of bounds exception if -min != max? */
+ return equivalent_node_symmetric_unop(n);
+}
+
+static ir_node *equivalent_node_Mul(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_Mul_left(n);
+ ir_node *b = get_Mul_right(n);
+
+  /* Mul is commutative and has 1 as its neutral element. */
+ if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ONE) {
+ n = b; DBG_OPT_ALGSIM1;
+ } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
+ return n;
+}
+
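+/**
+ * Div by 1 yields the dividend: div(x, 1) == x. The Div node is replaced
+ * by a Tuple (mem, Bad, x) so that the Projs on it select the operands
+ * directly.
+ */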
+static ir_node *equivalent_node_Div(ir_node *n)
+{
+ ir_node *a = get_Div_left(n);
+ ir_node *b = get_Div_right(n);
+
+ /* Div is not commutative. */
+ if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
+ /* Turn Div into a tuple (mem, bad, a) */
+ ir_node *mem = get_Div_mem(n);
+ turn_into_tuple(n, 3);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
+ }
+ return n;
+}
+
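+/**
+ * And is idempotent (a & a == a) and has the all-one value as neutral
+ * element: a & ~0 == a.
+ */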
+static ir_node *equivalent_node_And(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_And_left(n);
+ ir_node *b = get_And_right(n);
+
+ if (a == b) {
+    n = a; /* And is idempotent: a & a == a */
+ } else if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ALL_ONE) {
+ n = b;
+ } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ALL_ONE) {
+ n = a;
+ }
+ if (n != oldn) DBG_OPT_ALGSIM1;
+ return n;
+}
+
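+/**
+ * Remove Convs that change nothing: a Conv into the operand's own mode,
+ * and Conv chains Conv(Conv(b)) where the outer Conv restores b's mode
+ * without the inner Conv having lost information.
+ */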
+static ir_node *equivalent_node_Conv(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_Conv_op(n);
+ ir_node *b;
+
+ ir_mode *n_mode = get_irn_mode(n);
+ ir_mode *a_mode = get_irn_mode(a);
+
+ if (n_mode == a_mode) { /* No Conv necessary */
+ n = a; DBG_OPT_ALGSIM3;
+ } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ ir_mode *b_mode;
+
+ b = get_Conv_op(a);
+ n_mode = get_irn_mode(n);
+ b_mode = get_irn_mode(b);
+
+ if (n_mode == b_mode) {
+ if (n_mode == mode_b) {
+ n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM1;
+ }
+ else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
+ if (smaller_mode(b_mode, a_mode)){
+ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ DBG_OPT_ALGSIM1;