/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   iropt --- optimizations intertwined with IR construction.
 * @author  Christian Schaefer, Goetz Lindenmaier, Michael Beck
 */
#include "irgraph_t.h"
#include "iredges_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "opt_confirms.h"
#include "opt_polymorphy.h"
#include "firm_types.h"

/* Make types visible to allow most efficient access */
/**
 * Returns the tarval of a Const node or tarval_bad for all other nodes.
 */
static tarval *default_value_of(const ir_node *n)
{
    if (is_Const(n))
        return get_Const_tarval(n); /* might return tarval_bad */
    return tarval_bad;
}

value_of_func value_of_ptr = default_value_of;
/* Set a new value_of function. */
void set_value_of_func(value_of_func func)
{
    if (func != NULL)
        value_of_ptr = func;
    else
        value_of_ptr = default_value_of;
}
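/*
 * Illustrative sketch (not part of the original sources): a client could
 * install its own evaluator, e.g. one that looks through Id nodes before
 * falling back to the default behaviour, and later restore the default by
 * passing NULL.  my_value_of is a hypothetical name:
 *
 *     static tarval *my_value_of(const ir_node *n)
 *     {
 *         n = skip_Id((ir_node *)n);
 *         return is_Const(n) ? get_Const_tarval(n) : tarval_bad;
 *     }
 *
 *     set_value_of_func(my_value_of);  // install the custom evaluator
 *     set_value_of_func(NULL);         // fall back to default_value_of
 */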
/**
 * Return the value of a Constant.
 */
static tarval *computed_value_Const(const ir_node *n)
{
    return get_Const_tarval(n);
}  /* computed_value_Const */
/**
 * Return the value of a 'sizeof', 'alignof' or 'offsetof' SymConst.
 */
static tarval *computed_value_SymConst(const ir_node *n)
{
    ir_type   *type;
    ir_entity *ent;

    switch (get_SymConst_kind(n)) {
    case symconst_type_size:
        type = get_SymConst_type(n);
        if (get_type_state(type) == layout_fixed)
            return new_tarval_from_long(get_type_size_bytes(type), get_irn_mode(n));
        break;
    case symconst_type_align:
        type = get_SymConst_type(n);
        if (get_type_state(type) == layout_fixed)
            return new_tarval_from_long(get_type_alignment_bytes(type), get_irn_mode(n));
        break;
    case symconst_ofs_ent:
        ent  = get_SymConst_entity(n);
        type = get_entity_owner(ent);
        if (get_type_state(type) == layout_fixed)
            return new_tarval_from_long(get_entity_offset(ent), get_irn_mode(n));
        break;
    default:
        break;
    }
    return tarval_bad;
}  /* computed_value_SymConst */
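/*
 * Worked example (sketch): for a fixed-layout type struct { int a; int b; },
 * a symconst_ofs_ent SymConst for entity 'b' folds to the constant 4 here
 * (assuming 4-byte ints and no padding); while the layout is not yet fixed
 * the function keeps returning tarval_bad.
 */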
/**
 * Return the value of an Add.
 */
static tarval *computed_value_Add(const ir_node *n)
{
    ir_node *a = get_Add_left(n);
    ir_node *b = get_Add_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad))
        return tarval_add(ta, tb);

    return tarval_bad;
}  /* computed_value_Add */
/**
 * Return the value of a Sub.
 * Special case: a - a
 */
static tarval *computed_value_Sub(const ir_node *n)
{
    ir_mode *mode = get_irn_mode(n);
    ir_node *a    = get_Sub_left(n);
    ir_node *b    = get_Sub_right(n);
    tarval  *ta;
    tarval  *tb;

    /* NaN - NaN != 0, so exclude floats */
    if (! mode_is_float(mode)) {
        /* a - a == 0 */
        if (a == b)
            return get_mode_null(mode);
    }

    ta = value_of(a);
    tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad))
        return tarval_sub(ta, tb, mode);

    return tarval_bad;
}  /* computed_value_Sub */
/**
 * Return the value of an unary Minus.
 */
static tarval *computed_value_Minus(const ir_node *n)
{
    ir_node *a  = get_Minus_op(n);
    tarval  *ta = value_of(a);

    if (ta != tarval_bad)
        return tarval_neg(ta);

    return tarval_bad;
}  /* computed_value_Minus */
/**
 * Return the value of a Mul.
 */
static tarval *computed_value_Mul(const ir_node *n)
{
    ir_node *a = get_Mul_left(n);
    ir_node *b = get_Mul_right(n);
    ir_mode *mode;

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    mode = get_irn_mode(n);
    if (mode != get_irn_mode(a)) {
        /* n * n = 2n bit multiplication */
        ta = tarval_convert_to(ta, mode);
        tb = tarval_convert_to(tb, mode);
    }

    if (ta != tarval_bad && tb != tarval_bad) {
        return tarval_mul(ta, tb);
    } else {
        /* a * 0 != 0 if a == NaN or a == Inf */
        if (!mode_is_float(mode)) {
            /* a * 0 = 0 or 0 * b = 0 */
            if (ta == get_mode_null(mode))
                return ta;
            if (tb == get_mode_null(mode))
                return tb;
        }
    }
    return tarval_bad;
}  /* computed_value_Mul */
/**
 * Return the value of an Abs.
 */
static tarval *computed_value_Abs(const ir_node *n)
{
    ir_node *a  = get_Abs_op(n);
    tarval  *ta = value_of(a);

    if (ta != tarval_bad)
        return tarval_abs(ta);

    return tarval_bad;
}  /* computed_value_Abs */
/**
 * Return the value of an And.
 * Special case: a & 0, 0 & b
 */
static tarval *computed_value_And(const ir_node *n)
{
    ir_node *a = get_And_left(n);
    ir_node *b = get_And_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_and(ta, tb);
    } else {
        if (tarval_is_null(ta)) return ta;
        if (tarval_is_null(tb)) return tb;
    }
    return tarval_bad;
}  /* computed_value_And */
/**
 * Return the value of an Or.
 * Special case: a | 1...1, 1...1 | b
 */
static tarval *computed_value_Or(const ir_node *n)
{
    ir_node *a = get_Or_left(n);
    ir_node *b = get_Or_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_or(ta, tb);
    } else {
        if (tarval_is_all_one(ta)) return ta;
        if (tarval_is_all_one(tb)) return tb;
    }
    return tarval_bad;
}  /* computed_value_Or */
/**
 * Return the value of an Eor.
 * Special case: a ^ a = 0
 */
static tarval *computed_value_Eor(const ir_node *n)
{
    ir_node *a = get_Eor_left(n);
    ir_node *b = get_Eor_right(n);
    tarval *ta, *tb;

    if (a == b)
        return get_mode_null(get_irn_mode(n));

    ta = value_of(a);
    tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_eor(ta, tb);
    }
    return tarval_bad;
}  /* computed_value_Eor */
/**
 * Return the value of a Not.
 */
static tarval *computed_value_Not(const ir_node *n)
{
    ir_node *a  = get_Not_op(n);
    tarval  *ta = value_of(a);

    if (ta != tarval_bad)
        return tarval_not(ta);

    return tarval_bad;
}  /* computed_value_Not */
/**
 * Return the value of a Shl.
 */
static tarval *computed_value_Shl(const ir_node *n)
{
    ir_node *a = get_Shl_left(n);
    ir_node *b = get_Shl_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_shl(ta, tb);
    }
    return tarval_bad;
}  /* computed_value_Shl */

/**
 * Return the value of a Shr.
 */
static tarval *computed_value_Shr(const ir_node *n)
{
    ir_node *a = get_Shr_left(n);
    ir_node *b = get_Shr_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_shr(ta, tb);
    }
    return tarval_bad;
}  /* computed_value_Shr */

/**
 * Return the value of a Shrs.
 */
static tarval *computed_value_Shrs(const ir_node *n)
{
    ir_node *a = get_Shrs_left(n);
    ir_node *b = get_Shrs_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_shrs(ta, tb);
    }
    return tarval_bad;
}  /* computed_value_Shrs */

/**
 * Return the value of a Rotl.
 */
static tarval *computed_value_Rotl(const ir_node *n)
{
    ir_node *a = get_Rotl_left(n);
    ir_node *b = get_Rotl_right(n);

    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    if ((ta != tarval_bad) && (tb != tarval_bad)) {
        return tarval_rotl(ta, tb);
    }
    return tarval_bad;
}  /* computed_value_Rotl */
/**
 * Return the value of a Conv.
 */
static tarval *computed_value_Conv(const ir_node *n)
{
    ir_node *a  = get_Conv_op(n);
    tarval  *ta = value_of(a);

    if (ta != tarval_bad)
        return tarval_convert_to(ta, get_irn_mode(n));

    return tarval_bad;
}  /* computed_value_Conv */
/**
 * Calculate the value of a Mux: can be evaluated, if the
 * sel and the selected input are known.
 */
static tarval *computed_value_Mux(const ir_node *n)
{
    ir_node *sel = get_Mux_sel(n);
    tarval *ts = value_of(sel);

    if (ts == get_tarval_b_true()) {
        ir_node *v = get_Mux_true(n);
        return value_of(v);
    }
    else if (ts == get_tarval_b_false()) {
        ir_node *v = get_Mux_false(n);
        return value_of(v);
    }
    return tarval_bad;
}  /* computed_value_Mux */
/**
 * Calculate the value of a Confirm: can be evaluated,
 * if it has the form Confirm(x, '=', Const).
 */
static tarval *computed_value_Confirm(const ir_node *n)
{
    /*
     * Beware: we might produce Phi(Confirm(x == true), Confirm(x == false)).
     * Do NOT optimize them away (jump threading wants them), so wait until
     * remove_confirm is activated.
     */
    if (get_opt_remove_confirm()) {
        if (get_Confirm_cmp(n) == pn_Cmp_Eq) {
            tarval *tv = value_of(get_Confirm_bound(n));
            if (tv != tarval_bad)
                return tv;
        }
    }
    return value_of(get_Confirm_value(n));
}  /* computed_value_Confirm */
/**
 * Return the value of a Proj(Cmp).
 *
 * This performs a first step of unreachable code elimination.
 * The Proj itself can not be computed, but folding a Cmp above the Proj here
 * is not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
 * only one is used.
 * There are several cases where we can evaluate a Cmp node, see below.
 */
static tarval *computed_value_Proj_Cmp(const ir_node *n)
{
    ir_node *cmp   = get_Proj_pred(n);
    ir_node *left  = get_Cmp_left(cmp);
    ir_node *right = get_Cmp_right(cmp);
    long pn_cmp    = get_Proj_proj(n);
    ir_mode *mode  = get_irn_mode(left);
    tarval *tv_l, *tv_r;

    /*
     * BEWARE: a == a is NOT always True for floating point values, as
     * NaN != NaN is defined, so we must check this here.
     */
    if (left == right && (!mode_is_float(mode) || pn_cmp == pn_Cmp_Lt || pn_cmp == pn_Cmp_Gt)) {
        /* This is a trick with the bits used for encoding the Cmp
           Proj numbers, the following statement is not the same:
           return new_tarval_from_long(pn_cmp == pn_Cmp_Eq, mode_b) */
        return new_tarval_from_long(pn_cmp & pn_Cmp_Eq, mode_b);
    }
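    /*
     * Worked example: the pn_Cmp values are bit sets, e.g.
     * pn_Cmp_Le == pn_Cmp_Lt | pn_Cmp_Eq.  So for Cmp(a, a) a Proj
     * selecting Le has the Eq bit set and folds to true, while a Proj
     * selecting Lt lacks it and folds to false -- a single AND handles
     * all relations at once.
     */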
    tv_l = value_of(left);
    tv_r = value_of(right);

    if ((tv_l != tarval_bad) && (tv_r != tarval_bad)) {
        /*
         * The predecessors of Cmp are target values.  We can evaluate
         * the Cmp.
         */
        pn_Cmp flags = tarval_cmp(tv_l, tv_r);
        if (flags != pn_Cmp_False) {
            return new_tarval_from_long(pn_cmp & flags, mode_b);
        }
    } else if (mode_is_int(mode)) {
        /* for integer values, we can check against MIN/MAX */
        pn_Cmp cmp_result;

        if (tv_l == get_mode_min(mode)) {
            /* MIN <=/> x.  This results in true/false. */
            if (pn_cmp == pn_Cmp_Le)
                return tarval_b_true;
            else if (pn_cmp == pn_Cmp_Gt)
                return tarval_b_false;
        } else if (tv_r == get_mode_min(mode)) {
            /* x >=/< MIN.  This results in true/false. */
            if (pn_cmp == pn_Cmp_Ge)
                return tarval_b_true;
            else if (pn_cmp == pn_Cmp_Lt)
                return tarval_b_false;
        } else if (tv_l == get_mode_max(mode)) {
            /* MAX >=/< x.  This results in true/false. */
            if (pn_cmp == pn_Cmp_Ge)
                return tarval_b_true;
            else if (pn_cmp == pn_Cmp_Lt)
                return tarval_b_false;
        } else if (tv_r == get_mode_max(mode)) {
            /* x <=/> MAX.  This results in true/false. */
            if (pn_cmp == pn_Cmp_Le)
                return tarval_b_true;
            else if (pn_cmp == pn_Cmp_Gt)
                return tarval_b_false;
        }

        cmp_result = vrp_cmp(left, right);
        if (cmp_result != pn_Cmp_False) {
            if (cmp_result == pn_Cmp_Lg) {
                if (pn_cmp == pn_Cmp_Eq) {
                    return tarval_b_false;
                } else if (pn_cmp == pn_Cmp_Lg) {
                    return tarval_b_true;
                }
            } else {
                return new_tarval_from_long(cmp_result & pn_cmp, mode_b);
            }
        }
    } else if (mode_is_reference(mode)) {
        /* pointer compare */
        ir_node *s_l = skip_Proj(left);
        ir_node *s_r = skip_Proj(right);

        if ((is_Alloc(s_l) && tarval_is_null(tv_r)) ||
            (tarval_is_null(tv_l) && is_Alloc(s_r))) {
            /*
             * The predecessors are Allocs and (void*)(0) constants.  In Firm
             * Allocs never return NULL, they raise an exception.  Therefore
             * we can predict the Cmp result.
             */
            return new_tarval_from_long(pn_cmp & pn_Cmp_Lg, mode_b);
        }
    }
    return computed_value_Cmp_Confirm(cmp, left, right, pn_cmp);
}  /* computed_value_Proj_Cmp */
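/*
 * Worked example (sketch): if p is the result Proj of an Alloc node, the
 * Eq-Proj of Cmp(p, NULL) folds to false here, so a subsequent null check
 * on freshly allocated memory disappears as unreachable code.
 */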
/**
 * Return the value of a floating point Quot.
 */
static tarval *do_computed_value_Quot(const ir_node *a, const ir_node *b)
{
    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    /* cannot optimize 0 / b = 0 because of NaN */
    if (ta != tarval_bad && tb != tarval_bad)
        return tarval_quo(ta, tb);
    return tarval_bad;
}  /* do_computed_value_Quot */
/**
 * Calculate the value of an integer Div of two nodes.
 * Special case: 0 / b
 */
static tarval *do_computed_value_Div(const ir_node *a, const ir_node *b)
{
    tarval        *ta = value_of(a);
    tarval        *tb;
    const ir_node *dummy;

    /* Compute c1 / c2 or 0 / a, a != 0 */
    if (tarval_is_null(ta) && value_not_zero(b, &dummy))
        return ta;  /* 0 / b == 0 */
    tb = value_of(b);
    if (ta != tarval_bad && tb != tarval_bad)
        return tarval_div(ta, tb);
    return tarval_bad;
}  /* do_computed_value_Div */
/**
 * Calculate the value of an integer Mod of two nodes.
 * Special case: a % 1
 */
static tarval *do_computed_value_Mod(const ir_node *a, const ir_node *b)
{
    tarval *ta = value_of(a);
    tarval *tb = value_of(b);

    /* Compute a % 1 or c1 % c2 */
    if (tarval_is_one(tb))
        return get_mode_null(get_irn_mode(a));
    if (ta != tarval_bad && tb != tarval_bad)
        return tarval_mod(ta, tb);
    return tarval_bad;
}  /* do_computed_value_Mod */
/**
 * Return the value of a Proj(DivMod).
 */
static tarval *computed_value_Proj_DivMod(const ir_node *n)
{
    long proj_nr = get_Proj_proj(n);

    /* compute either the Div or the Mod part */
    if (proj_nr == pn_DivMod_res_div) {
        const ir_node *a = get_Proj_pred(n);
        return do_computed_value_Div(get_DivMod_left(a), get_DivMod_right(a));
    } else if (proj_nr == pn_DivMod_res_mod) {
        const ir_node *a = get_Proj_pred(n);
        return do_computed_value_Mod(get_DivMod_left(a), get_DivMod_right(a));
    }
    return tarval_bad;
}  /* computed_value_Proj_DivMod */
/**
 * Return the value of a Proj(Div).
 */
static tarval *computed_value_Proj_Div(const ir_node *n)
{
    long proj_nr = get_Proj_proj(n);

    if (proj_nr == pn_Div_res) {
        const ir_node *a = get_Proj_pred(n);
        return do_computed_value_Div(get_Div_left(a), get_Div_right(a));
    }
    return tarval_bad;
}  /* computed_value_Proj_Div */
/**
 * Return the value of a Proj(Mod).
 */
static tarval *computed_value_Proj_Mod(const ir_node *n)
{
    long proj_nr = get_Proj_proj(n);

    if (proj_nr == pn_Mod_res) {
        const ir_node *a = get_Proj_pred(n);
        return do_computed_value_Mod(get_Mod_left(a), get_Mod_right(a));
    }
    return tarval_bad;
}  /* computed_value_Proj_Mod */
/**
 * Return the value of a Proj(Quot).
 */
static tarval *computed_value_Proj_Quot(const ir_node *n)
{
    long proj_nr = get_Proj_proj(n);

    if (proj_nr == pn_Quot_res) {
        const ir_node *a = get_Proj_pred(n);
        return do_computed_value_Quot(get_Quot_left(a), get_Quot_right(a));
    }
    return tarval_bad;
}  /* computed_value_Proj_Quot */
/**
 * Return the value of a Proj.
 */
static tarval *computed_value_Proj(const ir_node *proj)
{
    ir_node *n = get_Proj_pred(proj);

    if (n->op->ops.computed_value_Proj != NULL)
        return n->op->ops.computed_value_Proj(proj);
    return tarval_bad;
}  /* computed_value_Proj */
/**
 * If the parameter n can be computed, return its value, else tarval_bad.
 * Performs constant folding.
 *
 * @param n  the node to be evaluated
 */
tarval *computed_value(const ir_node *n)
{
    vrp_attr *vrp = vrp_get_info(n);
    if (vrp && vrp->valid && tarval_cmp(vrp->bits_set, vrp->bits_not_set) == pn_Cmp_Eq) {
        return vrp->bits_set;
    }
    if (n->op->ops.computed_value)
        return n->op->ops.computed_value(n);
    return tarval_bad;
}  /* computed_value */
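/*
 * Example (sketch): when VRP has proved every bit of n, i.e. bits_set and
 * bits_not_set agree, computed_value() returns that bit pattern directly,
 * folding n to a constant without consulting the per-opcode handler.
 */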
/**
 * Set the default computed_value evaluator in an ir_op_ops.
 *
 * @param code   the opcode for the default operation
 * @param ops    the operations initialized
 *
 * @return
 *    The operations.
 */
static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops)
{
#define CASE(a)                                       \
    case iro_##a:                                     \
        ops->computed_value = computed_value_##a;     \
        break
#define CASE_PROJ(a)                                        \
    case iro_##a:                                           \
        ops->computed_value_Proj = computed_value_Proj_##a; \
        break

    switch (code) {
    CASE(Const);       CASE(SymConst);    CASE(Add);      CASE(Sub);
    CASE(Minus);       CASE(Mul);         CASE(Abs);      CASE(And);
    CASE(Or);          CASE(Eor);         CASE(Not);      CASE(Shl);
    CASE(Shr);         CASE(Shrs);        CASE(Rotl);     CASE(Conv);
    CASE(Mux);         CASE(Confirm);     CASE(Proj);
    CASE_PROJ(Cmp);    CASE_PROJ(DivMod); CASE_PROJ(Div);
    CASE_PROJ(Mod);    CASE_PROJ(Quot);
    default:
        break;
    }
    return ops;
#undef CASE_PROJ
#undef CASE
}  /* firm_set_default_computed_value */
/**
 * Returns an equivalent block for another block.
 * If the block has only one predecessor, this is
 * the equivalent one.  If the only predecessor of a block is
 * the block itself, this is a dead block.
 *
 * If both predecessors of a block are the branches of a binary
 * Cond, the equivalent block is Cond's block.
 *
 * If all predecessors of a block are Bad or lie in a dead
 * block, the current block is dead as well.
 *
 * Note that blocks are NEVER turned into Bad's, instead
 * the dead_block flag is set.  So, never test for is_Bad(block),
 * always use is_dead_Block(block).
 */
static ir_node *equivalent_node_Block(ir_node *n)
{
    ir_node *oldn = n;
    int     n_preds;

    /* don't optimize dead or labeled blocks */
    if (is_Block_dead(n) || has_Block_entity(n))
        return n;

    n_preds = get_Block_n_cfgpreds(n);

    /* The Block constructor does not call optimize, but mature_immBlock()
       calls the optimization. */
    assert(get_Block_matured(n));

    /* Straightening: a single entry Block following a single exit Block
       can be merged, if it is not the Start block. */
    /* !!! Beware, all Phi-nodes of n must have been optimized away.
       This should be true, as the block is matured before optimize is called.
       But what about Phi-cycles with the Phi0/Id that could not be resolved?
       Remaining Phi nodes are just Ids. */
    if (n_preds == 1) {
        ir_node *pred = skip_Proj(get_Block_cfgpred(n, 0));

        if (is_Jmp(pred)) {
            ir_node *predblock = get_nodes_block(pred);
            if (predblock == oldn) {
                /* Jmp jumps into the block it is in -- deal with self cycle. */
                n = set_Block_dead(n);
                DBG_OPT_DEAD_BLOCK(oldn, n);
            } else if (get_opt_control_flow_straightening()) {
                n = predblock;
                DBG_OPT_STG(oldn, n);
            }
        } else if (is_Cond(pred)) {
            ir_node *predblock = get_nodes_block(pred);
            if (predblock == oldn) {
                /* The Cond jumps into the block it is in -- deal with self cycle. */
                n = set_Block_dead(n);
                DBG_OPT_DEAD_BLOCK(oldn, n);
            }
        }
    } else if ((n_preds == 2) &&
               (get_opt_control_flow_weak_simplification())) {
        /* Test whether Cond jumps twice to this block.
         * The more general case with more than 2 predecessors is handled
         * in optimize_cf(); we handle only this special case for speed here.
         */
        ir_node *a = get_Block_cfgpred(n, 0);
        ir_node *b = get_Block_cfgpred(n, 1);

        if (is_Proj(a) && is_Proj(b)) {
            ir_node *cond = get_Proj_pred(a);

            if (cond == get_Proj_pred(b) && is_Cond(cond) &&
                get_irn_mode(get_Cond_selector(cond)) == mode_b) {
                /* Also a single entry Block following a single exit Block.  Phis have
                   twice the same operand and will be optimized away. */
                n = get_nodes_block(cond);
                DBG_OPT_IFSIM1(oldn, a, b, n);
            }
        }
    } else if (get_opt_unreachable_code() &&
               (n != get_irg_start_block(current_ir_graph)) &&
               (n != get_irg_end_block(current_ir_graph))) {
        int i;

        /* If all inputs are dead, this block is dead too, except if it is
           the start or end block.  This is one step of unreachable code
           elimination. */
        for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
            ir_node *pred = get_Block_cfgpred(n, i);
            ir_node *pred_blk;

            if (is_Bad(pred)) continue;
            pred_blk = get_nodes_block(skip_Proj(pred));

            if (is_Block_dead(pred_blk)) continue;

            /* really found a living input */
            break;
        }
        if (i < 0) {
            n = set_Block_dead(n);
            DBG_OPT_DEAD_BLOCK(oldn, n);
        }
    }

    return n;
}  /* equivalent_node_Block */
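/*
 * Worked example (sketch): in the chain  A: { ... Jmp } --> B  where the Jmp
 * is B's only control-flow predecessor, equivalent_node_Block(B) returns A,
 * i.e. the two blocks are merged by control-flow straightening.
 */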
/**
 * Returns an equivalent node for a Jmp, a Bad :-)
 * Of course this only happens if the Block of the Jmp is dead.
 */
static ir_node *equivalent_node_Jmp(ir_node *n)
{
    ir_node *oldn = n;

    /* unreachable code elimination */
    if (is_Block_dead(get_nodes_block(n))) {
        n = get_irg_bad(current_ir_graph);
        DBG_OPT_DEAD_BLOCK(oldn, n);
    }
    return n;
}  /* equivalent_node_Jmp */

/** Raise is handled in the same way as Jmp. */
#define equivalent_node_Raise   equivalent_node_Jmp

/* We do not evaluate Cond here as we replace it by a new node, a Jmp.
   See transform_node_Proj_Cond(). */
/**
 * Optimize operations that are commutative and have neutral 0,
 * so a op 0 = 0 op a = a.
 */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
    ir_node *oldn = n;

    ir_node *a = get_binop_left(n);
    ir_node *b = get_binop_right(n);

    tarval *tv;
    ir_node *on;

    /* After running compute_node there is only one constant predecessor.
       Find this predecessor's value and remember the other node: */
    if ((tv = value_of(a)) != tarval_bad) {
        on = b;
    } else if ((tv = value_of(b)) != tarval_bad) {
        on = a;
    } else
        return n;

    /* If this predecessor's constant value is zero, the operation is
     * unnecessary.  Remove it.
     *
     * Beware: If n is an Add, the mode of on and n might be different,
     * which happens in this rare construction: NULL + 3.
     * Then, a Conv would be needed which we cannot include here.
     */
    if (tarval_is_null(tv) && get_irn_mode(on) == get_irn_mode(n)) {
        n = on;

        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
    }

    return n;
}  /* equivalent_node_neutral_zero */
/**
 * Eor is commutative and has neutral 0.
 */
static ir_node *equivalent_node_Eor(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *a;
    ir_node *b;

    n = equivalent_node_neutral_zero(n);
    if (n != oldn) return n;

    a = get_Eor_left(n);
    b = get_Eor_right(n);

    if (is_Eor(a)) {
        ir_node *aa = get_Eor_left(a);
        ir_node *ab = get_Eor_right(a);

        if (aa == b) {
            /* (a ^ b) ^ a -> b */
            n = ab;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
            return n;
        } else if (ab == b) {
            /* (a ^ b) ^ b -> a */
            n = aa;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
            return n;
        }
    }
    if (is_Eor(b)) {
        ir_node *ba = get_Eor_left(b);
        ir_node *bb = get_Eor_right(b);

        if (ba == a) {
            /* a ^ (a ^ b) -> b */
            n = bb;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
            return n;
        } else if (bb == a) {
            /* a ^ (b ^ a) -> b */
            n = ba;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
            return n;
        }
    }
    return n;
}  /* equivalent_node_Eor */
/**
 * Optimize a + 0 and (a - x) + x (for modes with wrap-around).
 *
 * The second one looks strange, but this construct
 * is used heavily in the LCC sources :-).
 *
 * Beware: The mode of an Add may be different from the mode of its
 * predecessors, so we cannot return a predecessor in all cases.
 */
static ir_node *equivalent_node_Add(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *left, *right;
    ir_mode *mode = get_irn_mode(n);

    n = equivalent_node_neutral_zero(n);
    if (n != oldn)
        return n;

    /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
    if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
        return n;

    left  = get_Add_left(n);
    right = get_Add_right(n);

    if (is_Sub(left)) {
        if (get_Sub_right(left) == right) {
            /* (a - x) + x */

            n = get_Sub_left(left);
            if (mode == get_irn_mode(n)) {
                DBG_OPT_ALGSIM1(oldn, left, right, n, FS_OPT_ADD_SUB);
                return n;
            }
        }
    }
    if (is_Sub(right)) {
        if (get_Sub_right(right) == left) {
            /* x + (a - x) */

            n = get_Sub_left(right);
            if (mode == get_irn_mode(n)) {
                DBG_OPT_ALGSIM1(oldn, left, right, n, FS_OPT_ADD_SUB);
                return n;
            }
        }
    }
    return n;
}  /* equivalent_node_Add */
/**
 * Optimize operations that are not commutative but have a neutral 0 on the
 * right, so a op 0 = a (the result equals the left operand, hence the name).
 */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
    ir_node *oldn = n;

    ir_node *a  = get_binop_left(n);
    ir_node *b  = get_binop_right(n);
    tarval  *tb = value_of(b);

    if (tarval_is_null(tb)) {
        n = a;

        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
    }
    return n;
}  /* equivalent_node_left_zero */

#define equivalent_node_Shl   equivalent_node_left_zero
#define equivalent_node_Shr   equivalent_node_left_zero
#define equivalent_node_Shrs  equivalent_node_left_zero
#define equivalent_node_Rotl  equivalent_node_left_zero
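/*
 * Worked example: for Shl(x, Const 0) the right operand's tarval is null,
 * so equivalent_node_left_zero() returns x and the shift vanishes; the same
 * rule serves Shr, Shrs and Rotl through the #defines above.
 */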
/**
 * Optimize a - 0 and (a + x) - x (for modes with wrap-around).
 *
 * The second one looks strange, but this construct
 * is used heavily in the LCC sources :-).
 *
 * Beware: The mode of a Sub may be different from the mode of its
 * predecessors, so we cannot return a predecessor in all cases.
 */
static ir_node *equivalent_node_Sub(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *b;
    ir_mode *mode = get_irn_mode(n);
    tarval  *tb;

    /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
    if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
        return n;

    b  = get_Sub_right(n);
    tb = value_of(b);

    /* Beware: modes might be different */
    if (tarval_is_null(tb)) {
        ir_node *a = get_Sub_left(n);
        if (mode == get_irn_mode(a)) {
            n = a;

            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
        }
    }
    return n;
}  /* equivalent_node_Sub */
/**
 * Optimize a "self-inverse unary op", i.e. op(op(n)) = n.
 *
 * @todo
 *   -(-a) == a, but might overflow two times.
 *   We handle it anyway here but the better way would be a
 *   flag.  This would be needed for Pascal for instance.
 */
static ir_node *equivalent_node_idempotent_unop(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *pred = get_unop_op(n);

    /* optimize symmetric unop */
    if (get_irn_op(pred) == get_irn_op(n)) {
        n = get_unop_op(pred);
        DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_IDEM_UNARY);
    }
    return n;
}  /* equivalent_node_idempotent_unop */

/** Optimize Not(Not(x)) == x. */
#define equivalent_node_Not    equivalent_node_idempotent_unop

/** -(-x) == x ???  Is this possible or can --x raise an
    out of bounds exception if min != max? */
#define equivalent_node_Minus  equivalent_node_idempotent_unop
/**
 * Optimize a * 1 = 1 * a = a.
 */
static ir_node *equivalent_node_Mul(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *a = get_Mul_left(n);

    /* we can handle here only the n * n = n bit cases */
    if (get_irn_mode(n) == get_irn_mode(a)) {
        ir_node *b = get_Mul_right(n);
        tarval  *tv;

        /*
         * Mul is commutative and has again another neutral element.
         * Constants are placed right, so check this case first.
         */
        tv = value_of(b);
        if (tarval_is_one(tv)) {
            n = a;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
        } else {
            tv = value_of(a);
            if (tarval_is_one(tv)) {
                n = b;
                DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
            }
        }
    }
    return n;
}  /* equivalent_node_Mul */
/**
 * Use algebraic simplification a | a = a | 0 = 0 | a = a.
 */
static ir_node *equivalent_node_Or(ir_node *n)
{
    ir_node *oldn = n;

    ir_node *a = get_Or_left(n);
    ir_node *b = get_Or_right(n);
    tarval *tv;

    if (a == b) {
        n = a;    /* Or has its own neutral element */
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_OR);
        return n;
    }
    /* constants are normalized to right, check this side first */
    tv = value_of(b);
    if (tarval_is_null(tv)) {
        n = a;
        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_OR);
        return n;
    }
    tv = value_of(a);
    if (tarval_is_null(tv)) {
        n = b;
        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_OR);
        return n;
    }

    return n;
}  /* equivalent_node_Or */
/**
 * Optimize a & 0b1...1 = 0b1...1 & a = a & a = (a|X) & a = a.
 */
static ir_node *equivalent_node_And(ir_node *n)
{
    ir_node *oldn = n;

    ir_node *a = get_And_left(n);
    ir_node *b = get_And_right(n);
    tarval *tv;

    if (a == b) {
        n = a;    /* And has its own neutral element */
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
        return n;
    }
    /* constants are normalized to right, check this side first */
    tv = value_of(b);
    if (tarval_is_all_one(tv)) {
        n = a;
        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
        return n;
    }
    if (tv != get_tarval_bad()) {
        ir_mode *mode = get_irn_mode(n);
        if (!mode_is_signed(mode) && is_Conv(a)) {
            ir_node *convop     = get_Conv_op(a);
            ir_mode *convopmode = get_irn_mode(convop);
            if (!mode_is_signed(convopmode)) {
                if (tarval_is_all_one(tarval_convert_to(tv, convopmode))) {
                    /* Conv(X) & all_one(mode(X)) = Conv(X) */
                    n = a;
                    DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
                    return n;
                }
            }
        }
    }
    tv = value_of(a);
    if (tarval_is_all_one(tv)) {
        n = b;
        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
        return n;
    }
    if (is_Or(a)) {
        if (b == get_Or_left(a) || b == get_Or_right(a)) {
            /* (a|X) & a = a */
            n = b;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
            return n;
        }
    }
    if (is_Or(b)) {
        if (a == get_Or_left(b) || a == get_Or_right(b)) {
            /* a & (a|X) = a */
            n = a;
            DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
            return n;
        }
    }
    return n;
}  /* equivalent_node_And */
/**
 * Try to remove useless Conv's.
 */
static ir_node *equivalent_node_Conv(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *a = get_Conv_op(n);

    ir_mode *n_mode = get_irn_mode(n);
    ir_mode *a_mode = get_irn_mode(a);

restart:
    if (n_mode == a_mode) { /* No Conv necessary */
        if (get_Conv_strict(n)) {
            ir_node *p = a;

            /* neither Minus nor Abs nor Confirm change the precision,
               so we can "look-through" */
            for (;;) {
                if (is_Minus(p)) {
                    p = get_Minus_op(p);
                } else if (is_Abs(p)) {
                    p = get_Abs_op(p);
                } else if (is_Confirm(p)) {
                    p = get_Confirm_value(p);
                } else {
                    break;
                }
            }
            if (is_Conv(p) && get_Conv_strict(p)) {
                /* we know already that a_mode == n_mode, and neither
                   Abs nor Minus change the mode, so the second Conv
                   can be kicked */
                assert(get_irn_mode(p) == n_mode);
                n = a;
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
                return n;
            }
            if (is_Proj(p)) {
                ir_node *pred = get_Proj_pred(p);
                if (is_Load(pred)) {
                    /* Loads always return with the exact precision of n_mode */
                    assert(get_Load_mode(pred) == n_mode);
                    n = a;
                    DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
                    return n;
                }
                if (is_Proj(pred) && get_Proj_proj(pred) == pn_Start_T_args) {
                    pred = get_Proj_pred(pred);
                    if (is_Start(pred)) {
                        /* Arguments always return with the exact precision,
                           as strictConv's are placed before Call -- if the
                           caller was compiled with the same setting.
                           Otherwise, the semantics is probably still right. */
                        assert(get_irn_mode(p) == n_mode);
                        n = a;
                        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
                        return n;
                    }
                }
            }
            if (is_Conv(a)) {
                /* special case: the immediate predecessor is also a Conv */
                if (! get_Conv_strict(a)) {
                    /* first one is not strict, kick it */
                    a = get_Conv_op(a);
                    a_mode = get_irn_mode(a);
                    set_Conv_op(n, a);
                    goto restart;
                }
                /* else both are strict conv, second is superfluous */
                n = a;
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
                return n;
            }
        } else {
            n = a;
            DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
            return n;
        }
    } else if (is_Conv(a)) { /* Conv(Conv(b)) */
        ir_node *b      = get_Conv_op(a);
        ir_mode *b_mode = get_irn_mode(b);

        if (get_Conv_strict(n) && get_Conv_strict(a)) {
            /* both are strict conv */
            if (smaller_mode(a_mode, n_mode)) {
                /* both are strict, but the first is smaller, so
                   the second cannot remove more precision; remove the
                   strict bit */
                set_Conv_strict(n, 0);
            }
        }
        if (n_mode == b_mode) {
            if (! get_Conv_strict(n) && ! get_Conv_strict(a)) {
                if (n_mode == mode_b) {
                    n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
                    DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
                    return n;
                } else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) {
                    if (values_in_mode(b_mode, a_mode)) {
                        n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
                        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
                        return n;
                    }
                }
            }
            if (mode_is_int(n_mode) && get_mode_arithmetic(a_mode) == irma_ieee754) {
                /* ConvI(ConvF(I)) -> I, iff float mantissa >= int mode */
                unsigned int_mantissa   = get_mode_size_bits(n_mode) - (mode_is_signed(n_mode) ? 1 : 0);
                unsigned float_mantissa = tarval_ieee754_get_mantissa_size(a_mode);

                if (float_mantissa >= int_mantissa) {
                    n = b;
                    DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
                    return n;
                }
            }
            if (is_Conv(b)) {
                if (smaller_mode(b_mode, a_mode)) {
                    if (get_Conv_strict(n))
                        set_Conv_strict(b, 1);
                    n = b; /* ConvA(ConvB(ConvA(...))) == ConvA(...) */
                    DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
                    return n;
                }
            }
        }
    }
    return n;
}  /* equivalent_node_Conv */
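/*
 * Worked example (sketch): Conv_Is(Conv_D(x)) with x in a signed 32-bit
 * integer mode folds to x in the ieee754 case above: the double mantissa
 * size reported by tarval_ieee754_get_mantissa_size() exceeds the 31 value
 * bits of the integer mode, so the float round trip is exact.
 */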
/**
 * A Cast may be removed if the type of the previous node
 * is already the type of the Cast.
 */
static ir_node *equivalent_node_Cast(ir_node *n)
{
    ir_node *oldn = n;
    ir_node *pred = get_Cast_op(n);

    if (get_irn_type(pred) == get_Cast_type(n)) {
        n = pred;
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CAST);
    }
    return n;
}  /* equivalent_node_Cast */
/**
 * Several optimizations:
 * - fold Phi-nodes, iff they have only one predecessor except
 *   themselves.
 */
static ir_node *equivalent_node_Phi(ir_node *n)
{
    int i, n_preds;

    ir_node *oldn = n;
    ir_node *block;
    ir_node *first_val = NULL; /* to shut up gcc */

    if (!get_opt_normalize()) return n;

    n_preds = get_Phi_n_preds(n);

    block = get_nodes_block(n);
    if (is_Block_dead(block))                  /* Control dead */
        return get_irg_bad(current_ir_graph);

    if (n_preds == 0) return n;           /* Phi of dead Region without predecessors. */

    /* Find first non-self-referencing input */
    for (i = 0; i < n_preds; ++i) {
        first_val = get_Phi_pred(n, i);
        if (   (first_val != n)                            /* not self pointer */
#if 0
            /* BEWARE: when the if is changed to 1, Phis will ignore their Bad
             * predecessors.  Then, Phi nodes in dead code might be removed, causing
             * nodes pointing to themselves (Adds for instance).
             * This is really bad and causes endless recursion on several
             * code paths, so we do NOT optimize such code.
             * This is not as bad as it sounds, optimize_cf() removes bad control flow
             * (and bad Phi predecessors), so live code is optimized later.
             */
            && (! is_Bad(get_Block_cfgpred(block, i)))
#endif
           ) {        /* value not dead */
            break;    /* then found first value. */
        }
    }

    if (i >= n_preds) {
        /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
        return get_irg_bad(current_ir_graph);
    }

    /* search for rest of inputs, determine if any of these
       are non-self-referencing */
    while (++i < n_preds) {
        ir_node *scnd_val = get_Phi_pred(n, i);
        if (   (scnd_val != n)
            && (scnd_val != first_val)
#if 0
            && (! is_Bad(get_Block_cfgpred(block, i)))
#endif
           ) {
            break;
        }
    }

    if (i >= n_preds) {
        /* Fold, if no multiple distinct non-self-referencing inputs */
        n = first_val;
        DBG_OPT_PHI(oldn, n);
    }
    return n;
}  /* equivalent_node_Phi */
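/*
 * Worked example: Phi(x, x) and a self-referencing Phi(x, <self>) both fold
 * to x -- only one distinct non-self input exists, so the Phi performs no
 * real merge.
 */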
/**
 * Several optimizations:
 * - fold Sync-nodes, iff they have only one predecessor except
 *   themselves.
 */
static ir_node *equivalent_node_Sync(ir_node *n)
{
    int arity = get_Sync_n_preds(n);
    int i;

    for (i = 0; i < arity;) {
        ir_node *pred = get_Sync_pred(n, i);
        int      j;

        /* Remove Bad predecessors */
        if (is_Bad(pred)) {
            del_Sync_n(n, i);
            --arity;
            continue;
        }

        /* Remove duplicate predecessors */
        for (j = 0; j < i; ++j) {
            if (get_Sync_pred(n, j) == pred) {
                del_Sync_n(n, i);
                --arity;
                break;
            }
        }
        if (j < i)
            continue;
        ++i;
    }

    if (arity == 0) return get_irg_bad(current_ir_graph);
    if (arity == 1) return get_Sync_pred(n, 0);
    return n;
}  /* equivalent_node_Sync */
/**
 * Optimize Proj(Tuple).
 */
static ir_node *equivalent_node_Proj_Tuple(ir_node *proj)
{
    ir_node *oldn  = proj;
    ir_node *tuple = get_Proj_pred(proj);

    /* Remove the Tuple/Proj combination. */
    proj = get_Tuple_pred(tuple, get_Proj_proj(proj));
    DBG_OPT_TUPLE(oldn, tuple, proj);

    return proj;
}  /* equivalent_node_Proj_Tuple */
/**
 * Optimize a / 1 = a.
 */
static ir_node *equivalent_node_Proj_Div(ir_node *proj)
{
    ir_node *oldn = proj;
    ir_node *div  = get_Proj_pred(proj);
    ir_node *b    = get_Div_right(div);
    tarval  *tb   = value_of(b);

    /* Div is not commutative. */
    if (tarval_is_one(tb)) { /* div(x, 1) == x */
        switch (get_Proj_proj(proj)) {
        case pn_Div_M:
            proj = get_Div_mem(div);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
            return proj;

        case pn_Div_res:
            proj = get_Div_left(div);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
            return proj;

        default:
            /* we cannot replace the exception Proj's here, this is done in
               transform_node_Proj_Div() */
            return proj;
        }
    }
    return proj;
}  /* equivalent_node_Proj_Div */
/**
 * Optimize a / 1.0 = a.
 */
static ir_node *equivalent_node_Proj_Quot(ir_node *proj)
{
    ir_node *oldn = proj;
    ir_node *quot = get_Proj_pred(proj);
    ir_node *b    = get_Quot_right(quot);
    tarval  *tb   = value_of(b);

    /* Quot is not commutative. */
    if (tarval_is_one(tb)) { /* Quot(x, 1) == x */
        switch (get_Proj_proj(proj)) {
        case pn_Quot_M:
            proj = get_Quot_mem(quot);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
            return proj;

        case pn_Quot_res:
            proj = get_Quot_left(quot);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
            return proj;

        default:
            /* we cannot replace the exception Proj's here, this is done in
               transform_node_Proj_Quot() */
            return proj;
        }
    }
    return proj;
}  /* equivalent_node_Proj_Quot */
/**
 * Optimize a / 1 = a.
 */
static ir_node *equivalent_node_Proj_DivMod(ir_node *proj)
{
    ir_node *oldn   = proj;
    ir_node *divmod = get_Proj_pred(proj);
    ir_node *b      = get_DivMod_right(divmod);
    tarval  *tb     = value_of(b);

    /* DivMod is not commutative. */
    if (tarval_is_one(tb)) { /* div(x, 1) == x */
        switch (get_Proj_proj(proj)) {
        case pn_DivMod_M:
            proj = get_DivMod_mem(divmod);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
            return proj;

        case pn_DivMod_res_div:
            proj = get_DivMod_left(divmod);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NEUTRAL_1);
            return proj;

        default:
            /* we cannot replace the exception Proj's here, this is done in
               transform_node_Proj_DivMod().
               Note further that the pn_DivMod_res_mod case is handled in
               computed_value_Proj(). */
            return proj;
        }
    }
    return proj;
}  /* equivalent_node_Proj_DivMod */
/**
 * Optimize CopyB(mem, x, x) into a Nop.
 */
static ir_node *equivalent_node_Proj_CopyB(ir_node *proj)
{
    ir_node *oldn  = proj;
    ir_node *copyb = get_Proj_pred(proj);
    ir_node *a     = get_CopyB_dst(copyb);
    ir_node *b     = get_CopyB_src(copyb);

    if (a == b) {
        /* Turn CopyB into a tuple (mem, jmp, bad, bad) */
        switch (get_Proj_proj(proj)) {
        case pn_CopyB_M_regular:
            proj = get_CopyB_mem(copyb);
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
            break;

        case pn_CopyB_X_except:
            DBG_OPT_EXC_REM(proj);
            proj = get_irg_bad(current_ir_graph);
            break;
        }
    }
    return proj;
}  /* equivalent_node_Proj_CopyB */
/**
 * Optimize Bounds(idx, idx, upper) into idx.
 */
static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
{
    ir_node *oldn  = proj;
    ir_node *bound = get_Proj_pred(proj);
    ir_node *idx   = get_Bound_index(bound);
    ir_node *pred  = skip_Proj(idx);
    int ret_tuple  = 0;

    if (idx == get_Bound_lower(bound))
        ret_tuple = 1;
    else if (is_Bound(pred)) {
        /*
         * idx was Bounds checked in the same MacroBlock previously,
         * it is still valid if lower <= pred_lower && pred_upper <= upper.
         */
        ir_node *lower = get_Bound_lower(bound);
        ir_node *upper = get_Bound_upper(bound);
        if (get_Bound_lower(pred) == lower &&
            get_Bound_upper(pred) == upper &&
            get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
            /*
             * One could expect that we simply return the previous
             * Bound here.  However, this would be wrong, as we could
             * add an exception Proj to a new location then.
             * So, we must turn it into a tuple.
             */
            ret_tuple = 1;
        }
    }
    if (ret_tuple) {
        /* Turn Bound into a tuple (mem, jmp, bad, idx) */
        switch (get_Proj_proj(proj)) {
        case pn_Bound_M:
            DBG_OPT_EXC_REM(proj);
            proj = get_Bound_mem(bound);
            break;
        case pn_Bound_X_except:
            DBG_OPT_EXC_REM(proj);
            proj = get_irg_bad(current_ir_graph);
            break;
        case pn_Bound_res:
            proj = idx;
            DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
            break;
        default:
            /* cannot optimize pn_Bound_X_regular, handled in transform ... */
            break;
        }
    }
    return proj;
}  /* equivalent_node_Proj_Bound */
/**
 * Optimize an Exception Proj(Load) with a non-null address.
 */
static ir_node *equivalent_node_Proj_Load(ir_node *proj)
{
    if (get_opt_ldst_only_null_ptr_exceptions()) {
        if (get_irn_mode(proj) == mode_X) {
            ir_node *load = get_Proj_pred(proj);

            /* get the Load address */
            const ir_node *addr = get_Load_ptr(load);
            const ir_node *confirm;

            if (value_not_null(addr, &confirm)) {
                if (get_Proj_proj(proj) == pn_Load_X_except) {
                    DBG_OPT_EXC_REM(proj);
                    return get_irg_bad(current_ir_graph);
                }
            }
        }
    }
    return proj;
}  /* equivalent_node_Proj_Load */
/**
 * Optimize an Exception Proj(Store) with a non-null address.
 */
static ir_node *equivalent_node_Proj_Store(ir_node *proj)
{
    if (get_opt_ldst_only_null_ptr_exceptions()) {
        if (get_irn_mode(proj) == mode_X) {
            ir_node *store = get_Proj_pred(proj);

            /* get the Store address */
            const ir_node *addr = get_Store_ptr(store);
            const ir_node *confirm;

            if (value_not_null(addr, &confirm)) {
                if (get_Proj_proj(proj) == pn_Store_X_except) {
                    DBG_OPT_EXC_REM(proj);
                    return get_irg_bad(current_ir_graph);
                }
            }
        }
    }
    return proj;
}  /* equivalent_node_Proj_Store */
/**
 * Does all optimizations on nodes that must be done on their Proj's
 * because of creating new nodes.
 */
static ir_node *equivalent_node_Proj(ir_node *proj)
{
    ir_node *n = get_Proj_pred(proj);

    if (get_irn_mode(proj) == mode_X) {
        if (is_Block_dead(get_nodes_block(n))) {
            /* Remove dead control flow -- early gigo(). */
            return get_irg_bad(current_ir_graph);
        }
    }
    if (n->op->ops.equivalent_node_Proj)
        return n->op->ops.equivalent_node_Proj(proj);
    return proj;
}  /* equivalent_node_Proj */
/**
 * Remove Id's.
 */
static ir_node *equivalent_node_Id(ir_node *n)
{
    ir_node *oldn = n;

    do {
        n = get_Id_pred(n);
    } while (is_Id(n));

    DBG_OPT_ID(oldn, n);
    return n;
}  /* equivalent_node_Id */
/**
 * Optimize a Mux.
 */
static ir_node *equivalent_node_Mux(ir_node *n)
{
    ir_node *oldn = n, *sel = get_Mux_sel(n);
    ir_node *n_t, *n_f;
    tarval *ts = value_of(sel);

    /* Mux(true, f, t) == t */
    if (ts == tarval_b_true) {
        n = get_Mux_true(n);
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
        return n;
    }
    /* Mux(false, f, t) == f */
    if (ts == tarval_b_false) {
        n = get_Mux_false(n);
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
        return n;
    }
    n_t = get_Mux_true(n);
    n_f = get_Mux_false(n);

    /* Mux(v, x, T) == x */
    if (is_Unknown(n_f)) {
        n = n_t;
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
        return n;
    }
    /* Mux(v, T, x) == x */
    if (is_Unknown(n_t)) {
        n = n_f;
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
        return n;
    }

    /* Mux(v, x, x) == x */
    if (n_t == n_f) {
        n = n_t;
        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
        return n;
    }
    if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
        ir_node *cmp = get_Proj_pred(sel);
        long proj_nr = get_Proj_proj(sel);
        ir_node *f   = get_Mux_false(n);
        ir_node *t   = get_Mux_true(n);

        /*
         * Note that these optimizations work even for floating point
         * with NaN's because -NaN == NaN.
         * However, if +0 and -0 are handled differently, we cannot use the first one.
         */
        if (is_Cmp(cmp)) {
            ir_node *const cmp_l = get_Cmp_left(cmp);
            ir_node *const cmp_r = get_Cmp_right(cmp);

            switch (proj_nr) {
            case pn_Cmp_Eq:
                if ((cmp_l == t && cmp_r == f) || /* Mux(t == f, t, f) -> f */
                    (cmp_l == f && cmp_r == t)) { /* Mux(f == t, t, f) -> f */
                    n = f;
                    DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
                    return n;
                }
                break;

            case pn_Cmp_Lg:
            case pn_Cmp_Ne:
                if ((cmp_l == t && cmp_r == f) || /* Mux(t != f, t, f) -> t */
                    (cmp_l == f && cmp_r == t)) { /* Mux(f != t, t, f) -> t */
                    n = t;
                    DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
                    return n;
                }
                break;
            }

            /*
             * Note: normalization puts the constant on the right side,
             * so we check only one case.
             */
            if (cmp_l == t && tarval_is_null(value_of(cmp_r))) {
                /* Mux(t CMP 0, X, t) */
                if (is_Minus(f) && get_Minus_op(f) == t) {
                    /* Mux(t CMP 0, -t, t) */
                    if (proj_nr == pn_Cmp_Eq) {
                        /* Mux(t == 0, -t, t)  ==>  -t */
                        n = f;
                        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
                    } else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
                        /* Mux(t != 0, -t, t)  ==>  t */
                        n = t;
                        DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
                    }
                }
            }
        }
    }
    return n;
}  /* equivalent_node_Mux */
/**
 * Remove Confirm nodes if setting is on.
 * Replace Confirms(x, '=', Constlike) by Constlike.
 */
static ir_node *equivalent_node_Confirm(ir_node *n)
{
    ir_node *pred = get_Confirm_value(n);
    pn_Cmp  pnc   = get_Confirm_cmp(n);

    while (is_Confirm(pred) && pnc == get_Confirm_cmp(pred)) {
        /*
         * rare case: two identical Confirms one after another,
         * replace the second one with the first.
         */
        n    = pred;
        pred = get_Confirm_value(n);
        pnc  = get_Confirm_cmp(n);
    }
    if (get_opt_remove_confirm())
        return get_Confirm_value(n);
    return n;
}  /* equivalent_node_Confirm */
/**
 * equivalent_node() returns a node equivalent to input n.  It skips all nodes
 * that perform no actual computation, as, e.g., the Id nodes.  It does not
 * create new nodes.  It is therefore safe to free n if the node returned is
 * not n.  If a node returns a Tuple we can not just skip it.  If the size of
 * the in array fits, we transform n into a tuple (e.g., Div).
 */
ir_node *equivalent_node(ir_node *n)
{
    if (n->op->ops.equivalent_node)
        return n->op->ops.equivalent_node(n);
    return n;
}  /* equivalent_node */
/**
 * Sets the default equivalent node operation for an ir_op_ops.
 *
 * @param code   the opcode for the default operation
 * @param ops    the operations initialized
 *
 * @return
 *    The operations.
 */
static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *ops)
{
#define CASE(a)                                    \
    case iro_##a:                                  \
        ops->equivalent_node = equivalent_node_##a;\
        break
#define CASE_PROJ(a)                                          \
    case iro_##a:                                             \
        ops->equivalent_node_Proj = equivalent_node_Proj_##a; \
        break

    switch (code) {
    CASE(Block);       CASE(Jmp);         CASE(Raise);    CASE(Eor);
    CASE(Add);         CASE(Shl);         CASE(Shr);      CASE(Shrs);
    CASE(Rotl);        CASE(Sub);         CASE(Not);      CASE(Minus);
    CASE(Mul);         CASE(Or);          CASE(And);      CASE(Conv);
    CASE(Cast);        CASE(Phi);         CASE(Sync);     CASE(Proj);
    CASE(Id);          CASE(Mux);         CASE(Confirm);
    CASE_PROJ(Tuple);  CASE_PROJ(Div);    CASE_PROJ(Quot);
    CASE_PROJ(DivMod); CASE_PROJ(CopyB);  CASE_PROJ(Bound);
    CASE_PROJ(Load);   CASE_PROJ(Store);
    default:
        break;
    }
    return ops;
#undef CASE_PROJ
#undef CASE
}  /* firm_set_default_equivalent_node */
/**
 * Returns non-zero if a node is a Phi node
 * with all predecessors constant.
 */
static int is_const_Phi(ir_node *n)
{
    int i;

    if (! is_Phi(n) || get_irn_arity(n) == 0)
        return 0;
    for (i = get_irn_arity(n) - 1; i >= 0; --i) {
        if (! is_Const(get_irn_n(n, i)))
            return 0;
    }
    return 1;
}  /* is_const_Phi */
typedef tarval *(*tarval_sub_type)(tarval *a, tarval *b, ir_mode *mode);
typedef tarval *(*tarval_binop_type)(tarval *a, tarval *b);

/**
 * in reality eval_func should be tarval (*eval_func)() but incomplete
 * declarations are bad style and generate noisy warnings
 */
typedef void (*eval_func)(void);
/**
 * Wrapper for the tarval binop evaluation, tarval_sub has one more parameter.
 */
static tarval *do_eval(eval_func eval, tarval *a, tarval *b, ir_mode *mode)
{
    if (eval == (eval_func) tarval_sub) {
        tarval_sub_type func = (tarval_sub_type)eval;

        return func(a, b, mode);
    } else {
        tarval_binop_type func = (tarval_binop_type)eval;

        return func(a, b);
    }
}  /* do_eval */
/**
 * Apply an evaluator on a binop with one constant operand (and one Phi).
 *
 * @param phi    the Phi node
 * @param other  the other operand
 * @param eval   an evaluator function
 * @param mode   the mode of the result, may be different from the mode of the Phi!
 * @param left   if non-zero, other is the left operand, else the right
 *
 * @return a new Phi node if the conversion was successful, NULL else
 */
static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, eval_func eval, ir_mode *mode, int left)
{
    tarval   *tv;
    void     **res;
    ir_node  *pred;
    ir_graph *irg;
    int      i, n = get_irn_arity(phi);

    NEW_ARR_A(void *, res, n);
    if (left) {
        for (i = 0; i < n; ++i) {
            pred = get_irn_n(phi, i);
            tv   = get_Const_tarval(pred);
            tv   = do_eval(eval, other, tv, mode);

            if (tv == tarval_bad) {
                /* folding failed, bad */
                return NULL;
            }
            res[i] = tv;
        }
    } else {
        for (i = 0; i < n; ++i) {
            pred = get_irn_n(phi, i);
            tv   = get_Const_tarval(pred);
            tv   = do_eval(eval, tv, other, mode);

            if (tv == tarval_bad) {
                /* folding failed, bad */
                return NULL;
            }
            res[i] = tv;
        }
    }
    irg = current_ir_graph;
    for (i = 0; i < n; ++i) {
        pred = get_irn_n(phi, i);
        res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
    }
    return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
}  /* apply_binop_on_phi */
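/*
 * Worked example (sketch): for Add(Phi(Const 3, Const 5), Const 2), calling
 * apply_binop_on_phi() with other = tarval 2, eval = tarval_add and left = 0
 * yields Phi(Const 5, Const 7), pushing the Add into every predecessor of
 * the Phi.
 */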
/**
 * Apply an evaluator on a binop with two constant Phis.
 *
 * @param a      the left Phi node
 * @param b      the right Phi node
 * @param eval   an evaluator function
 * @param mode   the mode of the result, may be different from the mode of the Phi!
 *
 * @return a new Phi node if the conversion was successful, NULL else
 */
static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode)
{
    tarval   *tv_l, *tv_r, *tv;
    void     **res;
    ir_node  *pred;
    ir_graph *irg;
    int      i, n;

    if (get_nodes_block(a) != get_nodes_block(b))
        return NULL;

    n = get_irn_arity(a);
    NEW_ARR_A(void *, res, n);

    for (i = 0; i < n; ++i) {
        pred = get_irn_n(a, i);
        tv_l = get_Const_tarval(pred);
        pred = get_irn_n(b, i);
        tv_r = get_Const_tarval(pred);
        tv   = do_eval(eval, tv_l, tv_r, mode);

        if (tv == tarval_bad) {
            /* folding failed, bad */
            return NULL;
        }
        res[i] = tv;
    }
    irg = current_ir_graph;
    for (i = 0; i < n; ++i) {
        pred = get_irn_n(a, i);
        res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
    }
    return new_r_Phi(get_nodes_block(a), n, (ir_node **)res, mode);
}  /* apply_binop_on_2_phis */
/**
 * Apply an evaluator on a unop with a constant operand (a Phi).
 *
 * @param phi    the Phi node
 * @param eval   an evaluator function
 *
 * @return a new Phi node if the conversion was successful, NULL else
 */
static ir_node *apply_unop_on_phi(ir_node *phi, tarval *(*eval)(tarval *))
{
    tarval   *tv;
    void     **res;
    ir_node  *pred;
    ir_mode  *mode;
    ir_graph *irg;
    int      i, n = get_irn_arity(phi);

    NEW_ARR_A(void *, res, n);
    for (i = 0; i < n; ++i) {
        pred = get_irn_n(phi, i);
        tv   = get_Const_tarval(pred);
        tv   = eval(tv);

        if (tv == tarval_bad) {
            /* folding failed, bad */
            return NULL;
        }
        res[i] = tv;
    }
    mode = get_irn_mode(phi);
    irg  = current_ir_graph;
    for (i = 0; i < n; ++i) {
        pred = get_irn_n(phi, i);
        res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
    }
    return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
}  /* apply_unop_on_phi */
/**
 * Apply a conversion on a constant operand (a Phi).
 *
 * @param phi    the Phi node
 * @param mode   the mode to convert to
 *
 * @return a new Phi node if the conversion was successful, NULL else
 */
static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode)
{
    tarval   *tv;
    void     **res;
    ir_node  *pred;
    ir_graph *irg;
    int      i, n = get_irn_arity(phi);

    NEW_ARR_A(void *, res, n);
    for (i = 0; i < n; ++i) {
        pred = get_irn_n(phi, i);
        tv   = get_Const_tarval(pred);
        tv   = tarval_convert_to(tv, mode);

        if (tv == tarval_bad) {
            /* folding failed, bad */
            return NULL;
        }
        res[i] = tv;
    }
    irg = current_ir_graph;
    for (i = 0; i < n; ++i) {
        pred = get_irn_n(phi, i);
        res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
    }
    return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
}  /* apply_conv_on_phi */
/**
 * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
 * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
 * If possible, remove the Conv's.
 */
static ir_node *transform_node_AddSub(ir_node *n)
{
    ir_mode *mode = get_irn_mode(n);

    if (mode_is_reference(mode)) {
        ir_node *left     = get_binop_left(n);
        ir_node *right    = get_binop_right(n);
        unsigned ref_bits = get_mode_size_bits(mode);

        if (is_Conv(left)) {
            ir_mode *lmode = get_irn_mode(left);
            unsigned bits = get_mode_size_bits(lmode);

            if (ref_bits == bits &&
                mode_is_int(lmode) &&
                get_mode_arithmetic(lmode) == irma_twos_complement) {
                ir_node *pre      = get_Conv_op(left);
                ir_mode *pre_mode = get_irn_mode(pre);

                if (mode_is_int(pre_mode) &&
                    get_mode_size_bits(pre_mode) == bits &&
                    get_mode_arithmetic(pre_mode) == irma_twos_complement) {
                    /* ok, this conv just changes the sign, moreover the
                     * calculation is done with the same number of bits as our
                     * address mode, so we can ignore the conv as address
                     * calculation can be viewed as either signed or unsigned
                     */
                    set_binop_left(n, pre);
                }
            }
        }

        if (is_Conv(right)) {
            ir_mode *rmode = get_irn_mode(right);
            unsigned bits = get_mode_size_bits(rmode);

            if (ref_bits == bits &&
                mode_is_int(rmode) &&
                get_mode_arithmetic(rmode) == irma_twos_complement) {
                ir_node *pre      = get_Conv_op(right);
                ir_mode *pre_mode = get_irn_mode(pre);

                if (mode_is_int(pre_mode) &&
                    get_mode_size_bits(pre_mode) == bits &&
                    get_mode_arithmetic(pre_mode) == irma_twos_complement) {
                    /* ok, this conv just changes the sign, moreover the
                     * calculation is done with the same number of bits as our
                     * address mode, so we can ignore the conv as address
                     * calculation can be viewed as either signed or unsigned
                     */
                    set_binop_right(n, pre);
                }
            }
        }

        /* let address arithmetic use unsigned modes */
        if (is_Const(right)) {
            ir_mode *rmode = get_irn_mode(right);

            if (mode_is_signed(rmode) && get_mode_arithmetic(rmode) == irma_twos_complement) {
                /* convert an AddP(P, *s) into AddP(P, *u) */
                ir_mode *nm = get_reference_mode_unsigned_eq(mode);

                ir_node *pre = new_r_Conv(get_nodes_block(n), right, nm);
                set_binop_right(n, pre);
            }
        }
    }

    return n;
}  /* transform_node_AddSub */
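/*
 * Worked example (sketch): on a 32-bit target, AddP(p, Conv_Iu(x)) with x in
 * a signed 32-bit mode becomes AddP(p, x); the sign-changing Conv is dropped
 * because two's complement address arithmetic is identical for signed and
 * unsigned operands of equal width.
 */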
#define HANDLE_BINOP_PHI(eval, a, b, c, mode)                       \
  do {                                                              \
  c = NULL;                                                         \
  if (is_Const(b) && is_const_Phi(a)) {                             \
    /* check for Op(Phi, Const) */                                  \
    c = apply_binop_on_phi(a, get_Const_tarval(b), eval, mode, 0);  \
  }                                                                 \
  else if (is_Const(a) && is_const_Phi(b)) {                        \
    /* check for Op(Const, Phi) */                                  \
    c = apply_binop_on_phi(b, get_Const_tarval(a), eval, mode, 1);  \
  }                                                                 \
  else if (is_const_Phi(a) && is_const_Phi(b)) {                    \
    /* check for Op(Phi, Phi) */                                    \
    c = apply_binop_on_2_phis(a, b, eval, mode);                    \
  }                                                                 \
  if (c) {                                                          \
    DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);                     \
    return c;                                                       \
  }                                                                 \
  } while(0)

#define HANDLE_UNOP_PHI(eval, a, c)                 \
  do {                                              \
  c = NULL;                                         \
  if (is_const_Phi(a)) {                            \
    /* check for Op(Phi) */                         \
    c = apply_unop_on_phi(a, eval);                 \
    if (c) {                                        \
      DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);   \
      return c;                                     \
    }                                               \
  }                                                 \
  } while(0)
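/*
 * Usage sketch: the transform handlers invoke these macros early, e.g.
 * transform_node_Add() below calls
 *     HANDLE_BINOP_PHI((eval_func) tarval_add, a, b, c, mode);
 * which folds Add(Phi(Const...), Const), Add(Const, Phi(Const...)) and
 * Add(Phi(Const...), Phi(Const...)) into a new constant Phi and returns it.
 */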
/**
 * Do the AddSub optimization, then Transform
 *   Constant folding on Phi
 *   Add(a,a)          -> Mul(a, 2)
 *   Add(Mul(a, x), a) -> Mul(a, x+1)
 * if the mode is integer or float.
 * Transform Add(a,-b) into Sub(a,b).
 * Reassociation might fold this further.
 */
static ir_node *transform_node_Add(ir_node *n)
{
    ir_mode *mode;
    ir_node *a, *b, *c, *oldn = n;
    vrp_attr *a_vrp, *b_vrp;

    n = transform_node_AddSub(n);

    a = get_Add_left(n);
    b = get_Add_right(n);

    mode = get_irn_mode(n);

    if (mode_is_reference(mode)) {
        ir_mode *lmode = get_irn_mode(a);

        if (is_Const(b) && is_Const_null(b) && mode_is_int(lmode)) {
            /* an Add(a, NULL) is a hidden Conv */
            dbg_info *dbg = get_irn_dbg_info(n);
            return new_rd_Conv(dbg, get_nodes_block(n), a, mode);
        }
    }

    HANDLE_BINOP_PHI((eval_func) tarval_add, a, b, c, mode);

    /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
    if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
        return n;

    if (mode_is_num(mode)) {
        /* the following code leads to endless recursion when Muls are replaced by a simple instruction chain */
        if (!is_irg_state(current_ir_graph, IR_GRAPH_STATE_ARCH_DEP)
                && a == b && mode_is_int(mode)) {
            ir_node *block = get_nodes_block(n);

            n = new_rd_Mul(
                    get_irn_dbg_info(n),
                    block,
                    a,
                    new_Const_long(mode, 2),
                    mode);
            DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
            return n;
        }
        if (is_Minus(a)) {
            n = new_rd_Sub(
                    get_irn_dbg_info(n),
                    get_nodes_block(n),
                    b,
                    get_Minus_op(a),
                    mode);
            DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
            return n;
        }
        if (is_Minus(b)) {
            n = new_rd_Sub(
                    get_irn_dbg_info(n),
                    get_nodes_block(n),
                    a,
                    get_Minus_op(b),
                    mode);
            DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
            return n;
        }
        if (get_mode_arithmetic(mode) == irma_twos_complement) {
            /* Here we rely on constants being on the RIGHT side */
            if (is_Not(a)) {
                ir_node *op = get_Not_op(a);

                if (is_Const(b) && is_Const_one(b)) {
                    /* ~x + 1 = -x */
                    ir_node *blk = get_nodes_block(n);
                    n = new_rd_Minus(get_irn_dbg_info(n), blk, op, mode);
                    DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
                    return n;
                }
                if (op == b) {
                    /* ~x + x = -1 */
                    n = new_Const(get_mode_minus_one(mode));
                    DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
                    return n;
                }
            }
            if (is_Not(b)) {
                ir_node *op = get_Not_op(b);

                if (op == a) {
                    /* x + ~x = -1 */
                    n = new_Const(get_mode_minus_one(mode));
                    DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
                    return n;
                }
            }
        }
    }

    a_vrp = vrp_get_info(a);
    b_vrp = vrp_get_info(b);

    if (a_vrp && b_vrp) {
        tarval *c = tarval_and(
                    a_vrp->bits_not_set,
                    b_vrp->bits_not_set);

        if (tarval_is_null(c)) {
            dbg_info *dbgi = get_irn_dbg_info(n);
            return new_rd_Or(dbgi, get_nodes_block(n),
                    a, b, mode);
        }
    }
    return n;
}  /* transform_node_Add */
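/*
 * Worked example (sketch): if VRP shows that a can only set the low nibble
 * and b only the high nibble, no carry can occur, so the code above rewrites
 * Add(a, b) into Or(a, b).
 */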
/**
 * Returns -cnst or NULL if impossible.
 */
static ir_node *const_negate(ir_node *cnst)
{
    tarval   *tv   = tarval_neg(get_Const_tarval(cnst));
    dbg_info *dbgi = get_irn_dbg_info(cnst);
    ir_graph *irg  = get_irn_irg(cnst);
    if (tv == tarval_bad) return NULL;
    return new_rd_Const(dbgi, irg, tv);
}  /* const_negate */
2406 * Do the AddSub optimization, then Transform
2407 * Constant folding on Phi
2408 * Sub(0,a) -> Minus(a)
2409 * Sub(Mul(a, x), a) -> Mul(a, x-1)
2410 * Sub(Sub(x, y), b) -> Sub(x, Add(y,b))
2411 * Sub(Add(a, x), x) -> a
2412 * Sub(x, Add(x, a)) -> -a
2413 * Sub(x, Const) -> Add(x, -Const)
2415 static ir_node *transform_node_Sub(ir_node *n)
2421 n = transform_node_AddSub(n);
2423 a = get_Sub_left(n);
2424 b = get_Sub_right(n);
2426 mode = get_irn_mode(n);
2428 if (mode_is_int(mode)) {
2429 ir_mode *lmode = get_irn_mode(a);
2431 if (is_Const(b) && is_Const_null(b) && mode_is_reference(lmode)) {
2432 /* a Sub(a, NULL) is a hidden Conv */
2433 dbg_info *dbg = get_irn_dbg_info(n);
2434 n = new_rd_Conv(dbg, get_nodes_block(n), a, mode);
2435 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_CONV);
2439 if (mode == lmode &&
2440 get_mode_arithmetic(mode) == irma_twos_complement &&
2442 get_Const_tarval(a) == get_mode_minus_one(mode)) {
2444 dbg_info *dbg = get_irn_dbg_info(n);
2445 n = new_rd_Not(dbg, get_nodes_block(n), b, mode);
2446 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_NOT);
2452 HANDLE_BINOP_PHI((eval_func) tarval_sub, a, b, c, mode);
2454 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
2455 if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
2458 if (is_Const(b) && !mode_is_reference(get_irn_mode(b))) {
2459 /* a - C -> a + (-C) */
2460 ir_node *cnst = const_negate(b);
2462 ir_node *block = get_nodes_block(n);
2463 dbg_info *dbgi = get_irn_dbg_info(n);
2465 n = new_rd_Add(dbgi, block, a, cnst, mode);
2466 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
2471 if (is_Minus(a)) { /* (-a) - b -> -(a + b) */
2472 dbg_info *dbg = get_irn_dbg_info(n);
2473 ir_node *block = get_nodes_block(n);
2474 ir_node *left = get_Minus_op(a);
2475 ir_node *add = new_rd_Add(dbg, block, left, b, mode);
2477 n = new_rd_Minus(dbg, block, add, mode);
2478 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
2480 } else if (is_Minus(b)) { /* a - (-b) -> a + b */
2481 dbg_info *dbg = get_irn_dbg_info(n);
2482 ir_node *block = get_nodes_block(n);
2483 ir_node *right = get_Minus_op(b);
2485 n = new_rd_Add(dbg, block, a, right, mode);
2486 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MINUS);
2488 } else if (is_Sub(b)) {
2489 /* a - (b - c) -> a + (c - b)
2490 * -> (a - b) + c iff (b - c) is a pointer */
2491 dbg_info *s_dbg = get_irn_dbg_info(b);
2492 ir_node *s_block = get_nodes_block(b);
2493 ir_node *s_left = get_Sub_left(b);
2494 ir_node *s_right = get_Sub_right(b);
2495 ir_mode *s_mode = get_irn_mode(b);
2496 if (mode_is_reference(s_mode)) {
2497 ir_node *sub = new_rd_Sub(s_dbg, s_block, a, s_left, mode);
2498 dbg_info *a_dbg = get_irn_dbg_info(n);
2499 ir_node *a_block = get_nodes_block(n);
2502 s_right = new_r_Conv(a_block, s_right, mode);
2503 n = new_rd_Add(a_dbg, a_block, sub, s_right, mode);
2505 ir_node *sub = new_rd_Sub(s_dbg, s_block, s_right, s_left, s_mode);
2506 dbg_info *a_dbg = get_irn_dbg_info(n);
2507 ir_node *a_block = get_nodes_block(n);
2509 n = new_rd_Add(a_dbg, a_block, a, sub, mode);
2511 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
2513 } else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */
2514 ir_node *m_right = get_Mul_right(b);
2515 if (is_Const(m_right)) {
2516 ir_node *cnst2 = const_negate(m_right);
2517 if (cnst2 != NULL) {
2518 dbg_info *m_dbg = get_irn_dbg_info(b);
2519 ir_node *m_block = get_nodes_block(b);
2520 ir_node *m_left = get_Mul_left(b);
2521 ir_mode *m_mode = get_irn_mode(b);
2522 ir_node *mul = new_rd_Mul(m_dbg, m_block, m_left, cnst2, m_mode);
2523 dbg_info *a_dbg = get_irn_dbg_info(n);
2524 ir_node *a_block = get_nodes_block(n);
2526 n = new_rd_Add(a_dbg, a_block, a, mul, mode);
2527 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
2533 /* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
2534 if (mode_is_num(mode) && mode == get_irn_mode(a) && is_Const(a) && is_Const_null(a)) {
2536 get_irn_dbg_info(n),
2540 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
2544 if (mode_wrap_around(mode)) {
2545 ir_node *left = get_Add_left(a);
2546 ir_node *right = get_Add_right(a);
/* FIXME: Do the Convs work only for two's complement or generally? */
2550 if (mode != get_irn_mode(right)) {
2551 /* This Sub is an effective Cast */
2552 right = new_r_Conv(get_nodes_block(n), right, mode);
2555 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
2557 } else if (right == b) {
2558 if (mode != get_irn_mode(left)) {
2559 /* This Sub is an effective Cast */
2560 left = new_r_Conv(get_nodes_block(n), left, mode);
2563 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
2569 if (mode_wrap_around(mode)) {
2570 ir_node *left = get_Add_left(b);
2571 ir_node *right = get_Add_right(b);
/* FIXME: Do the Convs work only for two's complement or generally? */
2575 ir_mode *r_mode = get_irn_mode(right);
2577 n = new_r_Minus(get_nodes_block(n), right, r_mode);
2578 if (mode != r_mode) {
2579 /* This Sub is an effective Cast */
2580 n = new_r_Conv(get_nodes_block(n), n, mode);
2582 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
2584 } else if (right == a) {
2585 ir_mode *l_mode = get_irn_mode(left);
2587 n = new_r_Minus(get_nodes_block(n), left, l_mode);
2588 if (mode != l_mode) {
2589 /* This Sub is an effective Cast */
2590 n = new_r_Conv(get_nodes_block(n), n, mode);
2592 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
2597 if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
2598 ir_mode *mode = get_irn_mode(a);
2600 if (mode == get_irn_mode(b)) {
2602 ir_node *op_a = get_Conv_op(a);
2603 ir_node *op_b = get_Conv_op(b);
2605 /* check if it's allowed to skip the conv */
2606 ma = get_irn_mode(op_a);
2607 mb = get_irn_mode(op_b);
2609 if (mode_is_reference(ma) && mode_is_reference(mb)) {
2610 /* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
2613 set_Sub_right(n, b);
2619 /* do NOT execute this code if reassociation is enabled, it does the inverse! */
2620 if (!is_reassoc_running() && is_Mul(a)) {
2621 ir_node *ma = get_Mul_left(a);
2622 ir_node *mb = get_Mul_right(a);
2625 ir_node *blk = get_nodes_block(n);
2627 get_irn_dbg_info(n),
2631 get_irn_dbg_info(n),
2634 new_Const(get_mode_one(mode)),
2637 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
2639 } else if (mb == b) {
2640 ir_node *blk = get_nodes_block(n);
2642 get_irn_dbg_info(n),
2646 get_irn_dbg_info(n),
2649 new_Const(get_mode_one(mode)),
2652 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
2656 if (is_Sub(a)) { /* (x - y) - b -> x - (y + b) */
2657 ir_node *x = get_Sub_left(a);
2658 ir_node *y = get_Sub_right(a);
2659 ir_node *blk = get_nodes_block(n);
2660 ir_mode *m_b = get_irn_mode(b);
2661 ir_mode *m_y = get_irn_mode(y);
2665 /* Determine the right mode for the Add. */
2668 else if (mode_is_reference(m_b))
2670 else if (mode_is_reference(m_y))
* The two modes differ but neither is a reference mode; this
* happens for instance in SubP(SubP(P, Iu), Is).
2676 * We have two possibilities here: Cast or ignore.
2677 * Currently we ignore this case.
2682 add = new_r_Add(blk, y, b, add_mode);
2684 n = new_rd_Sub(get_irn_dbg_info(n), blk, x, add, mode);
2685 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_SUB_X_Y_Z);
2689 if (get_mode_arithmetic(mode) == irma_twos_complement) {
2690 if (is_Const(a) && is_Not(b)) {
2691 /* c - ~X = X + (c+1) */
2692 tarval *tv = get_Const_tarval(a);
2694 tv = tarval_add(tv, get_mode_one(mode));
2695 if (tv != tarval_bad) {
2696 ir_node *blk = get_nodes_block(n);
2697 ir_node *c = new_Const(tv);
2698 n = new_rd_Add(get_irn_dbg_info(n), blk, get_Not_op(b), c, mode);
2699 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_C_NOT_X);
2705 } /* transform_node_Sub */
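/*
 * Worked examples for the two's complement rules above (illustrative
 * only, not part of the transformation code): since ~x == -x - 1,
 *
 *   -1 - b  =  -b - 1        =  ~b
 *    c - ~x =  c - (-x - 1)  =  x + (c + 1),   e.g.  5 - ~x  =  x + 6
 *
 * so a Sub against these constants folds into a Not resp. an Add.
 */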
* Several transformations done on an n*n=2n-bit multiplication.
2709 * These transformations must be done here because new nodes may be produced.
2711 static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode)
2714 ir_node *a = get_Mul_left(n);
2715 ir_node *b = get_Mul_right(n);
2716 tarval *ta = value_of(a);
2717 tarval *tb = value_of(b);
2718 ir_mode *smode = get_irn_mode(a);
2720 if (ta == get_mode_one(smode)) {
2721 /* (L)1 * (L)b = (L)b */
2722 ir_node *blk = get_nodes_block(n);
2723 n = new_rd_Conv(get_irn_dbg_info(n), blk, b, mode);
2724 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
2727 else if (ta == get_mode_minus_one(smode)) {
/* (L)-1 * (L)b = (L)-b */
2729 ir_node *blk = get_nodes_block(n);
2730 n = new_rd_Minus(get_irn_dbg_info(n), blk, b, smode);
2731 n = new_rd_Conv(get_irn_dbg_info(n), blk, n, mode);
2732 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
2735 if (tb == get_mode_one(smode)) {
2736 /* (L)a * (L)1 = (L)a */
2737 ir_node *blk = get_irn_n(a, -1);
2738 n = new_rd_Conv(get_irn_dbg_info(n), blk, a, mode);
2739 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
2742 else if (tb == get_mode_minus_one(smode)) {
2743 /* (L)a * (L)-1 = (L)-a */
2744 ir_node *blk = get_nodes_block(n);
2745 n = new_rd_Minus(get_irn_dbg_info(n), blk, a, smode);
2746 n = new_rd_Conv(get_irn_dbg_info(n), blk, n, mode);
2747 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
2754 * Transform Mul(a,-1) into -a.
2755 * Do constant evaluation of Phi nodes.
2756 * Do architecture dependent optimizations on Mul nodes
2758 static ir_node *transform_node_Mul(ir_node *n)
2760 ir_node *c, *oldn = n;
2761 ir_mode *mode = get_irn_mode(n);
2762 ir_node *a = get_Mul_left(n);
2763 ir_node *b = get_Mul_right(n);
2765 if (is_Bad(a) || is_Bad(b))
2768 if (mode != get_irn_mode(a))
2769 return transform_node_Mul2n(n, mode);
2771 HANDLE_BINOP_PHI((eval_func) tarval_mul, a, b, c, mode);
2773 if (mode_is_signed(mode)) {
2776 if (value_of(a) == get_mode_minus_one(mode))
2778 else if (value_of(b) == get_mode_minus_one(mode))
2781 n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), r, mode);
2782 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
2787 if (is_Const(b)) { /* (-a) * const -> a * -const */
2788 ir_node *cnst = const_negate(b);
2790 dbg_info *dbgi = get_irn_dbg_info(n);
2791 ir_node *block = get_nodes_block(n);
2792 n = new_rd_Mul(dbgi, block, get_Minus_op(a), cnst, mode);
2793 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
2796 } else if (is_Minus(b)) { /* (-a) * (-b) -> a * b */
2797 dbg_info *dbgi = get_irn_dbg_info(n);
2798 ir_node *block = get_nodes_block(n);
2799 n = new_rd_Mul(dbgi, block, get_Minus_op(a), get_Minus_op(b), mode);
2800 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_MINUS);
2802 } else if (is_Sub(b)) { /* (-a) * (b - c) -> a * (c - b) */
2803 ir_node *sub_l = get_Sub_left(b);
2804 ir_node *sub_r = get_Sub_right(b);
2805 dbg_info *dbgi = get_irn_dbg_info(n);
2806 ir_node *block = get_nodes_block(n);
2807 ir_node *new_b = new_rd_Sub(dbgi, block, sub_r, sub_l, mode);
2808 n = new_rd_Mul(dbgi, block, get_Minus_op(a), new_b, mode);
2809 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
2812 } else if (is_Minus(b)) {
2813 if (is_Sub(a)) { /* (a - b) * (-c) -> (b - a) * c */
2814 ir_node *sub_l = get_Sub_left(a);
2815 ir_node *sub_r = get_Sub_right(a);
2816 dbg_info *dbgi = get_irn_dbg_info(n);
2817 ir_node *block = get_nodes_block(n);
2818 ir_node *new_a = new_rd_Sub(dbgi, block, sub_r, sub_l, mode);
2819 n = new_rd_Mul(dbgi, block, new_a, get_Minus_op(b), mode);
2820 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
2823 } else if (is_Shl(a)) {
2824 ir_node *const shl_l = get_Shl_left(a);
2825 if (is_Const(shl_l) && is_Const_one(shl_l)) {
2826 /* (1 << x) * b -> b << x */
2827 dbg_info *const dbgi = get_irn_dbg_info(n);
2828 ir_node *const block = get_nodes_block(n);
2829 ir_node *const shl_r = get_Shl_right(a);
2830 n = new_rd_Shl(dbgi, block, b, shl_r, mode);
2831 // TODO add me DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_SHIFT);
2834 } else if (is_Shl(b)) {
2835 ir_node *const shl_l = get_Shl_left(b);
2836 if (is_Const(shl_l) && is_Const_one(shl_l)) {
2837 /* a * (1 << x) -> a << x */
2838 dbg_info *const dbgi = get_irn_dbg_info(n);
2839 ir_node *const block = get_nodes_block(n);
2840 ir_node *const shl_r = get_Shl_right(b);
2841 n = new_rd_Shl(dbgi, block, a, shl_r, mode);
2842 // TODO add me DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_SHIFT);
2846 if (get_mode_arithmetic(mode) == irma_ieee754) {
2848 tarval *tv = get_Const_tarval(a);
2849 if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
2850 && !tarval_is_negative(tv)) {
2851 /* 2.0 * b = b + b */
2852 n = new_rd_Add(get_irn_dbg_info(n), get_nodes_block(n), b, b, mode);
2853 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
2857 else if (is_Const(b)) {
2858 tarval *tv = get_Const_tarval(b);
2859 if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
2860 && !tarval_is_negative(tv)) {
2861 /* a * 2.0 = a + a */
2862 n = new_rd_Add(get_irn_dbg_info(n), get_nodes_block(n), a, a, mode);
2863 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
2868 return arch_dep_replace_mul_with_shifts(n);
2869 } /* transform_node_Mul */
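/*
 * Worked examples for the Mul rules above (illustrative only):
 *   (1 << 3) * b  =  8 * b  =  b << 3    (strength reduction to a shift)
 *   2.0 * b       =  b + b               (exact under IEEE 754: the
 *                                         constant has exponent 1 and a
 *                                         zero mantissa, so the Add
 *                                         rounds identically)
 */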
2872 * Transform a Div Node.
2874 static ir_node *transform_node_Div(ir_node *n)
2876 ir_mode *mode = get_Div_resmode(n);
2877 ir_node *a = get_Div_left(n);
2878 ir_node *b = get_Div_right(n);
2880 const ir_node *dummy;
2882 if (is_Const(b) && is_const_Phi(a)) {
2883 /* check for Div(Phi, Const) */
2884 value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
2886 DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
2890 else if (is_Const(a) && is_const_Phi(b)) {
2891 /* check for Div(Const, Phi) */
2892 value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
2894 DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
2898 else if (is_const_Phi(a) && is_const_Phi(b)) {
2899 /* check for Div(Phi, Phi) */
2900 value = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
2902 DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
2909 if (a == b && value_not_zero(a, &dummy)) {
/* BEWARE: we can optimize a/a to 1 only if this cannot cause an exception */
2911 value = new_Const(get_mode_one(mode));
2912 DBG_OPT_CSTEVAL(n, value);
2915 if (mode_is_signed(mode) && is_Const(b)) {
2916 tarval *tv = get_Const_tarval(b);
2918 if (tv == get_mode_minus_one(mode)) {
2920 value = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
2921 DBG_OPT_CSTEVAL(n, value);
2925 /* Try architecture dependent optimization */
2926 value = arch_dep_replace_div_by_const(n);
2933 /* Turn Div into a tuple (mem, jmp, bad, value) */
2934 mem = get_Div_mem(n);
2935 blk = get_nodes_block(n);
2937 /* skip a potential Pin */
2938 mem = skip_Pin(mem);
2939 turn_into_tuple(n, pn_Div_max);
2940 set_Tuple_pred(n, pn_Div_M, mem);
2941 set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
2942 set_Tuple_pred(n, pn_Div_X_except, new_Bad());
2943 set_Tuple_pred(n, pn_Div_res, value);
2946 } /* transform_node_Div */
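/*
 * Illustrative cases handled above (not part of the code):
 *   a / a   ->  1    only if a is proven non-zero (e.g. by a Confirm),
 *                    otherwise the possible exception must be preserved
 *   a / -1  ->  -a   for signed modes
 * When a value could be computed, the Div is turned into a Tuple so all
 * existing Projs (memory, control flow, result) stay consistent.
 */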
2949 * Transform a Mod node.
2951 static ir_node *transform_node_Mod(ir_node *n)
2953 ir_mode *mode = get_Mod_resmode(n);
2954 ir_node *a = get_Mod_left(n);
2955 ir_node *b = get_Mod_right(n);
2959 if (is_Const(b) && is_const_Phi(a)) {
/* check for Mod(Phi, Const) */
2961 value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
2963 DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
2967 else if (is_Const(a) && is_const_Phi(b)) {
/* check for Mod(Const, Phi) */
2969 value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
2971 DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
2975 else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for Mod(Phi, Phi) */
2977 value = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
2979 DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
2986 if (tv != tarval_bad) {
2987 value = new_Const(tv);
2989 DBG_OPT_CSTEVAL(n, value);
2992 ir_node *a = get_Mod_left(n);
2993 ir_node *b = get_Mod_right(n);
2994 const ir_node *dummy;
2996 if (a == b && value_not_zero(a, &dummy)) {
/* BEWARE: we can optimize a%a to 0 only if this cannot cause an exception */
2998 value = new_Const(get_mode_null(mode));
2999 DBG_OPT_CSTEVAL(n, value);
3002 if (mode_is_signed(mode) && is_Const(b)) {
3003 tarval *tv = get_Const_tarval(b);
3005 if (tv == get_mode_minus_one(mode)) {
3007 value = new_Const(get_mode_null(mode));
3008 DBG_OPT_CSTEVAL(n, value);
3012 /* Try architecture dependent optimization */
3013 value = arch_dep_replace_mod_by_const(n);
3021 /* Turn Mod into a tuple (mem, jmp, bad, value) */
3022 mem = get_Mod_mem(n);
3023 blk = get_nodes_block(n);
3025 /* skip a potential Pin */
3026 mem = skip_Pin(mem);
3027 turn_into_tuple(n, pn_Mod_max);
3028 set_Tuple_pred(n, pn_Mod_M, mem);
3029 set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
3030 set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
3031 set_Tuple_pred(n, pn_Mod_res, value);
3034 } /* transform_node_Mod */
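/*
 * Illustrative cases handled above:
 *   a % a   ->  0    only if a is proven non-zero
 *   a % -1  ->  0    for signed modes (every value divides evenly by -1)
 */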
3037 * Transform a DivMod node.
3039 static ir_node *transform_node_DivMod(ir_node *n)
3041 const ir_node *dummy;
3042 ir_node *a = get_DivMod_left(n);
3043 ir_node *b = get_DivMod_right(n);
3044 ir_mode *mode = get_DivMod_resmode(n);
3049 if (is_Const(b) && is_const_Phi(a)) {
/* check for DivMod(Phi, Const) */
3051 va = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
3052 vb = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
3054 DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
3055 DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
3059 else if (is_Const(a) && is_const_Phi(b)) {
/* check for DivMod(Const, Phi) */
3061 va = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
3062 vb = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
3064 DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
3065 DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
3069 else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for DivMod(Phi, Phi) */
3071 va = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
3072 vb = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
3074 DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
3075 DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
3082 if (tb != tarval_bad) {
3083 if (tb == get_mode_one(get_tarval_mode(tb))) {
3085 vb = new_Const(get_mode_null(mode));
3086 DBG_OPT_CSTEVAL(n, vb);
3088 } else if (ta != tarval_bad) {
3089 tarval *resa, *resb;
3090 resa = tarval_div(ta, tb);
resa = tarval_div(ta, tb);
if (resa == tarval_bad) return n; /* Causes an exception! Could be modelled by
                                     replacing the X result with a Jmp. */
3093 resb = tarval_mod(ta, tb);
if (resb == tarval_bad) return n; /* Causes an exception! */
3095 va = new_Const(resa);
3096 vb = new_Const(resb);
3097 DBG_OPT_CSTEVAL(n, va);
3098 DBG_OPT_CSTEVAL(n, vb);
3100 } else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
3101 va = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
3102 vb = new_Const(get_mode_null(mode));
3103 DBG_OPT_CSTEVAL(n, va);
3104 DBG_OPT_CSTEVAL(n, vb);
3106 } else { /* Try architecture dependent optimization */
3109 arch_dep_replace_divmod_by_const(&va, &vb, n);
3110 evaluated = va != NULL;
3112 } else if (a == b) {
3113 if (value_not_zero(a, &dummy)) {
3115 va = new_Const(get_mode_one(mode));
3116 vb = new_Const(get_mode_null(mode));
3117 DBG_OPT_CSTEVAL(n, va);
3118 DBG_OPT_CSTEVAL(n, vb);
/* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
3124 } else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
3125 /* 0 / non-Const = 0 */
3130 if (evaluated) { /* replace by tuple */
3134 mem = get_DivMod_mem(n);
3135 /* skip a potential Pin */
3136 mem = skip_Pin(mem);
3138 blk = get_nodes_block(n);
3139 turn_into_tuple(n, pn_DivMod_max);
3140 set_Tuple_pred(n, pn_DivMod_M, mem);
3141 set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(blk));
3142 set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
3143 set_Tuple_pred(n, pn_DivMod_res_div, va);
3144 set_Tuple_pred(n, pn_DivMod_res_mod, vb);
3148 } /* transform_node_DivMod */
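/*
 * Illustrative constant folding above: for ta = 7 and tb = 2 the node is
 * replaced by the result pair (va, vb) = (3, 1); for tb = 1 the Mod
 * result is known to be 0 without looking at ta at all.
 */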
3151 * Optimize x / c to x * (1/c)
3153 static ir_node *transform_node_Quot(ir_node *n)
3155 ir_mode *mode = get_Quot_resmode(n);
3158 if (get_mode_arithmetic(mode) == irma_ieee754) {
3159 ir_node *b = get_Quot_right(n);
3160 tarval *tv = value_of(b);
3162 if (tv != tarval_bad) {
3163 int rem = tarval_fp_ops_enabled();
* Floating point constant folding might be disabled here to
* prevent rounding. However, as we check for an exact result,
* doing it is safe.
3171 tarval_enable_fp_ops(1);
3172 tv = tarval_quo(get_mode_one(mode), tv);
3173 tarval_enable_fp_ops(rem);
3175 /* Do the transformation if the result is either exact or we are not
3176 using strict rules. */
3177 if (tv != tarval_bad &&
3178 (tarval_ieee754_get_exact() || (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic) == 0)) {
3179 ir_node *blk = get_nodes_block(n);
3180 ir_node *c = new_Const(tv);
3181 ir_node *a = get_Quot_left(n);
3182 ir_node *m = new_rd_Mul(get_irn_dbg_info(n), blk, a, c, mode);
3183 ir_node *mem = get_Quot_mem(n);
3185 /* skip a potential Pin */
3186 mem = skip_Pin(mem);
3187 turn_into_tuple(n, pn_Quot_max);
3188 set_Tuple_pred(n, pn_Quot_M, mem);
3189 set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(blk));
3190 set_Tuple_pred(n, pn_Quot_X_except, new_Bad());
3191 set_Tuple_pred(n, pn_Quot_res, m);
3192 DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
3197 } /* transform_node_Quot */
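/*
 * Worked example (illustrative only): x / 4.0 becomes x * 0.25, because
 * 1/4 is exactly representable in binary floating point. x / 3.0 is
 * only rewritten when fp_strict_algebraic is off, since 1/3 cannot be
 * represented exactly and the Mul could round differently.
 */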
3200 * Optimize Abs(x) into x if x is Confirmed >= 0
3201 * Optimize Abs(x) into -x if x is Confirmed <= 0
* Optimize Abs(-x) into Abs(x)
3204 static ir_node *transform_node_Abs(ir_node *n)
3206 ir_node *c, *oldn = n;
3207 ir_node *a = get_Abs_op(n);
3210 HANDLE_UNOP_PHI(tarval_abs, a, c);
3212 switch (classify_value_sign(a)) {
3213 case value_classified_negative:
3214 mode = get_irn_mode(n);
3217 * We can replace the Abs by -x here.
* We could even add a new Confirm here
* (if not two's complement).
3221 * Note that -x would create a new node, so we could
3222 * not run it in the equivalent_node() context.
3224 n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
3226 DBG_OPT_CONFIRM(oldn, n);
3228 case value_classified_positive:
3229 /* n is positive, Abs is not needed */
3232 DBG_OPT_CONFIRM(oldn, n);
3238 /* Abs(-x) = Abs(x) */
3239 mode = get_irn_mode(n);
3240 n = new_rd_Abs(get_irn_dbg_info(n), get_nodes_block(n), get_Minus_op(a), mode);
3241 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ABS_MINUS_X);
3245 } /* transform_node_Abs */
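/*
 * Illustrative cases: with a Confirm proving x <= 0, Abs(x) becomes
 * Minus(x); with x >= 0 it becomes plain x; and Abs(Minus(x)) is folded
 * to Abs(x), since both have the same absolute value.
 */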
3248 * Optimize -a CMP -b into b CMP a.
* This works only for modes where unary Minus cannot overflow.
* Note that two's complement integers can overflow,
* so there it will NOT work.
* The == and != cases can be handled in Proj(Cmp).
3256 static ir_node *transform_node_Cmp(ir_node *n)
3259 ir_node *left = get_Cmp_left(n);
3260 ir_node *right = get_Cmp_right(n);
3262 if (is_Minus(left) && is_Minus(right) &&
3263 !mode_overflow_on_unary_Minus(get_irn_mode(left))) {
3264 ir_node *const new_left = get_Minus_op(right);
3265 ir_node *const new_right = get_Minus_op(left);
3266 n = new_rd_Cmp(get_irn_dbg_info(n), get_nodes_block(n), new_left, new_right);
3267 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CMP_OP_OP);
3270 } /* transform_node_Cmp */
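/*
 * Worked example (illustrative only): for IEEE floats, -a < -b holds
 * exactly when b < a, so both Minus nodes can be dropped. For two's
 * complement integers this is unsafe: with 8-bit values, a = -128 gives
 * -a = -128 again (overflow), so the rule is skipped for such modes.
 */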
3274 * Transform a Cond node.
* Replace the Cond by a Jmp if it branches on a constant condition.
3279 static ir_node *transform_node_Cond(ir_node *n)
3283 ir_node *a = get_Cond_selector(n);
3284 tarval *ta = value_of(a);
3286 /* we need block info which is not available in floating irgs */
3287 if (get_irg_pinned(current_ir_graph) == op_pin_state_floats)
3290 if ((ta != tarval_bad) &&
3291 (get_irn_mode(a) == mode_b) &&
3292 (get_opt_unreachable_code())) {
3293 /* It's a boolean Cond, branching on a boolean constant.
3294 Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
3295 ir_node *blk = get_nodes_block(n);
3296 jmp = new_r_Jmp(blk);
3297 turn_into_tuple(n, pn_Cond_max);
3298 if (ta == tarval_b_true) {
3299 set_Tuple_pred(n, pn_Cond_false, new_Bad());
3300 set_Tuple_pred(n, pn_Cond_true, jmp);
3302 set_Tuple_pred(n, pn_Cond_false, jmp);
3303 set_Tuple_pred(n, pn_Cond_true, new_Bad());
3305 /* We might generate an endless loop, so keep it alive. */
3306 add_End_keepalive(get_irg_end(current_ir_graph), blk);
3309 } /* transform_node_Cond */
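/*
 * Illustrative effect: Cond(true) becomes the tuple (Bad, Jmp), i.e. the
 * false branch is dead and the true branch degenerates into an
 * unconditional jump; Cond(false) is the mirrored case (Jmp, Bad).
 */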
3312 * Prototype of a recursive transform function
3313 * for bitwise distributive transformations.
3315 typedef ir_node* (*recursive_transform)(ir_node *n);
* Makes use of the distributive laws for And, Or, Eor:
*     and(a OP c, b OP c) -> and(a, b) OP c
* Note: might return a node with a different op than n.
3322 static ir_node *transform_bitwise_distributive(ir_node *n,
3323 recursive_transform trans_func)
3326 ir_node *a = get_binop_left(n);
3327 ir_node *b = get_binop_right(n);
3328 ir_op *op = get_irn_op(a);
3329 ir_op *op_root = get_irn_op(n);
3331 if (op != get_irn_op(b))
3334 /* and(conv(a), conv(b)) -> conv(and(a,b)) */
3335 if (op == op_Conv) {
3336 ir_node *a_op = get_Conv_op(a);
3337 ir_node *b_op = get_Conv_op(b);
3338 ir_mode *a_mode = get_irn_mode(a_op);
3339 ir_mode *b_mode = get_irn_mode(b_op);
3340 if (a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
3341 ir_node *blk = get_nodes_block(n);
3344 set_binop_left(n, a_op);
3345 set_binop_right(n, b_op);
3346 set_irn_mode(n, a_mode);
3348 n = new_r_Conv(blk, n, get_irn_mode(oldn));
3350 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
3356 /* nothing to gain here */
3360 if (op == op_Shrs || op == op_Shr || op == op_Shl
3361 || op == op_And || op == op_Or || op == op_Eor) {
3362 ir_node *a_left = get_binop_left(a);
3363 ir_node *a_right = get_binop_right(a);
3364 ir_node *b_left = get_binop_left(b);
3365 ir_node *b_right = get_binop_right(b);
3367 ir_node *op1 = NULL;
3368 ir_node *op2 = NULL;
3370 if (is_op_commutative(op)) {
3371 if (a_left == b_left) {
3375 } else if (a_left == b_right) {
3379 } else if (a_right == b_left) {
3385 if (a_right == b_right) {
3392 /* (a sop c) & (b sop c) => (a & b) sop c */
3393 ir_node *blk = get_nodes_block(n);
3395 ir_node *new_n = exact_copy(n);
3396 set_binop_left(new_n, op1);
3397 set_binop_right(new_n, op2);
3398 new_n = trans_func(new_n);
3400 if (op_root == op_Eor && op == op_Or) {
3401 dbg_info *dbgi = get_irn_dbg_info(n);
3402 ir_mode *mode = get_irn_mode(c);
3404 c = new_rd_Not(dbgi, blk, c, mode);
3405 n = new_rd_And(dbgi, blk, new_n, c, mode);
3408 set_nodes_block(n, blk);
3409 set_binop_left(n, new_n);
3410 set_binop_right(n, c);
3411 add_identities(current_ir_graph->value_table, n);
3414 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
3425 static ir_node *transform_node_And(ir_node *n)
3427 ir_node *c, *oldn = n;
3428 ir_node *a = get_And_left(n);
3429 ir_node *b = get_And_right(n);
3431 vrp_attr *a_vrp, *b_vrp;
3433 mode = get_irn_mode(n);
3434 HANDLE_BINOP_PHI((eval_func) tarval_and, a, b, c, mode);
3436 /* we can evaluate 2 Projs of the same Cmp */
3437 if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
3438 ir_node *pred_a = get_Proj_pred(a);
3439 ir_node *pred_b = get_Proj_pred(b);
3440 if (pred_a == pred_b) {
3441 dbg_info *dbgi = get_irn_dbg_info(n);
3442 pn_Cmp pn_a = get_Proj_proj(a);
3443 pn_Cmp pn_b = get_Proj_proj(b);
3444 /* yes, we can simply calculate with pncs */
3445 pn_Cmp new_pnc = pn_a & pn_b;
3447 return new_rd_Proj(dbgi, pred_a, mode_b, new_pnc);
3452 ir_node *op = get_Not_op(b);
3454 ir_node *ba = get_And_left(op);
3455 ir_node *bb = get_And_right(op);
3457 /* it's enough to test the following cases due to normalization! */
3458 if (get_Or_left(a) == ba && get_Or_right(a) == bb) {
3459 /* (a|b) & ~(a&b) = a^b */
3460 ir_node *block = get_nodes_block(n);
3462 n = new_rd_Eor(get_irn_dbg_info(n), block, ba, bb, mode);
3463 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
3471 ir_node *op = get_Not_op(a);
3473 ir_node *aa = get_And_left(op);
3474 ir_node *ab = get_And_right(op);
3476 /* it's enough to test the following cases due to normalization! */
3477 if (get_Or_left(b) == aa && get_Or_right(b) == ab) {
3478 /* (a|b) & ~(a&b) = a^b */
3479 ir_node *block = get_nodes_block(n);
3481 n = new_rd_Eor(get_irn_dbg_info(n), block, aa, ab, mode);
3482 DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
3489 ir_node *al = get_Eor_left(a);
3490 ir_node *ar = get_Eor_right(a);
3493 /* (b ^ a) & b -> ~a & b */
3494 dbg_info *dbg = get_irn_dbg_info(n);
3495 ir_node *block = get_nodes_block(n);
3497 ar = new_rd_Not(dbg, block, ar, mode);
3498 n = new_rd_And(dbg, block, ar, b, mode);
3499 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
3503 /* (a ^ b) & b -> ~a & b */
3504 dbg_info *dbg = get_irn_dbg_info(n);
3505 ir_node *block = get_nodes_block(n);
3507 al = new_rd_Not(dbg, block, al, mode);
3508 n = new_rd_And(dbg, block, al, b, mode);
3509 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
3514 ir_node *bl = get_Eor_left(b);
3515 ir_node *br = get_Eor_right(b);
3518 /* a & (a ^ b) -> a & ~b */
3519 dbg_info *dbg = get_irn_dbg_info(n);
3520 ir_node *block = get_nodes_block(n);
3522 br = new_rd_Not(dbg, block, br, mode);
3523 n = new_rd_And(dbg, block, br, a, mode);
3524 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
3528 /* a & (b ^ a) -> a & ~b */
3529 dbg_info *dbg = get_irn_dbg_info(n);
3530 ir_node *block = get_nodes_block(n);
3532 bl = new_rd_Not(dbg, block, bl, mode);
3533 n = new_rd_And(dbg, block, bl, a, mode);
3534 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
3538 if (is_Not(a) && is_Not(b)) {
3539 /* ~a & ~b = ~(a|b) */
3540 ir_node *block = get_nodes_block(n);
3541 ir_mode *mode = get_irn_mode(n);
3545 n = new_rd_Or(get_irn_dbg_info(n), block, a, b, mode);
3546 n = new_rd_Not(get_irn_dbg_info(n), block, n, mode);
3547 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
3551 b_vrp = vrp_get_info(b);
3552 if (is_Const(a) && b_vrp && (tarval_cmp(tarval_or(get_Const_tarval(a),
3553 b_vrp->bits_not_set), get_Const_tarval(a)) == pn_Cmp_Eq)) {
3559 a_vrp = vrp_get_info(a);
3560 if (is_Const(b) && a_vrp && (tarval_cmp(tarval_or(get_Const_tarval(b),
3561 a_vrp->bits_not_set), get_Const_tarval(b)) == pn_Cmp_Eq)) {
3565 n = transform_bitwise_distributive(n, transform_node_And);
3568 } /* transform_node_And */
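/*
 * Worked bit-level example for the And rules above (illustrative only),
 * with 4-bit values a = 0b1100, b = 0b1010:
 *   (a|b) & ~(a&b)  =  0b1110 & 0b0111  =  0b0110  =  a^b
 *   ~a & ~b         =  0b0011 & 0b0101  =  0b0001  =  ~(a|b)  (De Morgan)
 */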
3573 static ir_node *transform_node_Eor(ir_node *n)
3575 ir_node *c, *oldn = n;
3576 ir_node *a = get_Eor_left(n);
3577 ir_node *b = get_Eor_right(n);
3578 ir_mode *mode = get_irn_mode(n);
3580 HANDLE_BINOP_PHI((eval_func) tarval_eor, a, b, c, mode);
3582 /* we can evaluate 2 Projs of the same Cmp */
3583 if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
3584 ir_node *pred_a = get_Proj_pred(a);
3585 ir_node *pred_b = get_Proj_pred(b);
3586 if (pred_a == pred_b) {
3587 dbg_info *dbgi = get_irn_dbg_info(n);
3588 pn_Cmp pn_a = get_Proj_proj(a);
3589 pn_Cmp pn_b = get_Proj_proj(b);
3590 /* yes, we can simply calculate with pncs */
3591 pn_Cmp new_pnc = pn_a ^ pn_b;
3593 return new_rd_Proj(dbgi, pred_a, mode_b, new_pnc);
3599 n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph,
3600 get_mode_null(mode));
3601 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
3602 } else if (is_Const(b)) {
3603 if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
3604 ir_node *cnst = new_Const(tarval_not(get_Const_tarval(b)));
3605 ir_node *not_op = get_Not_op(a);
3606 dbg_info *dbg = get_irn_dbg_info(n);
3607 ir_node *block = get_nodes_block(n);
3608 ir_mode *mode = get_irn_mode(n);
3609 n = new_rd_Eor(dbg, block, not_op, cnst, mode);
} else if (is_Const_all_one(b)) { /* x ^ 1...1 -> ~x */
3612 n = new_r_Not(get_nodes_block(n), a, mode);
3613 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
3616 n = transform_bitwise_distributive(n, transform_node_Eor);
3620 } /* transform_node_Eor */
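/*
 * Worked example (illustrative only): ~x ^ c == x ^ ~c, because xor with
 * the all-ones pattern can be moved onto either operand; e.g. with
 * x = 0b0101 and c = 0b0011:
 *   ~x ^ c  =  0b1010 ^ 0b0011  =  0b1001
 *   x ^ ~c  =  0b0101 ^ 0b1100  =  0b1001
 */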
3625 static ir_node *transform_node_Not(ir_node *n)
3627 ir_node *c, *oldn = n;
3628 ir_node *a = get_Not_op(n);
3629 ir_mode *mode = get_irn_mode(n);
3631 HANDLE_UNOP_PHI(tarval_not,a,c);
3633 /* check for a boolean Not */
3634 if (mode == mode_b && is_Proj(a)) {
3635 ir_node *a_pred = get_Proj_pred(a);
3636 if (is_Cmp(a_pred)) {
/* We negate a Cmp; the Cmp already delivers the negated result through the inverse Proj! */
3638 n = new_r_Proj(get_Proj_pred(a),
3639 mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
3640 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
3645 ir_node *eor_b = get_Eor_right(a);
3646 if (is_Const(eor_b)) { /* ~(x ^ const) -> x ^ ~const */
3647 ir_node *cnst = new_Const(tarval_not(get_Const_tarval(eor_b)));
3648 ir_node *eor_a = get_Eor_left(a);
3649 dbg_info *dbg = get_irn_dbg_info(n);
3650 ir_node *block = get_nodes_block(n);
3651 ir_mode *mode = get_irn_mode(n);
3652 n = new_rd_Eor(dbg, block, eor_a, cnst, mode);
3656 if (get_mode_arithmetic(mode) == irma_twos_complement) {
3657 if (is_Minus(a)) { /* ~-x -> x + -1 */
3658 dbg_info *dbg = get_irn_dbg_info(n);
3659 ir_graph *irg = current_ir_graph;
3660 ir_node *block = get_nodes_block(n);
3661 ir_node *add_l = get_Minus_op(a);
3662 ir_node *add_r = new_rd_Const(dbg, irg, get_mode_minus_one(mode));
3663 n = new_rd_Add(dbg, block, add_l, add_r, mode);
3664 } else if (is_Add(a)) {
3665 ir_node *add_r = get_Add_right(a);
3666 if (is_Const(add_r) && is_Const_all_one(add_r)) {
3667 /* ~(x + -1) = -x */
3668 ir_node *op = get_Add_left(a);
3669 ir_node *blk = get_nodes_block(n);
3670 n = new_rd_Minus(get_irn_dbg_info(n), blk, op, get_irn_mode(n));
3671 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
3676 } /* transform_node_Not */
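/*
 * Worked example for the two's complement rules above (illustrative
 * only): since ~y == -y - 1,
 *   ~(-x)      =  x - 1         =  x + (-1)
 *   ~(x + -1)  =  -(x - 1) - 1  =  -x
 */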
3679 * Transform a Minus.
3683 * -(a >>u (size-1)) = a >>s (size-1)
3684 * -(a >>s (size-1)) = a >>u (size-1)
3685 * -(a * const) -> a * -const
3687 static ir_node *transform_node_Minus(ir_node *n)
3689 ir_node *c, *oldn = n;
3690 ir_node *a = get_Minus_op(n);
3693 HANDLE_UNOP_PHI(tarval_neg,a,c);
3695 mode = get_irn_mode(a);
3696 if (get_mode_arithmetic(mode) == irma_twos_complement) {
/* the following rules apply only to two's complement */
3700 ir_node *op = get_Not_op(a);
3701 tarval *tv = get_mode_one(mode);
3702 ir_node *blk = get_nodes_block(n);
3703 ir_node *c = new_Const(tv);
3704 n = new_rd_Add(get_irn_dbg_info(n), blk, op, c, mode);
3705 DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
3709 ir_node *c = get_Shr_right(a);
3712 tarval *tv = get_Const_tarval(c);
3714 if (tarval_is_long(tv) && get_tarval_long(tv) == (int) get_mode_size_bits(mode) - 1) {
3715 /* -(a >>u (size-1)) = a >>s (size-1) */
3716 ir_node *v = get_Shr_left(a);
3718 n = new_rd_Shrs(get_irn_dbg_info(n), get_nodes_block(n), v, c, mode);
3719 DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
3725 ir_node *c = get_Shrs_right(a);
3728 tarval *tv = get_Const_tarval(c);
3730 if (tarval_is_long(tv) && get_tarval_long(tv) == (int) get_mode_size_bits(mode) - 1) {
3731 /* -(a >>s (size-1)) = a >>u (size-1) */
3732 ir_node *v = get_Shrs_left(a);
3734 n = new_rd_Shr(get_irn_dbg_info(n), get_nodes_block(n), v, c, mode);
3735 DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
3742 /* - (a-b) = b - a */
3743 ir_node *la = get_Sub_left(a);
3744 ir_node *ra = get_Sub_right(a);
3745 ir_node *blk = get_nodes_block(n);
3747 n = new_rd_Sub(get_irn_dbg_info(n), blk, ra, la, mode);
3748 DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_SUB);
3752 if (is_Mul(a)) { /* -(a * const) -> a * -const */
3753 ir_node *mul_l = get_Mul_left(a);
3754 ir_node *mul_r = get_Mul_right(a);
3755 tarval *tv = value_of(mul_r);
3756 if (tv != tarval_bad) {
3757 tv = tarval_neg(tv);
3758 if (tv != tarval_bad) {
3759 ir_node *cnst = new_Const(tv);
3760 dbg_info *dbg = get_irn_dbg_info(a);
3761 ir_node *block = get_nodes_block(a);
3762 n = new_rd_Mul(dbg, block, mul_l, cnst, mode);
3763 DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_MUL_C);
3770 } /* transform_node_Minus */
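/*
 * Worked example (illustrative only), 32-bit values: a >>u 31 yields the
 * sign bit as 0 or 1, so -(a >>u 31) is 0 or -1 (all ones), which is
 * exactly a >>s 31. The mirrored rule -(a >>s 31) = a >>u 31 holds by
 * the same argument, since a >>s 31 is 0 or -1.
 */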
3773 * Transform a Cast_type(Const) into a new Const_type
3775 static ir_node *transform_node_Cast(ir_node *n)
3778 ir_node *pred = get_Cast_op(n);
3779 ir_type *tp = get_irn_type(n);
3781 if (is_Const(pred) && get_Const_type(pred) != tp) {
3782 n = new_rd_Const_type(NULL, current_ir_graph, get_Const_tarval(pred), tp);
3783 DBG_OPT_CSTEVAL(oldn, n);
3784 } else if (is_SymConst(pred) && get_SymConst_value_type(pred) != tp) {
3785 n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_mode(pred),
3786 get_SymConst_symbol(pred), get_SymConst_kind(pred), tp);
3787 DBG_OPT_CSTEVAL(oldn, n);
3791 } /* transform_node_Cast */
3794 * Transform a Proj(Load) with a non-null address.
3796 static ir_node *transform_node_Proj_Load(ir_node *proj)
3798 if (get_opt_ldst_only_null_ptr_exceptions()) {
3799 if (get_irn_mode(proj) == mode_X) {
3800 ir_node *load = get_Proj_pred(proj);
3802 /* get the Load address */
3803 const ir_node *addr = get_Load_ptr(load);
3804 const ir_node *confirm;
3806 if (value_not_null(addr, &confirm)) {
3807 if (confirm == NULL) {
3808 /* this node may float if it did not depend on a Confirm */
3809 set_irn_pinned(load, op_pin_state_floats);
3811 if (get_Proj_proj(proj) == pn_Load_X_except) {
3812 DBG_OPT_EXC_REM(proj);
3813 return get_irg_bad(current_ir_graph);
3815 ir_node *blk = get_nodes_block(load);
3816 return new_r_Jmp(blk);
3822 } /* transform_node_Proj_Load */
3825 * Transform a Proj(Store) with a non-null address.
3827 static ir_node *transform_node_Proj_Store(ir_node *proj)
3829 if (get_opt_ldst_only_null_ptr_exceptions()) {
3830 if (get_irn_mode(proj) == mode_X) {
3831 ir_node *store = get_Proj_pred(proj);
3833 /* get the load/store address */
3834 const ir_node *addr = get_Store_ptr(store);
3835 const ir_node *confirm;
3837 if (value_not_null(addr, &confirm)) {
3838 if (confirm == NULL) {
3839 /* this node may float if it did not depend on a Confirm */
3840 set_irn_pinned(store, op_pin_state_floats);
3842 if (get_Proj_proj(proj) == pn_Store_X_except) {
3843 DBG_OPT_EXC_REM(proj);
3844 return get_irg_bad(current_ir_graph);
3846 ir_node *blk = get_nodes_block(store);
3847 return new_r_Jmp(blk);
3853 } /* transform_node_Proj_Store */
3856 * Transform a Proj(Div) with a non-zero value.
3857 * Removes the exceptions and routes the memory to the NoMem node.
3859 static ir_node *transform_node_Proj_Div(ir_node *proj)
3861 ir_node *div = get_Proj_pred(proj);
3862 ir_node *b = get_Div_right(div);
3863 ir_node *res, *new_mem;
3864 const ir_node *confirm;
3867 if (value_not_zero(b, &confirm)) {
3868 /* div(x, y) && y != 0 */
3869 if (confirm == NULL) {
3870 /* we are sure we have a Const != 0 */
3871 new_mem = get_Div_mem(div);
3872 new_mem = skip_Pin(new_mem);
3873 set_Div_mem(div, new_mem);
3874 set_irn_pinned(div, op_pin_state_floats);
3877 proj_nr = get_Proj_proj(proj);
3879 case pn_Div_X_regular:
3880 return new_r_Jmp(get_nodes_block(div));
3882 case pn_Div_X_except:
3883 /* we found an exception handler, remove it */
3884 DBG_OPT_EXC_REM(proj);
3888 res = get_Div_mem(div);
3889 new_mem = get_irg_no_mem(current_ir_graph);
3892 /* This node can only float up to the Confirm block */
3893 new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
3895 set_irn_pinned(div, op_pin_state_floats);
3896 /* this is a Div without exception, we can remove the memory edge */
3897 set_Div_mem(div, new_mem);
3902 } /* transform_node_Proj_Div */
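/*
 * Illustrative effect: once the divisor is proven non-zero, the
 * exception Proj of the Div is dead, the regular control flow Proj
 * degenerates into a Jmp, and the Div no longer needs to be serialized
 * through memory, so it may float freely (or up to its Confirm).
 */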
3905 * Transform a Proj(Mod) with a non-zero value.
3906 * Removes the exceptions and routes the memory to the NoMem node.
3908 static ir_node *transform_node_Proj_Mod(ir_node *proj)
3910 ir_node *mod = get_Proj_pred(proj);
3911 ir_node *b = get_Mod_right(mod);
3912 ir_node *res, *new_mem;
3913 const ir_node *confirm;
3916 if (value_not_zero(b, &confirm)) {
3917 /* mod(x, y) && y != 0 */
3918 proj_nr = get_Proj_proj(proj);
3920 if (confirm == NULL) {
3921 /* we are sure we have a Const != 0 */
3922 new_mem = get_Mod_mem(mod);
3923 new_mem = skip_Pin(new_mem);
3924 set_Mod_mem(mod, new_mem);
3925 set_irn_pinned(mod, op_pin_state_floats);
3930 case pn_Mod_X_regular:
3931 return new_r_Jmp(get_irn_n(mod, -1));
3933 case pn_Mod_X_except:
3934 /* we found an exception handler, remove it */
3935 DBG_OPT_EXC_REM(proj);
3939 res = get_Mod_mem(mod);
3940 new_mem = get_irg_no_mem(current_ir_graph);
3943 /* This node can only float up to the Confirm block */
3944 new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
3946 /* this is a Mod without exception, we can remove the memory edge */
3947 set_Mod_mem(mod, new_mem);
3950 if (get_Mod_left(mod) == b) {
3951 /* a % a = 0 if a != 0 */
3952 ir_mode *mode = get_irn_mode(proj);
3953 ir_node *res = new_Const(get_mode_null(mode));
3955 DBG_OPT_CSTEVAL(mod, res);
3961 } /* transform_node_Proj_Mod */
3964 * Transform a Proj(DivMod) with a non-zero value.
3965 * Removes the exceptions and routes the memory to the NoMem node.
3967 static ir_node *transform_node_Proj_DivMod(ir_node *proj)
3969 ir_node *divmod = get_Proj_pred(proj);
3970 ir_node *b = get_DivMod_right(divmod);
3971 ir_node *res, *new_mem;
3972 const ir_node *confirm;
3975 if (value_not_zero(b, &confirm)) {
3976 /* DivMod(x, y) && y != 0 */
3977 proj_nr = get_Proj_proj(proj);
3979 if (confirm == NULL) {
3980 /* we are sure we have a Const != 0 */
3981 new_mem = get_DivMod_mem(divmod);
3982 new_mem = skip_Pin(new_mem);
3983 set_DivMod_mem(divmod, new_mem);
3984 set_irn_pinned(divmod, op_pin_state_floats);
3989 case pn_DivMod_X_regular:
3990 return new_r_Jmp(get_nodes_block(divmod));
3992 case pn_DivMod_X_except:
3993 /* we found an exception handler, remove it */
3994 DBG_OPT_EXC_REM(proj);
3998 res = get_DivMod_mem(divmod);
3999 new_mem = get_irg_no_mem(current_ir_graph);
4002 /* This node can only float up to the Confirm block */
4003 new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
4005 /* this is a DivMod without exception, we can remove the memory edge */
4006 set_DivMod_mem(divmod, new_mem);
4009 case pn_DivMod_res_mod:
4010 if (get_DivMod_left(divmod) == b) {
4011 /* a % a = 0 if a != 0 */
4012 ir_mode *mode = get_irn_mode(proj);
4013 ir_node *res = new_Const(get_mode_null(mode));
4015 DBG_OPT_CSTEVAL(divmod, res);
4021 } /* transform_node_Proj_DivMod */
4024 * Optimizes jump tables (CondIs or CondIu) by removing all impossible cases.
4026 static ir_node *transform_node_Proj_Cond(ir_node *proj)
4028 if (get_opt_unreachable_code()) {
4029 ir_node *n = get_Proj_pred(proj);
4030 ir_node *b = get_Cond_selector(n);
4032 if (mode_is_int(get_irn_mode(b))) {
4033 tarval *tb = value_of(b);
4035 if (tb != tarval_bad) {
4036 /* we have a constant switch */
4037 long num = get_Proj_proj(proj);
if (num != get_Cond_default_proj(n)) { /* we cannot optimize default Projs yet */
4040 if (get_tarval_long(tb) == num) {
4041 /* Do NOT create a jump here, or we will have 2 control flow ops
4042 * in a block. This case is optimized away in optimize_cf(). */
4045 /* this case will NEVER be taken, kill it */
4046 return get_irg_bad(current_ir_graph);
4050 long num = get_Proj_proj(proj);
4051 vrp_attr *b_vrp = vrp_get_info(b);
4052 if (num != get_Cond_default_proj(n) && b_vrp) {
4053 /* Try handling with vrp data. We only remove dead parts. */
4054 tarval *tp = new_tarval_from_long(num, get_irn_mode(b));
4056 if (b_vrp->range_type == VRP_RANGE) {
4057 pn_Cmp cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
4058 pn_Cmp cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
4060 if ((cmp_result & pn_Cmp_Gt) == cmp_result && (cmp_result2
4061 & pn_Cmp_Lt) == cmp_result2) {
4062 return get_irg_bad(current_ir_graph);
4064 } else if (b_vrp->range_type == VRP_ANTIRANGE) {
4065 pn_Cmp cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
4066 pn_Cmp cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
4068 if ((cmp_result & pn_Cmp_Le) == cmp_result && (cmp_result2
4069 & pn_Cmp_Ge) == cmp_result2) {
4070 return get_irg_bad(current_ir_graph);
4075 tarval_and( b_vrp->bits_set, tp),
4079 return get_irg_bad(current_ir_graph);
4085 tarval_not(b_vrp->bits_not_set)),
4086 tarval_not(b_vrp->bits_not_set))
4089 return get_irg_bad(current_ir_graph);
4098 } /* transform_node_Proj_Cond */
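/*
 * Illustrative effect: for a switch whose selector is known to be the
 * constant 3, every case Proj except the one for 3 is replaced by Bad;
 * with vrp data proving the selector can never take a case's value, that
 * case's Proj is removed the same way. Default Projs are left alone.
 */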
* Create a 0 constant of the given mode.
4103 static ir_node *create_zero_const(ir_mode *mode)
4105 tarval *tv = get_mode_null(mode);
4106 ir_node *cnst = new_Const(tv);
4111 /* the order of the values is important! */
4112 typedef enum const_class {
4118 static const_class classify_const(const ir_node* n)
4120 if (is_Const(n)) return const_const;
4121 if (is_irn_constlike(n)) return const_like;
* Determines whether r is more constlike than l, or has a larger index (in that order of importance).
4129 static int operands_are_normalized(const ir_node *l, const ir_node *r)
4131 const const_class l_order = classify_const(l);
4132 const const_class r_order = classify_const(r);
4134 l_order > r_order ||
4135 (l_order == r_order && get_irn_idx(l) <= get_irn_idx(r));
4139 * Normalizes and optimizes Cmp nodes.
4141 static ir_node *transform_node_Proj_Cmp(ir_node *proj)
4143 ir_node *n = get_Proj_pred(proj);
4144 ir_node *left = get_Cmp_left(n);
4145 ir_node *right = get_Cmp_right(n);
4148 ir_mode *mode = NULL;
4149 long proj_nr = get_Proj_proj(proj);
4151 /* we can evaluate some cases directly */
4154 return new_Const(get_tarval_b_false());
4156 return new_Const(get_tarval_b_true());
4158 if (!mode_is_float(get_irn_mode(left)))
4159 return new_Const(get_tarval_b_true());
/* remove Casts on both sides */
4166 left = skip_Cast(left);
4167 right = skip_Cast(right);
4169 /* Remove unnecessary conversions */
4170 /* TODO handle constants */
4171 if (is_Conv(left) && is_Conv(right)) {
4172 ir_mode *mode = get_irn_mode(left);
4173 ir_node *op_left = get_Conv_op(left);
4174 ir_node *op_right = get_Conv_op(right);
4175 ir_mode *mode_left = get_irn_mode(op_left);
4176 ir_mode *mode_right = get_irn_mode(op_right);
4178 if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
4179 && mode_left != mode_b && mode_right != mode_b) {
4180 ir_node *block = get_nodes_block(n);
4182 if (mode_left == mode_right) {
4186 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
4187 } else if (smaller_mode(mode_left, mode_right)) {
4188 left = new_r_Conv(block, op_left, mode_right);
4191 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
4192 } else if (smaller_mode(mode_right, mode_left)) {
4194 right = new_r_Conv(block, op_right, mode_left);
4196 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
4201 /* remove operation on both sides if possible */
4202 if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
* The following operations are NOT safe for floating point values, for instance
4205 * 1.0 + inf == 2.0 + inf, =/=> x == y
4207 if (mode_is_int(get_irn_mode(left))) {
4208 unsigned lop = get_irn_opcode(left);
4210 if (lop == get_irn_opcode(right)) {
4211 ir_node *ll, *lr, *rl, *rr;
4213 /* same operation on both sides, try to remove */
4217 /* ~a CMP ~b => a CMP b, -a CMP -b ==> a CMP b */
4218 left = get_unop_op(left);
4219 right = get_unop_op(right);
4221 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4224 ll = get_Add_left(left);
4225 lr = get_Add_right(left);
4226 rl = get_Add_left(right);
4227 rr = get_Add_right(right);
4230 /* X + a CMP X + b ==> a CMP b */
4234 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4235 } else if (ll == rr) {
4236 /* X + a CMP b + X ==> a CMP b */
4240 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4241 } else if (lr == rl) {
4242 /* a + X CMP X + b ==> a CMP b */
4246 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4247 } else if (lr == rr) {
4248 /* a + X CMP b + X ==> a CMP b */
4252 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4256 ll = get_Sub_left(left);
4257 lr = get_Sub_right(left);
4258 rl = get_Sub_left(right);
4259 rr = get_Sub_right(right);
4262 /* X - a CMP X - b ==> a CMP b */
4266 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4267 } else if (lr == rr) {
4268 /* a - X CMP b - X ==> a CMP b */
4272 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4276 if (get_Rotl_right(left) == get_Rotl_right(right)) {
4277 /* a ROTL X CMP b ROTL X ==> a CMP b */
4278 left = get_Rotl_left(left);
4279 right = get_Rotl_left(right);
4281 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4289 /* X+A == A, A+X == A, A-X == A -> X == 0 */
4290 if (is_Add(left) || is_Sub(left)) {
4291 ir_node *ll = get_binop_left(left);
4292 ir_node *lr = get_binop_right(left);
4294 if (lr == right && is_Add(left)) {
4301 right = create_zero_const(get_irn_mode(left));
4303 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4306 if (is_Add(right) || is_Sub(right)) {
4307 ir_node *rl = get_binop_left(right);
4308 ir_node *rr = get_binop_right(right);
4310 if (rr == left && is_Add(right)) {
4317 right = create_zero_const(get_irn_mode(left));
4319 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
4322 if (is_And(left) && is_Const(right)) {
4323 ir_node *ll = get_binop_left(left);
4324 ir_node *lr = get_binop_right(left);
4325 if (is_Shr(ll) && is_Const(lr)) {
4326 /* Cmp((x >>u c1) & c2, c3) = Cmp(x & (c2 << c1), c3 << c1) */
4327 ir_node *block = get_nodes_block(n);
4328 ir_mode *mode = get_irn_mode(left);
4330 ir_node *llr = get_Shr_right(ll);
4331 if (is_Const(llr)) {
4332 dbg_info *dbg = get_irn_dbg_info(left);
4334 tarval *c1 = get_Const_tarval(llr);
4335 tarval *c2 = get_Const_tarval(lr);
4336 tarval *c3 = get_Const_tarval(right);
4337 tarval *mask = tarval_shl(c2, c1);
4338 tarval *value = tarval_shl(c3, c1);
4340 left = new_rd_And(dbg, block, get_Shr_left(ll), new_Const(mask), mode);
4341 right = new_Const(value);
4346 } /* mode_is_int(...) */
4347 } /* proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg */
4349 /* replace mode_b compares with ands/ors */
4350 if (get_irn_mode(left) == mode_b) {
4351 ir_node *block = get_nodes_block(n);
4355 case pn_Cmp_Le: bres = new_r_Or( block, new_r_Not(block, left, mode_b), right, mode_b); break;
4356 case pn_Cmp_Lt: bres = new_r_And(block, new_r_Not(block, left, mode_b), right, mode_b); break;
4357 case pn_Cmp_Ge: bres = new_r_Or( block, left, new_r_Not(block, right, mode_b), mode_b); break;
4358 case pn_Cmp_Gt: bres = new_r_And(block, left, new_r_Not(block, right, mode_b), mode_b); break;
4359 case pn_Cmp_Lg: bres = new_r_Eor(block, left, right, mode_b); break;
4360 case pn_Cmp_Eq: bres = new_r_Not(block, new_r_Eor(block, left, right, mode_b), mode_b); break;
4361 default: bres = NULL;
4364 DBG_OPT_ALGSIM0(n, bres, FS_OPT_CMP_TO_BOOL);
4370 * First step: normalize the compare op
4371 * by placing the constant on the right side
4372 * or moving the lower address node to the left.
4374 if (!operands_are_normalized(left, right)) {
4380 proj_nr = get_inversed_pnc(proj_nr);
4385 * Second step: Try to reduce the magnitude
4386 * of a constant. This may help to generate better code
4387 * later and may help to normalize more compares.
4388 * Of course this is only possible for integer values.
4390 tv = value_of(right);
4391 if (tv != tarval_bad) {
4392 mode = get_irn_mode(right);
4394 /* TODO extend to arbitrary constants */
4395 if (is_Conv(left) && tarval_is_null(tv)) {
4396 ir_node *op = get_Conv_op(left);
4397 ir_mode *op_mode = get_irn_mode(op);
4400 * UpConv(x) REL 0 ==> x REL 0
* Don't do this for float values as it's unclear whether it is a
* win. (On the other hand it makes detection/creation of fabs hard.)
4404 if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
4405 ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) ||
4406 mode_is_signed(mode) || !mode_is_signed(op_mode)) &&
4407 !mode_is_float(mode)) {
4408 tv = get_mode_null(op_mode);
4412 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
4416 if (tv != tarval_bad) {
4417 /* the following optimization is possible on modes without Overflow
4418 * on Unary Minus or on == and !=:
4419 * -a CMP c ==> a swap(CMP) -c
* Beware: for two's complement, Overflow may occur, so only == and != can
4422 * be optimized, see this:
4423 * -MININT < 0 =/=> MININT > 0 !!!
4425 if (is_Minus(left) &&
4426 (!mode_overflow_on_unary_Minus(mode) ||
4427 (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
4428 tv = tarval_neg(tv);
4430 if (tv != tarval_bad) {
4431 left = get_Minus_op(left);
4432 proj_nr = get_inversed_pnc(proj_nr);
4434 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
4436 } else if (is_Not(left) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) {
4437 /* Not(a) ==/!= c ==> a ==/!= Not(c) */
4438 tv = tarval_not(tv);
4440 if (tv != tarval_bad) {
4441 left = get_Not_op(left);
4443 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
4447 /* for integer modes, we have more */
4448 if (mode_is_int(mode)) {
/* Ne includes Unordered, which is not possible on integers.
 * However, frontends often get this wrong, so fix it here */
4451 if (proj_nr & pn_Cmp_Uo) {
4452 proj_nr &= ~pn_Cmp_Uo;
4453 set_Proj_proj(proj, proj_nr);
4456 /* c > 0 : a < c ==> a <= (c-1) a >= c ==> a > (c-1) */
4457 if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
4458 tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Gt) {
4459 tv = tarval_sub(tv, get_mode_one(mode), NULL);
4461 if (tv != tarval_bad) {
4462 proj_nr ^= pn_Cmp_Eq;
4464 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
4467 /* c < 0 : a > c ==> a >= (c+1) a <= c ==> a < (c+1) */
4468 else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
4469 tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Lt) {
4470 tv = tarval_add(tv, get_mode_one(mode));
4472 if (tv != tarval_bad) {
4473 proj_nr ^= pn_Cmp_Eq;
4475 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
4479 /* the following reassociations work only for == and != */
4480 if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
#if 0 /* Might not be that good in general */
4483 /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
4484 if (tarval_is_null(tv) && is_Sub(left)) {
4485 right = get_Sub_right(left);
4486 left = get_Sub_left(left);
4488 tv = value_of(right);
4490 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
4494 if (tv != tarval_bad) {
4495 /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
4497 ir_node *c1 = get_Sub_right(left);
4498 tarval *tv2 = value_of(c1);
4500 if (tv2 != tarval_bad) {
4501 tv2 = tarval_add(tv, value_of(c1));
4503 if (tv2 != tarval_bad) {
4504 left = get_Sub_left(left);
4507 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
4511 /* a+c1 == c2 ==> a == c2-c1, a+c1 != c2 ==> a != c2-c1 */
4512 else if (is_Add(left)) {
4513 ir_node *a_l = get_Add_left(left);
4514 ir_node *a_r = get_Add_right(left);
4518 if (is_Const(a_l)) {
4520 tv2 = value_of(a_l);
4523 tv2 = value_of(a_r);
4526 if (tv2 != tarval_bad) {
4527 tv2 = tarval_sub(tv, tv2, NULL);
4529 if (tv2 != tarval_bad) {
4533 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
4537 /* -a == c ==> a == -c, -a != c ==> a != -c */
4538 else if (is_Minus(left)) {
4539 tarval *tv2 = tarval_sub(get_mode_null(mode), tv, NULL);
4541 if (tv2 != tarval_bad) {
4542 left = get_Minus_op(left);
4545 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
/* the following reassociations work only for <= and < */
4551 else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
4552 if (tv != tarval_bad) {
4553 /* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
4554 if (is_Abs(left)) { // TODO something is missing here
4560 if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
4561 switch (get_irn_opcode(left)) {
4565 c1 = get_And_right(left);
4568 * And(x, C1) == C2 ==> FALSE if C2 & C1 != C2
4569 * And(x, C1) != C2 ==> TRUE if C2 & C1 != C2
4571 tarval *mask = tarval_and(get_Const_tarval(c1), tv);
4573 /* TODO: move to constant evaluation */
4574 tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
4576 DBG_OPT_CSTEVAL(proj, c1);
4580 if (tarval_is_single_bit(tv)) {
4582 * optimization for AND:
4584 * And(x, C) == C ==> And(x, C) != 0
* And(x, C) != C ==> And(x, C) == 0
* if C is a single-bit constant.
/* check for a constant match. We have to check the tarvals here,
   because our constant might have been changed */
4592 if (get_Const_tarval(c1) == tv) {
4593 /* fine: do the transformation */
4594 tv = get_mode_null(get_tarval_mode(tv));
4595 proj_nr ^= pn_Cmp_Leg;
4597 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
4603 c1 = get_Or_right(left);
4604 if (is_Const(c1) && tarval_is_null(tv)) {
4606 * Or(x, C) == 0 && C != 0 ==> FALSE
4607 * Or(x, C) != 0 && C != 0 ==> TRUE
4609 if (! tarval_is_null(get_Const_tarval(c1))) {
4610 /* TODO: move to constant evaluation */
4611 tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
4613 DBG_OPT_CSTEVAL(proj, c1);
4620 * optimize x << c1 == c into x & (-1 >>u c1) == c >> c1 if c & (-1 << c1) == c
4622 * optimize x << c1 != c into x & (-1 >>u c1) != c >> c1 if c & (-1 << c1) == c
4625 c1 = get_Shl_right(left);
4627 tarval *tv1 = get_Const_tarval(c1);
4628 ir_mode *mode = get_irn_mode(left);
4629 tarval *minus1 = get_mode_all_one(mode);
4630 tarval *amask = tarval_shr(minus1, tv1);
4631 tarval *cmask = tarval_shl(minus1, tv1);
4634 if (tarval_and(tv, cmask) != tv) {
4635 /* condition not met */
4636 tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
4638 DBG_OPT_CSTEVAL(proj, c1);
4641 sl = get_Shl_left(left);
4642 blk = get_nodes_block(n);
4643 left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
4644 tv = tarval_shr(tv, tv1);
4646 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
4651 * optimize x >>u c1 == c into x & (-1 << c1) == c << c1 if c & (-1 >>u c1) == c
4653 * optimize x >>u c1 != c into x & (-1 << c1) != c << c1 if c & (-1 >>u c1) == c
4656 c1 = get_Shr_right(left);
4658 tarval *tv1 = get_Const_tarval(c1);
4659 ir_mode *mode = get_irn_mode(left);
4660 tarval *minus1 = get_mode_all_one(mode);
4661 tarval *amask = tarval_shl(minus1, tv1);
4662 tarval *cmask = tarval_shr(minus1, tv1);
4665 if (tarval_and(tv, cmask) != tv) {
4666 /* condition not met */
4667 tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
4669 DBG_OPT_CSTEVAL(proj, c1);
4672 sl = get_Shr_left(left);
4673 blk = get_nodes_block(n);
4674 left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
4675 tv = tarval_shl(tv, tv1);
4677 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
4682 * optimize x >>s c1 == c into x & (-1 << c1) == c << c1 if (c >>s (BITS - c1)) \in {0,-1}
4684 * optimize x >>s c1 != c into x & (-1 << c1) != c << c1 if (c >>s (BITS - c1)) \in {0,-1}
4687 c1 = get_Shrs_right(left);
4689 tarval *tv1 = get_Const_tarval(c1);
4690 ir_mode *mode = get_irn_mode(left);
4691 tarval *minus1 = get_mode_all_one(mode);
4692 tarval *amask = tarval_shl(minus1, tv1);
4693 tarval *cond = new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(tv1));
4696 cond = tarval_sub(cond, tv1, NULL);
4697 cond = tarval_shrs(tv, cond);
4699 if (!tarval_is_all_one(cond) && !tarval_is_null(cond)) {
4700 /* condition not met */
4701 tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
4703 DBG_OPT_CSTEVAL(proj, c1);
4706 sl = get_Shrs_left(left);
4707 blk = get_nodes_block(n);
4708 left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
4709 tv = tarval_shl(tv, tv1);
4711 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
4716 } /* tarval != bad */
4719 if (changed & 2) /* need a new Const */
4720 right = new_Const(tv);
4722 if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_Const(right) && is_Const_null(right) && is_Proj(left)) {
4723 ir_node *op = get_Proj_pred(left);
4725 if ((is_Mod(op) && get_Proj_proj(left) == pn_Mod_res) ||
4726 (is_DivMod(op) && get_Proj_proj(left) == pn_DivMod_res_mod)) {
4727 ir_node *c = get_binop_right(op);
4730 tarval *tv = get_Const_tarval(c);
4732 if (tarval_is_single_bit(tv)) {
4733 /* special case: (x % 2^n) CMP 0 ==> x & (2^n-1) CMP 0 */
4734 ir_node *v = get_binop_left(op);
4735 ir_node *blk = get_irn_n(op, -1);
4736 ir_mode *mode = get_irn_mode(v);
4738 tv = tarval_sub(tv, get_mode_one(mode), NULL);
4739 left = new_rd_And(get_irn_dbg_info(op), blk, v, new_Const(tv), mode);
4741 DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_MOD_TO_AND);
4748 ir_node *block = get_nodes_block(n);
4750 /* create a new compare */
4751 n = new_rd_Cmp(get_irn_dbg_info(n), block, left, right);
4752 proj = new_rd_Proj(get_irn_dbg_info(proj), n, get_irn_mode(proj), proj_nr);
4756 } /* transform_node_Proj_Cmp */
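/*
 * Worked examples for the rules above (illustrative only):
 *   x + 3 == 7   ->  x == 4          (move the constant to the right)
 *   x < 5        ->  x <= 4          (magnitude reduction for c > 0)
 *   x % 8 == 0   ->  (x & 7) == 0    (power-of-two modulus to And)
 */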
4759 * Optimize CopyB(mem, x, x) into a Nop.
4761 static ir_node *transform_node_Proj_CopyB(ir_node *proj)
4763 ir_node *copyb = get_Proj_pred(proj);
4764 ir_node *a = get_CopyB_dst(copyb);
4765 ir_node *b = get_CopyB_src(copyb);
4768 switch (get_Proj_proj(proj)) {
4769 case pn_CopyB_X_regular:
4770 /* Turn CopyB into a tuple (mem, jmp, bad, bad) */
4771 DBG_OPT_EXC_REM(proj);
4772 proj = new_r_Jmp(get_nodes_block(copyb));
4774 case pn_CopyB_X_except:
4775 DBG_OPT_EXC_REM(proj);
4776 proj = get_irg_bad(get_irn_irg(proj));
4783 } /* transform_node_Proj_CopyB */
* Optimize Bound(idx, idx, upper) into idx.
4788 static ir_node *transform_node_Proj_Bound(ir_node *proj)
4790 ir_node *oldn = proj;
4791 ir_node *bound = get_Proj_pred(proj);
4792 ir_node *idx = get_Bound_index(bound);
4793 ir_node *pred = skip_Proj(idx);
4796 if (idx == get_Bound_lower(bound))
4798 else if (is_Bound(pred)) {
* idx was bounds-checked in the same MacroBlock previously;
* it is still valid if lower <= pred_lower && pred_upper <= upper.
4803 ir_node *lower = get_Bound_lower(bound);
4804 ir_node *upper = get_Bound_upper(bound);
4805 if (get_Bound_lower(pred) == lower &&
4806 get_Bound_upper(pred) == upper &&
4807 get_irn_MacroBlock(bound) == get_irn_MacroBlock(pred)) {
4809 * One could expect that we simply return the previous
4810 * Bound here. However, this would be wrong, as we could
4811 * add an exception Proj to a new location then.
* So, we must turn it into a tuple.
4818 /* Turn Bound into a tuple (mem, jmp, bad, idx) */
4819 switch (get_Proj_proj(proj)) {
4821 DBG_OPT_EXC_REM(proj);
4822 proj = get_Bound_mem(bound);
4824 case pn_Bound_X_except:
4825 DBG_OPT_EXC_REM(proj);
4826 proj = get_irg_bad(get_irn_irg(proj));
4830 DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
4832 case pn_Bound_X_regular:
4833 DBG_OPT_EXC_REM(proj);
4834 proj = new_r_Jmp(get_nodes_block(bound));
4841 } /* transform_node_Proj_Bound */
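/*
 * Illustrative sketch (not part of libFirm): if idx was already checked
 * against the same lower/upper pair in the same MacroBlock,
 *
 *   Bound(idx, lo, up); ... Bound(idx, lo, up);
 *
 * the second Bound becomes a tuple (mem, jmp, bad, idx): its result Proj
 * yields idx directly and its exception edge disappears.
 */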
* Does all optimizations on a node that must be done on its Projs
* because they create new nodes.
4847 static ir_node *transform_node_Proj(ir_node *proj)
4849 ir_node *n = get_Proj_pred(proj);
4851 if (n->op->ops.transform_node_Proj)
4852 return n->op->ops.transform_node_Proj(proj);
4854 } /* transform_node_Proj */
4857 * Move Confirms down through Phi nodes.
4859 static ir_node *transform_node_Phi(ir_node *phi)
4862 ir_mode *mode = get_irn_mode(phi);
4864 if (mode_is_reference(mode)) {
4865 n = get_irn_arity(phi);
4867 /* Beware of Phi0 */
4869 ir_node *pred = get_irn_n(phi, 0);
4870 ir_node *bound, *new_Phi, *block, **in;
4873 if (! is_Confirm(pred))
4876 bound = get_Confirm_bound(pred);
4877 pnc = get_Confirm_cmp(pred);
4879 NEW_ARR_A(ir_node *, in, n);
4880 in[0] = get_Confirm_value(pred);
4882 for (i = 1; i < n; ++i) {
4883 pred = get_irn_n(phi, i);
4885 if (! is_Confirm(pred) ||
4886 get_Confirm_bound(pred) != bound ||
4887 get_Confirm_cmp(pred) != pnc)
4889 in[i] = get_Confirm_value(pred);
4891 /* move the Confirm nodes "behind" the Phi */
4892 block = get_irn_n(phi, -1);
4893 new_Phi = new_r_Phi(block, n, in, get_irn_mode(phi));
4894 return new_r_Confirm(block, new_Phi, bound, pnc);
4898 } /* transform_node_Phi */
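/*
 * Illustrative sketch (not part of libFirm): for a pointer Phi whose
 * predecessors all carry the same Confirm,
 *
 *   Phi(Confirm(v0, b, pnc), Confirm(v1, b, pnc))
 *     =>  Confirm(Phi(v0, v1), b, pnc)
 *
 * so the range information stays visible behind the Phi.
 */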
* Returns the operands of a commutative bin-op; if one operand is
* a Const, it is returned as the second one.
4904 static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
4906 ir_node *op_a = get_binop_left(binop);
4907 ir_node *op_b = get_binop_right(binop);
4909 assert(is_op_commutative(get_irn_op(binop)));
4911 if (is_Const(op_a)) {
4918 } /* get_comm_Binop_Ops */
* Optimize an Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
* Such patterns may arise in bitfield stores.
*
* The transformation performed is
*
*   Or(And(Or(And(v, c4), c3), c2), c1)
*     ===>  Or(And(v, c4 & c2), c3 | c1)
*
* with a shortcut when (c1 | c2) == 0x111..11: there the inner And
* with c2 can simply be dropped.
4935 static ir_node *transform_node_Or_bf_store(ir_node *or)
4939 ir_node *and_l, *c3;
4940 ir_node *value, *c4;
4941 ir_node *new_and, *new_const, *block;
4942 ir_mode *mode = get_irn_mode(or);
4944 tarval *tv1, *tv2, *tv3, *tv4, *tv;
4947 get_comm_Binop_Ops(or, &and, &c1);
4948 if (!is_Const(c1) || !is_And(and))
4951 get_comm_Binop_Ops(and, &or_l, &c2);
4955 tv1 = get_Const_tarval(c1);
4956 tv2 = get_Const_tarval(c2);
4958 tv = tarval_or(tv1, tv2);
4959 if (tarval_is_all_one(tv)) {
/* the AND does NOT clear a bit that isn't set by the OR */
4961 set_Or_left(or, or_l);
4962 set_Or_right(or, c1);
4964 /* check for more */
4971 get_comm_Binop_Ops(or_l, &and_l, &c3);
4972 if (!is_Const(c3) || !is_And(and_l))
4975 get_comm_Binop_Ops(and_l, &value, &c4);
4979 /* ok, found the pattern, check for conditions */
4980 assert(mode == get_irn_mode(and));
4981 assert(mode == get_irn_mode(or_l));
4982 assert(mode == get_irn_mode(and_l));
4984 tv3 = get_Const_tarval(c3);
4985 tv4 = get_Const_tarval(c4);
4987 tv = tarval_or(tv4, tv2);
4988 if (!tarval_is_all_one(tv)) {
4989 /* have at least one 0 at the same bit position */
4993 if (tv3 != tarval_andnot(tv3, tv4)) {
4994 /* bit in the or_mask is outside the and_mask */
4998 if (tv1 != tarval_andnot(tv1, tv2)) {
4999 /* bit in the or_mask is outside the and_mask */
5003 /* ok, all conditions met */
5004 block = get_irn_n(or, -1);
5006 new_and = new_r_And(block, value, new_Const(tarval_and(tv4, tv2)), mode);
5008 new_const = new_Const(tarval_or(tv3, tv1));
5010 set_Or_left(or, new_and);
5011 set_Or_right(or, new_const);
5013 /* check for more */
5015 } /* transform_node_Or_bf_store */
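/*
 * Illustrative sketch (not part of libFirm): adjacent bitfield stores
 * such as
 *
 *   struct { unsigned a:4, b:4; } s;
 *   s.a = 1; s.b = 2;
 *
 * typically lower to exactly this Or/And chain, which the function above
 * folds into a single And with mask c4 & c2 followed by an Or with c3 | c1.
 */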
5018 * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl
5020 static ir_node *transform_node_Or_Rotl(ir_node *or)
5022 ir_mode *mode = get_irn_mode(or);
5023 ir_node *shl, *shr, *block;
5024 ir_node *irn, *x, *c1, *c2, *v, *sub, *n, *rotval;
5027 if (! mode_is_int(mode))
5030 shl = get_binop_left(or);
5031 shr = get_binop_right(or);
5040 } else if (!is_Shl(shl)) {
5042 } else if (!is_Shr(shr)) {
5045 x = get_Shl_left(shl);
5046 if (x != get_Shr_left(shr))
5049 c1 = get_Shl_right(shl);
5050 c2 = get_Shr_right(shr);
5051 if (is_Const(c1) && is_Const(c2)) {
5052 tv1 = get_Const_tarval(c1);
5053 if (! tarval_is_long(tv1))
5056 tv2 = get_Const_tarval(c2);
5057 if (! tarval_is_long(tv2))
5060 if (get_tarval_long(tv1) + get_tarval_long(tv2)
5061 != (int) get_mode_size_bits(mode))
/* yes, condition met */
5065 block = get_nodes_block(or);
5067 n = new_r_Rotl(block, x, c1, mode);
5069 DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
5076 rotval = sub; /* a Rot right is not supported, so use a rot left */
5077 } else if (is_Sub(c2)) {
5083 if (get_Sub_right(sub) != v)
5086 c1 = get_Sub_left(sub);
5090 tv1 = get_Const_tarval(c1);
5091 if (! tarval_is_long(tv1))
5094 if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode))
/* yes, condition met */
5098 block = get_nodes_block(or);
5100 n = new_r_Rotl(block, x, rotval, mode);
5102 DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
5104 } /* transform_node_Or_Rotl */
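/*
 * Illustrative sketch (not part of libFirm): the classic C rotate idiom,
 * here for a 32-bit unsigned type,
 *
 *   unsigned rotl3(unsigned x) { return (x << 3) | (x >> 29); }
 *
 * matches the constant case above because 3 + 29 == 32 and becomes
 * Rotl(x, 3); the Sub case catches the variable form
 * (x << n) | (x >> (32 - n)).
 */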
5109 static ir_node *transform_node_Or(ir_node *n)
5111 ir_node *c, *oldn = n;
5112 ir_node *a = get_Or_left(n);
5113 ir_node *b = get_Or_right(n);
5116 if (is_Not(a) && is_Not(b)) {
5117 /* ~a | ~b = ~(a&b) */
5118 ir_node *block = get_nodes_block(n);
5120 mode = get_irn_mode(n);
5123 n = new_rd_And(get_irn_dbg_info(n), block, a, b, mode);
5124 n = new_rd_Not(get_irn_dbg_info(n), block, n, mode);
5125 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
5129 /* we can evaluate 2 Projs of the same Cmp */
5130 if (get_irn_mode(n) == mode_b && is_Proj(a) && is_Proj(b)) {
5131 ir_node *pred_a = get_Proj_pred(a);
5132 ir_node *pred_b = get_Proj_pred(b);
5133 if (pred_a == pred_b) {
5134 dbg_info *dbgi = get_irn_dbg_info(n);
5135 pn_Cmp pn_a = get_Proj_proj(a);
5136 pn_Cmp pn_b = get_Proj_proj(b);
5137 /* yes, we can simply calculate with pncs */
5138 pn_Cmp new_pnc = pn_a | pn_b;
5140 return new_rd_Proj(dbgi, pred_a, mode_b, new_pnc);
5144 mode = get_irn_mode(n);
5145 HANDLE_BINOP_PHI((eval_func) tarval_or, a, b, c, mode);
5147 n = transform_node_Or_bf_store(n);
5148 n = transform_node_Or_Rotl(n);
5152 n = transform_bitwise_distributive(n, transform_node_Or);
5155 } /* transform_node_Or */
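/*
 * Illustrative sketch (not part of libFirm): the two-Proj case above
 * merges comparisons of the same operands, e.g.
 *
 *   if (a < b || a == b) ...   =>   if (a <= b) ...
 *
 * because pn_Cmp_Lt | pn_Cmp_Eq == pn_Cmp_Le.
 */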
5159 static ir_node *transform_node(ir_node *n);
* Optimize (a >> c1) >> c2; works for Shr, Shrs, Shl, Rotl.
*
* Should be moved to reassociation?
5166 static ir_node *transform_node_shift(ir_node *n)
5168 ir_node *left, *right;
5170 tarval *tv1, *tv2, *res;
5171 ir_node *in[2], *irn, *block;
5173 left = get_binop_left(n);
5175 /* different operations */
5176 if (get_irn_op(left) != get_irn_op(n))
5179 right = get_binop_right(n);
5180 tv1 = value_of(right);
5181 if (tv1 == tarval_bad)
5184 tv2 = value_of(get_binop_right(left));
5185 if (tv2 == tarval_bad)
5188 res = tarval_add(tv1, tv2);
5189 mode = get_irn_mode(n);
/* beware: a simple replacement works only if res < modulo shift */
5193 int modulo_shf = get_mode_modulo_shift(mode);
5194 if (modulo_shf > 0) {
5195 tarval *modulo = new_tarval_from_long(modulo_shf,
5196 get_tarval_mode(res));
5198 assert(modulo_shf >= (int) get_mode_size_bits(mode));
5200 /* shifting too much */
5201 if (!(tarval_cmp(res, modulo) & pn_Cmp_Lt)) {
5203 ir_node *block = get_nodes_block(n);
5204 dbg_info *dbgi = get_irn_dbg_info(n);
5205 ir_mode *smode = get_irn_mode(right);
5206 ir_node *cnst = new_Const_long(smode, get_mode_size_bits(mode) - 1);
5207 return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode);
5210 return new_Const(get_mode_null(mode));
5214 res = tarval_mod(res, new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(res)));
5217 /* ok, we can replace it */
5218 block = get_nodes_block(n);
5220 in[0] = get_binop_left(left);
5221 in[1] = new_Const(res);
5223 irn = new_ir_node(NULL, get_Block_irg(block), block, get_irn_op(n), mode, 2, in);
5225 DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
5227 return transform_node(irn);
5228 } /* transform_node_shift */
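/*
 * Illustrative sketches (not part of libFirm), for a 32-bit mode:
 *
 *   (x >> 3) >> 5       =>   x >> 8      (3 + 5 < 32)
 *   (x >> 20) >> 20     =>   0           (sum reaches the bit size)
 *   (x >>s 20) >>s 20   =>   x >>s 31    (Shrs keeps replicating the sign)
 */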
* normalisation: (x & c1) >> c2  to  (x >> c2) & (c1 >> c2);
* this also works with
*  - and, or, xor          instead of &
*  - Shl, Shr, Shrs, Rotl  instead of >>
* (with a special case for Or/Xor + Shrs)
5237 static ir_node *transform_node_bitop_shift(ir_node *n)
5240 ir_node *right = get_binop_right(n);
5241 ir_mode *mode = get_irn_mode(n);
5242 ir_node *bitop_left;
5243 ir_node *bitop_right;
5254 assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
5256 if (!is_Const(right))
5259 left = get_binop_left(n);
5260 op_left = get_irn_op(left);
5261 if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
5264 /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
5265 if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
/* TODO: test if the sign bit is affected */
5270 bitop_right = get_binop_right(left);
5271 if (!is_Const(bitop_right))
5274 bitop_left = get_binop_left(left);
5276 block = get_nodes_block(n);
5277 dbgi = get_irn_dbg_info(n);
5278 tv1 = get_Const_tarval(bitop_right);
5279 tv2 = get_Const_tarval(right);
5281 assert(get_tarval_mode(tv1) == mode);
5284 new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
5285 tv_shift = tarval_shl(tv1, tv2);
5286 } else if (is_Shr(n)) {
5287 new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
5288 tv_shift = tarval_shr(tv1, tv2);
5289 } else if (is_Shrs(n)) {
5290 new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
5291 tv_shift = tarval_shrs(tv1, tv2);
5294 new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
5295 tv_shift = tarval_rotl(tv1, tv2);
5298 assert(get_tarval_mode(tv_shift) == mode);
5299 new_const = new_Const(tv_shift);
5301 if (op_left == op_And) {
5302 new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
5303 } else if (op_left == op_Or) {
5304 new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
5306 assert(op_left == op_Eor);
5307 new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
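/*
 * Illustrative sketch (not part of libFirm): for a 32-bit unsigned x,
 *
 *   (x & 0xff00) >> 8   =>   (x >> 8) & 0x00ff
 *
 * i.e. the mask constant is shifted along with the value, so the bitwise
 * operation now applies to the shifted value.
 */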
5315 * (x << c1) >> c2 <=> x OP (c2-c1) & ((-1 << c1) >> c2)
5317 * (x >> c1) << c2 <=> x OP (c2-c1) & ((-1 >> c1) << c2)
5318 * (also with x >>s c1 when c1>=c2)
5320 static ir_node *transform_node_shl_shr(ir_node *n)
5323 ir_node *right = get_binop_right(n);
5338 assert(is_Shl(n) || is_Shr(n) || is_Shrs(n));
5340 if (!is_Const(right))
5343 left = get_binop_left(n);
5344 mode = get_irn_mode(n);
5345 if (is_Shl(n) && (is_Shr(left) || is_Shrs(left))) {
5346 ir_node *shr_right = get_binop_right(left);
5348 if (!is_Const(shr_right))
5351 x = get_binop_left(left);
5352 tv_shr = get_Const_tarval(shr_right);
5353 tv_shl = get_Const_tarval(right);
5355 if (is_Shrs(left)) {
5356 /* shrs variant only allowed if c1 >= c2 */
5357 if (! (tarval_cmp(tv_shl, tv_shr) & pn_Cmp_Ge))
5360 tv_mask = tarval_shrs(get_mode_all_one(mode), tv_shr);
5363 tv_mask = tarval_shr(get_mode_all_one(mode), tv_shr);
5365 tv_mask = tarval_shl(tv_mask, tv_shl);
5366 } else if (is_Shr(n) && is_Shl(left)) {
5367 ir_node *shl_right = get_Shl_right(left);
5369 if (!is_Const(shl_right))
5372 x = get_Shl_left(left);
5373 tv_shr = get_Const_tarval(right);
5374 tv_shl = get_Const_tarval(shl_right);
5376 tv_mask = tarval_shl(get_mode_all_one(mode), tv_shl);
5377 tv_mask = tarval_shr(tv_mask, tv_shr);
5382 if (get_tarval_mode(tv_shl) != get_tarval_mode(tv_shr)) {
5383 tv_shl = tarval_convert_to(tv_shl, get_tarval_mode(tv_shr));
5386 assert(tv_mask != tarval_bad);
5387 assert(get_tarval_mode(tv_mask) == mode);
5389 block = get_nodes_block(n);
5390 dbgi = get_irn_dbg_info(n);
5392 pnc = tarval_cmp(tv_shl, tv_shr);
5393 if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Eq) {
5394 tv_shift = tarval_sub(tv_shr, tv_shl, NULL);
5395 new_const = new_Const(tv_shift);
5397 new_shift = new_rd_Shrs(dbgi, block, x, new_const, mode);
5399 new_shift = new_rd_Shr(dbgi, block, x, new_const, mode);
5402 assert(pnc == pn_Cmp_Gt);
5403 tv_shift = tarval_sub(tv_shl, tv_shr, NULL);
5404 new_const = new_Const(tv_shift);
5405 new_shift = new_rd_Shl(dbgi, block, x, new_const, mode);
5408 new_const = new_Const(tv_mask);
5409 new_and = new_rd_And(dbgi, block, new_shift, new_const, mode);
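/*
 * Illustrative sketches (not part of libFirm), for a 32-bit unsigned x:
 *
 *   (x << 4) >> 4   =>   x & 0x0fffffff           (c1 == c2: mask only)
 *   (x << 8) >> 4   =>   (x << 4) & 0x0ffffff0    (c1 > c2: shift + mask)
 */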
5417 static ir_node *transform_node_Shr(ir_node *n)
5419 ir_node *c, *oldn = n;
5420 ir_node *left = get_Shr_left(n);
5421 ir_node *right = get_Shr_right(n);
5422 ir_mode *mode = get_irn_mode(n);
5424 HANDLE_BINOP_PHI((eval_func) tarval_shr, left, right, c, mode);
5425 n = transform_node_shift(n);
5428 n = transform_node_shl_shr(n);
5430 n = transform_node_bitop_shift(n);
5433 } /* transform_node_Shr */
5438 static ir_node *transform_node_Shrs(ir_node *n)
5440 ir_node *c, *oldn = n;
5441 ir_node *a = get_Shrs_left(n);
5442 ir_node *b = get_Shrs_right(n);
5443 ir_mode *mode = get_irn_mode(n);
5445 HANDLE_BINOP_PHI((eval_func) tarval_shrs, a, b, c, mode);
5446 n = transform_node_shift(n);
5449 n = transform_node_bitop_shift(n);
5452 } /* transform_node_Shrs */
5457 static ir_node *transform_node_Shl(ir_node *n)
5459 ir_node *c, *oldn = n;
5460 ir_node *a = get_Shl_left(n);
5461 ir_node *b = get_Shl_right(n);
5462 ir_mode *mode = get_irn_mode(n);
5464 HANDLE_BINOP_PHI((eval_func) tarval_shl, a, b, c, mode);
5465 n = transform_node_shift(n);
5468 n = transform_node_shl_shr(n);
5470 n = transform_node_bitop_shift(n);
5473 } /* transform_node_Shl */
5478 static ir_node *transform_node_Rotl(ir_node *n)
5480 ir_node *c, *oldn = n;
5481 ir_node *a = get_Rotl_left(n);
5482 ir_node *b = get_Rotl_right(n);
5483 ir_mode *mode = get_irn_mode(n);
5485 HANDLE_BINOP_PHI((eval_func) tarval_rotl, a, b, c, mode);
5486 n = transform_node_shift(n);
5489 n = transform_node_bitop_shift(n);
5492 } /* transform_node_Rotl */
5497 static ir_node *transform_node_Conv(ir_node *n)
5499 ir_node *c, *oldn = n;
5500 ir_mode *mode = get_irn_mode(n);
5501 ir_node *a = get_Conv_op(n);
5503 if (mode != mode_b && is_const_Phi(a)) {
/* Do NOT optimize mode_b Convs; this leads to remaining
 * Phib nodes later, because the conv_b_lower operation
 * is instantly reverted when it tries to insert a Convb.
5508 c = apply_conv_on_phi(a, mode);
5510 DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);
5515 if (is_Unknown(a)) { /* Conv_A(Unknown_B) -> Unknown_A */
5516 return new_r_Unknown(current_ir_graph, mode);
5519 if (mode_is_reference(mode) &&
5520 get_mode_size_bits(mode) == get_mode_size_bits(get_irn_mode(a)) &&
5522 ir_node *l = get_Add_left(a);
5523 ir_node *r = get_Add_right(a);
5524 dbg_info *dbgi = get_irn_dbg_info(a);
5525 ir_node *block = get_nodes_block(n);
5527 ir_node *lop = get_Conv_op(l);
5528 if (get_irn_mode(lop) == mode) {
5529 /* ConvP(AddI(ConvI(P), x)) -> AddP(P, x) */
5530 n = new_rd_Add(dbgi, block, lop, r, mode);
5535 ir_node *rop = get_Conv_op(r);
5536 if (get_irn_mode(rop) == mode) {
5537 /* ConvP(AddI(x, ConvI(P))) -> AddP(x, P) */
5538 n = new_rd_Add(dbgi, block, l, rop, mode);
5545 } /* transform_node_Conv */
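/*
 * Illustrative sketch (not part of libFirm): on a target where pointers
 * and integers have the same width, pointer arithmetic that was lowered
 * to integer arithmetic is folded back:
 *
 *   ConvP(AddI(ConvI(p), x))   =>   AddP(p, x)
 */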
* Remove dead blocks and nodes in dead blocks
* from the keep-alive list. We do not generate a new End node.
5551 static ir_node *transform_node_End(ir_node *n)
5553 int i, j, n_keepalives = get_End_n_keepalives(n);
5556 NEW_ARR_A(ir_node *, in, n_keepalives);
5558 for (i = j = 0; i < n_keepalives; ++i) {
5559 ir_node *ka = get_End_keepalive(n, i);
5561 if (! is_Block_dead(ka)) {
5565 } else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
5567 } else if (is_Bad(ka)) {
5568 /* no need to keep Bad */
5573 if (j != n_keepalives)
5574 set_End_keepalives(n, j, in);
5576 } /* transform_node_End */
5578 bool is_negated_value(ir_node *a, ir_node *b)
5580 if (is_Minus(a) && get_Minus_op(a) == b)
5582 if (is_Minus(b) && get_Minus_op(b) == a)
5584 if (is_Sub(a) && is_Sub(b)) {
5585 ir_node *a_left = get_Sub_left(a);
5586 ir_node *a_right = get_Sub_right(a);
5587 ir_node *b_left = get_Sub_left(b);
5588 ir_node *b_right = get_Sub_right(b);
5590 if (a_left == b_right && a_right == b_left)
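/*
 * Illustrative sketch (not part of libFirm): is_negated_value(a, b) holds,
 * for example, for
 *
 *   a = Minus(x), b = x            (and vice versa)
 *   a = Sub(x, y), b = Sub(y, x)
 */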
5598 * Optimize a Mux into some simpler cases.
5600 static ir_node *transform_node_Mux(ir_node *n)
5602 ir_node *oldn = n, *sel = get_Mux_sel(n);
5603 ir_mode *mode = get_irn_mode(n);
5604 ir_node *t = get_Mux_true(n);
5605 ir_node *f = get_Mux_false(n);
5606 ir_graph *irg = current_ir_graph;
5608 if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX))
5612 ir_node* block = get_nodes_block(n);
5614 ir_node* c1 = get_Mux_sel(t);
5615 ir_node* t1 = get_Mux_true(t);
5616 ir_node* f1 = get_Mux_false(t);
5618 /* Mux(cond0, Mux(cond1, x, y), y) -> typical if (cond0 && cond1) x else y */
5619 ir_node* and_ = new_r_And(block, c0, c1, mode_b);
5620 ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
5625 DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
5626 } else if (f == t1) {
5627 /* Mux(cond0, Mux(cond1, x, y), x) */
5628 ir_node* not_c1 = new_r_Not(block, c1, mode_b);
5629 ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
5630 ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
5635 DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
5637 } else if (is_Mux(f)) {
5638 ir_node* block = get_nodes_block(n);
5640 ir_node* c1 = get_Mux_sel(f);
5641 ir_node* t1 = get_Mux_true(f);
5642 ir_node* f1 = get_Mux_false(f);
5644 /* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
5645 ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
5646 ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
5651 DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
5652 } else if (t == f1) {
5653 /* Mux(cond0, x, Mux(cond1, y, x)) */
5654 ir_node* not_c1 = new_r_Not(block, c1, mode_b);
5655 ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
5656 ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
5661 DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
5665 /* first normalization step: move a possible zero to the false case */
5667 ir_node *cmp = get_Proj_pred(sel);
5670 if (is_Const(t) && is_Const_null(t)) {
5673 /* Mux(x, 0, y) => Mux(x, y, 0) */
5674 pn_Cmp pnc = get_Proj_proj(sel);
5675 sel = new_r_Proj(cmp, mode_b,
5676 get_negated_pnc(pnc, get_irn_mode(get_Cmp_left(cmp))));
5677 n = new_rd_Mux(get_irn_dbg_info(n), get_nodes_block(n), sel, t, f, mode);
5685 /* note: after normalization, false can only happen on default */
5686 if (mode == mode_b) {
5687 dbg_info *dbg = get_irn_dbg_info(n);
5688 ir_node *block = get_nodes_block(n);
5691 tarval *tv_t = get_Const_tarval(t);
5692 if (tv_t == tarval_b_true) {
5694 /* Muxb(sel, true, false) = sel */
5695 assert(get_Const_tarval(f) == tarval_b_false);
5696 DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
5699 /* Muxb(sel, true, x) = Or(sel, x) */
5700 n = new_rd_Or(dbg, block, sel, f, mode_b);
5701 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
5705 } else if (is_Const(f)) {
5706 tarval *tv_f = get_Const_tarval(f);
5707 if (tv_f == tarval_b_true) {
5708 /* Muxb(sel, x, true) = Or(Not(sel), x) */
5709 ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
5710 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
5711 n = new_rd_Or(dbg, block, not_sel, t, mode_b);
5714 /* Muxb(sel, x, false) = And(sel, x) */
5715 assert(tv_f == tarval_b_false);
5716 n = new_rd_And(dbg, block, sel, t, mode_b);
5717 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
5723 /* more normalization: try to normalize Mux(x, C1, C2) into Mux(x, +1/-1, 0) op C2 */
5724 if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
5725 tarval *a = get_Const_tarval(t);
5726 tarval *b = get_Const_tarval(f);
5729 if (tarval_is_one(a) && tarval_is_null(b)) {
5730 ir_node *block = get_nodes_block(n);
5731 ir_node *conv = new_r_Conv(block, sel, mode);
5733 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
5735 } else if (tarval_is_null(a) && tarval_is_one(b)) {
5736 ir_node *block = get_nodes_block(n);
5737 ir_node *not_ = new_r_Not(block, sel, mode_b);
5738 ir_node *conv = new_r_Conv(block, not_, mode);
5740 DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
5743 /* TODO: it's not really clear if that helps in general or should be moved
5744 * to backend, especially with the MUX->Conv transformation above */
5745 if (tarval_cmp(a, b) & pn_Cmp_Gt) {
5746 diff = tarval_sub(a, b, NULL);
5749 diff = tarval_sub(b, a, NULL);
5753 if (diff == get_tarval_one(mode)) {
5754 dbg_info *dbg = get_irn_dbg_info(n);
5755 ir_node *block = get_nodes_block(n);
5756 ir_node *t = new_Const(tarval_sub(a, min, NULL));
5757 ir_node *f = new_Const(tarval_sub(b, min, NULL));
5758 n = new_rd_Mux(dbg, block, sel, f, t, mode);
5759 n = new_rd_Add(dbg, block, n, new_Const(min), mode);
5765 ir_node *cmp = get_Proj_pred(sel);
5766 long pn = get_Proj_proj(sel);
* Note: normalization puts the constant on the right side,
* so we check only one case.
*
* Note further that these optimizations work even for floating point
* with NaNs, because -NaN == NaN.
* However, if +0 and -0 are handled differently, we cannot use the
* Abs/-Abs transformations.
5778 ir_node *cmp_r = get_Cmp_right(cmp);
5779 if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
5780 ir_node *block = get_nodes_block(n);
5781 ir_node *cmp_l = get_Cmp_left(cmp);
5783 if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) {
5786 /* NaN's work fine with abs, so it is ok to remove Uo */
5787 long pnc = pn & ~pn_Cmp_Uo;
5789 if ( (cmp_l == t && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt))
5790 || (cmp_l == f && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt)))
5792 /* Mux(a >/>= 0, a, -a) = Mux(a </<= 0, -a, a) ==> Abs(a) */
5793 n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
5794 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
5796 } else if ((cmp_l == t && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt))
5797 || (cmp_l == f && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt)))
5799 /* Mux(a </<= 0, a, -a) = Mux(a >/>= 0, -a, a) ==> -Abs(a) */
5800 n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
5801 n = new_rd_Minus(get_irn_dbg_info(n), block, n, mode);
5802 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
5807 if (mode_is_int(mode)) {
5809 if ((pn == pn_Cmp_Lg || pn == pn_Cmp_Eq) && is_And(cmp_l)) {
5810 /* Mux((a & b) != 0, c, 0) */
5811 ir_node *and_r = get_And_right(cmp_l);
5814 if (and_r == t && f == cmp_r) {
5815 if (is_Const(t) && tarval_is_single_bit(get_Const_tarval(t))) {
5816 if (pn == pn_Cmp_Lg) {
5817 /* Mux((a & 2^C) != 0, 2^C, 0) */
5819 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
5821 /* Mux((a & 2^C) == 0, 2^C, 0) */
5822 n = new_rd_Eor(get_irn_dbg_info(n),
5823 block, cmp_l, t, mode);
5824 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
5829 if (is_Shl(and_r)) {
5830 ir_node *shl_l = get_Shl_left(and_r);
5831 if (is_Const(shl_l) && is_Const_one(shl_l)) {
5832 if (and_r == t && f == cmp_r) {
5833 if (pn == pn_Cmp_Lg) {
5834 /* (a & (1 << n)) != 0, (1 << n), 0) */
5836 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
5838 /* (a & (1 << n)) == 0, (1 << n), 0) */
5839 n = new_rd_Eor(get_irn_dbg_info(n),
5840 block, cmp_l, t, mode);
5841 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
5847 and_l = get_And_left(cmp_l);
5848 if (is_Shl(and_l)) {
5849 ir_node *shl_l = get_Shl_left(and_l);
5850 if (is_Const(shl_l) && is_Const_one(shl_l)) {
5851 if (and_l == t && f == cmp_r) {
5852 if (pn == pn_Cmp_Lg) {
5853 /* ((1 << n) & a) != 0, (1 << n), 0) */
5855 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
5857 /* ((1 << n) & a) == 0, (1 << n), 0) */
5858 n = new_rd_Eor(get_irn_dbg_info(n),
5859 block, cmp_l, t, mode);
5860 DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
5873 } /* transform_node_Mux */
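/*
 * Illustrative sketches (not part of libFirm) of the Mux rewrites above:
 *
 *   Mux(sel, true, x)         =>   Or(sel, x)       (boolean mode)
 *   Mux(a >= 0, a, -a)        =>   Abs(a)
 *   Mux((a & 8) != 0, 8, 0)   =>   a & 8            (single-bit test)
 */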
* Optimize Sync nodes that have other Syncs as input: we simply add the
* inputs of the other Sync to our own inputs.
5879 static ir_node *transform_node_Sync(ir_node *n)
5881 int arity = get_Sync_n_preds(n);
5884 for (i = 0; i < arity;) {
5885 ir_node *pred = get_Sync_pred(n, i);
5889 if (!is_Sync(pred)) {
5897 pred_arity = get_Sync_n_preds(pred);
5898 for (j = 0; j < pred_arity; ++j) {
5899 ir_node *pred_pred = get_Sync_pred(pred, j);
5904 add_irn_n(n, pred_pred);
5908 if (get_Sync_pred(n, k) == pred_pred) break;
5913 /* rehash the sync node */
5914 add_identities(current_ir_graph->value_table, n);
5917 } /* transform_node_Sync */
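/*
 * Illustrative sketch (not part of libFirm): Sync flattening merges
 *
 *   Sync(Sync(m0, m1), m2)   =>   Sync(m0, m1, m2)
 *
 * where memory predecessors that are already present are added only once.
 */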
5920 * optimize a trampoline Call into a direct Call
5922 static ir_node *transform_node_Call(ir_node *call)
5924 ir_node *callee = get_Call_ptr(call);
5925 ir_node *adr, *mem, *res, *bl, **in;
5926 ir_type *ctp, *mtp, *tp;
5929 int i, n_res, n_param;
5932 if (! is_Proj(callee))
5934 callee = get_Proj_pred(callee);
5935 if (! is_Builtin(callee))
5937 if (get_Builtin_kind(callee) != ir_bk_inner_trampoline)
5940 mem = get_Call_mem(call);
5942 if (skip_Proj(mem) == callee) {
5943 /* memory is routed to the trampoline, skip */
5944 mem = get_Builtin_mem(callee);
5947 /* build a new call type */
5948 mtp = get_Call_type(call);
5949 tdb = get_type_dbg_info(mtp);
5951 n_res = get_method_n_ress(mtp);
5952 n_param = get_method_n_params(mtp);
5953 ctp = new_d_type_method(n_param + 1, n_res, tdb);
5955 for (i = 0; i < n_res; ++i)
5956 set_method_res_type(ctp, i, get_method_res_type(mtp, i));
5958 NEW_ARR_A(ir_node *, in, n_param + 1);
5960 /* FIXME: we don't need a new pointer type in every step */
5961 tp = get_irg_frame_type(current_ir_graph);
5962 tp = new_type_pointer(tp);
5963 set_method_param_type(ctp, 0, tp);
5965 in[0] = get_Builtin_param(callee, 2);
5966 for (i = 0; i < n_param; ++i) {
5967 set_method_param_type(ctp, i + 1, get_method_param_type(mtp, i));
5968 in[i + 1] = get_Call_param(call, i);
5970 var = get_method_variadicity(mtp);
5971 set_method_variadicity(ctp, var);
5972 if (var == variadicity_variadic) {
5973 set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
5975 /* When we resolve a trampoline, the function must be called by a this-call */
5976 set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
5977 set_method_additional_properties(ctp, get_method_additional_properties(mtp));
5979 adr = get_Builtin_param(callee, 1);
5981 db = get_irn_dbg_info(call);
5982 bl = get_nodes_block(call);
5984 res = new_rd_Call(db, bl, mem, adr, n_param + 1, in, ctp);
5985 if (get_irn_pinned(call) == op_pin_state_floats)
5986 set_irn_pinned(res, op_pin_state_floats);
5988 } /* transform_node_Call */
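/*
 * Illustrative sketch (not part of libFirm; parameter roles inferred from
 * the code above): a call through an inner trampoline,
 *
 *   Call(Proj(Builtin inner_trampoline(tramp, func, env)), args...)
 *     =>   Call(func, env, args...)
 *
 * with env prepended as first argument, which is why the new type gets
 * the cc_this_call calling convention.
 */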
* Tries several [inplace] [optimizing] transformations and returns an
* equivalent node. The difference from equivalent_node() is that these
* transformations _do_ generate new nodes, and thus the old node must
* not be freed even if the equivalent node isn't the old one.
5996 static ir_node *transform_node(ir_node *n)
6001 * Transform_node is the only "optimizing transformation" that might
6002 * return a node with a different opcode. We iterate HERE until fixpoint
6003 * to get the final result.
6007 if (n->op->ops.transform_node != NULL)
6008 n = n->op->ops.transform_node(n);
6009 } while (oldn != n);
6012 } /* transform_node */
6015 * Sets the default transform node operation for an ir_op_ops.
6017 * @param code the opcode for the default operation
6018 * @param ops the operations initialized
6023 static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops)
6027 ops->transform_node = transform_node_##a; \
6029 #define CASE_PROJ(a) \
6031 ops->transform_node_Proj = transform_node_Proj_##a; \
6033 #define CASE_PROJ_EX(a) \
6035 ops->transform_node = transform_node_##a; \
6036 ops->transform_node_Proj = transform_node_Proj_##a; \
6045 CASE_PROJ_EX(DivMod);
6080 } /* firm_set_default_transform_node */
6083 /* **************** Common Subexpression Elimination **************** */
/** The size of the hash table used; it should estimate the number of
    nodes in a graph. */
6087 #define N_IR_NODES 512
6089 /** Compares the attributes of two Const nodes. */
6090 static int node_cmp_attr_Const(ir_node *a, ir_node *b)
6092 return (get_Const_tarval(a) != get_Const_tarval(b))
6093 || (get_Const_type(a) != get_Const_type(b));
6094 } /* node_cmp_attr_Const */
6096 /** Compares the attributes of two Proj nodes. */
6097 static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
6099 return a->attr.proj != b->attr.proj;
6100 } /* node_cmp_attr_Proj */
6102 /** Compares the attributes of two Alloc nodes. */
6103 static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
6105 const alloc_attr *pa = &a->attr.alloc;
6106 const alloc_attr *pb = &b->attr.alloc;
6107 return (pa->where != pb->where) || (pa->type != pb->type);
6108 } /* node_cmp_attr_Alloc */
6110 /** Compares the attributes of two Free nodes. */
6111 static int node_cmp_attr_Free(ir_node *a, ir_node *b)
6113 const free_attr *pa = &a->attr.free;
6114 const free_attr *pb = &b->attr.free;
6115 return (pa->where != pb->where) || (pa->type != pb->type);
6116 } /* node_cmp_attr_Free */
6118 /** Compares the attributes of two SymConst nodes. */
6119 static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
6121 const symconst_attr *pa = &a->attr.symc;
6122 const symconst_attr *pb = &b->attr.symc;
6123 return (pa->kind != pb->kind)
6124 || (pa->sym.type_p != pb->sym.type_p)
6125 || (pa->tp != pb->tp);
6126 } /* node_cmp_attr_SymConst */
6128 /** Compares the attributes of two Call nodes. */
6129 static int node_cmp_attr_Call(ir_node *a, ir_node *b)
6131 const call_attr *pa = &a->attr.call;
6132 const call_attr *pb = &b->attr.call;
6133 return (pa->type != pb->type)
6134 || (pa->tail_call != pb->tail_call);
6135 } /* node_cmp_attr_Call */
6137 /** Compares the attributes of two Sel nodes. */
6138 static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
6140 const ir_entity *a_ent = get_Sel_entity(a);
6141 const ir_entity *b_ent = get_Sel_entity(b);
6142 return a_ent != b_ent;
6143 } /* node_cmp_attr_Sel */
6145 /** Compares the attributes of two Phi nodes. */
6146 static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
6148 /* we can only enter this function if both nodes have the same number of inputs,
6149 hence it is enough to check if one of them is a Phi0 */
6151 /* check the Phi0 pos attribute */
6152 return a->attr.phi.u.pos != b->attr.phi.u.pos;
6155 } /* node_cmp_attr_Phi */
6157 /** Compares the attributes of two Conv nodes. */
6158 static int node_cmp_attr_Conv(ir_node *a, ir_node *b)
6160 return get_Conv_strict(a) != get_Conv_strict(b);
6161 } /* node_cmp_attr_Conv */
6163 /** Compares the attributes of two Cast nodes. */
6164 static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
6166 return get_Cast_type(a) != get_Cast_type(b);
6167 } /* node_cmp_attr_Cast */
6169 /** Compares the attributes of two Load nodes. */
6170 static int node_cmp_attr_Load(ir_node *a, ir_node *b)
6172 if (get_Load_volatility(a) == volatility_is_volatile ||
6173 get_Load_volatility(b) == volatility_is_volatile)
6174 /* NEVER do CSE on volatile Loads */
6176 /* do not CSE Loads with different alignment. Be conservative. */
6177 if (get_Load_align(a) != get_Load_align(b))
6180 return get_Load_mode(a) != get_Load_mode(b);
6181 } /* node_cmp_attr_Load */
6183 /** Compares the attributes of two Store nodes. */
6184 static int node_cmp_attr_Store(ir_node *a, ir_node *b)
6186 /* do not CSE Stores with different alignment. Be conservative. */
6187 if (get_Store_align(a) != get_Store_align(b))
6190 /* NEVER do CSE on volatile Stores */
6191 return (get_Store_volatility(a) == volatility_is_volatile ||
6192 get_Store_volatility(b) == volatility_is_volatile);
6193 } /* node_cmp_attr_Store */
6195 /** Compares two exception attributes */
6196 static int node_cmp_exception(ir_node *a, ir_node *b)
6198 const except_attr *ea = &a->attr.except;
6199 const except_attr *eb = &b->attr.except;
6201 return ea->pin_state != eb->pin_state;
6204 #define node_cmp_attr_Bound node_cmp_exception
6206 /** Compares the attributes of two Div nodes. */
6207 static int node_cmp_attr_Div(ir_node *a, ir_node *b)
6209 const divmod_attr *ma = &a->attr.divmod;
6210 const divmod_attr *mb = &b->attr.divmod;
6211 return ma->exc.pin_state != mb->exc.pin_state ||
6212 ma->resmode != mb->resmode ||
6213 ma->no_remainder != mb->no_remainder;
6214 } /* node_cmp_attr_Div */
6216 /** Compares the attributes of two DivMod nodes. */
6217 static int node_cmp_attr_DivMod(ir_node *a, ir_node *b)
6219 const divmod_attr *ma = &a->attr.divmod;
6220 const divmod_attr *mb = &b->attr.divmod;
6221 return ma->exc.pin_state != mb->exc.pin_state ||
6222 ma->resmode != mb->resmode;
6223 } /* node_cmp_attr_DivMod */
6225 /** Compares the attributes of two Mod nodes. */
6226 static int node_cmp_attr_Mod(ir_node *a, ir_node *b)
6228 return node_cmp_attr_DivMod(a, b);
6229 } /* node_cmp_attr_Mod */
6231 /** Compares the attributes of two Quot nodes. */
6232 static int node_cmp_attr_Quot(ir_node *a, ir_node *b)
6234 return node_cmp_attr_DivMod(a, b);
6235 } /* node_cmp_attr_Quot */
6237 /** Compares the attributes of two Confirm nodes. */
6238 static int node_cmp_attr_Confirm(ir_node *a, ir_node *b)
/* no need to compare the bound, as this is an input */
6241 return (get_Confirm_cmp(a) != get_Confirm_cmp(b));
6242 } /* node_cmp_attr_Confirm */
6244 /** Compares the attributes of two Builtin nodes. */
6245 static int node_cmp_attr_Builtin(ir_node *a, ir_node *b)
6247 /* no need to compare the type, equal kind means equal type */
6248 return get_Builtin_kind(a) != get_Builtin_kind(b);
6249 } /* node_cmp_attr_Builtin */
6251 /** Compares the attributes of two ASM nodes. */
6252 static int node_cmp_attr_ASM(ir_node *a, ir_node *b)
6255 const ir_asm_constraint *ca;
6256 const ir_asm_constraint *cb;
6259 if (get_ASM_text(a) != get_ASM_text(b))
/* Should we really check the constraints here? It would be better, but is strange. */
6263 n = get_ASM_n_input_constraints(a);
6264 if (n != get_ASM_n_input_constraints(b))
6267 ca = get_ASM_input_constraints(a);
6268 cb = get_ASM_input_constraints(b);
6269 for (i = 0; i < n; ++i) {
6270 if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
6274 n = get_ASM_n_output_constraints(a);
6275 if (n != get_ASM_n_output_constraints(b))
6278 ca = get_ASM_output_constraints(a);
6279 cb = get_ASM_output_constraints(b);
6280 for (i = 0; i < n; ++i) {
6281 if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
6285 n = get_ASM_n_clobbers(a);
6286 if (n != get_ASM_n_clobbers(b))
6289 cla = get_ASM_clobbers(a);
6290 clb = get_ASM_clobbers(b);
6291 for (i = 0; i < n; ++i) {
6292 if (cla[i] != clb[i])
6296 } /* node_cmp_attr_ASM */
/** Compares the (nonexistent) attributes of two Dummy nodes. */
6299 static int node_cmp_attr_Dummy(ir_node *a, ir_node *b)
6307 * Set the default node attribute compare operation for an ir_op_ops.
6309 * @param code the opcode for the default operation
6310 * @param ops the operations initialized
6315 static ir_op_ops *firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops)
6319 ops->node_cmp_attr = node_cmp_attr_##a; \
6352 } /* firm_set_default_node_cmp_attr */
6355 * Compare function for two nodes in the value table. Gets two
6356 * nodes as parameters. Returns 0 if the nodes are a Common Sub Expression.
6358 int identities_cmp(const void *elt, const void *key)
6360 ir_node *a = (ir_node *)elt;
6361 ir_node *b = (ir_node *)key;
6364 if (a == b) return 0;
6366 if ((get_irn_op(a) != get_irn_op(b)) ||
6367 (get_irn_mode(a) != get_irn_mode(b))) return 1;
6369 /* compare if a's in and b's in are of equal length */
6370 irn_arity_a = get_irn_arity(a);
6371 if (irn_arity_a != get_irn_arity(b))
6374 if (get_irn_pinned(a) == op_pin_state_pinned) {
6375 /* for pinned nodes, the block inputs must be equal */
6376 if (get_irn_n(a, -1) != get_irn_n(b, -1))
6378 } else if (! get_opt_global_cse()) {
6379 /* for block-local CSE both nodes must be in the same MacroBlock */
6380 if (get_irn_MacroBlock(a) != get_irn_MacroBlock(b))
6384 /* compare a->in[0..ins] with b->in[0..ins] */
6385 for (i = 0; i < irn_arity_a; ++i) {
6386 ir_node *pred_a = get_irn_n(a, i);
6387 ir_node *pred_b = get_irn_n(b, i);
6388 if (pred_a != pred_b) {
6389 /* if both predecessors are CSE neutral they might be different */
6390 if (!is_irn_cse_neutral(pred_a) || !is_irn_cse_neutral(pred_b))
* here, we already know that the nodes are identical except for their attributes.
6399 if (a->op->ops.node_cmp_attr)
6400 return a->op->ops.node_cmp_attr(a, b);
6403 } /* identities_cmp */
6406 * Calculate a hash value of a node.
6408 * @param node The IR-node
6410 unsigned ir_node_hash(const ir_node *node)
6412 return node->op->ops.hash(node);
6413 } /* ir_node_hash */
6416 pset *new_identities(void)
6418 return new_pset(identities_cmp, N_IR_NODES);
6419 } /* new_identities */
6421 void del_identities(pset *value_table)
6423 del_pset(value_table);
6424 } /* del_identities */
6426 /* Normalize a node by putting constants (and operands with larger
6427 * node index) on the right (operator side). */
6428 void ir_normalize_node(ir_node *n)
6430 if (is_op_commutative(get_irn_op(n))) {
6431 ir_node *l = get_binop_left(n);
6432 ir_node *r = get_binop_right(n);
/* For commutative operators perform a OP b == b OP a but keep
 * constants on the RIGHT side. This helps greatly in some
 * optimizations. Moreover we use the idx number to make the form
 * deterministic.
6438 if (!operands_are_normalized(l, r)) {
6439 set_binop_left(n, r);
6440 set_binop_right(n, l);
6444 } /* ir_normalize_node */
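/*
 * Illustrative sketch (not part of libFirm): normalization rewrites
 *
 *   Add(Const 3, x)   =>   Add(x, Const 3)
 *
 * so pattern matching and the CSE hash only ever have to look for
 * constants on the right-hand side.
 */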
* Update the nodes after a match in the value table. If both nodes have
* the same MacroBlock but different Blocks, we must ensure that the node
* with the dominating Block (the node that is nearer to the MacroBlock
* header) is stored in the table.
* Because a MacroBlock has only one "non-exception" flow, we don't need
* dominance info here: we know that one block must dominate the other, and
* following the only block input will allow us to find it.
6455 static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node)
6457 ir_node *known_blk, *new_block, *block, *mbh;
6459 if (get_opt_global_cse()) {
/* Block inputs are meaningless */
6463 known_blk = get_irn_n(known_irn, -1);
6464 new_block = get_irn_n(new_ir_node, -1);
6465 if (known_blk == new_block) {
6466 /* already in the same block */
* We expect the typical case that arises while building the graph: there,
* known_irn is already the upper one, so checking this first should be faster.
6474 mbh = get_Block_MacroBlock(new_block);
6476 if (block == known_blk) {
6477 /* ok, we have found it: known_block dominates new_block as expected */
* We have reached the MacroBlock header without finding
* the known_block, so new_block must dominate known_block.
6486 set_irn_n(known_irn, -1, new_block);
6489 assert(get_Block_n_cfgpreds(block) == 1);
6490 block = get_Block_cfgpred_block(block, 0);
} /* update_known_irn */
6495 * Return the canonical node computing the same value as n.
6496 * Looks up the node in a hash table, enters it in the table
6497 * if it isn't there yet.
* @param value_table the HashSet containing all nodes in the current graph
6501 * @param n the node to look up
6503 * @return a node that computes the same value as n or n if no such
6504 * node could be found
6506 ir_node *identify_remember(pset *value_table, ir_node *n)
6510 if (!value_table) return n;
6512 ir_normalize_node(n);
6513 /* lookup or insert in hash table with given hash key. */
6514 nn = pset_insert(value_table, n, ir_node_hash(n));
6517 update_known_irn(nn, n);
6519 /* n is reachable again */
6520 edges_node_revival(nn, get_irn_irg(nn));
6524 } /* identify_remember */
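/*
 * Illustrative usage sketch (not part of libFirm; n1/n2 are placeholders
 * for two structurally identical nodes, e.g. two Add(x, y) in one block):
 *
 *   pset *vt = new_identities();
 *   assert(identify_remember(vt, n1) == n1);   // first node is entered
 *   assert(identify_remember(vt, n2) == n1);   // duplicate is CSE'd away
 *   del_identities(vt);
 */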
6527 * During construction we set the op_pin_state_pinned flag in the graph right when the
6528 * optimization is performed. The flag turning on procedure global cse could
6529 * be changed between two allocations. This way we are safe.
6531 * @param value_table The value table
6532 * @param n The node to lookup
6534 static inline ir_node *identify_cons(pset *value_table, ir_node *n)
6538 n = identify_remember(value_table, n);
6539 if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
6540 set_irg_pinned(current_ir_graph, op_pin_state_floats);
6542 } /* identify_cons */
6544 /* Add a node to the identities value table. */
6545 void add_identities(pset *value_table, ir_node *node)
6547 if (get_opt_cse() && !is_Block(node))
6548 identify_remember(value_table, node);
6549 } /* add_identities */
6551 /* Visit each node in the value table of a graph. */
6552 void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env)
6555 ir_graph *rem = current_ir_graph;
6557 current_ir_graph = irg;
6558 foreach_pset(irg->value_table, node)
6560 current_ir_graph = rem;
6561 } /* visit_all_identities */
6564 * Garbage in, garbage out. If a node has a dead input, i.e., the
6565 * Bad node is input to the node, return the Bad node.
6567 static ir_node *gigo(ir_node *node)
6570 ir_op *op = get_irn_op(node);
6572 /* remove garbage blocks by looking at control flow that leaves the block
6573 and replacing the control flow by Bad. */
6574 if (get_irn_mode(node) == mode_X) {
6575 ir_node *block = get_nodes_block(skip_Proj(node));
6577 /* Don't optimize nodes in immature blocks. */
6578 if (!get_Block_matured(block))
6580 /* Don't optimize End, may have Bads. */
6581 if (op == op_End) return node;
6583 if (is_Block(block)) {
6584 if (is_Block_dead(block)) {
6585 /* control flow from dead block is dead */
6589 for (i = get_irn_arity(block) - 1; i >= 0; --i) {
6590 if (!is_Bad(get_irn_n(block, i)))
6594 ir_graph *irg = get_irn_irg(block);
6595 /* the start block is never dead */
6596 if (block != get_irg_start_block(irg)
6597 && block != get_irg_end_block(irg)) {
* Do NOT kill control flow without setting
* the block to dead, or bad things can happen:
* we get a Block that is not reachable by irg_block_walk()
* but can be found by irg_walk()!
6604 set_Block_dead(block);
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
   block's predecessors is dead. */
6613 if (op != op_Block && op != op_Phi && op != op_Tuple) {
6614 irn_arity = get_irn_arity(node);
6617 * Beware: we can only read the block of a non-floating node.
6619 if (is_irn_pinned_in_irg(node) &&
6620 is_Block_dead(get_nodes_block(skip_Proj(node))))
6623 for (i = 0; i < irn_arity; i++) {
6624 ir_node *pred = get_irn_n(node, i);
/* Propagating Unknowns here seems to be a bad idea, because
   sometimes we need a node as an input and do not want it replaced.
   However, it might be useful to move this into a later phase
   (if you think that optimizing such code is useful). */
6634 if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
6635 return new_Unknown(get_irn_mode(node));
6640 /* With this code we violate the agreement that local_optimize
6641 only leaves Bads in Block, Phi and Tuple nodes. */
6642 /* If Block has only Bads as predecessors it's garbage. */
6643 /* If Phi has only Bads as predecessors it's garbage. */
6644 if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
6645 irn_arity = get_irn_arity(node);
6646 for (i = 0; i < irn_arity; i++) {
6647 if (!is_Bad(get_irn_n(node, i))) break;
6649 if (i == irn_arity) node = new_Bad();
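/*
 * Illustrative sketch (not part of libFirm): a Phi all of whose
 * predecessors are Bad,
 *
 *   Phi(Bad, Bad)   =>   Bad
 *
 * and likewise for matured Blocks with only Bad predecessors.
 */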
* These optimizations deallocate nodes from the obstack.
* They can only be applied if it is guaranteed that no other nodes
* reference this one, i.e., right after construction of a node.
6660 * @param n The node to optimize
6662 * current_ir_graph must be set to the graph of the node!
6664 ir_node *optimize_node(ir_node *n)
6668 ir_opcode iro = get_irn_opcode(n);
6670 /* Always optimize Phi nodes: part of the construction. */
6671 if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
6673 /* constant expression evaluation / constant folding */
6674 if (get_opt_constant_folding()) {
6675 /* neither constants nor Tuple values can be evaluated */
6676 if (iro != iro_Const && (get_irn_mode(n) != mode_T)) {
6677 unsigned fp_model = get_irg_fp_model(current_ir_graph);
6678 int old_fp_mode = tarval_fp_ops_enabled();
6680 tarval_enable_fp_ops(! (fp_model & fp_no_float_fold));
6682 /* try to evaluate */
6683 tv = computed_value(n);
6684 if (tv != tarval_bad) {
6686 ir_type *old_tp = get_irn_type(n);
6687 int i, arity = get_irn_arity(n);
6691 * Try to recover the type of the new expression.
6693 for (i = 0; i < arity && !old_tp; ++i)
6694 old_tp = get_irn_type(get_irn_n(n, i));
* we MUST copy the node here temporarily, because it's still needed
* for DBG_OPT_CSTEVAL
6700 node_size = offsetof(ir_node, attr) + n->op->attr_size;
6701 oldn = alloca(node_size);
6703 memcpy(oldn, n, node_size);
6704 CLONE_ARR_A(ir_node *, oldn->in, n->in);
6706 /* ARG, copy the in array, we need it for statistics */
6707 memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
/* notify the in-place edges module */
6710 edges_node_deleted(n, current_ir_graph);
6712 /* evaluation was successful -- replace the node. */
6713 irg_kill_node(current_ir_graph, n);
6716 if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
6717 set_Const_type(nw, old_tp);
6718 DBG_OPT_CSTEVAL(oldn, nw);
6719 tarval_enable_fp_ops(old_fp_mode);
6722 tarval_enable_fp_ops(old_fp_mode);
6726 /* remove unnecessary nodes */
6727 if (get_opt_algebraic_simplification() ||
6728 (iro == iro_Phi) || /* always optimize these nodes. */
6730 (iro == iro_Proj) ||
6731 (iro == iro_Block) ) /* Flags tested local. */
6732 n = equivalent_node(n);
6734 /* Common Subexpression Elimination.
6736 * Checks whether n is already available.
6737 * The block input is used to distinguish different subexpressions. Right
6738 * now all nodes are op_pin_state_pinned to blocks, i.e., the CSE only finds common
6739 * subexpressions within a block.
6742 n = identify_cons(current_ir_graph->value_table, n);
6745 edges_node_deleted(oldn, current_ir_graph);
6747 /* We found an existing, better node, so we can deallocate the old node. */
6748 irg_kill_node(current_ir_graph, oldn);
/* Some more constant expression evaluation. */
6754 iro = get_irn_opcode(n);
6755 if (get_opt_algebraic_simplification() ||
6756 (iro == iro_Cond) ||
6757 (iro == iro_Proj)) /* Flags tested local. */
6758 n = transform_node(n);
/* Remove nodes with dead (Bad) input.
   Run always for transformation-induced Bads. */
6764 /* Now we have a legal, useful node. Enter it in hash table for CSE */
6765 if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
6767 n = identify_remember(current_ir_graph->value_table, o);
6773 } /* optimize_node */
* These optimizations never deallocate nodes (in place). This can leave dead
* nodes lying on the obstack. Remove these by a dead node elimination,
* i.e., a copying garbage collection.
6781 ir_node *optimize_in_place_2(ir_node *n)
6785 ir_opcode iro = get_irn_opcode(n);
6787 if (!get_opt_optimize() && !is_Phi(n)) return n;
6789 /* constant expression evaluation / constant folding */
6790 if (get_opt_constant_folding()) {
6791 /* neither constants nor Tuple values can be evaluated */
6792 if (iro != iro_Const && get_irn_mode(n) != mode_T) {
6793 unsigned fp_model = get_irg_fp_model(current_ir_graph);
6794 int old_fp_mode = tarval_fp_ops_enabled();
6796 tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
6797 /* try to evaluate */
6798 tv = computed_value(n);
6799 if (tv != tarval_bad) {
6800 /* evaluation was successful -- replace the node. */
6801 ir_type *old_tp = get_irn_type(n);
6802 int i, arity = get_irn_arity(n);
6805 * Try to recover the type of the new expression.
6807 for (i = 0; i < arity && !old_tp; ++i)
6808 old_tp = get_irn_type(get_irn_n(n, i));
6812 if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
6813 set_Const_type(n, old_tp);
6815 DBG_OPT_CSTEVAL(oldn, n);
6816 tarval_enable_fp_ops(old_fp_mode);
6819 tarval_enable_fp_ops(old_fp_mode);
6823 /* remove unnecessary nodes */
6824 if (get_opt_constant_folding() ||
6825 (iro == iro_Phi) || /* always optimize these nodes. */
6826 (iro == iro_Id) || /* ... */
6827 (iro == iro_Proj) || /* ... */
6828 (iro == iro_Block) ) /* Flags tested local. */
6829 n = equivalent_node(n);
6831 /** common subexpression elimination **/
6832 /* Checks whether n is already available. */
6833 /* The block input is used to distinguish different subexpressions. Right
6834 now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
6835 subexpressions within a block. */
6836 if (get_opt_cse()) {
6838 n = identify_remember(current_ir_graph->value_table, o);
6843 /* Some more constant expression evaluation. */
6844 iro = get_irn_opcode(n);
6845 if (get_opt_constant_folding() ||
6846 (iro == iro_Cond) ||
6847 (iro == iro_Proj)) /* Flags tested local. */
6848 n = transform_node(n);
/* Remove nodes with dead (Bad) input.
   Run always for transformation-induced Bads. */
6854 /* Now we can verify the node, as it has no dead inputs any more. */
/* Now we have a legal, useful node. Enter it in the hash table for CSE.
   Blocks should be unique anyway. (Except the successor of start,
   which is CSE'd with the start block!) */
6860 if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
6862 n = identify_remember(current_ir_graph->value_table, o);
6868 } /* optimize_in_place_2 */
* Wrapper for external use; sets proper status bits after optimization.
6873 ir_node *optimize_in_place(ir_node *n)
6875 /* Handle graph state */
6876 assert(get_irg_phase_state(current_ir_graph) != phase_building);
6878 if (get_opt_global_cse())
6879 set_irg_pinned(current_ir_graph, op_pin_state_floats);
6880 if (get_irg_outs_state(current_ir_graph) == outs_consistent)
6881 set_irg_outs_inconsistent(current_ir_graph);
6883 /* FIXME: Maybe we could also test whether optimizing the node can
6884 change the control graph. */
6885 set_irg_doms_inconsistent(current_ir_graph);
6886 return optimize_in_place_2(n);
6887 } /* optimize_in_place */
6890 * Calculate a hash value of a Const node.
6892 static unsigned hash_Const(const ir_node *node)
/* special hash value for Const nodes, as they only differ in their tarval. */
6897 h = HASH_PTR(node->attr.con.tarval);
6903 * Calculate a hash value of a SymConst node.
6905 static unsigned hash_SymConst(const ir_node *node)
6909 /* all others are pointers */
6910 h = HASH_PTR(node->attr.symc.sym.type_p);
6913 } /* hash_SymConst */
6916 * Set the default hash operation in an ir_op_ops.
6918 * @param code the opcode for the default operation
6919 * @param ops the operations initialized
6924 static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops)
6928 ops->hash = hash_##a; \
6931 /* hash function already set */
6932 if (ops->hash != NULL)
6939 /* use input/mode default hash if no function was given */
6940 ops->hash = firm_default_hash;
* Sets the default operations for an ir_op_ops.
6950 ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops)
6952 ops = firm_set_default_hash(code, ops);
6953 ops = firm_set_default_computed_value(code, ops);
6954 ops = firm_set_default_equivalent_node(code, ops);
6955 ops = firm_set_default_transform_node(code, ops);
6956 ops = firm_set_default_node_cmp_attr(code, ops);
6957 ops = firm_set_default_get_type(code, ops);
6958 ops = firm_set_default_get_type_attr(code, ops);
6959 ops = firm_set_default_get_entity_attr(code, ops);
6962 } /* firm_set_default_operations */