 * File name:   ir/ir/iropt.c
 * Purpose:     iropt --- optimizations intertwined with IR construction.
 * Author:      Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2005 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "iredges_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
# include "opt_polymorphy.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
 * Trivial INLINEable routine for copy propagation.
 * Does follow Ids, needed to optimize INLINEd code.
static INLINE ir_node *
follow_Id (ir_node *n)
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
 * return the value of a Constant
static tarval *computed_value_Const(ir_node *n)
  return get_Const_tarval(n);

 * return the value of a 'sizeof' SymConst
static tarval *computed_value_SymConst(ir_node *n)
  if ((get_SymConst_kind(n) == symconst_size) &&
      (get_type_state(get_SymConst_type(n))) == layout_fixed)
    return new_tarval_from_long(get_type_size_bytes(get_SymConst_type(n)), get_irn_mode(n));

 * return the value of an Add
static tarval *computed_value_Add(ir_node *n)
  ir_node *a = get_Add_left(n);
  ir_node *b = get_Add_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b)))
    return tarval_add(ta, tb);
 * return the value of a Sub
static tarval *computed_value_Sub(ir_node *n)
  ir_node *a = get_Sub_left(n);
  ir_node *b = get_Sub_right(n);

  if (a == b && !is_Bad(a))
    return get_tarval_null(get_irn_mode(n));

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b)))
    return tarval_sub(ta, tb);

 * return the value of an unary Minus
static tarval *computed_value_Minus(ir_node *n)
  ir_node *a = get_Minus_op(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
    return tarval_neg(ta);
 * return the value of a Mul
static tarval *computed_value_Mul(ir_node *n)
  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    return tarval_mul(ta, tb);

  /* a*0 = 0 or 0*b = 0:
     calls computed_value recursively and returns the 0 with the proper
     mode. */
  if ((ta != tarval_bad) && (ta == get_mode_null(get_tarval_mode(ta))))
    return ta;
  if ((tb != tarval_bad) && (tb == get_mode_null(get_tarval_mode(tb))))
    return tb;
 * return the value of a floating point Quot
static tarval *computed_value_Quot(ir_node *n)
  ir_node *a = get_Quot_left(n);
  ir_node *b = get_Quot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))   /* div by zero: return tarval_bad */
      return tarval_quo(ta, tb);
 * calculate the value of an integer Div of two nodes
 * Special case: 0 / b
static tarval *do_computed_value_Div(ir_node *a, ir_node *b)
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* Compute c1 / c2 or 0 / a, a != 0 */
  if (ta != tarval_bad) {
    if ((tb != tarval_bad) && (tb != get_mode_null(get_irn_mode(b))))   /* div by zero: return tarval_bad */
      return tarval_div(ta, tb);
    else if (ta == get_mode_null(get_tarval_mode(ta)))   /* 0 / b == 0 */
      return ta;

 * return the value of an integer Div
static tarval *computed_value_Div(ir_node *n)
  return do_computed_value_Div(get_Div_left(n), get_Div_right(n));
 * calculate the value of an integer Mod of two nodes
 * Special case: a % 1
static tarval *do_computed_value_Mod(ir_node *a, ir_node *b)
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* Compute c1 % c2 or a % 1 */
  if (tb != tarval_bad) {
    if ((ta != tarval_bad) && (tb != get_mode_null(get_tarval_mode(tb))))   /* mod by zero: return tarval_bad */
      return tarval_mod(ta, tb);
    else if (tb == get_mode_one(get_tarval_mode(tb)))    /* x mod 1 == 0 */
      return get_mode_null(get_irn_mode(a));

 * return the value of an integer Mod
static tarval *computed_value_Mod(ir_node *n)
  return do_computed_value_Mod(get_Mod_left(n), get_Mod_right(n));
 * return the value of an Abs
static tarval *computed_value_Abs(ir_node *n)
  ir_node *a = get_Abs_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_abs(ta);

 * return the value of an And
 * Special case: a & 0, 0 & b
static tarval *computed_value_And(ir_node *n)
  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_and (ta, tb);

  if (   (classify_tarval ((v = ta)) == TV_CLASSIFY_NULL)
      || (classify_tarval ((v = tb)) == TV_CLASSIFY_NULL)) {
    return v;
 * return the value of an Or
 * Special case: a | 1...1, 1...1 | b
static tarval *computed_value_Or(ir_node *n)
  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_or (ta, tb);

  if (   (classify_tarval ((v = ta)) == TV_CLASSIFY_ALL_ONE)
      || (classify_tarval ((v = tb)) == TV_CLASSIFY_ALL_ONE)) {
    return v;

 * return the value of an Eor
static tarval *computed_value_Eor(ir_node *n)
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  if (a == b)
    return get_tarval_null(get_irn_mode(n));

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_eor (ta, tb);

 * return the value of a Not
static tarval *computed_value_Not(ir_node *n)
  ir_node *a = get_Not_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_not(ta);
 * return the value of a Shl
static tarval *computed_value_Shl(ir_node *n)
  ir_node *a = get_Shl_left(n);
  ir_node *b = get_Shl_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shl (ta, tb);

 * return the value of a Shr
static tarval *computed_value_Shr(ir_node *n)
  ir_node *a = get_Shr_left(n);
  ir_node *b = get_Shr_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shr (ta, tb);

 * return the value of a Shrs
static tarval *computed_value_Shrs(ir_node *n)
  ir_node *a = get_Shrs_left(n);
  ir_node *b = get_Shrs_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shrs (ta, tb);

 * return the value of a Rot
static tarval *computed_value_Rot(ir_node *n)
  ir_node *a = get_Rot_left(n);
  ir_node *b = get_Rot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_rot (ta, tb);
 * return the value of a Conv
static tarval *computed_value_Conv(ir_node *n)
  ir_node *a = get_Conv_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_convert_to(ta, get_irn_mode(n));
 * return the value of a Proj, handle Proj(Cmp), Proj(Div), Proj(Mod), Proj(DivMod)
static tarval *computed_value_Proj(ir_node *n)
  ir_node *a = get_Proj_pred(n);
  ir_node *aa, *ab;
  long proj_nr;

  /* Optimize Cmp nodes.
     This performs a first step of unreachable code elimination.
     Proj cannot be computed, but folding a Cmp above the Proj here is
     not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
     only one is used.

     There are several cases where we can evaluate a Cmp node:

     1. The nodes compared are both the same.  If we compare for
        equal, greater equal, ... this will return true, else it
        will return false.  This step relies on cse.

     2. The predecessors of Cmp are target values.  We can evaluate
        the Cmp.

     3. The predecessors are Allocs or void* constants.  Allocs never
        return NULL, they raise an exception.  Therefore we can predict
        the result of the Cmp. */
  switch (get_irn_opcode(a)) {
  case iro_Cmp:
    aa = get_Cmp_left(a);
    ab = get_Cmp_right(a);
    proj_nr = get_Proj_proj(n);

    if (aa == ab && !mode_is_float(get_irn_mode(aa))) { /* 1.: */
      /* BEWARE: a == a is NOT always True for floating Point!!! */
      /* This is a trick with the bits used for encoding the Cmp
         Proj numbers, the following statement is not the same:
         return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */
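      /* Worked example (assuming the standard pn_Cmp bit encoding
         pn_Cmp_Eq = 1, pn_Cmp_Lt = 2, pn_Cmp_Gt = 4, so that
         pn_Cmp_Le = Eq|Lt = 3, pn_Cmp_Ge = Eq|Gt = 5, pn_Cmp_Lg = Lt|Gt = 6):
         for a == a the true relation is "equal", hence every requested
         relation that contains the Eq bit must yield true.
           proj_nr & pn_Cmp_Eq:  Eq -> 1, Le -> 1, Ge -> 1, Lt/Gt/Lg -> 0
         which is exactly the desired boolean, while proj_nr == pn_Cmp_Eq
         would wrongly yield false for Le and Ge. */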
      return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b);
    } else {
      tarval *taa = value_of(aa);
      tarval *tab = value_of(ab);

      if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
        /* strange checks... */
        pn_Cmp flags = tarval_cmp (taa, tab);
        if (flags != pn_Cmp_False) {
          return new_tarval_from_long (proj_nr & flags, mode_b);
        }
      } else { /* check for 3.: */
        ir_node *aaa = skip_Id(skip_Proj(aa));
        ir_node *aba = skip_Id(skip_Proj(ab));
        if (   (   (/* aa is ProjP and aaa is Alloc */
                       (get_irn_op(aa) == op_Proj)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_irn_op(aaa) == op_Alloc))
                && (   (/* ab is constant void */
                           (get_irn_op(ab) == op_Const)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                    || (/* ab is other Alloc */
                           (get_irn_op(ab) == op_Proj)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_irn_op(aba) == op_Alloc)
                        && (aba != aaa))))
            || (/* aa is void and aba is Alloc */
                   (get_irn_op(aa) == op_Const)
                && (mode_is_reference(get_irn_mode(aa)))
                && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                && (get_irn_op(ab) == op_Proj)
                && (mode_is_reference(get_irn_mode(ab)))
                && (get_irn_op(aba) == op_Alloc)))
          /* 3.: */
          return new_tarval_from_long (proj_nr & pn_Cmp_Ne, mode_b);
  case iro_DivMod:
    /* compute either the Div or the Mod part */
    proj_nr = get_Proj_proj(n);
    if (proj_nr == pn_DivMod_res_div)
      return do_computed_value_Div(get_DivMod_left(a), get_DivMod_right(a));
    else if (proj_nr == pn_DivMod_res_mod)
      return do_computed_value_Mod(get_DivMod_left(a), get_DivMod_right(a));
    break;

  case iro_Div:
    if (get_Proj_proj(n) == pn_Div_res)
      return computed_value(a);
    break;

  case iro_Mod:
    if (get_Proj_proj(n) == pn_Mod_res)
      return computed_value(a);
    break;
 * calculate the value of a Mux: can be evaluated, if the
 * sel and the right input are known
static tarval *computed_value_Mux(ir_node *n)
  ir_node *sel = get_Mux_sel(n);
  tarval *ts = value_of(sel);

  if (ts == get_tarval_b_true()) {
    ir_node *v = get_Mux_true(n);
    return value_of(v);
  }
  else if (ts == get_tarval_b_false()) {
    ir_node *v = get_Mux_false(n);
    return value_of(v);
  }
  return tarval_bad;
 * If the parameter n can be computed, return its value, else tarval_bad.
 * Performs constant folding.
 *
 * @param n  The node to be evaluated
tarval *computed_value(ir_node *n)
  if (n->op->computed_value)
    return n->op->computed_value(n);
  return tarval_bad;
 * set the default computed_value evaluator
static ir_op *firm_set_default_computed_value(ir_op *op)
    op->computed_value = computed_value_##a;  \
    op->computed_value = NULL;
/* returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
  assert (mode_is_reference(get_irn_mode (a))
          && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;
static ir_node *equivalent_node_Block(ir_node *n)
  ir_node *oldn = n;

  /* The Block constructor does not call optimize, but mature_immBlock
     calls the optimization. */
  assert(get_Block_matured(n));

  /* Straightening: a single entry Block following a single exit Block
     can be merged, if it is not the Start block. */
  /* !!! Beware, all Phi-nodes of n must have been optimized away.
     This should be true, as the block is matured before optimize is called.
     But what about Phi-cycles with the Phi0/Id that could not be resolved?
     Remaining Phi nodes are just Ids. */
  if ((get_Block_n_cfgpreds(n) == 1) &&
      (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- handle the self cycle. */
      n = set_Block_dead(n);
      DBG_OPT_DEAD(oldn, n);
    } else if (get_opt_control_flow_straightening()) {
      n = predblock;
      DBG_OPT_STG(oldn, n);
  else if ((get_Block_n_cfgpreds(n) == 1) &&
           (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- handle the self cycle. */
      n = set_Block_dead(n);
      DBG_OPT_DEAD(oldn, n);
  else if ((get_Block_n_cfgpreds(n) == 2) &&
           (get_opt_control_flow_weak_simplification())) {
    /* Test whether Cond jumps twice to this block
       @@@ we could do this also with two loops finding two preds from several ones. */
    ir_node *a = get_Block_cfgpred(n, 0);
    ir_node *b = get_Block_cfgpred(n, 1);

    if ((get_irn_op(a) == op_Proj) &&
        (get_irn_op(b) == op_Proj) &&
        (get_Proj_pred(a) == get_Proj_pred(b)) &&
        (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
        (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
      /* Also a single entry Block following a single exit Block.  Phis have
         twice the same operand and will be optimized away. */
      n = get_nodes_block(a);
      DBG_OPT_IFSIM(oldn, a, b, n);
  } else if (get_opt_unreachable_code() &&
             (n != current_ir_graph->start_block) &&
             (n != current_ir_graph->end_block)) {
    int i, n_cfg = get_Block_n_cfgpreds(n);

    /* If all inputs are dead, this block is dead too, except if it is
       the start or end block.  This is a step of unreachable code
       elimination. */
    for (i = 0; i < n_cfg; i++) {
      ir_node *pred = get_Block_cfgpred(n, i);
      ir_node *pred_blk;

      if (is_Bad(pred)) continue;
      pred_blk = get_nodes_block(pred);

      if (is_Block_dead(pred_blk)) continue;

      /* really found a living input */
      break;
    }
    if (i == n_cfg)
      n = set_Block_dead(n);
 * Returns an equivalent node for a Jmp, a Bad :-)
 * Of course this only happens if the Block of the Jmp is Bad.
static ir_node *equivalent_node_Jmp(ir_node *n)
  /* GL: Why not same for op_Raise?? */
  /* unreachable code elimination */
  if (is_Block_dead(get_nodes_block(n)))
    n = new_Bad();

static ir_node *equivalent_node_Cond(ir_node *n)
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */
 * optimize operations that are commutative and have neutral 0,
 * so a op 0 = 0 op a = a.
static ir_node *equivalent_node_neutral_zero(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  tarval *tv;
  ir_node *on;

  /* After running compute_node there is only one constant predecessor.
     Find this predecessor's value and remember the other node: */
  if ((tv = value_of(a)) != tarval_bad) {
    on = b;
  } else if ((tv = value_of(b)) != tarval_bad) {
    on = a;
  } else
    return n;

  /* If this predecessor's constant value is zero, the operation is
     unnecessary. Remove it: */
  if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
    n = on;

    DBG_OPT_ALGSIM1(oldn, a, b, n);
#define equivalent_node_Add  equivalent_node_neutral_zero
#define equivalent_node_Eor  equivalent_node_neutral_zero

 * optimize operations that are not commutative but have neutral 0 on left,
 * so a op 0 = a.
static ir_node *equivalent_node_left_zero(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
    n = a;

    DBG_OPT_ALGSIM1(oldn, a, b, n);

#define equivalent_node_Sub   equivalent_node_left_zero
#define equivalent_node_Shl   equivalent_node_left_zero
#define equivalent_node_Shr   equivalent_node_left_zero
#define equivalent_node_Shrs  equivalent_node_left_zero
#define equivalent_node_Rot   equivalent_node_left_zero
 * Er, a "symmetric unop", i.e. op(op(n)) = n.
static ir_node *equivalent_node_symmetric_unop(ir_node *n)
  ir_node *oldn = n;
  ir_node *pred = get_unop_op(n);

  /* optimize symmetric unop */
  if (get_irn_op(pred) == get_irn_op(n)) {
    n = get_unop_op(pred);
    DBG_OPT_ALGSIM2(oldn, pred, n);

#define equivalent_node_Not    equivalent_node_symmetric_unop

/* --x == x */  /* ??? Is this possible or can --x raise an
                   out of bounds exception if min != max? */
#define equivalent_node_Minus  equivalent_node_symmetric_unop
 * Optimize a * 1 = 1 * a = a.
static ir_node *equivalent_node_Mul(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  /* Mul is commutative and has again another neutral element. */
  if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
 * Optimize a / 1 = a.
static ir_node *equivalent_node_Div(ir_node *n)
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  /* Div is not commutative. */
  if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn Div into a tuple (mem, bad, a) */
    ir_node *mem = get_Div_mem(n);
    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_Div_res, a);
 * Optimize a / 1 = a and a % 1 = 0.
static ir_node *equivalent_node_DivMod(ir_node *n)
  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);

  /* Div is not commutative. */
  if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn DivMod into a tuple (mem, bad, a, 0) */
    ir_node *mem  = get_DivMod_mem(n);
    ir_mode *mode = get_irn_mode(b);

    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M, mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div, a);
    set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
 * Use algebraic simplification a | a = a | 0 = 0 | a = a.
static ir_node *equivalent_node_Or(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  if (a == b) {
    n = a;    /* Or has its own neutral element */
  } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_NULL) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
 * Optimize a & 0b1...1 = 0b1...1 & a = a & a = a.
static ir_node *equivalent_node_And(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  if (a == b) {
    n = a;    /* And has its own neutral element */
  } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_ALL_ONE) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ALL_ONE) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
 * Try to remove useless Conv's:
static ir_node *equivalent_node_Conv(ir_node *n)
  ir_node *oldn = n;
  ir_node *a = get_Conv_op(n);
  ir_node *b;

  ir_mode *n_mode = get_irn_mode(n);
  ir_mode *a_mode = get_irn_mode(a);

  if (n_mode == a_mode) { /* No Conv necessary */
    n = a;
    DBG_OPT_ALGSIM3(oldn, a, n);
  } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
    ir_mode *b_mode;

    b = get_Conv_op(a);
    n_mode = get_irn_mode(n);
    b_mode = get_irn_mode(b);

    if (n_mode == b_mode) {
      if (n_mode == mode_b) {
        n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
        DBG_OPT_ALGSIM1(oldn, a, b, n);
      }
      else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
        if (smaller_mode(b_mode, a_mode)) {
          n = b;        /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
          DBG_OPT_ALGSIM1(oldn, a, b, n);
 * A Cast may be removed if the type of the previous node
 * is already the type of the Cast.
static ir_node *equivalent_node_Cast(ir_node *n) {
  ir_node *pred = get_Cast_op(n);
  if (get_irn_type(pred) == get_Cast_type(n))
    n = pred;
  return n;
/* Several optimizations:
   - no Phi in start block.
   - remove Id operators that are inputs to Phi
   - fold Phi-nodes, iff they have only one predecessor except
     themselves.
*/
static ir_node *equivalent_node_Phi(ir_node *n)
  int i, n_preds;

  ir_node *oldn = n;
  ir_node *block = NULL;     /* to shut up gcc */
  ir_node *first_val = NULL; /* to shut up gcc */
  ir_node *scnd_val = NULL;  /* to shut up gcc */

  if (!get_opt_normalize()) return n;

  n_preds = get_Phi_n_preds(n);

  block = get_nodes_block(n);
  /* @@@ remove this, but it should always hold!!!
     assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
  if ((is_Block_dead(block)) ||                  /* Control dead */
      (block == current_ir_graph->start_block))  /* There should be no Phi nodes */
    return new_Bad();                            /* in the Start Block. */

  if (n_preds == 0) return n;  /* Phi of dead Region without predecessors. */
  /* first we test for a special case: */
  /* Confirm is a special node fixing additional information for a
     value that is known at a certain point.  This is useful for
     dataflow analysis. */
  if (n_preds == 2) {
    ir_node *a = get_Phi_pred(n, 0);
    ir_node *b = get_Phi_pred(n, 1);
    if (   (get_irn_op(a) == op_Confirm)
        && (get_irn_op(b) == op_Confirm)
        && (follow_Id (get_irn_n(a, 0)) == follow_Id (get_irn_n(b, 0)))
        && (get_irn_n(a, 1) == get_irn_n (b, 1))
        && (a->data.num == (~b->data.num & irpn_True))) {
      return get_irn_n(a, 0);
  /* If the Block has a Bad pred, we also have one. */
  for (i = 0; i < n_preds; ++i)
    if (is_Bad (get_Block_cfgpred(block, i)))
      set_Phi_pred(n, i, new_Bad());

  /* Find first non-self-referencing input */
  for (i = 0; i < n_preds; ++i) {
    first_val = get_Phi_pred(n, i);
    if (   (first_val != n)              /* not self pointer */
        && (get_irn_op(first_val) != op_Bad)
       ) {        /* value not dead */
      break;      /* then found first value. */
    }
  }

  /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
  if (i >= n_preds) { return new_Bad(); }

  /* follow_Id () for rest of inputs, determine if any of these
     are non-self-referencing */
  while (++i < n_preds) {
    scnd_val = get_Phi_pred(n, i);
    if (   (scnd_val != n)
        && (scnd_val != first_val)
        && (get_irn_op(scnd_val) != op_Bad)
       )
      break;
  }

  /* Fold, if no multiple distinct non-self-referencing inputs */
  if (i >= n_preds) {
    n = first_val;
    DBG_OPT_PHI(oldn, first_val, n);
  }

  /* skip the remaining Ids (done in get_Phi_pred). */
  /* superfluous, since we walk all to propagate Block's Bads.
     while (++i < n_preds) get_Phi_pred(n, i); */
 * optimize Proj(Tuple) and gigo for ProjX in Bad block
static ir_node *equivalent_node_Proj(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_Proj_pred(n);

  if (get_irn_op(a) == op_Tuple) {
    /* Remove the Tuple/Proj combination. */
    if (get_Proj_proj(n) < get_Tuple_n_preds(a)) {
      n = get_Tuple_pred(a, get_Proj_proj(n));
      DBG_OPT_TUPLE(oldn, a, n);
    } else {
      assert(0); /* This should not happen! */
      n = new_Bad();
    }
  } else if (get_irn_mode(n) == mode_X &&
             is_Block_dead(get_nodes_block(n))) {
    /* Remove dead control flow -- early gigo. */
    n = new_Bad();
static ir_node *equivalent_node_Id(ir_node *n)
  ir_node *oldn = n;

  n = follow_Id(n);
  DBG_OPT_ID(oldn, n);

static ir_node *equivalent_node_Mux(ir_node *n)
  ir_node *oldn = n, *sel = get_Mux_sel(n);
  tarval *ts = value_of(sel);

  if (ts == get_tarval_b_true()) {
    n = get_Mux_true(n);
    DBG_OPT_ALGSIM0(oldn, n);
  }
  else if (ts == get_tarval_b_false()) {
    n = get_Mux_false(n);
    DBG_OPT_ALGSIM0(oldn, n);
  }
  else if (get_Mux_false(n) == get_Mux_true(n)) {
    n = get_Mux_true(n);
    DBG_OPT_ALGSIM0(oldn, n);
 * Optimize -a CMP -b into b CMP a.
 * This works even for floating point
static ir_node *equivalent_node_Cmp(ir_node *n)
  ir_node *left  = get_Cmp_left(n);
  ir_node *right = get_Cmp_right(n);

  if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus) {
    left  = get_Minus_op(left);
    right = get_Minus_op(right);
    set_Cmp_left(n, right);
    set_Cmp_right(n, left);
 * equivalent_node() returns a node equivalent to input n.  It skips all nodes that
 * perform no actual computation, as, e.g., the Id nodes.  It does not create
 * new nodes.  It is therefore safe to free n if the node returned is not n.
 * If a node returns a Tuple we cannot just skip it.  If the size of the
 * in array fits, we transform n into a tuple (e.g., Div).
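 *
 * For instance, equivalent_node_Div() above rewrites Div(x, 1) in place
 * into Tuple(mem, Bad, x); the Projs that selected the Div's results
 * then become Proj(Tuple) and are folded to the true values by
 * equivalent_node_Proj().  (Illustrative summary of the code above,
 * not additional behaviour.)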
ir_node *
equivalent_node(ir_node *n)
  if (n->op->equivalent_node)
    return n->op->equivalent_node(n);
  return n;

 * set the default equivalent node operation
static ir_op *firm_set_default_equivalent_node(ir_op *op)
    op->equivalent_node = equivalent_node_##a;  \
    op->equivalent_node = NULL;

 * Do node specific optimizations of a node's predecessors.
static void
optimize_preds(ir_node *n) {
  ir_node *a = NULL, *b = NULL;

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  switch (get_irn_opcode(n)) {
  case iro_Cmp:
    /* We don't want Cast as input to Cmp. */
    if (get_irn_op(a) == op_Cast) {
      a = get_Cast_op(a);
      set_Cmp_left(n, a);
    }
    if (get_irn_op(b) == op_Cast) {
      b = get_Cast_op(b);
      set_Cmp_right(n, b);
    }
    break;
 * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
 * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
 * If possible, remove the Conv's.
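 *
 * Illustrative instance (hypothetical nodes, assuming a 32-bit reference
 * mode P and 32-bit integer modes Is/Iu):
 *
 *   p2 = AddP(p, ConvIs(x_Iu))   becomes   p2 = AddP(p, x_Iu)
 *
 * A Conv between two's-complement modes of equal width only reinterprets
 * the sign; the address arithmetic treats the offset the same either way,
 * so the Conv can be dropped.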
static ir_node *transform_node_AddSub(ir_node *n)
  ir_mode *mode = get_irn_mode(n);

  if (mode_is_reference(mode)) {
    ir_node *left  = get_binop_left(n);
    ir_node *right = get_binop_right(n);
    int ref_bits   = get_mode_size_bits(mode);

    if (get_irn_op(left) == op_Conv) {
      ir_mode *mode = get_irn_mode(left);
      int bits      = get_mode_size_bits(mode);

      if (ref_bits == bits &&
          mode_is_int(mode) &&
          get_mode_arithmetic(mode) == irma_twos_complement) {
        ir_node *pre      = get_Conv_op(left);
        ir_mode *pre_mode = get_irn_mode(pre);

        if (mode_is_int(pre_mode) &&
            get_mode_size_bits(pre_mode) == bits &&
            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
          /* ok, this Conv just changes the sign, moreover the calculation
           * is done with same number of bits as our address mode, so
           * we can ignore the Conv as address calculation can be viewed
           * as either signed or unsigned
           */
          set_binop_left(n, pre);

    if (get_irn_op(right) == op_Conv) {
      ir_mode *mode = get_irn_mode(right);
      int bits      = get_mode_size_bits(mode);

      if (ref_bits == bits &&
          mode_is_int(mode) &&
          get_mode_arithmetic(mode) == irma_twos_complement) {
        ir_node *pre      = get_Conv_op(right);
        ir_mode *pre_mode = get_irn_mode(pre);

        if (mode_is_int(pre_mode) &&
            get_mode_size_bits(pre_mode) == bits &&
            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
          /* ok, this Conv just changes the sign, moreover the calculation
           * is done with same number of bits as our address mode, so
           * we can ignore the Conv as address calculation can be viewed
           * as either signed or unsigned
           */
          set_binop_right(n, pre);
 * Do the AddSub optimization, then transform Add(a,a) into Mul(a, 2)
 * if the mode is integer or float.
 * Reassociation might fold this further.
static ir_node *transform_node_Add(ir_node *n)
  ir_mode *mode;
  ir_node *oldn = n;

  n = transform_node_AddSub(n);

  mode = get_irn_mode(n);
  if (mode_is_num(mode)) {
    ir_node *a = get_Add_left(n);

    if (a == get_Add_right(n)) {
      ir_node *block = get_nodes_block(n);

      n = new_rd_Mul(
            get_irn_dbg_info(n),
            current_ir_graph,
            block,
            a,
            new_r_Const_long(current_ir_graph, block, mode, 2),
            mode);
      DBG_OPT_ALGSIM0(oldn, n);
 * Do the AddSub optimization, then transform Sub(0,a) into Minus(a).
static ir_node *transform_node_Sub(ir_node *n)
  ir_mode *mode;
  ir_node *oldn = n;

  n = transform_node_AddSub(n);

  mode = get_irn_mode(n);
  if (mode_is_num(mode)) {
    if (classify_Const(get_Sub_left(n)) == CNST_NULL) {
      n = new_rd_Minus(
            get_irn_dbg_info(n),
            current_ir_graph,
            get_nodes_block(n),
            get_Sub_right(n),
            mode);
      DBG_OPT_ALGSIM0(oldn, n);
/** Do architecture dependent optimizations on Mul nodes */
static ir_node *transform_node_Mul(ir_node *n) {
  return arch_dep_replace_mul_with_shifts(n);
 * transform a Div node
static ir_node *transform_node_Div(ir_node *n)
  tarval *tv = value_of(n);
  ir_node *value = n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tv != tarval_bad) {
    value = new_Const(get_tarval_mode(tv), tv);

    DBG_OPT_CSTEVAL(n, value);
  }
  else /* Try architecture dependent optimization */
    value = arch_dep_replace_div_by_const(n);

  if (value != n) {
    /* Turn Div into a tuple (mem, bad, value) */
    ir_node *mem = get_Div_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());
    set_Tuple_pred(n, pn_Div_res, value);
 * transform a Mod node
static ir_node *transform_node_Mod(ir_node *n)
  tarval *tv = value_of(n);
  ir_node *value = n;

  /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause an exception */

  if (tv != tarval_bad) {
    value = new_Const(get_tarval_mode(tv), tv);

    DBG_OPT_CSTEVAL(n, value);
  }
  else /* Try architecture dependent optimization */
    value = arch_dep_replace_mod_by_const(n);

  if (value != n) {
    /* Turn Mod into a tuple (mem, bad, value) */
    ir_node *mem = get_Mod_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Mod_M, mem);
    set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
    set_Tuple_pred(n, pn_Mod_res, value);
 * transform a DivMod node
static ir_node *transform_node_DivMod(ir_node *n)
  int evaluated = 0;

  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);
  ir_mode *mode = get_irn_mode(a);
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
    return n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tb != tarval_bad) {
    if (tb == get_mode_one(get_tarval_mode(tb))) {
      b = new_Const (mode, get_mode_null(mode));
      evaluated = 1;

      DBG_OPT_CSTEVAL(n, b);
    }
    else if (ta != tarval_bad) {
      tarval *resa, *resb;
      resa = tarval_div (ta, tb);
      if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                           Jmp for X result!? */
      resb = tarval_mod (ta, tb);
      if (resb == tarval_bad) return n; /* Causes exception! */
      a = new_Const (mode, resa);
      b = new_Const (mode, resb);
      evaluated = 1;

      DBG_OPT_CSTEVAL(n, a);
      DBG_OPT_CSTEVAL(n, b);
    }
    else { /* Try architecture dependent optimization */
      arch_dep_replace_divmod_by_const(&a, &b, n);
      evaluated = a != NULL;
    }
  } else if (ta == get_mode_null(mode)) {
    /* 0 / non-Const = 0 */
    b = a;
    evaluated = 1;
  }

  if (evaluated) { /* replace by tuple */
    ir_node *mem = get_DivMod_mem(n);
    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M, mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div, a);
    set_Tuple_pred(n, pn_DivMod_res_mod, b);
    assert(get_nodes_block(n));
 * transform a Cond node
static ir_node *transform_node_Cond(ir_node *n)
  /* Replace the Cond by a Jmp if it branches on a constant
     condition. */
  ir_node *jmp;
  ir_node *a = get_Cond_selector(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) &&
      (get_irn_mode(a) == mode_b) &&
      (get_opt_unreachable_code())) {
    /* It's a boolean Cond, branching on a boolean constant.
       Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
    jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n));
    turn_into_tuple(n, 2);
    if (ta == tarval_b_true) {
      set_Tuple_pred(n, pn_Cond_false, new_Bad());
      set_Tuple_pred(n, pn_Cond_true, jmp);
    } else {
      set_Tuple_pred(n, pn_Cond_false, jmp);
      set_Tuple_pred(n, pn_Cond_true, new_Bad());
    }
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((ta != tarval_bad) &&
             (get_irn_mode(a) == mode_Iu) &&
             (get_Cond_kind(n) == dense) &&
             (get_opt_unreachable_code())) {
    /* I don't want to allow Tuples smaller than the biggest Proj.
       Also this tuple might get really big...
       I generate the Jmp here, and remember it in link.  Link is used
       when optimizing Proj. */
    set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_block(n)));
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((get_irn_op(a) == op_Eor)
             && (get_irn_mode(a) == mode_b)
             && (classify_tarval(value_of(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
    /* The Eor is a negate.  Generate a new Cond without the negate,
       simulate the negate by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Eor_left(a)));
  } else if ((get_irn_op(a) == op_Not)
             && (get_irn_mode(a) == mode_b)) {
    /* A Not before the Cond.  Generate a new Cond without the Not,
       simulate the Not by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Not_op(a)));
static ir_node *transform_node_Eor(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  if ((get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
    /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));

    DBG_OPT_ALGSIM0(oldn, n);
  }
  else if ((get_irn_mode(n) == mode_b)
           && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) {
    /* The Eor is a Not. Replace it by a Not. */
    /* ????!!! Extend to bitfield 1111111. */
    n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b);

    DBG_OPT_ALGSIM0(oldn, n);
 * Transform a boolean Not.
static ir_node *transform_node_Not(ir_node *n)
  ir_node *oldn = n;

  ir_node *a = get_Not_op(n);

  if (   (get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
    /* We negate a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));
    DBG_OPT_ALGSIM0(oldn, n);
 * Transform a Cast of a Const into a new Const
static ir_node *transform_node_Cast(ir_node *n) {
  ir_node *oldn = n;
  ir_node *pred = get_Cast_op(n);
  type *tp = get_irn_type(pred);

  if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
    n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
                          get_Const_tarval(pred), tp);
    DBG_OPT_CSTEVAL(oldn, n);
  } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
    n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
                             get_SymConst_kind(pred), tp);
    DBG_OPT_CSTEVAL(oldn, n);
 * Does all optimizations on nodes that must be done on their Projs
 * because of creating new nodes.
 *
 * Transform a Div/Mod/DivMod with a non-zero constant.
 * Removes the exceptions and routes the memory to the NoMem node.
 *
 * Optimizes jump tables by removing all impossible cases.
 *
 * Normalizes Cmp nodes.
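 *
 * Normalization example (a sketch of what the Cmp case below does):
 * Proj_Lt(Cmp(Const 3, x)) is rewritten to Proj_Gt(Cmp(x, Const 3)),
 * and Proj_Lt(Cmp(x, Const 3)) then becomes Proj_Le(Cmp(x, Const 2)),
 * shrinking the magnitude of the constant (for integer modes).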
static ir_node *transform_node_Proj(ir_node *proj)
  ir_node *n = get_Proj_pred(proj);
  ir_node *b;
  tarval *tb;
  long proj_nr;

  switch (get_irn_opcode(n)) {
  case iro_Div:
    b  = get_Div_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_Div_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_Div_mem(n);
        set_Div_mem(n, get_irg_no_mem(current_ir_graph));
        if (proj_nr == pn_Div_M)
          return res;

  case iro_Mod:
    b  = get_Mod_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_Mod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_Mod_mem(n);
        set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
        if (proj_nr == pn_Mod_M)
          return res;

  case iro_DivMod:
    b  = get_DivMod_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_DivMod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_DivMod_mem(n);
        set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
        if (proj_nr == pn_DivMod_M)
          return res;

  case iro_Cond:
    if (get_opt_unreachable_code()) {
      b = get_Cond_selector(n);
      tb = value_of(b);

      if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
        /* we have a constant switch */
        long num = get_Proj_proj(proj);

        if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
          if (get_tarval_long(tb) == num) {
            /* Do NOT create a jump here, or we will have 2 control flow ops
             * in a block. This case is optimized away in optimize_cf(). */
  case iro_Cmp:
    if (get_opt_reassociation()) {
      ir_node *left  = get_Cmp_left(n);
      ir_node *right = get_Cmp_right(n);
      ir_node *c     = NULL;
      tarval *tv     = NULL;
      int changed    = 0;
      ir_mode *mode  = NULL;

      proj_nr = get_Proj_proj(proj);

      /*
       * First step: normalize the compare op
       * by placing the constant on the right side
       * or moving the lower address node to the left.
       * We ignore the case that both are constants,
       * as such a compare should have been optimized away.
       */
      if (get_irn_op(right) == op_Const)
        c = right;
      else if (get_irn_op(left) == op_Const) {
        c     = left;
        left  = right;
        right = c;

        proj_nr = get_swapped_pnc(proj_nr);
        changed |= 1;
      }
      else if (left > right) {
        ir_node *t = left;

        left  = right;
        right = t;

        proj_nr = get_swapped_pnc(proj_nr);
        changed |= 1;
      }

      /*
       * Second step: Try to reduce the magnitude
       * of a constant. This may help to generate better code
       * later and may help to normalize more compares.
       * Of course this is only possible for integer values.
       */
      if (c) {
        mode = get_irn_mode(c);
        tv   = get_Const_tarval(c);

        if (tv != tarval_bad) {
          /* the following optimization is possible on non-int values too:
           * -a CMP c  ==>  a swap(CMP) -c */
          if (get_opt_constant_folding() && get_irn_op(left) == op_Minus) {
            left = get_Minus_op(left);
            tv = tarval_sub(get_tarval_null(mode), tv);

            proj_nr = get_swapped_pnc(proj_nr);
            changed |= 2;
          }

          if (mode_is_int(mode)) {
            /* Ne includes Unordered which is not possible on integers.
             * However, frontends often use this wrong, so fix it here */
            if (proj_nr == pn_Cmp_Ne)
              proj_nr = pn_Cmp_Lg;

            /* c > 0 : a < c  ==>  a <= (c-1)    a >= c  ==>  a > (c-1) */
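            /* Why the XOR with pn_Cmp_Eq below works (assuming the
               standard encoding pn_Cmp_Eq = 1, pn_Cmp_Lt = 2, pn_Cmp_Gt = 4):
                 Lt (2) ^ Eq (1) = 3 = Le,   Ge (5) ^ Eq (1) = 4 = Gt
               so a single XOR turns the strict relation into the non-strict
               one and vice versa, exactly what shifting the constant by
               one requires. */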
            if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
                tarval_cmp(tv, get_tarval_null(mode)) == pn_Cmp_Gt) {
              tv = tarval_sub(tv, get_tarval_one(mode));

              proj_nr ^= pn_Cmp_Eq;
              changed |= 2;
            }
            /* c < 0 : a > c  ==>  a >= (c+1)    a <= c  ==>  a < (c+1) */
            else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
                     tarval_cmp(tv, get_tarval_null(mode)) == pn_Cmp_Lt) {
              tv = tarval_add(tv, get_tarval_one(mode));

              proj_nr ^= pn_Cmp_Eq;
              changed |= 2;
            }

            /* a-b == 0  ==>  a == b,   a-b != 0  ==>  a != b */
            if (classify_tarval(tv) == TV_CLASSIFY_NULL && get_irn_op(left) == op_Sub) {
              if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
                right = get_Sub_right(left);
                left  = get_Sub_left(left);

                tv = value_of(right);
                changed = 1;
              }
            }
          }
        }
      }

      if (changed) {
        ir_node *block = get_nodes_block(n);

        if (changed & 2)      /* need a new Const */
          right = new_Const(mode, tv);

        /* create a new compare */
        n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block,
                       left, right);

        set_Proj_pred(proj, n);
        set_Proj_proj(proj, proj_nr);

    /* should not happen, but if it does will be optimized away */
    break;

  /* we have added a Tuple, optimize it for the current Proj away */
  return equivalent_node_Proj(proj);
 * returns the operands of a commutative bin-op.  If one operand is
 * a Const, it is returned as the second one.
static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
  ir_node *op_a = get_binop_left(binop);
  ir_node *op_b = get_binop_right(binop);

  assert(is_op_commutative(get_irn_op(binop)));

  if (get_irn_op(op_a) == op_Const) {
    *a = op_b;
    *c = op_a;
  } else {
    *a = op_a;
    *c = op_b;
  }
 * Optimize an Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
 * Such pattern may arise in bitfield stores.
 *
 * value  c4                  value      c4 & c2
 *    AND     c3                    AND           c1 | c3
 *        OR     c2      ===>               OR
 *           AND    c1
 *               OR
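 *
 * Concrete 8-bit instance (illustrative values, not from the code):
 * with c4 = 0xF0, c3 = 0x0A (store the low nibble), then c2 = 0xCF,
 * c1 = 0x10 (store bits 4-5), the conditions checked below hold:
 *   c4 | c2 = 0xFF (all ones),   c3 & ~c4 = c3,   c1 & ~c2 = c1,
 * and the chain folds to And(v, c4 & c2 = 0xC0) | (c1 | c3 = 0x1A).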
static ir_node *transform_node_Or_bf_store(ir_node *or)
  ir_node *and, *c1;
  ir_node *or_l, *c2;
  ir_node *and_l, *c3;
  ir_node *value, *c4;
  ir_node *new_and, *new_const, *block;
  ir_mode *mode = get_irn_mode(or);

  tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;

  get_comm_Binop_Ops(or, &and, &c1);
  if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
    return or;

  get_comm_Binop_Ops(and, &or_l, &c2);
  if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
    return or;

  get_comm_Binop_Ops(or_l, &and_l, &c3);
  if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
    return or;

  get_comm_Binop_Ops(and_l, &value, &c4);
  if (get_irn_op(c4) != op_Const)
    return or;

  /* ok, found the pattern, check for conditions */
  assert(mode == get_irn_mode(and));
  assert(mode == get_irn_mode(or_l));
  assert(mode == get_irn_mode(and_l));

  tv1 = get_Const_tarval(c1);
  tv2 = get_Const_tarval(c2);
  tv3 = get_Const_tarval(c3);
  tv4 = get_Const_tarval(c4);

  tv = tarval_or(tv4, tv2);
  if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
    /* have at least one 0 at the same bit position */
    return or;
  }

  n_tv4 = tarval_not(tv4);
  if (tv3 != tarval_and(tv3, n_tv4)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  n_tv2 = tarval_not(tv2);
  if (tv1 != tarval_and(tv1, n_tv2)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  /* ok, all conditions met */
  block = get_nodes_block(or);

  new_and = new_r_And(current_ir_graph, block,
                      value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);

  new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));

  set_Or_left(or, new_and);
  set_Or_right(or, new_const);

  /* check for more */
  return transform_node_Or_bf_store(or);
 * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot
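 *
 * E.g. in a 32-bit mode, Or(Shl(x, 3), Shr(x, 29)) is turned into
 * Rot(x, 3); the Sub branches below match Or(Shl(x, v), Shr(x, 32 - v))
 * with a non-constant v as well.  (Illustration only; the actual checks
 * are in the code below.)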
static ir_node *transform_node_Or_Rot(ir_node *or)
  ir_mode *mode = get_irn_mode(or);
  ir_node *shl, *shr, *block;
  ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
  tarval *tv1, *tv2;

  if (! mode_is_int(mode))
    return or;

  shl = get_binop_left(or);
  shr = get_binop_right(or);

  if (get_irn_op(shl) == op_Shr) {
    if (get_irn_op(shr) != op_Shl)
      return or;

    irn = shl;
    shl = shr;
    shr = irn;
  }
  else if (get_irn_op(shl) != op_Shl)
    return or;
  else if (get_irn_op(shr) != op_Shr)
    return or;

  x = get_Shl_left(shl);
  if (x != get_Shr_left(shr))
    return or;

  c1 = get_Shl_right(shl);
  c2 = get_Shr_right(shr);
  if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
    tv1 = get_Const_tarval(c1);
    if (! tarval_is_long(tv1))
      return or;

    tv2 = get_Const_tarval(c2);
    if (! tarval_is_long(tv2))
      return or;

    if (get_tarval_long(tv1) + get_tarval_long(tv2)
        != get_mode_size_bits(mode))
      return or;

    /* ok, condition met */
    block = get_nodes_block(or);

    n = new_r_Rot(current_ir_graph, block, x, c1, mode);

    DBG_OPT_ALGSIM1(or, shl, shr, n);
    return n;
  }
  else if (get_irn_op(c1) == op_Sub) {
    v   = c2;
    sub = c1;

    if (get_Sub_right(sub) != v)
      return or;

    c1 = get_Sub_left(sub);
    if (get_irn_op(c1) != op_Const)
      return or;

    tv1 = get_Const_tarval(c1);
    if (! tarval_is_long(tv1))
      return or;

    if (get_tarval_long(tv1) != get_mode_size_bits(mode))
      return or;

    /* ok, condition met */
    block = get_nodes_block(or);

    /* a Rot right is not supported, so use a rot left */
    n = new_r_Rot(current_ir_graph, block, x, sub, mode);

    DBG_OPT_ALGSIM0(or, n);
    return n;
  }
  else if (get_irn_op(c2) == op_Sub) {
    v   = c1;
    sub = c2;

    c1 = get_Sub_left(sub);
    if (get_irn_op(c1) != op_Const)
      return or;

    tv1 = get_Const_tarval(c1);
    if (! tarval_is_long(tv1))
      return or;

    if (get_tarval_long(tv1) != get_mode_size_bits(mode))
      return or;

    /* ok, condition met */
    block = get_nodes_block(or);

    n = new_r_Rot(current_ir_graph, block, x, v, mode);

    DBG_OPT_ALGSIM0(or, n);
static ir_node *transform_node_Or(ir_node *or)
  or = transform_node_Or_bf_store(or);
  or = transform_node_Or_Rot(or);

  return or;

static ir_node *transform_node(ir_node *n);

 * Optimize (a >> c1) >> c2; works for Shr, Shrs, Shl
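 *
 * E.g. Shl(Shl(x, 3), 5) becomes Shl(x, 8).  The modulo-shift check below
 * guards this: on a mode with modulo_shift 32, combining 20 + 20 into a
 * single shift by 40 would change the meaning, so such sums are left
 * alone.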
static ir_node *transform_node_shift(ir_node *n)
  ir_node *left, *right;
  tarval *tv1, *tv2, *res;
  ir_mode *mode;
  int modulo_shf, flag;

  left = get_binop_left(n);

  /* different operations */
  if (get_irn_op(left) != get_irn_op(n))
    return n;

  right = get_binop_right(n);
  tv1 = value_of(right);
  if (tv1 == tarval_bad)
    return n;

  tv2 = value_of(get_binop_right(left));
  if (tv2 == tarval_bad)
    return n;

  res = tarval_add(tv1, tv2);

  /* beware: a simple replacement works only if res < modulo shift */
  mode = get_irn_mode(n);

  flag = 0;

  modulo_shf = get_mode_modulo_shift(mode);
  if (modulo_shf > 0) {
    tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res));

    if (tarval_cmp(res, modulo) & pn_Cmp_Lt)
      flag = 1;
  }
  else
    flag = 1;

  if (flag) {
    /* ok, we can replace it */
    ir_node *in[2], *irn, *block = get_nodes_block(n);

    in[0] = get_binop_left(left);
    in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);

    irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);

    DBG_OPT_ALGSIM0(n, irn);

    return transform_node(irn);
static ir_node *transform_node_End(ir_node *n) {
  int i, n_keepalives = get_End_n_keepalives(n);

  /* Remove dead blocks in keepalive list.
     We do not generate a new End node. */
  for (i = 0; i < n_keepalives; ++i) {
    ir_node *ka = get_End_keepalive(n, i);
    if (is_Block(ka) && is_Block_dead(ka))
      set_End_keepalive(n, i, new_Bad());
 * Tries several [inplace] [optimizing] transformations and returns an
 * equivalent node.  The difference from equivalent_node() is that these
 * transformations _do_ generate new nodes, and thus the old node must
 * not be freed even if the equivalent node isn't the old one.
static ir_node *transform_node(ir_node *n)
  if (n->op->transform_node)
    n = n->op->transform_node(n);
  return n;

 * set the default transform node operation
static ir_op *firm_set_default_transform_node(ir_op *op)
    op->transform_node = transform_node_##a;  \
    op->transform_node = transform_node_shift;
    op->transform_node = NULL;
/* **************** Common Subexpression Elimination **************** */

/** The size of the hash table used, should estimate the number of nodes
    in a graph. */
#define N_IR_NODES 512
/** Compares the attributes of two Const nodes. */
static int node_cmp_attr_Const(ir_node *a, ir_node *b)
  return (get_Const_tarval(a) != get_Const_tarval(b))
      || (get_Const_type(a) != get_Const_type(b));

/** Compares the attributes of two Proj nodes. */
static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
  return get_irn_proj_attr (a) != get_irn_proj_attr (b);

/** Compares the attributes of two Filter nodes. */
static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
  return get_Filter_proj(a) != get_Filter_proj(b);

/** Compares the attributes of two Alloc nodes. */
static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
  return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
      || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);

/** Compares the attributes of two Free nodes. */
static int node_cmp_attr_Free(ir_node *a, ir_node *b)
  return (get_irn_free_attr(a).where != get_irn_free_attr(b).where)
      || (get_irn_free_attr(a).type != get_irn_free_attr(b).type);

/** Compares the attributes of two SymConst nodes. */
static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
  return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
      || (get_irn_symconst_attr(a).sym.type_p != get_irn_symconst_attr(b).sym.type_p)
      || (get_irn_symconst_attr(a).tp != get_irn_symconst_attr(b).tp);

/** Compares the attributes of two Call nodes. */
static int node_cmp_attr_Call(ir_node *a, ir_node *b)
  return (get_irn_call_attr(a) != get_irn_call_attr(b));

/** Compares the attributes of two Sel nodes. */
static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
  return (get_irn_sel_attr(a).ent->kind    != get_irn_sel_attr(b).ent->kind)
      || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
      || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
      || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
      || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);

/** Compares the attributes of two Phi nodes. */
static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
  return get_irn_phi_attr (a) != get_irn_phi_attr (b);

/** Compares the attributes of two Cast nodes. */
static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
  return get_Cast_type(a) != get_Cast_type(b);
2250 static int node_cmp_attr_Load(ir_node *a, ir_node *b)
2252 if (get_Load_volatility(a) == volatility_is_volatile ||
2253 get_Load_volatility(b) == volatility_is_volatile)
2254 /* NEVER do CSE on volatile Loads */
2257 return get_Load_mode(a) != get_Load_mode(b);
2260 /** Compares the attributes of two Store nodes. */
2261 static int node_cmp_attr_Store(ir_node *a, ir_node *b)
2263 /* NEVER do CSE on volatile Stores */
2264 return (get_Store_volatility(a) == volatility_is_volatile ||
2265 get_Store_volatility(b) == volatility_is_volatile);
2269 * set the default node attribute compare operation
2271 static ir_op *firm_set_default_node_cmp_attr(ir_op *op)
2275 op->node_cmp_attr = node_cmp_attr_##a; \
2292 op->node_cmp_attr = NULL;
 * Compare function for two nodes in the hash table.  Gets two
 * nodes as parameters.  Returns 0 if the nodes are a cse.
static int
vt_cmp (const void *elt, const void *key)
  ir_node *a, *b;
  int i, irn_arity_a;

  a = (void *)elt;
  b = (void *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are of equal length */
  irn_arity_a = get_irn_intra_arity (a);
  if (irn_arity_a != get_irn_intra_arity(b))
    return 1;

  /* for block-local cse and op_pin_state_pinned nodes: */
  if (!get_opt_global_cse() || (get_irn_pinned(a) == op_pin_state_pinned)) {
    if (get_irn_intra_n(a, -1) != get_irn_intra_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < irn_arity_a; i++)
    if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i))
      return 1;

  /*
   * here, we already know that the nodes are identical except their
   * attributes
   */
  if (a->op->node_cmp_attr)
    return a->op->node_cmp_attr(a, b);

  return 0;
 * Calculate a hash value of a node.
static unsigned
ir_node_hash (ir_node *node)
  unsigned h;
  int i, irn_arity;

  if (node->op == op_Const) {
    /* special value for const, as they only differ in their tarval. */
    h = HASH_PTR(node->attr.con.tv);
    h = 9*h + HASH_PTR(get_irn_mode(node));
  } else if (node->op == op_SymConst) {
    /* special value for SymConst, as they only differ in their symbol. */
    h = HASH_PTR(node->attr.i.sym.type_p);
    h = 9*h + HASH_PTR(get_irn_mode(node));
  } else {
    /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
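    /* e.g. for a binary node (arity 2) this works out to
         h = 9*(9*(9*(9*2 + ptr(in[0])) + ptr(in[1])) + ptr(mode)) + ptr(op)
       -- just the formula above written out with pointer hashes. */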
    h = irn_arity = get_irn_intra_arity(node);

    /* consider all in nodes... except the block if not a control flow. */
    for (i = is_cfop(node) ? -1 : 0;  i < irn_arity;  i++) {
      h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
    }

    h = 9*h + HASH_PTR(get_irn_mode(node));
    h = 9*h + HASH_PTR(get_irn_op(node));
  }
  return h;

pset *
new_identities(void) {
  return new_pset(vt_cmp, N_IR_NODES);
}

void
del_identities(pset *value_table) {
  del_pset(value_table);
}
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table.
 *
 * For Const nodes this is performed in the constructor, too.  Const
 * nodes are extremely time critical because of their frequent use in
 * constant string arrays.
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
  ir_node *o;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform  a OP b == b OP a */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
 * During construction we set the op_pin_state_pinned flag in the graph right when the
 * optimization is performed.  The flag turning on procedure global cse could
 * be changed between two allocations.  This way we are safe.
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  return n;
}

 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table, enters it in the table
 * if it isn't there yet.
static ir_node *
identify_remember (pset *value_table, ir_node *n)
  ir_node *o;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform  a OP b == b OP a */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, n, ir_node_hash (n));

  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  if (get_opt_cse() && (get_irn_opcode(node) != iro_Block))
    identify_remember (value_table, node);
}
 * garbage in, garbage out.  If a node has a dead input, i.e., the
 * Bad node is input to the node, return the Bad node.
static INLINE ir_node *
gigo (ir_node *node)
  int i, irn_arity;
  ir_op* op = get_irn_op(node);

  /* remove garbage blocks by looking at control flow that leaves the block
     and replacing the control flow by Bad. */
  if (get_irn_mode(node) == mode_X) {
    ir_node *block = get_nodes_block(node);
    if (!get_Block_matured(block)) return node;  /* Don't optimize nodes in immature blocks. */
    if (op == op_End) return node;               /* Don't optimize End, may have Bads. */

    if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
      irn_arity = get_irn_arity(block);
      for (i = 0; i < irn_arity; i++) {
        if (!is_Bad(get_irn_n(block, i))) break;
      }
      if (i == irn_arity) return new_Bad();
    }
  }

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
    irn_arity = get_irn_arity(node);

    if (is_Block_dead(get_nodes_block(node)))
      return new_Bad();

    for (i = 0; i < irn_arity; i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* With this code we violate the agreement that local_optimize
     only leaves Bads in Block, Phi and Tuple nodes. */
  /* If Block has only Bads as predecessors it's garbage. */
  /* If Phi has only Bads as predecessors it's garbage. */
  if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
    irn_arity = get_irn_arity(node);
    for (i = 0; i < irn_arity; i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == irn_arity) node = new_Bad();
  }

  return node;
 * These optimizations deallocate nodes from the obstack.
 * This function can only be called if it is guaranteed that no other nodes
 * reference this one, i.e., right after construction of a node.
ir_node *
optimize_node (ir_node *n)
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  type *old_tp = get_irn_type(n);
  {
    int i, arity = get_irn_arity(n);
    for (i = 0; i < arity && !old_tp; ++i)
      old_tp = get_irn_type(get_irn_n(n, i));
  }

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value(n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        ir_node *nw;

        /*
         * we MUST copy the node here temporarily, because it's still needed
         * for DBG_OPT_CSTEVAL
         */
        int node_size = offsetof(ir_node, attr) + n->op->attr_size;
        oldn = alloca(node_size);

        memcpy(oldn, n, node_size);
        CLONE_ARR_A(ir_node *, oldn->in, n->in);

        /* ARG, copy the in array, we need it for statistics */
        memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));

        edges_node_deleted(n, current_ir_graph);

        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        nw = new_Const (get_tarval_mode (tv), tv);

        if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
          set_Const_type(nw, old_tp);
        DBG_OPT_CSTEVAL(oldn, nw);

        return nw;
  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||
      (iro == iro_Proj) ||
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of the node's predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != oldn) {
    edges_node_deleted(oldn, current_ir_graph);

    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, oldn);

    return n;
  }

  /* Some more constant expression evaluation that does not allow to
     free the node. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj) ||
      (iro == iro_Sel))      /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we have a legal, useful node. Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
 * These optimizations never deallocate nodes (in place).  This can cause dead
 * nodes lying on the obstack.  Remove these by a dead node elimination,
 * i.e., a copying garbage collection.
ir_node *
optimize_in_place_2 (ir_node *n)
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  type *old_tp = get_irn_type(n);
  {
    int i, arity = get_irn_arity(n);
    for (i = 0; i < arity && !old_tp; ++i)
      old_tp = get_irn_type(get_irn_n(n, i));
  }

  if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;

  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible.  Why? */
    return n;
  }

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value(n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);

        if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
          set_Const_type(n, old_tp);

        DBG_OPT_CSTEVAL(oldn, n);
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||   /* ... */
      (iro == iro_Proj) ||   /* ... */
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of the node's predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }
  /* Some more constant expression evaluation. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj) ||
      (iro == iro_Sel))      /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node. Enter it in hash table for cse.
     Blocks should be unique anyways.  (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
 * Wrapper for external use, set proper status bits after optimization.
ir_node *
optimize_in_place (ir_node *n)
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
 * set the default ir op operations
ir_op *firm_set_default_operations(ir_op *op)
  op = firm_set_default_computed_value(op);
  op = firm_set_default_equivalent_node(op);
  op = firm_set_default_transform_node(op);
  op = firm_set_default_node_cmp_attr(op);
  op = firm_set_default_get_type(op);

  return op;