/*
 * File name:   ir/ir/iropt.c
 * Purpose:     iropt --- optimizations intertwined with IR construction.
 * Author:      Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2005 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
 */
# include "irnode_t.h"
# include "irgraph_t.h"
# include "iredges_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
# include "opt_polymorphy.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/**
 * Trivial INLINEable routine for copy propagation.
 * Does follow Ids, needed to optimize INLINEd code.
 */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}
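
/* Example (illustrative): after inlining, a value may only be reachable
   through a chain like Id(Id(x)); the loop above returns the underlying
   x directly, which is why INLINEd code needs this helper. */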
/**
 * return the value of a Constant
 */
static tarval *computed_value_Const(ir_node *n)
{
  return get_Const_tarval(n);
}
/**
 * return the value of a 'sizeof' SymConst
 */
static tarval *computed_value_SymConst(ir_node *n)
{
  if ((get_SymConst_kind(n) == symconst_size) &&
      (get_type_state(get_SymConst_type(n))) == layout_fixed)
    return new_tarval_from_long(get_type_size_bytes(get_SymConst_type(n)), get_irn_mode(n));
  return tarval_bad;
}
/**
 * return the value of an Add
 */
static tarval *computed_value_Add(ir_node *n)
{
  ir_node *a = get_Add_left(n);
  ir_node *b = get_Add_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b)))
    return tarval_add(ta, tb);

  return tarval_bad;
}
/**
 * return the value of a Sub
 * Special case: a - a
 */
static tarval *computed_value_Sub(ir_node *n)
{
  ir_node *a = get_Sub_left(n);
  ir_node *b = get_Sub_right(n);
  tarval *ta;
  tarval *tb;

  /* a - a == 0 (but not for Bad inputs) */
  if (a == b && !is_Bad(a))
    return get_tarval_null(get_irn_mode(n));

  ta = value_of(a);
  tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b)))
    return tarval_sub(ta, tb);

  return tarval_bad;
}
/**
 * return the value of an unary Minus
 */
static tarval *computed_value_Minus(ir_node *n)
{
  ir_node *a = get_Minus_op(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
    return tarval_neg(ta);

  return tarval_bad;
}
/**
 * return the value of a Mul
 * Special case: a * 0 and 0 * b
 */
static tarval *computed_value_Mul(ir_node *n)
{
  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    return tarval_mul(ta, tb);
  } else {
    /* a*0 = 0 or 0*b = 0:
       return the 0 with the proper mode */
    if ((ta != tarval_bad) && (ta == get_mode_null(get_tarval_mode(ta))))
      return ta;
    if ((tb != tarval_bad) && (tb == get_mode_null(get_tarval_mode(tb))))
      return tb;
  }
  return tarval_bad;
}
/**
 * return the value of a floating point Quot
 */
static tarval *computed_value_Quot(ir_node *n)
{
  ir_node *a = get_Quot_left(n);
  ir_node *b = get_Quot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))   /* div by zero: return tarval_bad */
      return tarval_quo(ta, tb);
  }
  return tarval_bad;
}
/**
 * calculate the value of an integer Div of two nodes
 * Special case: 0 / b
 */
static tarval *do_computed_value_Div(ir_node *a, ir_node *b)
{
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* Compute c1 / c2 or 0 / a, a != 0 */
  if (ta != tarval_bad) {
    if ((tb != tarval_bad) && (tb != get_mode_null(get_irn_mode(b))))   /* div by zero: return tarval_bad */
      return tarval_div(ta, tb);
    else if (ta == get_mode_null(get_tarval_mode(ta)))   /* 0 / b == 0 */
      return ta;
  }
  return tarval_bad;
}
/**
 * return the value of an integer Div
 */
static tarval *computed_value_Div(ir_node *n)
{
  return do_computed_value_Div(get_Div_left(n), get_Div_right(n));
}
/**
 * calculate the value of an integer Mod of two nodes
 * Special case: a % 1
 */
static tarval *do_computed_value_Mod(ir_node *a, ir_node *b)
{
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* Compute c1 % c2 or a % 1 */
  if (tb != tarval_bad) {
    if ((ta != tarval_bad) && (tb != get_mode_null(get_tarval_mode(tb))))   /* mod by zero: return tarval_bad */
      return tarval_mod(ta, tb);
    else if (tb == get_mode_one(get_tarval_mode(tb)))    /* x mod 1 == 0 */
      return get_mode_null(get_irn_mode(a));
  }
  return tarval_bad;
}
/**
 * return the value of an integer Mod
 */
static tarval *computed_value_Mod(ir_node *n)
{
  return do_computed_value_Mod(get_Mod_left(n), get_Mod_right(n));
}
/**
 * return the value of an Abs
 */
static tarval *computed_value_Abs(ir_node *n)
{
  ir_node *a = get_Abs_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_abs(ta);

  return tarval_bad;
}
/**
 * return the value of an And
 * Special case: a & 0, 0 & b
 */
static tarval *computed_value_And(ir_node *n)
{
  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_and (ta, tb);
  } else {
    tarval *v;

    if (   (classify_tarval ((v = ta)) == TV_CLASSIFY_NULL)
        || (classify_tarval ((v = tb)) == TV_CLASSIFY_NULL)) {
      return v;
    }
  }
  return tarval_bad;
}
/**
 * return the value of an Or
 * Special case: a | 1...1, 1...1 | b
 */
static tarval *computed_value_Or(ir_node *n)
{
  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_or (ta, tb);
  } else {
    tarval *v;

    if (   (classify_tarval ((v = ta)) == TV_CLASSIFY_ALL_ONE)
        || (classify_tarval ((v = tb)) == TV_CLASSIFY_ALL_ONE)) {
      return v;
    }
  }
  return tarval_bad;
}
/**
 * return the value of an Eor
 * Special case: a ^ a
 */
static tarval *computed_value_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  tarval *ta, *tb;

  if (a == b)
    return get_tarval_null(get_irn_mode(n));

  ta = value_of(a);
  tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_eor (ta, tb);
  }
  return tarval_bad;
}
/**
 * return the value of a Not
 */
static tarval *computed_value_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_not(ta);

  return tarval_bad;
}
/**
 * return the value of a Shl
 */
static tarval *computed_value_Shl(ir_node *n)
{
  ir_node *a = get_Shl_left(n);
  ir_node *b = get_Shl_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shl (ta, tb);
  }
  return tarval_bad;
}
/**
 * return the value of a Shr
 */
static tarval *computed_value_Shr(ir_node *n)
{
  ir_node *a = get_Shr_left(n);
  ir_node *b = get_Shr_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shr (ta, tb);
  }
  return tarval_bad;
}
/**
 * return the value of a Shrs
 */
static tarval *computed_value_Shrs(ir_node *n)
{
  ir_node *a = get_Shrs_left(n);
  ir_node *b = get_Shrs_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shrs (ta, tb);
  }
  return tarval_bad;
}
/**
 * return the value of a Rot
 */
static tarval *computed_value_Rot(ir_node *n)
{
  ir_node *a = get_Rot_left(n);
  ir_node *b = get_Rot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_rot (ta, tb);
  }
  return tarval_bad;
}
/**
 * return the value of a Conv
 */
static tarval *computed_value_Conv(ir_node *n)
{
  ir_node *a = get_Conv_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_convert_to(ta, get_irn_mode(n));

  return tarval_bad;
}
/**
 * return the value of a Proj, handle Proj(Cmp), Proj(Div), Proj(Mod), Proj(DivMod)
 */
static tarval *computed_value_Proj(ir_node *n)
{
  ir_node *a = get_Proj_pred(n);
  ir_node *aa, *ab;
  long proj_nr;

  /* Optimize Cmp nodes.
     This performs a first step of unreachable code elimination.
     Proj can not be computed, but folding a Cmp above the Proj here is
     not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
     only one is used.
     There are several cases where we can evaluate a Cmp node:
     1. The nodes compared are both the same.  If we compare for
        equal, greater equal, ... this will return true, else it
        will return false.  This step relies on cse.
     2. The predecessors of Cmp are target values.  We can evaluate
        the Cmp.
     3. The predecessors are Allocs or void* constants.  Allocs never
        return NULL, they raise an exception.  Therefore we can predict
        the Cmp result. */
  switch (get_irn_opcode(a)) {
  case iro_Cmp:
    aa = get_Cmp_left(a);
    ab = get_Cmp_right(a);
    proj_nr = get_Proj_proj(n);

    if (aa == ab &&
        (!mode_is_float(get_irn_mode(aa)) || proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Gt)
       ) { /* 1.: */
      /* BEWARE: a == a is NOT always True for floating Point!!! */
      /* This is a trick with the bits used for encoding the Cmp
         Proj numbers, the following statement is not the same:
         return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */
      return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b);
    }
    else {
      tarval *taa = value_of(aa);
      tarval *tab = value_of(ab);

      if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
        /* strange checks... */
        pn_Cmp flags = tarval_cmp (taa, tab);
        if (flags != pn_Cmp_False) {
          return new_tarval_from_long (proj_nr & flags, mode_b);
        }
      } else {  /* check for 3.: */
        ir_node *aaa = skip_Id(skip_Proj(aa));
        ir_node *aba = skip_Id(skip_Proj(ab));

        if (   (   (/* aa is ProjP and aaa is Alloc */
                       (get_irn_op(aa) == op_Proj)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_irn_op(aaa) == op_Alloc))
                && (   (/* ab is constant void */
                           (get_irn_op(ab) == op_Const)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                    || (/* ab is other Alloc */
                           (get_irn_op(ab) == op_Proj)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_irn_op(aba) == op_Alloc)
                        && (aaa != aba))))
            || (/* aa is void and aba is Alloc */
                   (get_irn_op(aa) == op_Const)
                && (mode_is_reference(get_irn_mode(aa)))
                && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                && (get_irn_op(ab) == op_Proj)
                && (mode_is_reference(get_irn_mode(ab)))
                && (get_irn_op(aba) == op_Alloc)))
          /* 3.: */
          return new_tarval_from_long (proj_nr & pn_Cmp_Ne, mode_b);
      }
    }
    break;

  case iro_DivMod:
    /* compute either the Div or the Mod part */
    proj_nr = get_Proj_proj(n);
    if (proj_nr == pn_DivMod_res_div)
      return do_computed_value_Div(get_DivMod_left(a), get_DivMod_right(a));
    else if (proj_nr == pn_DivMod_res_mod)
      return do_computed_value_Mod(get_DivMod_left(a), get_DivMod_right(a));
    break;

  case iro_Div:
    if (get_Proj_proj(n) == pn_Div_res)
      return computed_value(a);
    break;

  case iro_Mod:
    if (get_Proj_proj(n) == pn_Mod_res)
      return computed_value(a);
    break;

  default:
    return tarval_bad;
  }
  return tarval_bad;
}
/**
 * calculate the value of a Mux: can be evaluated, if the
 * sel and the right input are known
 */
static tarval *computed_value_Mux(ir_node *n)
{
  ir_node *sel = get_Mux_sel(n);
  tarval *ts = value_of(sel);

  if (ts == get_tarval_b_true()) {
    ir_node *v = get_Mux_true(n);
    return value_of(v);
  }
  else if (ts == get_tarval_b_false()) {
    ir_node *v = get_Mux_false(n);
    return value_of(v);
  }
  return tarval_bad;
}
/**
 * If the parameter n can be computed, return its value, else tarval_bad.
 * Performs constant folding.
 *
 * @param n  The node to be evaluated
 */
tarval *computed_value(ir_node *n)
{
  if (n->op->computed_value)
    return n->op->computed_value(n);
  return tarval_bad;
}
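
/* Usage sketch (illustrative): a typical caller folds a node to a Const
   when evaluation succeeds; optimize_node() below does the real work,
   this is only the shape of it:

     tarval *tv = computed_value(n);
     if (tv != tarval_bad)
       n = new_Const(get_tarval_mode(tv), tv);
*/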
/**
 * set the default computed_value evaluator
 */
static ir_op *firm_set_default_computed_value(ir_op *op)
{
#define CASE(a)                               \
  case iro_##a:                               \
    op->computed_value = computed_value_##a;  \
    break

  switch (op->code) {
  CASE(Const); CASE(SymConst); CASE(Add); CASE(Sub); CASE(Minus); CASE(Mul);
  CASE(Quot); CASE(Div); CASE(Mod); CASE(Abs); CASE(And); CASE(Or);
  CASE(Eor); CASE(Not); CASE(Shl); CASE(Shr); CASE(Shrs); CASE(Rot);
  CASE(Conv); CASE(Proj); CASE(Mux);
  default:
    op->computed_value = NULL;
  }

#undef CASE
  return op;
}
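
/* Usage sketch (illustrative): when an ir_op is created, its evaluator is
   installed once, so computed_value() above is a single indirect call
   instead of a big switch at every node visit:

     ir_op *op = ...;                      // freshly created opcode
     firm_set_default_computed_value(op);  // fills op->computed_value
*/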
/** returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (mode_is_reference(get_irn_mode (a))
          && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;   /* different allocated objects are never identical */
  }
  return 0;
}
static ir_node *equivalent_node_Block(ir_node *n)
{
  ir_node *oldn = n;

  /* The Block constructor does not call optimize, but mature_immBlock
     calls the optimization. */
  assert(get_Block_matured(n));

  /* Straightening: a single entry Block following a single exit Block
     can be merged, if it is not the Start block. */
  /* !!! Beware, all Phi-nodes of n must have been optimized away.
     This should be true, as the block is matured before optimize is called.
     But what about Phi-cycles with the Phi0/Id that could not be resolved?
     Remaining Phi nodes are just Ids. */
  if ((get_Block_n_cfgpreds(n) == 1) &&
      (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- deal with the self cycle. */
      n = set_Block_dead(n);
      DBG_OPT_DEAD(oldn, n);
    } else if (get_opt_control_flow_straightening()) {
      n = predblock;
      DBG_OPT_STG(oldn, n);
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 1) &&
           (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* The Cond jumps into the block it is in -- deal with the self cycle. */
      n = set_Block_dead(n);
      DBG_OPT_DEAD(oldn, n);
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 2) &&
           (get_opt_control_flow_weak_simplification())) {
    /* Test whether Cond jumps twice to this block
       @@@ we could do this also with two loops finding two preds from several ones. */
    ir_node *a = get_Block_cfgpred(n, 0);
    ir_node *b = get_Block_cfgpred(n, 1);

    if ((get_irn_op(a) == op_Proj) &&
        (get_irn_op(b) == op_Proj) &&
        (get_Proj_pred(a) == get_Proj_pred(b)) &&
        (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
        (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
      /* Also a single entry Block following a single exit Block.  Phis have
         twice the same operand and will be optimized away. */
      n = get_nodes_block(a);
      DBG_OPT_IFSIM(oldn, a, b, n);
    }
  } else if (get_opt_unreachable_code() &&
             (n != current_ir_graph->start_block) &&
             (n != current_ir_graph->end_block)) {
    int i, n_cfg = get_Block_n_cfgpreds(n);

    /* If all inputs are dead, this block is dead too, except if it is
       the start or end block.  This is a step of unreachable code
       elimination. */
    for (i = 0; i < n_cfg; i++) {
      ir_node *pred = get_Block_cfgpred(n, i);
      ir_node *pred_blk;

      if (is_Bad(pred)) continue;
      pred_blk = get_nodes_block(pred);

      if (is_Block_dead(pred_blk)) continue;

      if (pred_blk != n) {
        /* really found a living input */
        break;
      }
    }
    if (i == n_cfg)
      n = set_Block_dead(n);
  }

  return n;
}
/**
 * Returns an equivalent node for a Jmp, a Bad :-)
 * Of course this only happens if the Block of the Jmp is Bad.
 */
static ir_node *equivalent_node_Jmp(ir_node *n)
{
  /* GL: Why not same for op_Raise?? */
  /* unreachable code elimination */
  if (is_Block_dead(get_nodes_block(n)))
    n = new_Bad();

  return n;
}
static ir_node *equivalent_node_Cond(ir_node *n)
{
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node(). */
  return n;
}
/**
 * optimize operations that are commutative and have neutral 0,
 * so a op 0 = 0 op a = a.
 */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  tarval *tv;
  ir_node *on;

  /* After running compute_node there is only one constant predecessor.
     Find this predecessor's value and remember the other node: */
  if ((tv = value_of(a)) != tarval_bad) {
    on = b;
  } else if ((tv = value_of(b)) != tarval_bad) {
    on = a;
  } else
    return n;

  /* If this predecessor's constant value is zero, the operation is
     unnecessary. Remove it: */
  if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
    n = on;

    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }

  return n;
}
#define equivalent_node_Add  equivalent_node_neutral_zero
#define equivalent_node_Eor  equivalent_node_neutral_zero
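
/* Example (illustrative; x and zero are placeholder nodes): with these
   defaults, building "x + 0" makes equivalent_node_Add return x itself:

     ir_node *sum = new_Add(x, zero, mode_Is);   // the constructor's
                                                 // optimize step yields x
*/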
/**
 * optimize operations that are not commutative but have a neutral
 * element 0 on the right, so a op 0 = a.
 */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
    n = a;

    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }

  return n;
}

#define equivalent_node_Sub   equivalent_node_left_zero
#define equivalent_node_Shl   equivalent_node_left_zero
#define equivalent_node_Shr   equivalent_node_left_zero
#define equivalent_node_Shrs  equivalent_node_left_zero
#define equivalent_node_Rot   equivalent_node_left_zero
/**
 * Er, a "symmetric unop", i.e. op(op(n)) = n.
 */
static ir_node *equivalent_node_symmetric_unop(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *pred = get_unop_op(n);

  /* optimize symmetric unop */
  if (get_irn_op(pred) == get_irn_op(n)) {
    n = get_unop_op(pred);
    DBG_OPT_ALGSIM2(oldn, pred, n);
  }
  return n;
}

/* Not(Not(x)) == x */
#define equivalent_node_Not    equivalent_node_symmetric_unop

/* --x == x */  /* ??? Is this possible or can --x raise an
                   out of bounds exception if min != max? */
#define equivalent_node_Minus  equivalent_node_symmetric_unop
/**
 * Optimize a * 1 = 1 * a = a.
 */
static ir_node *equivalent_node_Mul(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  /* Mul is commutative and has another neutral element: 1. */
  if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }
  return n;
}
/**
 * Optimize a / 1 = a.
 */
static ir_node *equivalent_node_Div(ir_node *n)
{
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  /* Div is not commutative. */
  if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn Div into a tuple (mem, bad, a) */
    ir_node *mem = get_Div_mem(n);
    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M,        mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());   /* no exception */
    set_Tuple_pred(n, pn_Div_res,      a);
  }
  return n;
}
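
/* Note (illustrative): after the Tuple replacement above, a user node
   Proj(div, pn_Div_res) now hangs off the Tuple; equivalent_node_Proj()
   below forwards such a Proj directly to a, and the exception Proj is
   forwarded to Bad. */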
/**
 * Optimize a / 1 = a for a DivMod; the Mod result is 0.
 */
static ir_node *equivalent_node_DivMod(ir_node *n)
{
  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);

  /* Div is not commutative. */
  if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn DivMod into a tuple (mem, bad, a, 0) */
    ir_node *mem = get_DivMod_mem(n);
    ir_mode *mode = get_irn_mode(b);

    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M,        mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());   /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div,  a);
    set_Tuple_pred(n, pn_DivMod_res_mod,  new_Const(mode, get_mode_null(mode)));
  }
  return n;
}
/**
 * Use algebraic simplification a | a = a | 0 = 0 | a = a.
 */
static ir_node *equivalent_node_Or(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  if (a == b) {
    n = a;    /* idempotence: a | a == a */
  } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_NULL) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }

  return n;
}
/**
 * Optimize a & 0b1...1 = 0b1...1 & a = a & a = a.
 */
static ir_node *equivalent_node_And(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  if (a == b) {
    n = a;    /* idempotence: a & a == a */
  } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_ALL_ONE) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ALL_ONE) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }
  return n;
}
/**
 * Try to remove useless Conv's.
 */
static ir_node *equivalent_node_Conv(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *a = get_Conv_op(n);
  ir_node *b;

  ir_mode *n_mode = get_irn_mode(n);
  ir_mode *a_mode = get_irn_mode(a);

  if (n_mode == a_mode) { /* No Conv necessary */
    n = a;
    DBG_OPT_ALGSIM3(oldn, a, n);
  } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
    ir_mode *b_mode;

    b = get_Conv_op(a);
    n_mode = get_irn_mode(n);
    b_mode = get_irn_mode(b);

    if (n_mode == b_mode) {
      if (n_mode == mode_b) {
        n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
        DBG_OPT_ALGSIM1(oldn, a, b, n);
      }
      else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
        if (smaller_mode(b_mode, a_mode)) {
          n = b;        /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
          DBG_OPT_ALGSIM1(oldn, a, b, n);
        }
      }
    }
  }
  return n;
}
/**
 * A Cast may be removed if the type of the previous node
 * is already the type of the Cast.
 */
static ir_node *equivalent_node_Cast(ir_node *n) {
  ir_node *pred = get_Cast_op(n);
  if (get_irn_type(pred) == get_Cast_type(n))
    n = pred;
  return n;
}
/* Several optimizations:
   - no Phi in start block.
   - remove Id operators that are inputs to Phi
   - fold Phi-nodes, iff they have only one predecessor except
     themselves.
*/
static ir_node *equivalent_node_Phi(ir_node *n)
{
  int i, n_preds;

  ir_node *oldn = n;
  ir_node *block = NULL;     /* to shut up gcc */
  ir_node *first_val = NULL; /* to shut up gcc */
  ir_node *scnd_val = NULL;  /* to shut up gcc */

  if (!get_opt_normalize()) return n;

  n_preds = get_Phi_n_preds(n);

  block = get_nodes_block(n);
  /* @@@ this can go away, but it should always hold:
     assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
  if ((is_Block_dead(block)) ||                  /* Control dead */
      (block == current_ir_graph->start_block))  /* There should be no Phi nodes */
    return new_Bad();                            /* in the Start Block. */

  if (n_preds == 0) return n;  /* Phi of dead Region without predecessors. */

  /* first we test for a special case: */
  /* Confirm is a special node fixing additional information for a
     value that is known at a certain point.  This is useful for
     dataflow analysis. */
  if (n_preds == 2) {
    ir_node *a = get_Phi_pred(n, 0);
    ir_node *b = get_Phi_pred(n, 1);
    if (   (get_irn_op(a) == op_Confirm)
        && (get_irn_op(b) == op_Confirm)
        && (follow_Id (get_irn_n(a, 0)) == follow_Id (get_irn_n(b, 0)))
        && (get_irn_n(a, 1) == get_irn_n (b, 1))
        && (a->data.num == (~b->data.num & irpn_True) )) {
      return get_irn_n(a, 0);
    }
  }

  /* If the Block has a Bad pred, we also have one. */
  for (i = 0; i < n_preds; ++i)
    if (is_Bad (get_Block_cfgpred(block, i)))
      set_Phi_pred(n, i, new_Bad());

  /* Find first non-self-referencing input */
  for (i = 0; i < n_preds; ++i) {
    first_val = get_Phi_pred(n, i);
    if (   (first_val != n)                 /* not self pointer */
        && (get_irn_op(first_val) != op_Bad)
       ) {        /* value not dead */
      break;      /* then found first value. */
    }
  }

  /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
  if (i >= n_preds) { return new_Bad(); }

  /* follow_Id () for rest of inputs, determine if any of these
     are non-self-referencing */
  while (++i < n_preds) {
    scnd_val = get_Phi_pred(n, i);
    if (   (scnd_val != n)
        && (scnd_val != first_val)
        && (get_irn_op(scnd_val) != op_Bad)
       ) {
      break;
    }
  }

  if (i >= n_preds) {
    /* Fold, if no multiple distinct non-self-referencing inputs */
    n = first_val;

    DBG_OPT_PHI(oldn, first_val, n);
  }

  /* skip the remaining Ids (done in get_Phi_pred). */
  /* superfluous, since we walk all to propagate Block's Bads.
     while (++i < n_preds) get_Phi_pred(n, i);     */

  return n;
}
/**
 * optimize Proj(Tuple) and gigo() for ProjX in Bad block
 */
static ir_node *equivalent_node_Proj(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Proj_pred(n);

  if ( get_irn_op(a) == op_Tuple) {
    /* Remove the Tuple/Proj combination. */
    if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
      n = get_Tuple_pred(a, get_Proj_proj(n));
      DBG_OPT_TUPLE(oldn, a, n);
    } else {
      assert(0); /* This should not happen! */
      n = new_Bad();
    }
  } else if (get_irn_mode(n) == mode_X &&
             is_Block_dead(get_nodes_block(n))) {
    /* Remove dead control flow -- early gigo(). */
    n = new_Bad();
  }
  return n;
}
/**
 * Remove Id's.
 */
static ir_node *equivalent_node_Id(ir_node *n)
{
  ir_node *oldn = n;

  n = follow_Id(n);
  DBG_OPT_ID(oldn, n);
  return n;
}
/**
 * Optimize a Mux by simple equivalences.
 */
static ir_node *equivalent_node_Mux(ir_node *n)
{
  ir_node *oldn = n, *sel = get_Mux_sel(n);
  tarval *ts = value_of(sel);

  /* Mux(true, f, t) == t */
  if (ts == get_tarval_b_true()) {
    n = get_Mux_true(n);
    DBG_OPT_ALGSIM0(oldn, n);
  }
  /* Mux(false, f, t) == f */
  else if (ts == get_tarval_b_false()) {
    n = get_Mux_false(n);
    DBG_OPT_ALGSIM0(oldn, n);
  }
  /* Mux(v, x, x) == x */
  else if (get_Mux_false(n) == get_Mux_true(n)) {
    n = get_Mux_true(n);
    DBG_OPT_ALGSIM0(oldn, n);
  }
  else if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(get_irn_mode(n))) {
    ir_node *cmp = get_Proj_pred(sel);
    long proj_nr = get_Proj_proj(sel);
    ir_node *b   = get_Mux_false(n);
    ir_node *a   = get_Mux_true(n);

    /*
     * Note: normalization puts the constant on the right side,
     * so we check only one case.
     *
     * Note further that these optimizations work even for floating point
     * with NaN's because -NaN == NaN.
     * However, if +0 and -0 is handled differently, we cannot use the first one.
     */
    if (get_irn_op(cmp) == op_Cmp && get_Cmp_left(cmp) == a) {
      if (classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
        /* Mux(a CMP 0, X, a) */
        if (get_irn_op(b) == op_Minus && get_Minus_op(b) == a) {
          /* Mux(a CMP 0, -a, a) */
          if (proj_nr == pn_Cmp_Eq) {
            /* Mux(a == 0, -a, a)  ==>  -a */
            n = b;
            DBG_OPT_ALGSIM0(oldn, n);
          }
          else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
            /* Mux(a != 0, -a, a)  ==>  a */
            n = a;
            DBG_OPT_ALGSIM0(oldn, n);
          }
        }
        else if (classify_Const(b) == CNST_NULL) {
          /* Mux(a CMP 0, 0, a) */
          if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
            /* Mux(a != 0, 0, a)  ==>  a */
            n = a;
            DBG_OPT_ALGSIM0(oldn, n);
          }
          else if (proj_nr == pn_Cmp_Eq) {
            /* Mux(a == 0, 0, a)  ==>  0 */
            n = b;
            DBG_OPT_ALGSIM0(oldn, n);
          }
        }
      }
    }
  }

  return n;
}
/**
 * Optimize -a CMP -b into b CMP a.
 * This works even for floating point.
 */
static ir_node *equivalent_node_Cmp(ir_node *n)
{
  ir_node *left  = get_Cmp_left(n);
  ir_node *right = get_Cmp_right(n);

  if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus) {
    left  = get_Minus_op(left);
    right = get_Minus_op(right);
    set_Cmp_left(n,  right);
    set_Cmp_right(n, left);
  }
  return n;
}
/**
 * equivalent_node() returns a node equivalent to input n. It skips all nodes that
 * perform no actual computation, as, e.g., the Id nodes.  It does not create
 * new nodes.  It is therefore safe to free n if the node returned is not n.
 * If a node returns a Tuple we can not just skip it.  If the size of the
 * in array fits, we transform n into a tuple (e.g., Div).
 */
ir_node *
equivalent_node(ir_node *n)
{
  if (n->op->equivalent_node)
    return n->op->equivalent_node(n);
  return n;
}
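
/* Usage sketch (illustrative): since equivalent_node() never allocates,
   a caller that owns n may reclaim it when a different node comes back:

     ir_node *e = equivalent_node(n);
     if (e != n)
       reclaim(n);   // 'reclaim' is a hypothetical stand-in; in this file
                     // only optimize_node() really frees from the obstack
*/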
/**
 * set the default equivalent node operation
 */
static ir_op *firm_set_default_equivalent_node(ir_op *op)
{
#define CASE(a)                                 \
  case iro_##a:                                 \
    op->equivalent_node = equivalent_node_##a;  \
    break

  switch (op->code) {
  CASE(Block); CASE(Jmp); CASE(Cond); CASE(Add); CASE(Eor); CASE(Sub);
  CASE(Shl); CASE(Shr); CASE(Shrs); CASE(Rot); CASE(Not); CASE(Minus);
  CASE(Mul); CASE(Div); CASE(DivMod); CASE(Or); CASE(And); CASE(Conv);
  CASE(Cast); CASE(Phi); CASE(Proj); CASE(Id); CASE(Mux); CASE(Cmp);
  default:
    op->equivalent_node = NULL;
  }

#undef CASE
  return op;
}
/**
 * Do node specific optimizations on a node's predecessors.
 */
static void
optimize_preds(ir_node *n) {
  ir_node *a = NULL, *b = NULL;

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  switch (get_irn_opcode(n)) {

  case iro_Cmp:
    /* We don't want Cast as input to Cmp. */
    if (get_irn_op(a) == op_Cast) {
      a = get_Cast_op(a);
      set_Cmp_left(n, a);
    }
    if (get_irn_op(b) == op_Cast) {
      b = get_Cast_op(b);
      set_Cmp_right(n, b);
    }
    break;

  default: break;
  }
}
/**
 * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
 * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
 * If possible, remove the Conv's.
 */
static ir_node *transform_node_AddSub(ir_node *n)
{
  ir_mode *mode = get_irn_mode(n);

  if (mode_is_reference(mode)) {
    ir_node *left  = get_binop_left(n);
    ir_node *right = get_binop_right(n);
    int ref_bits   = get_mode_size_bits(mode);

    if (get_irn_op(left) == op_Conv) {
      ir_mode *mode = get_irn_mode(left);
      int bits      = get_mode_size_bits(mode);

      if (ref_bits == bits &&
          mode_is_int(mode) &&
          get_mode_arithmetic(mode) == irma_twos_complement) {
        ir_node *pre      = get_Conv_op(left);
        ir_mode *pre_mode = get_irn_mode(pre);

        if (mode_is_int(pre_mode) &&
            get_mode_size_bits(pre_mode) == bits &&
            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
          /* ok, this conv just changes the sign, moreover the calculation
           * is done with same number of bits as our address mode, so
           * we can ignore the conv as address calculation can be viewed
           * as either signed or unsigned
           */
          set_binop_left(n, pre);
        }
      }
    }

    if (get_irn_op(right) == op_Conv) {
      ir_mode *mode = get_irn_mode(right);
      int bits      = get_mode_size_bits(mode);

      if (ref_bits == bits &&
          mode_is_int(mode) &&
          get_mode_arithmetic(mode) == irma_twos_complement) {
        ir_node *pre      = get_Conv_op(right);
        ir_mode *pre_mode = get_irn_mode(pre);

        if (mode_is_int(pre_mode) &&
            get_mode_size_bits(pre_mode) == bits &&
            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
          /* ok, this conv just changes the sign, moreover the calculation
           * is done with same number of bits as our address mode, so
           * we can ignore the conv as address calculation can be viewed
           * as either signed or unsigned
           */
          set_binop_right(n, pre);
        }
      }
    }
  }
  return n;
}
/**
 * Do the AddSub optimization, then Transform Add(a,a) into Mul(a, 2)
 * if the mode is integer or float.
 * Reassociation might fold this further.
 */
static ir_node *transform_node_Add(ir_node *n)
{
  ir_mode *mode;
  ir_node *oldn = n;

  n = transform_node_AddSub(n);

  mode = get_irn_mode(n);
  if (mode_is_num(mode)) {
    ir_node *a = get_Add_left(n);

    if (a == get_Add_right(n)) {
      ir_node *block = get_nodes_block(n);

      n = new_rd_Mul(
            get_irn_dbg_info(n),
            current_ir_graph,
            block,
            a,
            new_r_Const_long(current_ir_graph, block, mode, 2),
            mode);
      DBG_OPT_ALGSIM0(oldn, n);
    }
  }
  return n;
}
/**
 * Do the AddSub optimization, then Transform Sub(0,a) into Minus(a).
 */
static ir_node *transform_node_Sub(ir_node *n)
{
  ir_mode *mode;
  ir_node *oldn = n;

  n = transform_node_AddSub(n);

  mode = get_irn_mode(n);
  if (mode_is_num(mode)) {
    if (classify_Const(get_Sub_left(n)) == CNST_NULL) {
      n = new_rd_Minus(
            get_irn_dbg_info(n),
            current_ir_graph,
            get_nodes_block(n),
            get_Sub_right(n),
            mode);
      DBG_OPT_ALGSIM0(oldn, n);
    }
  }
  return n;
}
/** Do architecture dependent optimizations on Mul nodes */
static ir_node *transform_node_Mul(ir_node *n) {
  return arch_dep_replace_mul_with_shifts(n);
}
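
/* Example (illustrative, target dependent): arch_dep_replace_mul_with_shifts()
   typically rewrites multiplications by constants into shift/add sequences,
   e.g. x * 4 into x << 2, when the configured cost model says it pays off. */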
/**
 * transform a Div Node
 */
static ir_node *transform_node_Div(ir_node *n)
{
  tarval *tv = value_of(n);
  ir_node *value = n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tv != tarval_bad) {
    value = new_Const(get_tarval_mode(tv), tv);

    DBG_OPT_CSTEVAL(n, value);
  }
  else /* Try architecture dependent optimization */
    value = arch_dep_replace_div_by_const(n);

  if (value != n) {
    /* Turn Div into a tuple (mem, bad, value) */
    ir_node *mem = get_Div_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());
    set_Tuple_pred(n, pn_Div_res, value);
  }
  return n;
}
/**
 * transform a Mod node
 */
static ir_node *transform_node_Mod(ir_node *n)
{
  tarval *tv = value_of(n);
  ir_node *value = n;

  /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause an exception */

  if (tv != tarval_bad) {
    value = new_Const(get_tarval_mode(tv), tv);

    DBG_OPT_CSTEVAL(n, value);
  }
  else /* Try architecture dependent optimization */
    value = arch_dep_replace_mod_by_const(n);

  if (value != n) {
    /* Turn Mod into a tuple (mem, bad, value) */
    ir_node *mem = get_Mod_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Mod_M, mem);
    set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
    set_Tuple_pred(n, pn_Mod_res, value);
  }
  return n;
}
/**
 * transform a DivMod node
 */
static ir_node *transform_node_DivMod(ir_node *n)
{
  int evaluated = 0;

  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);
  ir_mode *mode = get_irn_mode(a);
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
    return n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tb != tarval_bad) {
    if (tb == get_mode_one(get_tarval_mode(tb))) {
      b = new_Const (mode, get_mode_null(mode));
      evaluated = 1;

      DBG_OPT_CSTEVAL(n, b);
    }
    else if (ta != tarval_bad) {
      tarval *resa, *resb;
      resa = tarval_div (ta, tb);
      if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                           Jmp for X result!? */
      resb = tarval_mod (ta, tb);
      if (resb == tarval_bad) return n; /* Causes exception! */
      a = new_Const (mode, resa);
      b = new_Const (mode, resb);
      evaluated = 1;

      DBG_OPT_CSTEVAL(n, a);
      DBG_OPT_CSTEVAL(n, b);
    }
    else { /* Try architecture dependent optimization */
      arch_dep_replace_divmod_by_const(&a, &b, n);
      evaluated = a != NULL;
    }
  } else if (ta == get_mode_null(mode)) {
    /* 0 / non-Const = 0 */
    b = a;
    evaluated = 1;
  }

  if (evaluated) { /* replace by tuple */
    ir_node *mem = get_DivMod_mem(n);
    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M,        mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div,  a);
    set_Tuple_pred(n, pn_DivMod_res_mod,  b);
    assert(get_nodes_block(n));
  }

  return n;
}
/**
 * transform a Cond node
 */
static ir_node *transform_node_Cond(ir_node *n)
{
  /* Replace the Cond by a Jmp if it branches on a constant
     condition. */
  ir_node *jmp;
  ir_node *a = get_Cond_selector(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) &&
      (get_irn_mode(a) == mode_b) &&
      (get_opt_unreachable_code())) {
    /* It's a boolean Cond, branching on a boolean constant.
       Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
    jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n));
    turn_into_tuple(n, 2);
    if (ta == tarval_b_true) {
      set_Tuple_pred(n, pn_Cond_false, new_Bad());
      set_Tuple_pred(n, pn_Cond_true, jmp);
    } else {
      set_Tuple_pred(n, pn_Cond_false, jmp);
      set_Tuple_pred(n, pn_Cond_true, new_Bad());
    }
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((ta != tarval_bad) &&
             (get_irn_mode(a) == mode_Iu) &&
             (get_Cond_kind(n) == dense) &&
             (get_opt_unreachable_code())) {
    /* I don't want to allow Tuples smaller than the biggest Proj.
       Also this tuple might get really big...
       I generate the Jmp here, and remember it in link.  Link is used
       when optimizing Proj. */
    set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_block(n)));
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((get_irn_op(a) == op_Eor)
             && (get_irn_mode(a) == mode_b)
             && (classify_tarval(value_of(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
    /* The Eor is a negate.  Generate a new Cond without the negate,
       simulate the negate by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Eor_left(a)));
  } else if ((get_irn_op(a) == op_Not)
             && (get_irn_mode(a) == mode_b)) {
    /* A Not before the Cond.  Generate a new Cond without the Not,
       simulate the Not by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Not_op(a)));
  }
  return n;
}
/**
 * Transform an Eor.
 */
static ir_node *transform_node_Eor(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  if ((get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
    /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));

    DBG_OPT_ALGSIM0(oldn, n);
  }
  else if ((get_irn_mode(n) == mode_b)
           && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) {
    /* The Eor is a Not. Replace it by a Not. */
    /* ????!!!Extend to bitfield 1111111. */
    n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b);

    DBG_OPT_ALGSIM0(oldn, n);
  }

  return n;
}
/**
 * Transform a boolean Not.
 */
static ir_node *transform_node_Not(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *a = get_Not_op(n);

  if (   (get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
    /* We negate a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));
    DBG_OPT_ALGSIM0(oldn, n);
  }
  return n;
}
/**
 * Transform a Cast of a Const into a new Const
 */
static ir_node *transform_node_Cast(ir_node *n) {
  ir_node *oldn = n;
  ir_node *pred = get_Cast_op(n);
  type *tp = get_irn_type(n);

  if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
    n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
                          get_Const_tarval(pred), tp);
    DBG_OPT_CSTEVAL(oldn, n);
  } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
    n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
                             get_SymConst_kind(pred), tp);
    DBG_OPT_CSTEVAL(oldn, n);
  }

  return n;
}
/**
 * Does all optimizations on nodes that must be done on their Proj's
 * because of creating new nodes.
 *
 * Transform a Div/Mod/DivMod with a non-zero constant.
 * Removes the exceptions and routes the memory to the NoMem node.
 *
 * Optimizes jump tables by removing all impossible cases.
 *
 * Normalizes and optimizes Cmp nodes.
 */
static ir_node *transform_node_Proj(ir_node *proj)
{
  ir_node *n = get_Proj_pred(proj);
  ir_node *b;
  tarval *tb;
  long proj_nr;

  switch (get_irn_opcode(n)) {
  case iro_Div:
    b  = get_Div_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_Div_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_Div_mem(n);
        set_Div_mem(n, get_irg_no_mem(current_ir_graph));

        if (proj_nr == pn_Div_M)
          return res;
      }
    }
    break;

  case iro_Mod:
    b  = get_Mod_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_Mod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_Mod_mem(n);
        set_Mod_mem(n, get_irg_no_mem(current_ir_graph));

        if (proj_nr == pn_Mod_M)
          return res;
      }
    }
    break;

  case iro_DivMod:
    b  = get_DivMod_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_DivMod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_DivMod_mem(n);
        set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));

        if (proj_nr == pn_DivMod_M)
          return res;
      }
    }
    break;

  case iro_Cond:
    if (get_opt_unreachable_code()) {
      b = get_Cond_selector(n);
      tb = value_of(b);

      if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
        /* we have a constant switch */
        long num = get_Proj_proj(proj);

        if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
          if (get_tarval_long(tb) == num) {
            /* Do NOT create a jump here, or we will have 2 control flow ops
             * in a block. This case is optimized away in optimize_cf(). */
            return proj;
          }
          else {
            /* this case will NEVER be taken, kill it */
            return new_Bad();
          }
        }
      }
    }
    return proj;

  case iro_Cmp:
    if (get_opt_reassociation()) {
      ir_node *left  = get_Cmp_left(n);
      ir_node *right = get_Cmp_right(n);
      ir_node *c     = NULL;
      tarval *tv     = NULL;
      int changed    = 0;
      ir_mode *mode  = NULL;

      proj_nr = get_Proj_proj(proj);

      /*
       * First step: normalize the compare op
       * by placing the constant on the right side
       * or moving the lower address node to the left.
       * We ignore the case that both are constants
       * as then this compare should have been optimized away.
       */
      if (get_irn_op(right) == op_Const)
        c = right;
      else if (get_irn_op(left) == op_Const) {
        c     = left;
        left  = right;
        right = c;

        proj_nr = get_swapped_pnc(proj_nr);
        changed |= 1;
      }
      else if (left > right) {
        ir_node *t = left;

        left  = right;
        right = t;

        proj_nr = get_swapped_pnc(proj_nr);
        changed |= 1;
      }

      /*
       * Second step: Try to reduce the magnitude
       * of a constant. This may help to generate better code
       * later and may help to normalize more compares.
       * Of course this is only possible for integer values.
       */
      if (c) {
        mode = get_irn_mode(c);
        tv = get_Const_tarval(c);

        if (tv != tarval_bad) {
          /* the following optimization is possible on non-int values too:
           * -a CMP c  ==>  a swap(CMP) -c */
          if (get_opt_constant_folding() && get_irn_op(left) == op_Minus) {
            left = get_Minus_op(left);
            tv = tarval_sub(get_tarval_null(mode), tv);

            proj_nr = get_swapped_pnc(proj_nr);
            changed |= 2;
          }

          /* for integer modes, we have more */
          if (mode_is_int(mode)) {
            /* Ne includes Unordered which is not possible on integers.
             * However, frontends often use this wrong, so fix it here */
            if (proj_nr == pn_Cmp_Ne)
              proj_nr = pn_Cmp_Lg;

            /* c > 0 : a < c  ==>  a <= (c-1)    a >= c  ==>  a > (c-1) */
            if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
                tarval_cmp(tv, get_tarval_null(mode)) == pn_Cmp_Gt) {
              tv = tarval_sub(tv, get_tarval_one(mode));

              proj_nr ^= pn_Cmp_Eq;
              changed |= 2;
            }
            /* c < 0 : a > c  ==>  a >= (c+1)    a <= c  ==>  a < (c+1) */
            else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
                     tarval_cmp(tv, get_tarval_null(mode)) == pn_Cmp_Lt) {
              tv = tarval_add(tv, get_tarval_one(mode));

              proj_nr ^= pn_Cmp_Eq;
              changed |= 2;
            }

            /* the following reassociations work only for == and != */
            if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
              /* a-b == 0  ==>  a == b,    a-b != 0  ==>  a != b */
              if (classify_tarval(tv) == TV_CLASSIFY_NULL && get_irn_op(left) == op_Sub) {
                right = get_Sub_right(left);
                left  = get_Sub_left(left);

                tv = value_of(right);
                changed = 1;
              }

              if (tv != tarval_bad) {
                ir_op *op = get_irn_op(left);

                /* a-c1 == c2  ==>  a == c2+c1,    a-c1 != c2  ==>  a != c2+c1 */
                if (op == op_Sub) {
                  ir_node *c1 = get_Sub_right(left);
                  tarval *tv2 = tarval_add(tv, value_of(c1));

                  if (tv2 != tarval_bad) {
                    left    = get_Sub_left(left);
                    tv      = tv2;
                    changed |= 2;
                  }
                }
                /* a+c1 == c2  ==>  a == c2-c1,    a+c1 != c2  ==>  a != c2-c1 */
                else if (op == op_Add) {
                  ir_node *a_l = get_Add_left(left);
                  ir_node *a_r = get_Add_right(left);
                  ir_node *a;
                  tarval *tv2;

                  if (get_irn_op(a_l) == op_Const) {
                    a = a_r;
                    tv2 = value_of(a_l);
                  }
                  else {
                    a = a_l;
                    tv2 = value_of(a_r);
                  }

                  tv2 = tarval_sub(tv, tv2);
                  if (tv2 != tarval_bad) {
                    left    = a;
                    tv      = tv2;
                    changed |= 2;
                  }
                }
              }
            }
          }
        }
      }

      if (changed) {
        ir_node *block = get_nodes_block(n);

        if (changed & 2)      /* need a new Const */
          right = new_Const(mode, tv);

        /* create a new compare */
        n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block,
                       left, right);

        set_Proj_pred(proj, n);
        set_Proj_proj(proj, proj_nr);
      }
    }
    break;

  case iro_Tuple:
    /* should not happen, but if it does will be optimized away */
    break;

  default:
    /* do nothing */
    return proj;
  }

  /* we have added a Tuple, optimize it for the current Proj away */
  return equivalent_node_Proj(proj);
}
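
/* Worked example (illustrative) for the Cmp normalization above:
   Proj_Lt(Cmp(3, x)) is first swapped to Proj_Gt(Cmp(x, 3)); and since
   3 > 0, a Proj_Ge(Cmp(x, 3)) is rewritten by the magnitude step to
   Proj_Gt(Cmp(x, 2)), exposing the smaller constant to later passes. */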
/**
 * returns the operands of a commutative bin-op, if one operand is
 * a const, it is returned as the second one.
 */
static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
{
  ir_node *op_a = get_binop_left(binop);
  ir_node *op_b = get_binop_right(binop);

  assert(is_op_commutative(get_irn_op(binop)));

  if (get_irn_op(op_a) == op_Const) {
    *a = op_b;
    *c = op_a;
  }
  else {
    *a = op_a;
    *c = op_b;
  }
}
/**
 * Optimize a Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
 * Such pattern may arise in bitfield stores.
 *
 * value  c4                  value      c4 & c2
 *    AND     c3                    AND           c1 | c3
 *        OR     c2      ===>               OR
 *           AND    c1
 *               OR
 */
static ir_node *transform_node_Or_bf_store(ir_node *or)
{
  ir_node *and, *c1;
  ir_node *or_l, *c2;
  ir_node *and_l, *c3;
  ir_node *value, *c4;
  ir_node *new_and, *new_const, *block;
  ir_mode *mode = get_irn_mode(or);

  tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;

  get_comm_Binop_Ops(or, &and, &c1);
  if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
    return or;

  get_comm_Binop_Ops(and, &or_l, &c2);
  if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
    return or;

  get_comm_Binop_Ops(or_l, &and_l, &c3);
  if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
    return or;

  get_comm_Binop_Ops(and_l, &value, &c4);
  if (get_irn_op(c4) != op_Const)
    return or;

  /* ok, found the pattern, check for conditions */
  assert(mode == get_irn_mode(and));
  assert(mode == get_irn_mode(or_l));
  assert(mode == get_irn_mode(and_l));

  tv1 = get_Const_tarval(c1);
  tv2 = get_Const_tarval(c2);
  tv3 = get_Const_tarval(c3);
  tv4 = get_Const_tarval(c4);

  tv = tarval_or(tv4, tv2);
  if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
    /* have at least one 0 at the same bit position */
    return or;
  }

  n_tv4 = tarval_not(tv4);
  if (tv3 != tarval_and(tv3, n_tv4)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  n_tv2 = tarval_not(tv2);
  if (tv1 != tarval_and(tv1, n_tv2)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  /* ok, all conditions met */
  block = get_nodes_block(or);

  new_and = new_r_And(current_ir_graph, block,
                      value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);

  new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));

  set_Or_left(or, new_and);
  set_Or_right(or, new_const);

  /* check for more */
  return transform_node_Or_bf_store(or);
}
/**
 * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot
 */
static ir_node *transform_node_Or_Rot(ir_node *or)
{
  ir_mode *mode = get_irn_mode(or);
  ir_node *shl, *shr, *block;
  ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
  tarval *tv1, *tv2;

  if (! mode_is_int(mode))
    return or;

  shl = get_binop_left(or);
  shr = get_binop_right(or);

  if (get_irn_op(shl) == op_Shr) {
    if (get_irn_op(shr) != op_Shl)
      return or;

    irn = shl;
    shl = shr;
    shr = irn;
  }
  else if (get_irn_op(shl) != op_Shl)
    return or;
  else if (get_irn_op(shr) != op_Shr)
    return or;

  x = get_Shl_left(shl);
  if (x != get_Shr_left(shr))
    return or;

  c1 = get_Shl_right(shl);
  c2 = get_Shr_right(shr);
  if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
    tv1 = get_Const_tarval(c1);
    if (! tarval_is_long(tv1))
      return or;

    tv2 = get_Const_tarval(c2);
    if (! tarval_is_long(tv2))
      return or;

    if (get_tarval_long(tv1) + get_tarval_long(tv2)
        != get_mode_size_bits(mode))
      return or;

    /* ok, condition met */
    block = get_nodes_block(or);

    n = new_r_Rot(current_ir_graph, block, x, c1, mode);

    DBG_OPT_ALGSIM1(or, shl, shr, n);
    return n;
  }
  else if (get_irn_op(c1) == op_Sub) {
    v   = c2;
    sub = c1;

    if (get_Sub_right(sub) != v)
      return or;

    c1 = get_Sub_left(sub);
    if (get_irn_op(c1) != op_Const)
      return or;

    tv1 = get_Const_tarval(c1);
    if (! tarval_is_long(tv1))
      return or;

    if (get_tarval_long(tv1) != get_mode_size_bits(mode))
      return or;

    /* ok, condition met */
    block = get_nodes_block(or);

    /* a Rot right is not supported, so use a rot left */
    n = new_r_Rot(current_ir_graph, block, x, sub, mode);

    DBG_OPT_ALGSIM0(or, n);
    return n;
  }
  else if (get_irn_op(c2) == op_Sub) {
    v   = c1;
    sub = c2;

    c1 = get_Sub_left(sub);
    if (get_irn_op(c1) != op_Const)
      return or;

    tv1 = get_Const_tarval(c1);
    if (! tarval_is_long(tv1))
      return or;

    if (get_tarval_long(tv1) != get_mode_size_bits(mode))
      return or;

    /* ok, condition met */
    block = get_nodes_block(or);

    n = new_r_Rot(current_ir_graph, block, x, v, mode);

    DBG_OPT_ALGSIM0(or, n);
    return n;
  }

  return or;
}
/**
 * Optimize an Or: apply both Or optimizations above.
 */
static ir_node *transform_node_Or(ir_node *or)
{
  or = transform_node_Or_bf_store(or);
  or = transform_node_Or_Rot(or);

  return or;
}
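
/* Example (illustrative): with 32 bit integers,
     (x << 7) | (x >> 25)
   matches the constant case in transform_node_Or_Rot() (7 + 25 == 32)
   and is turned into Rot(x, 7). */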
/* forward */
static ir_node *transform_node(ir_node *n);
/**
 * Optimize ((a >> c1) >> c2); works for Shr, Shrs, Shl
 */
static ir_node *transform_node_shift(ir_node *n)
{
  ir_node *left, *right;
  tarval *tv1, *tv2, *res;
  ir_mode *mode;
  int modulo_shf, flag;

  left = get_binop_left(n);

  /* different operations */
  if (get_irn_op(left) != get_irn_op(n))
    return n;

  right = get_binop_right(n);
  tv1 = value_of(right);
  if (tv1 == tarval_bad)
    return n;

  tv2 = value_of(get_binop_right(left));
  if (tv2 == tarval_bad)
    return n;

  res = tarval_add(tv1, tv2);

  /* beware: a simple replacement works only, if res < modulo shift */
  mode = get_irn_mode(n);

  flag = 0;

  modulo_shf = get_mode_modulo_shift(mode);
  if (modulo_shf > 0) {
    tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res));

    if (tarval_cmp(res, modulo) & pn_Cmp_Lt)
      flag = 1;
  }
  else
    flag = 1;

  if (flag) {
    /* ok, we can replace it */
    ir_node *in[2], *irn, *block = get_nodes_block(n);

    in[0] = get_binop_left(left);
    in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);

    irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);

    DBG_OPT_ALGSIM0(n, irn);

    return transform_node(irn);
  }
  return n;
}
#define transform_node_Shr  transform_node_shift
#define transform_node_Shrs transform_node_shift
#define transform_node_Shl  transform_node_shift
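
/* Example (illustrative): (a >> 3) >> 5 folds to a >> 8 via the tarval
   addition above; assuming a modulo_shift of 32 for the mode, the
   combined count 8 < 32, so the replacement is safe. */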
/**
 * Remove dead blocks in keepalive list.  We do not generate a new End node.
 */
static ir_node *transform_node_End(ir_node *n) {
  int i, n_keepalives = get_End_n_keepalives(n);

  for (i = 0; i < n_keepalives; ++i) {
    ir_node *ka = get_End_keepalive(n, i);
    if (is_Block(ka) && is_Block_dead(ka))
      set_End_keepalive(n, i, new_Bad());
  }
  return n;
}
/**
 * Optimize a Mux into some simpler cases.
 */
static ir_node *transform_node_Mux(ir_node *n)
{
  ir_node *oldn = n, *sel = get_Mux_sel(n);
  ir_mode *mode = get_irn_mode(n);

  if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
    ir_node *cmp = get_Proj_pred(sel);
    long proj_nr = get_Proj_proj(sel);
    ir_node *f   = get_Mux_false(n);
    ir_node *t   = get_Mux_true(n);

    /*
     * Note: normalization puts the constant on the right side,
     * so we check only one case.
     *
     * Note further that these optimizations work even for floating point
     * with NaN's because -NaN == NaN.
     * However, if +0 and -0 is handled differently, we cannot use the first one.
     */
    if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
      if (get_irn_op(f) == op_Minus &&
          get_Minus_op(f)   == t &&
          get_Cmp_left(cmp) == t) {

        if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
          /* Mux(a >=/> 0, -a, a)  ==>  Abs(a) */
          n = new_rd_Abs(get_irn_dbg_info(n),
                         current_ir_graph,
                         get_nodes_block(n),
                         t, mode);
          DBG_OPT_ALGSIM0(oldn, n);
        }
        else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
          /* Mux(a <=/< 0, -a, a)  ==>  Minus(Abs(a)) */
          n = new_rd_Abs(get_irn_dbg_info(n),
                         current_ir_graph,
                         get_nodes_block(n),
                         t, mode);
          n = new_rd_Minus(get_irn_dbg_info(n),
                           current_ir_graph,
                           get_nodes_block(n),
                           n, mode);
          DBG_OPT_ALGSIM0(oldn, n);
        }
      }
      else if (get_irn_op(t) == op_Minus &&
               get_Minus_op(t)   == f &&
               get_Cmp_left(cmp) == f) {

        if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
          /* Mux(a <=/< 0, a, -a)  ==>  Abs(a) */
          n = new_rd_Abs(get_irn_dbg_info(n),
                         current_ir_graph,
                         get_nodes_block(n),
                         f, mode);
          DBG_OPT_ALGSIM0(oldn, n);
        }
        else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
          /* Mux(a >=/> 0, a, -a)  ==>  Minus(Abs(a)) */
          n = new_rd_Abs(get_irn_dbg_info(n),
                         current_ir_graph,
                         get_nodes_block(n),
                         f, mode);
          n = new_rd_Minus(get_irn_dbg_info(n),
                           current_ir_graph,
                           get_nodes_block(n),
                           n, mode);
          DBG_OPT_ALGSIM0(oldn, n);
        }
      }
    }
  }
  return n;
}
/**
 * Tries several [inplace] [optimizing] transformations and returns an
 * equivalent node.  The difference to equivalent_node() is that these
 * transformations _do_ generate new nodes, and thus the old node must
 * not be freed even if the equivalent node isn't the old one.
 */
static ir_node *transform_node(ir_node *n)
{
  if (n->op->transform_node)
    n = n->op->transform_node(n);
  return n;
}
/**
 * set the default transform node operation
 */
static ir_op *firm_set_default_transform_node(ir_op *op)
{
#define CASE(a)                               \
  case iro_##a:                               \
    op->transform_node = transform_node_##a;  \
    break

  switch (op->code) {
  CASE(Add); CASE(Sub); CASE(Mul); CASE(Div); CASE(Mod); CASE(DivMod);
  CASE(Cond); CASE(Eor); CASE(Not); CASE(Cast); CASE(Proj); CASE(Or);
  CASE(Shr); CASE(Shrs); CASE(Shl); CASE(End); CASE(Mux);
  default:
    op->transform_node = NULL;
  }

#undef CASE
  return op;
}
/* **************** Common Subexpression Elimination **************** */

/** The size of the hash table used, should estimate the number of nodes
    in a graph. */
#define N_IR_NODES 512
/** Compares the attributes of two Const nodes. */
static int node_cmp_attr_Const(ir_node *a, ir_node *b)
{
  return (get_Const_tarval(a) != get_Const_tarval(b))
      || (get_Const_type(a) != get_Const_type(b));
}

/** Compares the attributes of two Proj nodes. */
static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
{
  return get_irn_proj_attr (a) != get_irn_proj_attr (b);
}

/** Compares the attributes of two Filter nodes. */
static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
{
  return get_Filter_proj(a) != get_Filter_proj(b);
}

/** Compares the attributes of two Alloc nodes. */
static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
{
  return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
      || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
}

/** Compares the attributes of two Free nodes. */
static int node_cmp_attr_Free(ir_node *a, ir_node *b)
{
  return (get_irn_free_attr(a).where != get_irn_free_attr(b).where)
      || (get_irn_free_attr(a).type != get_irn_free_attr(b).type);
}

/** Compares the attributes of two SymConst nodes. */
static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
{
  return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
      || (get_irn_symconst_attr(a).sym.type_p != get_irn_symconst_attr(b).sym.type_p)
      || (get_irn_symconst_attr(a).tp != get_irn_symconst_attr(b).tp);
}

/** Compares the attributes of two Call nodes. */
static int node_cmp_attr_Call(ir_node *a, ir_node *b)
{
  return (get_irn_call_attr(a) != get_irn_call_attr(b));
}

/** Compares the attributes of two Sel nodes. */
static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
{
  return (get_irn_sel_attr(a).ent->kind    != get_irn_sel_attr(b).ent->kind)
      || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
      || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
      || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
      || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);
}

/** Compares the attributes of two Phi nodes. */
static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
{
  return get_irn_phi_attr (a) != get_irn_phi_attr (b);
}

/** Compares the attributes of two Cast nodes. */
static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
{
  return get_Cast_type(a) != get_Cast_type(b);
}

/** Compares the attributes of two Load nodes. */
static int node_cmp_attr_Load(ir_node *a, ir_node *b)
{
  if (get_Load_volatility(a) == volatility_is_volatile ||
      get_Load_volatility(b) == volatility_is_volatile)
    /* NEVER do CSE on volatile Loads */
    return 1;

  return get_Load_mode(a) != get_Load_mode(b);
}

/** Compares the attributes of two Store nodes. */
static int node_cmp_attr_Store(ir_node *a, ir_node *b)
{
  /* NEVER do CSE on volatile Stores */
  return (get_Store_volatility(a) == volatility_is_volatile ||
          get_Store_volatility(b) == volatility_is_volatile);
}
/**
 * set the default node attribute compare operation
 */
static ir_op *firm_set_default_node_cmp_attr(ir_op *op)
{
#define CASE(a)                             \
  case iro_##a:                             \
    op->node_cmp_attr = node_cmp_attr_##a;  \
    break

  switch (op->code) {
  CASE(Const); CASE(Proj); CASE(Filter); CASE(Alloc); CASE(Free);
  CASE(SymConst); CASE(Call); CASE(Sel); CASE(Phi); CASE(Cast);
  CASE(Load); CASE(Store);
  default:
    op->node_cmp_attr = NULL;
  }

#undef CASE
  return op;
}
/**
 * Compare function for two nodes in the hash table.  Gets two
 * nodes as parameters.  Returns 0 if the nodes are a cse.
 */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i, irn_arity_a;

  a = (void *)elt;
  b = (void *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are of equal length */
  irn_arity_a = get_irn_intra_arity (a);
  if (irn_arity_a != get_irn_intra_arity(b))
    return 1;

  /* for block-local cse and op_pin_state_pinned nodes: */
  if (!get_opt_global_cse() || (get_irn_pinned(a) == op_pin_state_pinned)) {
    if (get_irn_intra_n(a, -1) != get_irn_intra_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < irn_arity_a; i++)
    if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i))
      return 1;

  /*
   * here, we already know that the nodes are identical except their
   * attributes
   */
  if (a->op->node_cmp_attr)
    return a->op->node_cmp_attr(a, b);

  return 0;
}
/**
 * Calculate a hash value of a node.
 */
unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i, irn_arity;

  if (node->op == op_Const) {
    /* special value for const, as they only differ in their tarval. */
    h = HASH_PTR(node->attr.con.tv);
    h = 9*h + HASH_PTR(get_irn_mode(node));
  } else if (node->op == op_SymConst) {
    /* special value for const, as they only differ in their symbol. */
    h = HASH_PTR(node->attr.i.sym.type_p);
    h = 9*h + HASH_PTR(get_irn_mode(node));
  } else {
    /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
    h = irn_arity = get_irn_intra_arity(node);

    /* consider all in nodes... except the block if not a control flow. */
    for (i = is_cfop(node) ? -1 : 0; i < irn_arity; i++) {
      h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
    }

    /* ...mode,... */
    h = 9*h + HASH_PTR(get_irn_mode(node));
    /* ...and code */
    h = 9*h + HASH_PTR(get_irn_op(node));
  }

  return h;
}
pset *
new_identities(void) {
  return new_pset(vt_cmp, N_IR_NODES);
}

void
del_identities(pset *value_table) {
  del_pset(value_table);
}
/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table.
 *
 * For Const nodes this is performed in the constructor, too.  Const
 * nodes are extremely time critical because of their frequent use in
 * constant string arrays.
 */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform a OP b == b OP a:
         order the operands canonically */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  DBG_OPT_CSE(n, o);

  return o;
}

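/*
 * Effect of the operand normalization above (explanatory note):
 * Add(x, y) and Add(y, x) are rewritten to one canonical operand
 * order before hashing, so both syntactic forms land in the same
 * bucket and vt_cmp() reports them as a cse.  Without it, the two
 * forms would hash differently and the lookup would miss.
 */
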
/**
 * Like identify(), but during construction also records the pinned
 * state of the graph right when the optimization is performed: the
 * flag turning on procedure-global cse could be changed between two
 * allocations, so setting op_pin_state_floats here keeps us safe.
 */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  return n;
}

/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table, enters it in the table
 * if it isn't there yet.
 */
static ir_node *
identify_remember (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform a OP b == b OP a:
         order the operands canonically */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, n, ir_node_hash (n));

  if (o != n) {
    DBG_OPT_CSE(n, o);
  }

  return o;
}

/** Enter a node into the value table if cse is turned on.  Blocks are never entered. */
void
add_identities (pset *value_table, ir_node *node) {
  if (get_opt_cse() && (get_irn_opcode(node) != iro_Block))
    identify_remember (value_table, node);
}

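/*
 * Lifecycle sketch for the value table (illustrative driver code,
 * not part of this file; vt and n are assumed context names):
 */
#if 0
  pset *vt = new_identities();      /* one table per ir_graph           */
  add_identities(vt, n);            /* remember a node, if cse is on    */
  n = identify_remember(vt, n);     /* lookup-or-insert: yields the     */
                                    /* canonical node computing n       */
  del_identities(vt);               /* discarded together with the irg  */
#endif
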
/**
 * Garbage in, garbage out.  If a node has a dead input, i.e., the
 * Bad node is input to the node, return the Bad node.
 */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i, irn_arity;
  ir_op *op = get_irn_op(node);

  /* remove garbage blocks by looking at control flow that leaves the block
     and replacing the control flow by Bad. */
  if (get_irn_mode(node) == mode_X) {
    ir_node *block = get_nodes_block(node);
    if (!get_Block_matured(block)) return node;  /* Don't optimize nodes in immature blocks. */
    if (op == op_End) return node;               /* Don't optimize End, may have Bads. */

    if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
      irn_arity = get_irn_arity(block);
      for (i = 0; i < irn_arity; i++) {
        if (!is_Bad(get_irn_n(block, i))) break;
      }
      if (i == irn_arity) return new_Bad();
    }
  }

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
    irn_arity = get_irn_arity(node);

    if (is_Block_dead(get_nodes_block(node)))
      return new_Bad();

    for (i = 0; i < irn_arity; i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* With this code we violate the agreement that local_optimize
     only leaves Bads in Block, Phi and Tuple nodes. */
  /* If a Block has only Bads as predecessors it's garbage. */
  /* If a Phi has only Bads as predecessors it's garbage. */
  if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
    irn_arity = get_irn_arity(node);
    for (i = 0; i < irn_arity; i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == irn_arity) node = new_Bad();
  }

  return node;
}

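/*
 * Propagation example (illustrative): once every control flow
 * predecessor of a block is Bad, the branch leaving that block is
 * itself replaced by Bad above.  That Bad then shows up as a dead
 * input of the successor Block and its Phis on later gigo() calls,
 * so unreachable regions collapse step by step during local
 * optimization.
 */
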
/**
 * These optimizations deallocate nodes from the obstack.
 * This function may only be called if it is guaranteed that no other
 * nodes reference this one, i.e., right after construction of a node.
 */
ir_node *
optimize_node (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  type *old_tp = get_irn_type(n);
  {
    int i, arity = get_irn_arity(n);
    for (i = 0; i < arity && !old_tp; ++i)
      old_tp = get_irn_type(get_irn_n(n, i));
  }

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value(n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        ir_node *nw;

        /*
         * we MUST copy the node here temporarily, because it's still
         * needed for DBG_OPT_CSTEVAL
         */
        int node_size = offsetof(ir_node, attr) + n->op->attr_size;
        oldn = alloca(node_size);

        memcpy(oldn, n, node_size);
        CLONE_ARR_A(ir_node *, oldn->in, n->in);

        /* ARG, copy the in array, we need it for statistics */
        memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));

        /* note the inplace edges module */
        edges_node_deleted(n, current_ir_graph);

        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        nw = new_Const (get_tarval_mode (tv), tv);

        if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
          set_Const_type(nw, old_tp);
        DBG_OPT_CSTEVAL(oldn, nw);

        return nw;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||
      (iro == iro_Proj) ||
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);   /* do node specific optimizations of the node's predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned (op_pin_state_pinned) to blocks, i.e., cse
     only finds common subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != oldn) {
    /* note the inplace edges module */
    edges_node_deleted(oldn, current_ir_graph);

    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, oldn);
  }

  /* Some more constant expression evaluation that does not allow to
     free the node. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj) ||
      (iro == iro_Sel))      /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we have a legal, useful node.  Enter it in the hash table for cse. */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}

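/*
 * Construction-time usage, sketched (the call below is an assumed
 * caller; db, irg, block, mode and in are context of a node
 * constructor, not names from this file).  Because nothing else can
 * reference the fresh node yet, optimize_node() is free to
 * obstack_free() it and hand back a folded Const, a cse partner,
 * or Bad instead:
 */
#if 0
  ir_node *res = new_ir_node(db, irg, block, op_Add, mode, 2, in);
  res = optimize_node(res);
#endif
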
/**
 * These optimizations never deallocate nodes (they work in place).
 * This can leave dead nodes lying on the obstack; remove them with a
 * dead node elimination, i.e., a copying garbage collection.
 */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  type *old_tp = get_irn_type(n);
  {
    int i, arity = get_irn_arity(n);
    for (i = 0; i < arity && !old_tp; ++i)
      old_tp = get_irn_type(get_irn_n(n, i));
  }

  if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;

  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible.  Why? */
    return n;
  }

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value(n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);

        if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
          set_Const_type(n, old_tp);

        DBG_OPT_CSTEVAL(oldn, n);
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||   /* ... */
      (iro == iro_Proj) ||   /* ... */
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);   /* do node specific optimizations of the node's predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned (op_pin_state_pinned) to blocks, i.e., cse
     only finds common subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }

  /* Some more constant expression evaluation. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj) ||
      (iro == iro_Sel))      /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node.  Enter it in the hash table for cse.
     Blocks should be unique anyway.  (Except the successor of the start
     block: it is cse'd with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}

/**
 * Wrapper for external use, sets proper status bits after optimization.
 */
ir_node *
optimize_in_place (ir_node *n)
{
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  return optimize_in_place_2 (n);
}

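/*
 * Driver sketch (illustrative assumption about the caller, not code
 * from this file): after construction, a pass can walk the graph and
 * exchange each node for its optimized version.  irg_walk_graph()
 * and exchange() are the library's walker and node replacement
 * primitives.
 */
#if 0
static void opt_walker(ir_node *n, void *env) {
  ir_node *optimized = optimize_in_place(n);
  if (optimized != n)
    exchange(n, optimized);
}

  /* somewhere in the driving pass: */
  irg_walk_graph(irg, NULL, opt_walker, NULL);
#endif
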
/**
 * Set the default ir op operations.
 */
ir_op *firm_set_default_operations(ir_op *op)
{
  op = firm_set_default_computed_value(op);
  op = firm_set_default_equivalent_node(op);
  op = firm_set_default_transform_node(op);
  op = firm_set_default_node_cmp_attr(op);
  op = firm_set_default_get_type(op);

  return op;
}
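/*
 * This chain is the single registration point for the optimization
 * callbacks: calling it once per ir_op (sketch of the assumed call
 * site: op = firm_set_default_operations(op);) lets generic code such
 * as vt_cmp() and optimize_node() dispatch through the op descriptor
 * without ever switching on the concrete opcode.
 */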