 * File name:   ir/ir/iropt.c
 * Purpose:     iropt --- optimizations intertwined with IR construction.
 * Author:      Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/**
 * Trivial INLINEable routine for copy propagation.
 * Does follow Ids, needed to optimize INLINEd code.
 */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}

/**
 * return the value of a Constant
 */
static tarval *computed_value_Const(ir_node *n)
{
  return get_Const_tarval(n);
}

/**
 * return the value of a 'sizeof' SymConst
 */
static tarval *computed_value_SymConst(ir_node *n)
{
  if ((get_SymConst_kind(n) == symconst_size) &&
      (get_type_state(get_SymConst_type(n))) == layout_fixed)
    return new_tarval_from_long(get_type_size_bytes(get_SymConst_type(n)), get_irn_mode(n));

  return tarval_bad;
}
/**
 * return the value of an Add
 */
static tarval *computed_value_Add(ir_node *n)
{
  ir_node *a = get_Add_left(n);
  ir_node *b = get_Add_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b)))
    return tarval_add(ta, tb);

  return tarval_bad;
}

/**
 * return the value of a Sub
 * Special case: a - a
 */
static tarval *computed_value_Sub(ir_node *n)
{
  ir_node *a = get_Sub_left(n);
  ir_node *b = get_Sub_right(n);
  tarval *ta;
  tarval *tb;

  /* a - a == 0 */
  if (a == b && !is_Bad(a))
    return get_tarval_null(get_irn_mode(n));

  ta = value_of(a);
  tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b)))
    return tarval_sub(ta, tb);

  return tarval_bad;
}
/**
 * return the value of a unary Minus
 */
static tarval *computed_value_Minus(ir_node *n)
{
  ir_node *a = get_Minus_op(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
    return tarval_neg(ta);

  return tarval_bad;
}

/**
 * return the value of a Mul
 */
static tarval *computed_value_Mul(ir_node *n)
{
  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    return tarval_mul(ta, tb);
  } else {
    /* a*0 = 0 or 0*b = 0:
       return the zero with the proper mode */
    if ((ta != tarval_bad) && (ta == get_mode_null(get_tarval_mode(ta))))
      return ta;
    if ((tb != tarval_bad) && (tb == get_mode_null(get_tarval_mode(tb))))
      return tb;
  }
  return tarval_bad;
}
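/* Illustrative: Mul(x, Const 0:Is) folds to the Is zero constant even
   though x itself is unknown; the returned tarval already carries the
   proper mode, so no further evaluation is needed. */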
/**
 * return the value of a floating point Quot
 */
static tarval *computed_value_Quot(ir_node *n)
{
  ir_node *a = get_Quot_left(n);
  ir_node *b = get_Quot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in the original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))   /* div by zero: return tarval_bad */
      return tarval_quo(ta, tb);
  }
  return tarval_bad;
}

/**
 * calculate the value of an integer Div of two nodes
 * Special case: 0 / b
 */
static tarval *do_computed_value_Div(ir_node *a, ir_node *b)
{
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* Compute c1 / c2 or 0 / a, a != 0 */
  if (ta != tarval_bad) {
    if ((tb != tarval_bad) && (tb != get_mode_null(get_irn_mode(b))))   /* div by zero: return tarval_bad */
      return tarval_div(ta, tb);
    else if (ta == get_mode_null(get_tarval_mode(ta)))    /* 0 / b == 0 */
      return ta;
  }
  return tarval_bad;
}
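/* Illustrative: with both operands constant, e.g. 7 / 2 in Is, the Div
   folds to the Is tarval 3 via tarval_div; with ta == 0 and b unknown,
   0 / b folds to 0 under the stated assumption b != 0. */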
/**
 * return the value of an integer Div
 */
static tarval *computed_value_Div(ir_node *n)
{
  return do_computed_value_Div(get_Div_left(n), get_Div_right(n));
}

/**
 * calculate the value of an integer Mod of two nodes
 * Special case: a % 1
 */
static tarval *do_computed_value_Mod(ir_node *a, ir_node *b)
{
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* Compute c1 % c2 or a % 1 */
  if (tb != tarval_bad) {
    if ((ta != tarval_bad) && (tb != get_mode_null(get_tarval_mode(tb))))   /* mod by zero: return tarval_bad */
      return tarval_mod(ta, tb);
    else if (tb == get_mode_one(get_tarval_mode(tb)))    /* x mod 1 == 0 */
      return get_mode_null(get_irn_mode(a));
  }
  return tarval_bad;
}
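/* Illustrative: Mod(x, Const 1) folds to 0 for any x, since the
   remainder of a division by one is always zero; Mod(x, c) with
   unknown x and c != 1 stays unevaluated (tarval_bad). */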
/**
 * return the value of an integer Mod
 */
static tarval *computed_value_Mod(ir_node *n)
{
  return do_computed_value_Mod(get_Mod_left(n), get_Mod_right(n));
}

/**
 * return the value of an Abs
 */
static tarval *computed_value_Abs(ir_node *n)
{
  ir_node *a = get_Abs_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_abs(ta);

  return tarval_bad;
}

/**
 * return the value of an And
 * Special case: a & 0, 0 & b
 */
static tarval *computed_value_And(ir_node *n)
{
  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_and (ta, tb);
  } else {
    tarval *v;

    if (   (classify_tarval ((v = ta)) == TV_CLASSIFY_NULL)
        || (classify_tarval ((v = tb)) == TV_CLASSIFY_NULL)) {
      return v;
    }
  }
  return tarval_bad;
}

/**
 * return the value of an Or
 * Special case: a | 1...1, 1...1 | b
 */
static tarval *computed_value_Or(ir_node *n)
{
  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_or (ta, tb);
  } else {
    tarval *v;

    if (   (classify_tarval ((v = ta)) == TV_CLASSIFY_ALL_ONE)
        || (classify_tarval ((v = tb)) == TV_CLASSIFY_ALL_ONE)) {
      return v;
    }
  }
  return tarval_bad;
}

/**
 * return the value of an Eor
 */
static tarval *computed_value_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  tarval *ta, *tb;

  /* a ^ a == 0 */
  if (a == b)
    return get_tarval_null(get_irn_mode(n));

  ta = value_of(a);
  tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_eor (ta, tb);
  }
  return tarval_bad;
}

/**
 * return the value of a Not
 */
static tarval *computed_value_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_not(ta);

  return tarval_bad;
}
/**
 * return the value of a Shl
 */
static tarval *computed_value_Shl(ir_node *n)
{
  ir_node *a = get_Shl_left(n);
  ir_node *b = get_Shl_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shl (ta, tb);
  }
  return tarval_bad;
}

/**
 * return the value of a Shr
 */
static tarval *computed_value_Shr(ir_node *n)
{
  ir_node *a = get_Shr_left(n);
  ir_node *b = get_Shr_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shr (ta, tb);
  }
  return tarval_bad;
}

/**
 * return the value of a Shrs
 */
static tarval *computed_value_Shrs(ir_node *n)
{
  ir_node *a = get_Shrs_left(n);
  ir_node *b = get_Shrs_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shrs (ta, tb);
  }
  return tarval_bad;
}

/**
 * return the value of a Rot
 */
static tarval *computed_value_Rot(ir_node *n)
{
  ir_node *a = get_Rot_left(n);
  ir_node *b = get_Rot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_rot (ta, tb);
  }
  return tarval_bad;
}

/**
 * return the value of a Conv
 */
static tarval *computed_value_Conv(ir_node *n)
{
  ir_node *a = get_Conv_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_convert_to(ta, get_irn_mode(n));

  return tarval_bad;
}
/**
 * return the value of a Proj, handle Proj(Cmp), Proj(Div), Proj(Mod), Proj(DivMod)
 */
static tarval *computed_value_Proj(ir_node *n)
{
  ir_node *a = get_Proj_pred(n);
  ir_node *aa, *ab;
  long proj_nr;

  /* Optimize Cmp nodes.
     This performs a first step of unreachable code elimination.
     The Proj cannot be computed, but folding a Cmp above the Proj here is
     not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
     just one is used.

     There are several cases where we can evaluate a Cmp node:

     1. The nodes compared are both the same. If we compare for
        equal, greater equal, ... this will return true, else it
        will return false. This step relies on cse.
     2. The predecessors of Cmp are target values. We can evaluate
        the Cmp.
     3. The predecessors are Allocs or void* constants. Allocs never
        return NULL, they raise an exception. Therefore we can predict
        the Cmp result. */
  switch (get_irn_opcode(a)) {
  case iro_Cmp:
    aa = get_Cmp_left(a);
    ab = get_Cmp_right(a);
    proj_nr = get_Proj_proj(n);

    if (aa == ab && !mode_is_float(get_irn_mode(aa))) { /* 1.: */
      /* BEWARE: a == a is NOT always True for floating point!!! */
      /* This is a trick with the bits used for encoding the Cmp
         Proj numbers, the following statement is not the same:
         return new_tarval_from_long (proj_nr == Eq, mode_b) */
      return new_tarval_from_long (proj_nr & Eq, mode_b);
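      /* Illustrative: the pnc encoding assumed here has one bit per basic
         relation (Eq, Lt, Gt, Uo), so e.g. Le == Lt|Eq and Ge == Gt|Eq.
         "proj_nr & Eq" is therefore non-zero exactly for the relations
         that a == a satisfies: Eq, Le, Ge, ..., but not Lt, Gt or Ne. */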
    } else {
      tarval *taa = value_of(aa);
      tarval *tab = value_of(ab);

      if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
        /* strange checks... */
        pnc_number flags = tarval_cmp (taa, tab);
        if (flags != False) {
          return new_tarval_from_long (proj_nr & flags, mode_b);
        }
      } else {  /* check for 3.: */
        ir_node *aaa = skip_Id(skip_Proj(aa));
        ir_node *aba = skip_Id(skip_Proj(ab));

        if (   (   (/* aa is ProjP and aaa is Alloc */
                       (get_irn_op(aa) == op_Proj)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_irn_op(aaa) == op_Alloc))
                && (   (/* ab is constant void */
                           (get_irn_op(ab) == op_Const)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                    || (/* ab is other Alloc */
                           (get_irn_op(ab) == op_Proj)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_irn_op(aba) == op_Alloc)
                        && (aaa != aba))))
            || (/* aa is void and aba is Alloc */
                   (get_irn_op(aa) == op_Const)
                && (mode_is_reference(get_irn_mode(aa)))
                && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                && (get_irn_op(ab) == op_Proj)
                && (mode_is_reference(get_irn_mode(ab)))
                && (get_irn_op(aba) == op_Alloc)))
          /* 3.: */
          return new_tarval_from_long (proj_nr & Ne, mode_b);
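          /* Illustrative for 3.: in Cmp(Proj(Alloc), NULL) the pointers
             can never be equal, so e.g. Proj_Eq folds to false and
             Proj_Lg to true; "proj_nr & Ne" masks out the Eq bit. */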
      }
    }
    break;

  case iro_DivMod:
    /* compute either the Div or the Mod part */
    proj_nr = get_Proj_proj(n);
    if (proj_nr == pn_DivMod_res_div)
      return do_computed_value_Div(get_DivMod_left(a), get_DivMod_right(a));
    else if (proj_nr == pn_DivMod_res_mod)
      return do_computed_value_Mod(get_DivMod_left(a), get_DivMod_right(a));
    break;

  case iro_Div:
    if (get_Proj_proj(n) == pn_Div_res)
      return computed_value(a);
    break;

  case iro_Mod:
    if (get_Proj_proj(n) == pn_Mod_res)
      return computed_value(a);
    break;

  default:
    break;
  }

  return tarval_bad;
}
/**
 * calculate the value of a Mux: can be evaluated, if the
 * sel and the right input are known
 */
static tarval *computed_value_Mux(ir_node *n)
{
  ir_node *sel = get_Mux_sel(n);
  tarval *ts = value_of(sel);

  if (ts == get_tarval_b_true()) {
    ir_node *v = get_Mux_true(n);
    return value_of(v);
  }
  else if (ts == get_tarval_b_false()) {
    ir_node *v = get_Mux_false(n);
    return value_of(v);
  }

  return tarval_bad;
}

/**
 * If the parameter n can be computed, return its value, else tarval_bad.
 * Performs constant folding.
 *
 * @param n  The node to be evaluated
 */
tarval *computed_value(ir_node *n)
{
  if (n->op->computed_value)
    return n->op->computed_value(n);
  return tarval_bad;
}
/**
 * set the default computed_value evaluator
 */
static ir_op *firm_set_default_computed_value(ir_op *op)
{
#define CASE(a)                                 \
  case iro_##a:                                 \
    op->computed_value = computed_value_##a;    \
    break

  switch (op->code) {
  CASE(Const); CASE(SymConst);
  CASE(Add);   CASE(Sub);  CASE(Minus); CASE(Mul); CASE(Quot);
  CASE(Div);   CASE(Mod);  CASE(Abs);   CASE(And); CASE(Or);
  CASE(Eor);   CASE(Not);  CASE(Shl);   CASE(Shr); CASE(Shrs);
  CASE(Rot);   CASE(Conv); CASE(Proj);  CASE(Mux);
  default:
    op->computed_value = NULL;
  }

  return op;
#undef CASE
}
/* returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (mode_is_reference(get_irn_mode (a))
          && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;   /* different allocated objects */
  }
  return 0;
}
static ir_node *equivalent_node_Block(ir_node *n)
{
  ir_node *oldn = n;

  /* The Block constructor does not call optimize, but mature_immBlock
     calls the optimization. */
  assert(get_Block_matured(n));

  /* Straightening: a single entry Block following a single exit Block
     can be merged, if it is not the Start block. */
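  /* Illustrative: for

         BlockA: ... Jmp;     BlockB (only pred: that Jmp): ...

     BlockA is returned as the node equivalent to BlockB, merging the
     two blocks -- provided the Jmp does not form a self-cycle. */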
  /* !!! Beware, all Phi-nodes of n must have been optimized away.
     This should be true, as the block is matured before optimize is called.
     But what about Phi-cycles with the Phi0/Id that could not be resolved?
     Remaining Phi nodes are just Ids. */
  if ((get_Block_n_cfgpreds(n) == 1) &&
      (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- a dead self-cycle. */
      n = set_Block_dead(n);
      DBG_OPT_DEAD(oldn, n);
    } else if (get_opt_control_flow_straightening()) {
      n = predblock;
      DBG_OPT_STG(oldn, n);
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 1) &&
           (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* The Proj of the Cond jumps into its own block -- a dead self-cycle. */
      n = set_Block_dead(n);
      DBG_OPT_DEAD(oldn, n);
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 2) &&
           (get_opt_control_flow_weak_simplification())) {
    /* Test whether Cond jumps twice to this block
       @@@ we could also do this with two loops finding two preds from several ones. */
    ir_node *a = get_Block_cfgpred(n, 0);
    ir_node *b = get_Block_cfgpred(n, 1);

    if ((get_irn_op(a) == op_Proj) &&
        (get_irn_op(b) == op_Proj) &&
        (get_Proj_pred(a) == get_Proj_pred(b)) &&
        (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
        (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
      /* Also a single entry Block following a single exit Block. Phis have
         twice the same operand and will be optimized away. */
      n = get_nodes_block(a);
      DBG_OPT_IFSIM(oldn, a, b, n);
    }
  } else if (get_opt_unreachable_code() &&
             (n != current_ir_graph->start_block) &&
             (n != current_ir_graph->end_block)) {
    int i, n_cfg = get_Block_n_cfgpreds(n);

    /* If all inputs are dead, this block is dead too, except if it is
       the start or end block. This is a step of unreachable code
       elimination. */
    for (i = 0; i < n_cfg; i++) {
      ir_node *pred = get_Block_cfgpred(n, i);
      ir_node *pred_blk;

      if (is_Bad(pred)) continue;
      pred_blk = get_nodes_block(pred);

      if (is_Block_dead(pred_blk)) continue;

      /* really found a living input */
      break;
    }
    if (i == n_cfg)
      n = set_Block_dead(n);
  }

  return n;
}
/**
 * Returns an equivalent node for a Jmp, a Bad :-)
 * Of course this only happens if the Block of the Jmp is Bad.
 */
static ir_node *equivalent_node_Jmp(ir_node *n)
{
  /* GL: Why not do the same for op_Raise?? */
  /* unreachable code elimination */
  if (is_Block_dead(get_nodes_block(n)))
    n = new_Bad();

  return n;
}

static ir_node *equivalent_node_Cond(ir_node *n)
{
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */
  return n;
}
/**
 * optimize operations that are commutative and have neutral 0,
 * so a op 0 = 0 op a = a.
 */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  tarval *tv;
  ir_node *on;

  /* After running compute_node there is at most one constant predecessor.
     Find this predecessor's value and remember the other node: */
  if ((tv = value_of(a)) != tarval_bad) {
    on = b;
  } else if ((tv = value_of(b)) != tarval_bad) {
    on = a;
  } else
    return n;

  /* If this predecessor's constant value is zero, the operation is
     unnecessary. Remove it: */
  if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
    n = on;

    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }

  return n;
}

#define equivalent_node_Add  equivalent_node_neutral_zero
#define equivalent_node_Eor  equivalent_node_neutral_zero
/**
 * optimize operations that are not commutative but have a neutral 0 on
 * the right, so a op 0 = a.
 */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
    n = a;

    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }

  return n;
}

#define equivalent_node_Sub   equivalent_node_left_zero
#define equivalent_node_Shl   equivalent_node_left_zero
#define equivalent_node_Shr   equivalent_node_left_zero
#define equivalent_node_Shrs  equivalent_node_left_zero
#define equivalent_node_Rot   equivalent_node_left_zero
/**
 * A "symmetric unop", i.e. op(op(n)) = n.
 */
static ir_node *equivalent_node_symmetric_unop(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *pred = get_unop_op(n);

  /* optimize symmetric unop */
  if (get_irn_op(pred) == get_irn_op(n)) {
    n = get_unop_op(pred);
    DBG_OPT_ALGSIM2(oldn, pred, n);
  }
  return n;
}

/* Not(Not(x)) == x */
#define equivalent_node_Not    equivalent_node_symmetric_unop

/* --x == x */  /* ??? Is this possible or can --x raise an
                   out of bounds exception if min != max? */
#define equivalent_node_Minus  equivalent_node_symmetric_unop
/**
 * Optimize a * 1 = 1 * a = a.
 */
static ir_node *equivalent_node_Mul(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  /* Mul is commutative, with 1 rather than 0 as its neutral element. */
  if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }
  return n;
}
/**
 * Optimize a / 1 = a.
 */
static ir_node *equivalent_node_Div(ir_node *n)
{
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  /* Div is not commutative. */
  if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn Div into a tuple (mem, bad, a) */
    ir_node *mem = get_Div_mem(n);
    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M,        mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_Div_res,      a);
  }
  return n;
}
/**
 * Optimize a / 1 = a and a % 1 = 0.
 */
static ir_node *equivalent_node_DivMod(ir_node *n)
{
  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);

  /* DivMod is not commutative. */
  if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn DivMod into a tuple (mem, bad, a, 0) */
    ir_node *mem  = get_DivMod_mem(n);
    ir_mode *mode = get_irn_mode(b);

    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M,        mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div,  a);
    set_Tuple_pred(n, pn_DivMod_res_mod,  new_Const(mode, get_mode_null(mode)));
  }
  return n;
}
/**
 * Use algebraic simplification a | a = a | 0 = 0 | a = a.
 */
static ir_node *equivalent_node_Or(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  if (a == b) {
    n = a;    /* Or is idempotent: a | a == a */
  } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_NULL) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }

  return n;
}
/**
 * Optimize a & 0b1...1 = 0b1...1 & a = a & a = a.
 */
static ir_node *equivalent_node_And(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  if (a == b) {
    n = a;    /* And is idempotent: a & a == a */
  } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_ALL_ONE) {
    n = b;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ALL_ONE) {
    n = a;
    DBG_OPT_ALGSIM1(oldn, a, b, n);
  }
  return n;
}
/**
 * Try to remove useless Convs:
 */
static ir_node *equivalent_node_Conv(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *a = get_Conv_op(n);
  ir_node *b;

  ir_mode *n_mode = get_irn_mode(n);
  ir_mode *a_mode = get_irn_mode(a);

  if (n_mode == a_mode) { /* No Conv necessary */
    n = a;
    DBG_OPT_ALGSIM3(oldn, a, n);
  } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
    ir_mode *b_mode;

    b = get_Conv_op(a);
    n_mode = get_irn_mode(n);
    b_mode = get_irn_mode(b);

    if (n_mode == b_mode) {
      if (n_mode == mode_b) {
        n = b;  /* Convb(Conv*(xxxb(...))) == xxxb(...) */
        DBG_OPT_ALGSIM1(oldn, a, b, n);
      }
      else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
        if (smaller_mode(b_mode, a_mode)) {
          n = b;  /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
          DBG_OPT_ALGSIM1(oldn, a, b, n);
        }
      }
    }
  }
  return n;
}
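/* Illustrative for the Conv(Conv(b)) case: with a 16-bit Hs value x,
   ConvHs(ConvIs(x)) widens x to 32 bit and narrows it back, so both
   Convs fold away and x is used directly (n_mode == b_mode, and
   b_mode is smaller than the intermediate a_mode). */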
/**
 * A Cast may be removed if the type of the previous node
 * is already the type of the Cast.
 */
static ir_node *equivalent_node_Cast(ir_node *n) {
  ir_node *pred = get_Cast_op(n);
  if (get_irn_type(pred) == get_Cast_type(n))
    n = pred;
  return n;
}
/* Several optimizations:
   - no Phi in start block.
   - remove Id operators that are inputs to Phi
   - fold Phi-nodes, iff they have only one predecessor except
     themselves.
*/
static ir_node *equivalent_node_Phi(ir_node *n)
{
  int i, n_preds;

  ir_node *oldn = n;
  ir_node *block = NULL;     /* to shut up gcc */
  ir_node *first_val = NULL; /* to shut up gcc */
  ir_node *scnd_val = NULL;  /* to shut up gcc */

  if (!get_opt_normalize()) return n;

  n_preds = get_Phi_n_preds(n);

  block = get_nodes_block(n);
  /* @@@ remove this -- but it should always hold!!!
     assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
  if ((is_Block_dead(block)) ||                  /* Control dead */
      (block == current_ir_graph->start_block))  /* There should be no Phi nodes */
    return new_Bad();                            /* in the Start Block. */

  if (n_preds == 0) return n;  /* Phi of dead Region without predecessors. */

  /* first we test for a special case: */
  /* Confirm is a special node fixing additional information for a
     value that is known at a certain point. This is useful for
     dataflow analysis. */
  if (n_preds == 2) {
    ir_node *a = get_Phi_pred(n, 0);
    ir_node *b = get_Phi_pred(n, 1);
    if (   (get_irn_op(a) == op_Confirm)
        && (get_irn_op(b) == op_Confirm)
        && (follow_Id (get_irn_n(a, 0)) == follow_Id (get_irn_n(b, 0)))
        && (get_irn_n(a, 1) == get_irn_n (b, 1))
        && (a->data.num == (~b->data.num & irpn_True))) {
      return get_irn_n(a, 0);
    }
  }

  /* If the Block has a Bad pred, we also have one. */
  for (i = 0; i < n_preds; ++i)
    if (is_Bad (get_Block_cfgpred(block, i)))
      set_Phi_pred(n, i, new_Bad());

  /* Find first non-self-referencing input */
  for (i = 0; i < n_preds; ++i) {
    first_val = get_Phi_pred(n, i);
    if (   (first_val != n)  /* not self pointer */
        && (get_irn_op(first_val) != op_Bad)
       ) {          /* value not dead */
      break;        /* then found first value. */
    }
  }

  /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
  if (i >= n_preds) { return new_Bad(); }

  /* follow_Id () for rest of inputs, determine if any of these
     are non-self-referencing */
  while (++i < n_preds) {
    scnd_val = get_Phi_pred(n, i);
    if (   (scnd_val != n)
        && (scnd_val != first_val)
        && (get_irn_op(scnd_val) != op_Bad)
       ) {
      break;
    }
  }

  /* Fold, if there are no multiple distinct non-self-referencing inputs */
  if (i >= n_preds) {
    n = first_val;
    DBG_OPT_PHI(oldn, first_val, n);
  }

  /* skip the remaining Ids (done in get_Phi_pred). */
  /* superfluous, since we walk all to propagate Block's Bads.
     while (++i < n_preds) get_Phi_pred(n, i); */

  return n;
}
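/* Illustrative: Phi(x, x, n) where n is the Phi itself folds to x,
   since x is the only distinct non-self input; a Phi whose inputs
   are all Bad or self-references folds to Bad. */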
/**
 * optimize Proj(Tuple) and gigo() for ProjX in a Bad block
 */
static ir_node *equivalent_node_Proj(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Proj_pred(n);

  if (get_irn_op(a) == op_Tuple) {
    /* Remove the Tuple/Proj combination. */
    if (get_Proj_proj(n) < get_Tuple_n_preds(a)) {
      n = get_Tuple_pred(a, get_Proj_proj(n));
      DBG_OPT_TUPLE(oldn, a, n);
    } else {
      assert(0); /* This should not happen! */
      n = new_Bad();
    }
  } else if (get_irn_mode(n) == mode_X &&
             is_Block_dead(get_nodes_block(n))) {
    /* Remove dead control flow -- early gigo. */
    n = new_Bad();
  }
  return n;
}
static ir_node *equivalent_node_Id(ir_node *n)
{
  ir_node *oldn = n;

  n = follow_Id(n);
  DBG_OPT_ID(oldn, n);
  return n;
}

static ir_node *equivalent_node_Mux(ir_node *n)
{
  ir_node *sel = get_Mux_sel(n);
  tarval *ts = value_of(sel);

  if (ts == get_tarval_b_true())
    return get_Mux_true(n);
  else if (ts == get_tarval_b_false())
    return get_Mux_false(n);

  return n;
}
/**
 * equivalent_node() returns a node equivalent to input n. It skips all nodes that
 * perform no actual computation, as, e.g., the Id nodes. It does not create
 * new nodes. It is therefore safe to free n if the node returned is not n.
 * If a node returns a Tuple we cannot just skip it. If the size of the
 * in array fits, we transform n into a tuple (e.g., Div).
 */
ir_node *
equivalent_node(ir_node *n)
{
  if (n->op->equivalent_node)
    return n->op->equivalent_node(n);
  return n;
}

/**
 * set the default equivalent node operation
 */
static ir_op *firm_set_default_equivalent_node(ir_op *op)
{
#define CASE(a)                                  \
  case iro_##a:                                  \
    op->equivalent_node = equivalent_node_##a;   \
    break

  switch (op->code) {
  CASE(Block); CASE(Jmp);    CASE(Cond);  CASE(Add);   CASE(Eor);
  CASE(Sub);   CASE(Shl);    CASE(Shr);   CASE(Shrs);  CASE(Rot);
  CASE(Not);   CASE(Minus);  CASE(Mul);   CASE(Div);   CASE(DivMod);
  CASE(Or);    CASE(And);    CASE(Conv);  CASE(Cast);  CASE(Phi);
  CASE(Proj);  CASE(Id);     CASE(Mux);
  default:
    op->equivalent_node = NULL;
  }

  return op;
#undef CASE
}
/**
 * Do node-specific optimizations of a node's predecessors.
 */
static void
optimize_preds(ir_node *n) {
  ir_node *a = NULL, *b = NULL;

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  switch (get_irn_opcode(n)) {

  case iro_Cmp:
    /* We don't want Cast as input to Cmp. */
    if (get_irn_op(a) == op_Cast) {
      a = get_Cast_op(a);
      set_Cmp_left(n, a);
    }
    if (get_irn_op(b) == op_Cast) {
      b = get_Cast_op(b);
      set_Cmp_right(n, b);
    }
    break;

  default: break;
  }
}
/**
 * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
 * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)) if possible.
 */
static ir_node *transform_node_AddSub(ir_node *n)
{
  ir_mode *mode = get_irn_mode(n);

  if (mode_is_reference(mode)) {
    ir_node *left  = get_binop_left(n);
    ir_node *right = get_binop_right(n);
    int ref_bits   = get_mode_size_bits(mode);

    if (get_irn_op(left) == op_Conv) {
      ir_mode *mode = get_irn_mode(left);
      int bits      = get_mode_size_bits(mode);

      if (ref_bits == bits &&
          mode_is_int(mode) &&
          get_mode_arithmetic(mode) == irma_twos_complement) {
        ir_node *pre      = get_Conv_op(left);
        ir_mode *pre_mode = get_irn_mode(pre);

        if (mode_is_int(pre_mode) &&
            get_mode_size_bits(pre_mode) == bits &&
            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
          /* ok, this conv just changes the sign, moreover the calculation
           * is done with same number of bits as our address mode, so
           * we can ignore the conv as address calculation can be viewed
           * as either signed or unsigned
           */
          set_binop_left(n, pre);
        }
      }
    }

    if (get_irn_op(right) == op_Conv) {
      ir_mode *mode = get_irn_mode(right);
      int bits      = get_mode_size_bits(mode);

      if (ref_bits == bits &&
          mode_is_int(mode) &&
          get_mode_arithmetic(mode) == irma_twos_complement) {
        ir_node *pre      = get_Conv_op(right);
        ir_mode *pre_mode = get_irn_mode(pre);

        if (mode_is_int(pre_mode) &&
            get_mode_size_bits(pre_mode) == bits &&
            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
          /* ok, this conv just changes the sign, moreover the calculation
           * is done with same number of bits as our address mode, so
           * we can ignore the conv as address calculation can be viewed
           * as either signed or unsigned
           */
          set_binop_right(n, pre);
        }
      }
    }
  }
  return n;
}

#define transform_node_Add  transform_node_AddSub
#define transform_node_Sub  transform_node_AddSub
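/* Illustrative: on a target where mode_P is 32 bit, the address
   computation Add(p:P, Conv(x:Iu):Is) drops the Conv and becomes
   Add(p:P, x:Iu): reinterpreting a 32-bit unsigned value as signed
   changes no bits, and pointer arithmetic is sign-agnostic. */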
/** Do architecture dependent optimizations on Mul nodes */
static ir_node *transform_node_Mul(ir_node *n) {
  return arch_dep_replace_mul_with_shifts(n);
}
static ir_node *transform_node_Div(ir_node *n)
{
  tarval *tv = value_of(n);
  ir_node *value = n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tv != tarval_bad)
    value = new_Const(get_tarval_mode(tv), tv);
  else /* Try architecture dependent optimization */
    value = arch_dep_replace_div_by_const(n);

  if (value != n) {
    /* Turn Div into a tuple (mem, bad, value) */
    ir_node *mem = get_Div_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());
    set_Tuple_pred(n, pn_Div_res, value);
  }
  return n;
}
static ir_node *transform_node_Mod(ir_node *n)
{
  tarval *tv = value_of(n);
  ir_node *value = n;

  /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause an exception */

  if (tv != tarval_bad)
    value = new_Const(get_tarval_mode(tv), tv);
  else /* Try architecture dependent optimization */
    value = arch_dep_replace_mod_by_const(n);

  if (value != n) {
    /* Turn Mod into a tuple (mem, bad, value) */
    ir_node *mem = get_Mod_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Mod_M, mem);
    set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
    set_Tuple_pred(n, pn_Mod_res, value);
  }
  return n;
}
static ir_node *transform_node_DivMod(ir_node *n)
{
  int evaluated = 0;

  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);
  ir_mode *mode = get_irn_mode(a);
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
    return n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tb != tarval_bad) {
    if (tb == get_mode_one(get_tarval_mode(tb))) {
      b = new_Const (mode, get_mode_null(mode));
      evaluated = 1;
    } else if (ta != tarval_bad) {
      tarval *resa, *resb;
      resa = tarval_div (ta, tb);
      if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                           Jmp for X result!? */
      resb = tarval_mod (ta, tb);
      if (resb == tarval_bad) return n; /* Causes exception! */
      a = new_Const (mode, resa);
      b = new_Const (mode, resb);
      evaluated = 1;
    }
    else { /* Try architecture dependent optimization */
      arch_dep_replace_divmod_by_const(&a, &b, n);
      evaluated = a != NULL;
    }
  } else if (ta == get_mode_null(mode)) {
    /* 0 / non-Const = 0 */
    b = a;
    evaluated = 1;
  }

  if (evaluated) { /* replace by tuple */
    ir_node *mem = get_DivMod_mem(n);
    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M, mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div, a);
    set_Tuple_pred(n, pn_DivMod_res_mod, b);
    assert(get_nodes_block(n));
  }

  return n;
}
static ir_node *transform_node_Cond(ir_node *n)
{
  /* Replace the Cond by a Jmp if it branches on a constant
     condition. */
  ir_node *jmp;
  ir_node *a = get_Cond_selector(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) &&
      (get_irn_mode(a) == mode_b) &&
      (get_opt_unreachable_code())) {
    /* It's a boolean Cond, branching on a boolean constant.
       Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
    jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n));
    turn_into_tuple(n, 2);
    if (ta == tarval_b_true) {
      set_Tuple_pred(n, pn_Cond_false, new_Bad());
      set_Tuple_pred(n, pn_Cond_true, jmp);
    } else {
      set_Tuple_pred(n, pn_Cond_false, jmp);
      set_Tuple_pred(n, pn_Cond_true, new_Bad());
    }
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((ta != tarval_bad) &&
             (get_irn_mode(a) == mode_Iu) &&
             (get_Cond_kind(n) == dense) &&
             (get_opt_unreachable_code())) {
    /* I don't want to allow Tuples smaller than the biggest Proj.
       Also this tuple might get really big...
       I generate the Jmp here, and remember it in link. Link is used
       when optimizing Proj. */
    set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_block(n)));
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((get_irn_op(a) == op_Eor)
             && (get_irn_mode(a) == mode_b)
             && (classify_tarval(value_of(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
    /* The Eor is a negate. Generate a new Cond without the negate,
       simulate the negate by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Eor_left(a)));
  } else if ((get_irn_op(a) == op_Not)
             && (get_irn_mode(a) == mode_b)) {
    /* A Not before the Cond. Generate a new Cond without the Not,
       simulate the Not by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Not_op(a)));
  }
  return n;
}
static ir_node *transform_node_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  if ((get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
    /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));
  else if ((get_irn_mode(n) == mode_b)
           && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE))
    /* The Eor is a Not. Replace it by a Not. */
    /* ????!!! Extend to bitfield 1111111. */
    n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b);

  return n;
}
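/* Illustrative: Eor(Proj_Eq(Cmp(x, y)), true:mode_b) becomes a fresh
   Proj of the same Cmp with the negated relation (here Ne), so the
   extra Eor disappears; Eor(v:mode_b, true) with no Cmp behind it
   is replaced by Not(v). */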
/**
 * Transform a boolean Not.
 */
static ir_node *transform_node_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);

  if (   (get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
    /* We negate a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));

  return n;
}

/**
 * Transform a Cast of a Const into a new Const.
 */
static ir_node *transform_node_Cast(ir_node *n) {
  ir_node *pred = get_Cast_op(n);
  type *tp = get_irn_type(n);

  if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
    n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
                          get_Const_tarval(pred), tp);
  } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
    n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
                             get_SymConst_kind(pred), tp);
  }

  return n;
}
/**
 * Transform a Div/Mod/DivMod with a non-zero constant. Must be
 * done here instead of equivalent node because it creates new
 * nodes.
 * Removes the exceptions and routes the memory to the NoMem node.
 *
 * Further, it optimizes jump tables by removing all impossible cases.
 */
static ir_node *transform_node_Proj(ir_node *proj)
{
  ir_node *n = get_Proj_pred(proj);
  ir_node *b;
  tarval *tb;
  long proj_nr;

  switch (get_irn_opcode(n)) {
  case iro_Div:
    b  = get_Div_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_Div_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_Div_mem(n);
        set_Div_mem(n, get_irg_no_mem(current_ir_graph));
        if (proj_nr == pn_Div_M)
          return res;
      }
    }
    break;

  case iro_Mod:
    b  = get_Mod_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_Mod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_Mod_mem(n);
        set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
        if (proj_nr == pn_Mod_M)
          return res;
      }
    }
    break;

  case iro_DivMod:
    b  = get_DivMod_right(n);
    tb = value_of(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      /* this node may float */
      set_irn_pinned(n, op_pin_state_floats);

      if (proj_nr == pn_DivMod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      } else {
        /* the memory Proj can be removed */
        ir_node *res = get_DivMod_mem(n);
        set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
        if (proj_nr == pn_DivMod_M)
          return res;
      }
    }
    break;

  case iro_Cond:
    if (get_opt_unreachable_code()) {
      b = get_Cond_selector(n);
      tb = value_of(b);

      if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
        /* we have a constant switch */
        long num = get_Proj_proj(proj);

        if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Projs yet */
          if (get_tarval_long(tb) == num) {
            /* Do NOT create a jump here, or we will have 2 control flow ops
             * in a block. This case is optimized away in optimize_cf(). */
            return proj;
          } else {
            /* this case will never be taken, remove it */
            return new_Bad();
          }
        }
      }
    }
    return proj;

  case iro_Tuple:
    /* should not happen, but if it does, it will be optimized away */
    break;

  default:
    /* do nothing */
    return proj;
  }

  /* we have added a Tuple, optimize it for the current Proj away */
  return equivalent_node_Proj(proj);
}
/**
 * returns the operands of a commutative bin-op; if one operand is
 * a const, it is returned as the second one.
 */
static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
{
  ir_node *op_a = get_binop_left(binop);
  ir_node *op_b = get_binop_right(binop);

  assert(is_op_commutative(get_irn_op(binop)));

  if (get_irn_op(op_a) == op_Const) {
    *a = op_b;
    *c = op_a;
  } else {
    *a = op_a;
    *c = op_b;
  }
}

/**
 * Optimize a Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
 * Such pattern may arise in bitfield stores.
 *
 * value  c4                  value      c4 & c2
 *    AND     c3                    AND           c1 | c3
 *        OR     c2      ===>               OR
 *           AND    c1
 *               OR
 */
static ir_node *transform_node_Or(ir_node *or)
{
  ir_node *and, *c1;
  ir_node *or_l, *c2;
  ir_node *and_l, *c3;
  ir_node *value, *c4;
  ir_node *new_and, *new_const, *block;
  ir_mode *mode = get_irn_mode(or);

  tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;

  get_comm_Binop_Ops(or, &and, &c1);
  if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
    return or;

  get_comm_Binop_Ops(and, &or_l, &c2);
  if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
    return or;

  get_comm_Binop_Ops(or_l, &and_l, &c3);
  if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
    return or;

  get_comm_Binop_Ops(and_l, &value, &c4);
  if (get_irn_op(c4) != op_Const)
    return or;

  /* ok, found the pattern, check for conditions */
  assert(mode == get_irn_mode(and));
  assert(mode == get_irn_mode(or_l));
  assert(mode == get_irn_mode(and_l));

  tv1 = get_Const_tarval(c1);
  tv2 = get_Const_tarval(c2);
  tv3 = get_Const_tarval(c3);
  tv4 = get_Const_tarval(c4);

  tv = tarval_or(tv4, tv2);
  if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
    /* have at least one 0 at the same bit position */
    return or;
  }

  n_tv4 = tarval_not(tv4);
  if (tv3 != tarval_and(tv3, n_tv4)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  n_tv2 = tarval_not(tv2);
  if (tv1 != tarval_and(tv1, n_tv2)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  /* ok, all conditions met */
  block = get_nodes_block(or);

  new_and = new_r_And(current_ir_graph, block,
                      value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);

  new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));

  set_Or_left(or, new_and);
  set_Or_right(or, new_const);

  /* check for more */
  return transform_node_Or(or);
}
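/* Illustrative, with 8-bit masks:
       (((v & 0xF0) | 0x0A) & 0x3F) | 0x80
   satisfies the checks (0xF0 | 0x3F == 0xFF, 0x0A & ~0xF0 == 0x0A,
   0x80 & ~0x3F == 0x80) and collapses to
       (v & 0x30) | 0x8A
   since 0x30 == 0xF0 & 0x3F and 0x8A == 0x80 | 0x0A. */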
static ir_node *transform_node(ir_node *n);

/**
 * Optimize ((a >> c1) >> c2); works for Shr, Shrs, Shl.
 */
static ir_node *transform_node_shift(ir_node *n)
{
  ir_node *left;
  tarval *tv1, *tv2, *res;
  ir_mode *mode;
  int modulo_shf, flag;

  left = get_binop_left(n);

  /* different operations */
  if (get_irn_op(left) != get_irn_op(n))
    return n;

  tv1 = value_of(get_binop_right(n));
  if (tv1 == tarval_bad)
    return n;

  tv2 = value_of(get_binop_right(left));
  if (tv2 == tarval_bad)
    return n;

  res = tarval_add(tv1, tv2);

  /* beware: a simple replacement works only if res < modulo shift */
  mode = get_irn_mode(n);

  flag = 0;

  modulo_shf = get_mode_modulo_shift(mode);
  if (modulo_shf > 0) {
    tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res));

    if (tarval_cmp(res, modulo) & Lt)
      flag = 1;
  } else
    flag = 1;

  if (flag) {
    /* ok, we can replace it */
    ir_node *in[2], *irn, *block = get_nodes_block(n);

    in[0] = get_binop_left(left);
    in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);

    irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);

    return transform_node(irn);
  }
  return n;
}
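/* Illustrative: Shr(Shr(x:Is, 3), 2) re-associates to Shr(x, 5),
   assuming a 32-bit modulo_shift, since 3 + 2 == 5 < 32; something
   like Shr(Shr(x, 20), 20) is left untouched because 40 is not. */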
static ir_node *transform_node_End(ir_node *n) {
  int i, n_keepalives = get_End_n_keepalives(n);

  /* Remove dead blocks in keepalive list.
     We do not generate a new End node. */
  for (i = 0; i < n_keepalives; ++i) {
    ir_node *ka = get_End_keepalive(n, i);
    if (is_Block(ka) && is_Block_dead(ka))
      set_End_keepalive(n, i, new_Bad());
  }
  return n;
}

/**
 * Tries several [inplace] [optimizing] transformations and returns an
 * equivalent node. The difference to equivalent_node() is that these
 * transformations _do_ generate new nodes, and thus the old node must
 * not be freed even if the equivalent node isn't the old one.
 */
static ir_node *transform_node(ir_node *n)
{
  if (n->op->transform_node)
    n = n->op->transform_node(n);
  return n;
}

/**
 * set the default transform node operation
 */
static ir_op *firm_set_default_transform_node(ir_op *op)
{
#define CASE(a)                                 \
  case iro_##a:                                 \
    op->transform_node = transform_node_##a;    \
    break

  switch (op->code) {
  CASE(Add);    CASE(Sub);  CASE(Mul);  CASE(Div);  CASE(Mod);
  CASE(DivMod); CASE(Cond); CASE(Eor);  CASE(Not);  CASE(Cast);
  CASE(Proj);   CASE(Or);   CASE(End);
  case iro_Shr:
  case iro_Shrs:
  case iro_Shl:
    op->transform_node = transform_node_shift;
    break;
  default:
    op->transform_node = NULL;
  }

  return op;
#undef CASE
}
/* **************** Common Subexpression Elimination **************** */

/** The size of the hash table used; should roughly estimate the number of
    nodes in a graph. */
#define N_IR_NODES 512
/** Compares the attributes of two Const nodes. */
static int node_cmp_attr_Const(ir_node *a, ir_node *b)
{
  return (get_Const_tarval(a) != get_Const_tarval(b))
      || (get_Const_type(a) != get_Const_type(b));
}

/** Compares the attributes of two Proj nodes. */
static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
{
  return get_irn_proj_attr (a) != get_irn_proj_attr (b);
}

/** Compares the attributes of two Filter nodes. */
static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
{
  return get_Filter_proj(a) != get_Filter_proj(b);
}

/** Compares the attributes of two Alloc nodes. */
static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
{
  return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
      || (get_irn_alloc_attr(a).type  != get_irn_alloc_attr(b).type);
}

/** Compares the attributes of two Free nodes. */
static int node_cmp_attr_Free(ir_node *a, ir_node *b)
{
  return (get_irn_free_attr(a).where != get_irn_free_attr(b).where)
      || (get_irn_free_attr(a).type  != get_irn_free_attr(b).type);
}

/** Compares the attributes of two SymConst nodes. */
static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
{
  return (get_irn_symconst_attr(a).num        != get_irn_symconst_attr(b).num)
      || (get_irn_symconst_attr(a).sym.type_p != get_irn_symconst_attr(b).sym.type_p)
      || (get_irn_symconst_attr(a).tp         != get_irn_symconst_attr(b).tp);
}

/** Compares the attributes of two Call nodes. */
static int node_cmp_attr_Call(ir_node *a, ir_node *b)
{
  return (get_irn_call_attr(a) != get_irn_call_attr(b));
}

/** Compares the attributes of two Sel nodes. */
static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
{
  return (get_irn_sel_attr(a).ent->kind    != get_irn_sel_attr(b).ent->kind)
      || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
      || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
      || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
      || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);
}

/** Compares the attributes of two Phi nodes. */
static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
{
  return get_irn_phi_attr (a) != get_irn_phi_attr (b);
}

/** Compares the attributes of two Cast nodes. */
static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
{
  return get_Cast_type(a) != get_Cast_type(b);
}

/** Compares the attributes of two Load nodes. */
static int node_cmp_attr_Load(ir_node *a, ir_node *b)
{
  if (get_Load_volatility(a) == volatility_is_volatile ||
      get_Load_volatility(b) == volatility_is_volatile)
    /* NEVER do CSE on volatile Loads */
    return 1;

  return get_Load_mode(a) != get_Load_mode(b);
}

/** Compares the attributes of two Store nodes. */
static int node_cmp_attr_Store(ir_node *a, ir_node *b)
{
  /* NEVER do CSE on volatile Stores */
  return (get_Store_volatility(a) == volatility_is_volatile ||
          get_Store_volatility(b) == volatility_is_volatile);
}

/**
 * set the default node attribute compare operation
 */
static ir_op *firm_set_default_node_cmp_attr(ir_op *op)
{
#define CASE(a)                               \
  case iro_##a:                               \
    op->node_cmp_attr = node_cmp_attr_##a;    \
    break

  switch (op->code) {
  CASE(Const);    CASE(Proj); CASE(Filter); CASE(Alloc); CASE(Free);
  CASE(SymConst); CASE(Call); CASE(Sel);    CASE(Phi);   CASE(Cast);
  CASE(Load);     CASE(Store);
  default:
    op->node_cmp_attr = NULL;
  }

  return op;
#undef CASE
}
/**
 * Compare function for two nodes in the hash table. Gets two
 * nodes as parameters. Returns 0 if the nodes are a cse.
 */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i, irn_arity_a;

  a = (void *)elt;
  b = (void *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are of equal length */
  irn_arity_a = get_irn_intra_arity (a);
  if (irn_arity_a != get_irn_intra_arity(b))
    return 1;

  /* for block-local cse and op_pin_state_pinned nodes: */
  if (!get_opt_global_cse() || (get_irn_pinned(a) == op_pin_state_pinned)) {
    if (get_irn_intra_n(a, -1) != get_irn_intra_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < irn_arity_a; i++)
    if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i))
      return 1;

  /*
   * here, we already know that the nodes are identical except for their
   * attributes
   */
  if (a->op->node_cmp_attr)
    return a->op->node_cmp_attr(a, b);

  return 0;
}
/*
 * Calculate a hash value of a node.
 */
unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i, irn_arity;

  if (node->op == op_Const) {
    /* special value for const, as they only differ in their tarval. */
    h = HASH_PTR(node->attr.con.tv);
    h = 9*h + HASH_PTR(get_irn_mode(node));
  } else if (node->op == op_SymConst) {
    /* special value for const, as they only differ in their symbol. */
    h = HASH_PTR(node->attr.i.sym.type_p);
    h = 9*h + HASH_PTR(get_irn_mode(node));
  } else {
    /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
    h = irn_arity = get_irn_intra_arity(node);

    /* consider all in nodes... except the block if not a control flow. */
    for (i = is_cfop(node) ? -1 : 0; i < irn_arity; i++) {
      h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
    }

    /* ...mode,... */
    h = 9*h + HASH_PTR(get_irn_mode(node));
    /* ...and code */
    h = 9*h + HASH_PTR(get_irn_op(node));
  }

  return h;
}
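/* Illustrative: for an Add with operands p0 and p1 this yields the
   Horner-scheme value 9*(9*(9*(9*2 + ptr(p0)) + ptr(p1)) + ptr(mode))
   + ptr(op), so operand order matters -- consistent with vt_cmp()
   above, which compares the in arrays positionally. */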
pset *
new_identities(void) {
  return new_pset(vt_cmp, N_IR_NODES);
}

void
del_identities(pset *value_table) {
  del_pset(value_table);
}

/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table.
 *
 * For Const nodes this is performed in the constructor, too. Const
 * nodes are extremely time critical because of their frequent use in
 * constant string arrays.
 */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform  a OP b == b OP a */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  DBG_OPT_CSE(n, o);

  return o;
}

/**
 * During construction we set the op_pin_state_pinned flag in the graph right when the
 * optimization is performed. The flag turning on procedure global cse could
 * be changed between two allocations. This way we are safe.
 */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  return n;
}
/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table, enters it in the table
 * if it isn't there yet.
 */
static ir_node *
identify_remember (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform  a OP b == b OP a */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, n, ir_node_hash (n));

  if (o != n) {
    DBG_OPT_CSE(n, o);
  }

  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  if (get_opt_cse() && (get_irn_opcode(node) != iro_Block))
    identify_remember (value_table, node);
}
/**
 * garbage in, garbage out. If a node has a dead input, i.e., the
 * Bad node is input to the node, return the Bad node.
 */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i, irn_arity;
  ir_op* op = get_irn_op(node);

  /* remove garbage blocks by looking at control flow that leaves the block
     and replacing the control flow by Bad. */
  if (get_irn_mode(node) == mode_X) {
    ir_node *block = get_nodes_block(node);
    if (!get_Block_matured(block)) return node;  /* Don't optimize nodes in immature blocks. */
    if (op == op_End) return node;               /* Don't optimize End, may have Bads. */

    if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
      irn_arity = get_irn_arity(block);
      for (i = 0; i < irn_arity; i++) {
        if (!is_Bad(get_irn_n(block, i))) break;
      }
      if (i == irn_arity) return new_Bad();
    }
  }

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     blocks predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
    irn_arity = get_irn_arity(node);

    if (is_Block_dead(get_nodes_block(node)))
      return new_Bad();

    for (i = 0; i < irn_arity; i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* With this code we violate the agreement that local_optimize
     only leaves Bads in Block, Phi and Tuple nodes. */
  /* If Block has only Bads as predecessors it's garbage. */
  /* If Phi has only Bads as predecessors it's garbage. */
  if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
    irn_arity = get_irn_arity(node);
    for (i = 0; i < irn_arity; i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == irn_arity) node = new_Bad();
  }

  return node;
}
/**
 * These optimizations deallocate nodes from the obstack.
 * It can only be called if it is guaranteed that no other nodes
 * reference this one, i.e., right after construction of a node.
 */
ir_node *
optimize_node (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  type *old_tp = get_irn_type(n);
  {
    int i, arity = get_irn_arity(n);
    for (i = 0; i < arity && !old_tp; ++i)
      old_tp = get_irn_type(get_irn_n(n, i));
  }

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value(n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /*
         * we MUST copy the node here temporary, because it's still needed
         * for DBG_OPT_CSTEVAL
         */
        int node_size = offsetof(ir_node, attr) + n->op->attr_size;
        oldn = alloca(node_size);

        memcpy(oldn, n, node_size);
        CLONE_ARR_A(ir_node *, oldn->in, n->in);

        /* ARG, copy the in array, we need it for statistics */
        memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));

        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        n = new_Const (get_tarval_mode (tv), tv);
        if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
          set_Const_type(n, old_tp);
        DBG_OPT_CSTEVAL(oldn, n);
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||
      (iro == iro_Proj) ||
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of nodes predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != oldn) {
    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, oldn);

    return n;
  }

  /* Some more constant expression evaluation that does not allow to
     free the node. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we have a legal, useful node. Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}
/**
 * These optimizations never deallocate nodes (in place). This can cause dead
 * nodes lying on the obstack. Remove these by a dead node elimination,
 * i.e., a copying garbage collection.
 */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  type *old_tp = get_irn_type(n);
  {
    int i, arity = get_irn_arity(n);
    for (i = 0; i < arity && !old_tp; ++i)
      old_tp = get_irn_type(get_irn_n(n, i));
  }

  if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;

  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible. Why? */
    return n;
  }

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value(n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);

        if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
          set_Const_type(n, old_tp);

        DBG_OPT_CSTEVAL(oldn, n);
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||   /* ... */
      (iro == iro_Proj) ||   /* ... */
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of nodes predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }

  /* Some more constant expression evaluation. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node. Enter it in hash table for cse.
     Blocks should be unique anyway. (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/**
 * Wrapper for external use: sets proper status bits after optimization.
 */
ir_node *
optimize_in_place (ir_node *n)
{
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
}
/**
 * set the default ir op operations
 */
ir_op *firm_set_default_operations(ir_op *op)
{
  op = firm_set_default_computed_value(op);
  op = firm_set_default_equivalent_node(op);
  op = firm_set_default_transform_node(op);
  op = firm_set_default_node_cmp_attr(op);

  return op;
}