/*
 * File name:   ir/ir/iropt.c
 * Purpose:     iropt --- optimizations intertwined with IR construction.
 * Author:      Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
# include "irnode_t.h"
# include "irgraph_t.h"
# include "irmode_t.h"
# include "ircons_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
# include "firmstat.h"
/* Make types visible to allow most efficient access */
# include "entity_t.h"
/**
 * Trivial INLINEable routine for copy propagation.
 * Does follow Ids, needed to optimize INLINEd code.
 */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}
/**
 * Returns the tarval of a Const node or tarval_bad for all other nodes.
 */
static INLINE tarval *
value_of (ir_node *n)
{
  if ((n != NULL) && (get_irn_op(n) == op_Const))
    return get_Const_tarval(n); /* might return tarval_bad */
  else
    return tarval_bad;
}
/**
 * Return the value of a Constant.
 */
static tarval *computed_value_Const(ir_node *n)
{
  return get_Const_tarval(n);
}

/**
 * Return the value of a 'sizeof' SymConst.
 */
static tarval *computed_value_SymConst(ir_node *n)
{
  if ((get_SymConst_kind(n) == symconst_size) &&
      (get_type_state(get_SymConst_type(n)) == layout_fixed))
    return new_tarval_from_long(get_type_size_bytes(get_SymConst_type(n)), get_irn_mode(n));

  return tarval_bad;
}
/**
 * Return the value of an Add.
 */
static tarval *computed_value_Add(ir_node *n)
{
  ir_node *a = get_Add_left(n);
  ir_node *b = get_Add_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)
      && (get_irn_mode(a) == get_irn_mode(b))
      && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
    return tarval_add(ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Sub.
 * Special case: a - a == 0.
 */
static tarval *computed_value_Sub(ir_node *n)
{
  ir_node *a = get_Sub_left(n);
  ir_node *b = get_Sub_right(n);
  tarval *ta;
  tarval *tb;

  /* a - a == 0 */
  if (a == b)
    return get_tarval_null(get_irn_mode(n));

  ta = value_of(a);
  tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)
      && (get_irn_mode(a) == get_irn_mode(b))
      && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
    return tarval_sub(ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a unary Minus.
 */
static tarval *computed_value_Minus(ir_node *n)
{
  ir_node *a = get_Minus_op(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
    return tarval_neg(ta);

  return tarval_bad;
}
/**
 * Return the value of a Mul.
 */
static tarval *computed_value_Mul(ir_node *n)
{
  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    return tarval_mul(ta, tb);
  } else {
    /* a*0 = 0 or 0*b = 0:
       return the 0 with the proper mode */
    tarval *v;

    if ( ( ((v = ta) != tarval_bad)
           && (v == get_mode_null(get_tarval_mode(v))) )
      || ( ((v = tb) != tarval_bad)
           && (v == get_mode_null(get_tarval_mode(v))) )) {
      return v;
    }
  }

  return tarval_bad;
}
/**
 * Return the value of a Quot.
 */
static tarval *computed_value_Quot(ir_node *n)
{
  ir_node *a = get_Quot_left(n);
  ir_node *b = get_Quot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
      return tarval_quo(ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of an integer Div.
 */
static tarval *computed_value_Div(ir_node *n)
{
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
      return tarval_div(ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Mod.
 */
static tarval *computed_value_Mod(ir_node *n)
{
  ir_node *a = get_Mod_left(n);
  ir_node *b = get_Mod_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
      return tarval_mod(ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of an Abs.
 */
static tarval *computed_value_Abs(ir_node *n)
{
  ir_node *a = get_Abs_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_abs(ta);

  return tarval_bad;
}
/**
 * Return the value of an And.
 * Special case: a & 0, 0 & b.
 */
static tarval *computed_value_And(ir_node *n)
{
  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_and (ta, tb);
  } else {
    tarval *v;

    if (   (classify_tarval ((v = computed_value (a))) == TV_CLASSIFY_NULL)
        || (classify_tarval ((v = computed_value (b))) == TV_CLASSIFY_NULL)) {
      return v;
    }
  }

  return tarval_bad;
}

/**
 * Return the value of an Or.
 * Special case: a | ~0, ~0 | b.
 */
static tarval *computed_value_Or(ir_node *n)
{
  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_or (ta, tb);
  } else {
    tarval *v;

    if (   (classify_tarval ((v = computed_value (a))) == TV_CLASSIFY_ALL_ONE)
        || (classify_tarval ((v = computed_value (b))) == TV_CLASSIFY_ALL_ONE)) {
      return v;
    }
  }

  return tarval_bad;
}

/**
 * Return the value of an Eor.
 */
static tarval *computed_value_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_eor (ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Not.
 */
static tarval *computed_value_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_not(ta);

  return tarval_bad;
}
/**
 * Return the value of a Shl.
 */
static tarval *computed_value_Shl(ir_node *n)
{
  ir_node *a = get_Shl_left(n);
  ir_node *b = get_Shl_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shl (ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Shr.
 */
static tarval *computed_value_Shr(ir_node *n)
{
  ir_node *a = get_Shr_left(n);
  ir_node *b = get_Shr_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shr (ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Shrs.
 */
static tarval *computed_value_Shrs(ir_node *n)
{
  ir_node *a = get_Shrs_left(n);
  ir_node *b = get_Shrs_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shrs (ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Rot.
 */
static tarval *computed_value_Rot(ir_node *n)
{
  ir_node *a = get_Rot_left(n);
  ir_node *b = get_Rot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_rot (ta, tb);
  }

  return tarval_bad;
}

/**
 * Return the value of a Conv.
 */
static tarval *computed_value_Conv(ir_node *n)
{
  ir_node *a = get_Conv_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_convert_to(ta, get_irn_mode(n));

  return tarval_bad;
}
/**
 * Evaluate Cmp and Div/Mod/DivMod Projs.
 */
static tarval *computed_value_Proj(ir_node *n)
{
  ir_node *a = get_Proj_pred(n), *b;
  ir_node *aa, *ab;

  /* Optimize Cmp nodes.
     This performs a first step of unreachable code elimination.
     Proj can not be computed, but folding a Cmp above the Proj here is
     not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
     only one is used.
     There are several cases where we can evaluate a Cmp node:
     1. The nodes compared are both the same.  If we compare for
        equal, greater equal, ... this will return true, else it
        will return false.  This step relies on cse.
     2. The predecessors of Cmp are target values.  We can evaluate
        the Cmp.
     3. The predecessors are Allocs or void* constants.  Allocs never
        return NULL, they raise an exception.  Therefore we can predict
        the Cmp result. */
  switch (get_irn_opcode(a)) {
  case iro_Cmp:
    aa = get_Cmp_left(a);
    ab = get_Cmp_right(a);

    if (aa == ab) { /* 1.: */
      /* This is a trick with the bits used for encoding the Cmp
         Proj numbers, the following statement is not the same:
         return new_tarval_from_long ((get_Proj_proj(n) == Eq), mode_b) */
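      /* Illustration (relies on Firm's pnc bit encoding, where a Proj
         number is a bit set over the elementary relations Eq, Lt, Gt, ...):
         comparing a value with itself yields "equal", so the projected
         relation holds exactly if its Eq bit is set.  Hence Eq, Le and Ge
         evaluate to true here, while Lt, Gt and Ne evaluate to false. */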
      return new_tarval_from_long ((get_Proj_proj(n) & Eq), mode_b);
    } else {
      tarval *taa = computed_value (aa);
      tarval *tab = computed_value (ab);

      if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
        /* strange checks... */
        pnc_number flags = tarval_cmp (taa, tab);
        if (flags != False) {
          return new_tarval_from_long (get_Proj_proj(n) & flags, mode_b);
        }
      } else { /* check for 3.: */
        ir_node *aaa = skip_Id(skip_Proj(aa));
        ir_node *aba = skip_Id(skip_Proj(ab));

        if (   (   (/* aa is ProjP and aaa is Alloc */
                       (get_irn_op(aa) == op_Proj)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_irn_op(aaa) == op_Alloc))
                && (   (/* ab is constant void */
                           (get_irn_op(ab) == op_Const)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                    || (/* ab is other Alloc */
                           (get_irn_op(ab) == op_Proj)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_irn_op(aba) == op_Alloc)
                        && (aaa != aba))))
            || (/* aa is void and aba is Alloc */
                   (get_irn_op(aa) == op_Const)
                && (mode_is_reference(get_irn_mode(aa)))
                && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                && (get_irn_op(ab) == op_Proj)
                && (mode_is_reference(get_irn_mode(ab)))
                && (get_irn_op(aba) == op_Alloc)))
          /* 3.: */
          return new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
      }
    }
    break;

  case iro_DivMod: {
    tarval *tb = value_of(b = get_DivMod_right(a));
    tarval *ta = value_of(a = get_DivMod_left(a));

    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
        return tarval_bad;
      if (get_Proj_proj(n) == pn_DivMod_res_div)
        return tarval_div(ta, tb);
      else if (get_Proj_proj(n) == pn_DivMod_res_mod)
        return tarval_mod(ta, tb);
    }
    break;
  }

  case iro_Div: {
    tarval *tb = value_of(b = get_Div_right(a));
    tarval *ta = value_of(a = get_Div_left(a));

    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
        return tarval_bad;
      if (get_Proj_proj(n) == pn_Div_res)
        return tarval_div(ta, tb);
    }
    break;
  }

  case iro_Mod: {
    tarval *tb = value_of(b = get_Mod_right(a));
    tarval *ta = value_of(a = get_Mod_left(a));

    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
        return tarval_bad;
      if (get_Proj_proj(n) == pn_Mod_res)
        return tarval_mod(ta, tb);
    }
    break;
  }

  default:
    return tarval_bad;
  }

  return tarval_bad;
}
/**
 * If the parameter n can be computed, return its value, else tarval_bad.
 * Performs constant folding.
 *
 * @param n  The node to be evaluated
 */
tarval *computed_value(ir_node *n)
{
  if (n->op->computed_value)
    return n->op->computed_value(n);
  return tarval_bad;
}
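
/* Usage sketch (illustrative only): a caller can try to fold any node it
   just built, which is what optimize_node() further below does:

     tarval *tv = computed_value(irn);
     if (tv != tarval_bad)
       irn = new_Const(get_tarval_mode(tv), tv);   (replace by the constant)
 */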
/**
 * Set the default computed_value evaluator.
 */
static ir_op *firm_set_default_computed_value(ir_op *op)
{
#define CASE(a)                                \
  case iro_##a:                                \
    op->computed_value = computed_value_##a;   \
    break

  switch (op->code) {
  /* CASE(Const); CASE(SymConst); ... one CASE per evaluator above (full list elided) */
  default:
    op->computed_value = NULL;
  }

  return op;
#undef CASE
}
/* Returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (mode_is_reference(get_irn_mode (a))
          && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;   /* different allocated objects */
  }
  return 0;
}
static ir_node *equivalent_node_Block(ir_node *n)
{
  ir_node *oldn = n;

  /* The Block constructor does not call optimize, but mature_immBlock
     calls the optimization. */
  assert(get_Block_matured(n));

  /* Straightening: a single entry Block following a single exit Block
     can be merged, if it is not the Start block. */
  /* !!! Beware, all Phi-nodes of n must have been optimized away.
     This should be true, as the block is matured before optimize is called.
     But what about Phi-cycles with the Phi0/Id that could not be resolved?
     Remaining Phi nodes are just Ids. */
  if ((get_Block_n_cfgpreds(n) == 1) &&
      (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- deal with the self cycle. */
      n = new_Bad(); DBG_OPT_DEAD;
    } else if (get_opt_control_flow_straightening()) {
      n = predblock; DBG_OPT_STG;
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 1) &&
           (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
    ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- deal with the self cycle. */
      n = new_Bad(); DBG_OPT_DEAD;
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 2) &&
           (get_opt_control_flow_weak_simplification())) {
    /* Test whether Cond jumps twice to this block
       @@@ we could do this also with two loops finding two preds from several ones. */
    ir_node *a = get_Block_cfgpred(n, 0);
    ir_node *b = get_Block_cfgpred(n, 1);

    if ((get_irn_op(a) == op_Proj) &&
        (get_irn_op(b) == op_Proj) &&
        (get_Proj_pred(a) == get_Proj_pred(b)) &&
        (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
        (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
      /* Also a single entry Block following a single exit Block.  Phis have
         twice the same operand and will be optimized away. */
      n = get_nodes_block(a); DBG_OPT_IFSIM;
    }
  } else if (get_opt_unreachable_code() &&
             (n != current_ir_graph->start_block) &&
             (n != current_ir_graph->end_block)) {
    int i;
    /* If all inputs are dead, this block is dead too, except if it is
       the start or end block.  This is a step of unreachable code
       elimination. */
    for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
      if (!is_Bad(get_Block_cfgpred(n, i))) break;
    }
    if (i == get_Block_n_cfgpreds(n))
      n = new_Bad();
  }

  return n;
}
/**
 * Returns an equivalent node for a Jmp, a Bad :-)
 * Of course this only happens if the Block of the Jmp is Bad.
 */
static ir_node *equivalent_node_Jmp(ir_node *n)
{
  /* GL: Why not same for op_Raise?? */
  /* unreachable code elimination */
  if (is_Bad(get_nodes_block(n)))
    n = new_Bad();

  return n;
}
static ir_node *equivalent_node_Cond(ir_node *n)
{
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */
  return n;
}
/**
 * Use the algebraic simplification a v a = a.
 */
static ir_node *equivalent_node_Or(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  if (a == b) {
    n = a; DBG_OPT_ALGSIM1;
  }

  return n;
}
/**
 * Optimize operations that are commutative and have neutral 0,
 * so a op 0 = 0 op a = a.
 */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  tarval *tv;
  ir_node *on;

  /* After running compute_node there is only one constant predecessor.
     Find this predecessor's value and remember the other node: */
  if ((tv = computed_value(a)) != tarval_bad) {
    on = b;
  } else if ((tv = computed_value(b)) != tarval_bad) {
    on = a;
  } else
    return n;

  /* If this predecessor's constant value is zero, the operation is
     unnecessary.  Remove it: */
  if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
    n = on; DBG_OPT_ALGSIM1;
  }

  return n;
}
#define equivalent_node_Add  equivalent_node_neutral_zero
#define equivalent_node_Eor  equivalent_node_neutral_zero
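
/* For instance, Add(x, Const 0) and Eor(x, Const 0) both collapse to x.
   Sub is not commutative and is handled by equivalent_node_left_zero
   below, which only accepts a zero on the right. */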
/**
 * Optimize operations that are not commutative but have a neutral
 * element 0 on the right, so a op 0 = a.
 */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  if (classify_tarval(computed_value(b)) == TV_CLASSIFY_NULL) {
    n = a; DBG_OPT_ALGSIM1;
  }

  return n;
}

#define equivalent_node_Sub   equivalent_node_left_zero
#define equivalent_node_Shl   equivalent_node_left_zero
#define equivalent_node_Shr   equivalent_node_left_zero
#define equivalent_node_Shrs  equivalent_node_left_zero
#define equivalent_node_Rot   equivalent_node_left_zero
/**
 * A "symmetric unop", i.e. op(op(n)) = n.
 */
static ir_node *equivalent_node_symmetric_unop(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *pred = get_unop_op(n);

  /* optimize symmetric unop */
  if (get_irn_op(pred) == get_irn_op(n)) {
    n = get_unop_op(pred); DBG_OPT_ALGSIM2;
  }

  return n;
}

/* Not(Not(x)) == x */
#define equivalent_node_Not    equivalent_node_symmetric_unop

/* --x == x */  /* ??? Is this possible or can --x raise an
                   out of bounds exception if min != max? */
#define equivalent_node_Minus  equivalent_node_symmetric_unop
/**
 * Optimize a * 1 = 1 * a = a.
 */
static ir_node *equivalent_node_Mul(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  /* Mul is commutative and has a different neutral element (1). */
  if (classify_tarval (computed_value (a)) == TV_CLASSIFY_ONE) {
    n = b; DBG_OPT_ALGSIM1;
  } else if (classify_tarval (computed_value (b)) == TV_CLASSIFY_ONE) {
    n = a; DBG_OPT_ALGSIM1;
  }

  return n;
}
/**
 * Optimize a / 1 = a.
 */
static ir_node *equivalent_node_Div(ir_node *n)
{
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  /* Div is not commutative. */
  if (classify_tarval(computed_value(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn Div into a tuple (mem, bad, a) */
    ir_node *mem = get_Div_mem(n);
    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_Div_res, a);
  }

  return n;
}
/**
 * Optimize a & 0b1...1 = 0b1...1 & a = a & a = a.
 */
static ir_node *equivalent_node_And(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  if (a == b) {
    n = a;    /* idempotence: a & a == a */
  } else if (classify_tarval(computed_value(a)) == TV_CLASSIFY_ALL_ONE) {
    n = b;
  } else if (classify_tarval(computed_value(b)) == TV_CLASSIFY_ALL_ONE) {
    n = a;
  }
  if (n != oldn) DBG_OPT_ALGSIM1;

  return n;
}
/**
 * Try to remove useless Convs:
 */
static ir_node *equivalent_node_Conv(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *a = get_Conv_op(n);
  ir_node *b;

  ir_mode *n_mode = get_irn_mode(n);
  ir_mode *a_mode = get_irn_mode(a);

  if (n_mode == a_mode) { /* No Conv necessary */
    n = a; DBG_OPT_ALGSIM3;
  } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
    ir_mode *b_mode;

    b = get_Conv_op(a);
    n_mode = get_irn_mode(n);
    b_mode = get_irn_mode(b);

    if (n_mode == b_mode) {
      if (n_mode == mode_b) {
        n = b;  /* Convb(Conv*(xxxb(...))) == xxxb(...) */      DBG_OPT_ALGSIM1;
      }
      else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
        if (smaller_mode(b_mode, a_mode)) {
          n = b;  /* ConvS(ConvL(xxxS(...))) == xxxS(...) */    DBG_OPT_ALGSIM1;
        }
      }
    }
  }

  return n;
}
static ir_node *equivalent_node_Phi(ir_node *n)
{
  /* Several optimizations:
     - no Phi in start block.
     - remove Id operators that are inputs to Phi
     - fold Phi-nodes, iff they have only one predecessor except
       themselves.
  */
  int i, n_preds;

  ir_node *oldn = n;
  ir_node *block = NULL;     /* to shut up gcc */
  ir_node *first_val = NULL; /* to shut up gcc */
  ir_node *scnd_val = NULL;  /* to shut up gcc */

  if (!get_opt_normalize()) return n;

  n_preds = get_Phi_n_preds(n);

  block = get_nodes_block(n);
  /* @@@ should be removed, but really ought always to hold:
     assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
  if ((is_Bad(block)) ||                         /* Control dead */
      (block == current_ir_graph->start_block))  /* There should be no Phi nodes */
    return new_Bad();                            /* in the Start Block. */

  if (n_preds == 0) return n;  /* Phi of dead Region without predecessors. */

#if 0
  /* first we test for a special case: */
  /* Confirm is a special node fixing additional information for a
     value that is known at a certain point.  This is useful for
     dataflow analysis. */
  if (n_preds == 2) {
    ir_node *a = get_Phi_pred(n, 0);
    ir_node *b = get_Phi_pred(n, 1);
    if (   (get_irn_op(a) == op_Confirm)
        && (get_irn_op(b) == op_Confirm)
        && follow_Id (get_irn_n(a, 0) == get_irn_n(b, 0))
        && (get_irn_n(a, 1) == get_irn_n (b, 1))
        && (a->data.num == (~b->data.num & irpn_True) )) {
      return get_irn_n(a, 0);
    }
  }
#endif

  /* If the Block has a Bad pred, we also have one. */
  for (i = 0; i < n_preds; ++i)
    if (is_Bad (get_Block_cfgpred(block, i)))
      set_Phi_pred(n, i, new_Bad());

  /* Find first non-self-referencing input */
  for (i = 0; i < n_preds; ++i) {
    first_val = get_Phi_pred(n, i);
    if (   (first_val != n)   /* not self pointer */
        && (get_irn_op(first_val) != op_Bad)
       ) {                    /* value not dead */
      break;                  /* then found first value. */
    }
  }

  /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
  if (i >= n_preds) { return new_Bad(); }

  /* follow_Id () for rest of inputs, determine if any of these
     are non-self-referencing */
  while (++i < n_preds) {
    scnd_val = get_Phi_pred(n, i);
    if (   (scnd_val != n)
        && (scnd_val != first_val)
        && (get_irn_op(scnd_val) != op_Bad)
       ) {
      break;
    }
  }

  /* Fold, if no multiple distinct non-self-referencing inputs */
  if (i >= n_preds) {
    n = first_val; DBG_OPT_PHI;
  }

  /* skip the remaining Ids (done in get_Phi_pred). */
  /* superfluous, since we walk all to propagate Block's Bads.
     while (++i < n_preds) get_Phi_pred(n, i); */

  return n;
}
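
/* Example: a loop-header Phi such as Phi(x, <the Phi itself>) has x as its
   only distinct non-self input and folds to x; a Phi whose inputs are all
   Bad or self-references folds to Bad. */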
/**
 * Optimize Proj(Tuple) and gigo() for ProjX in Bad block.
 */
static ir_node *equivalent_node_Proj(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Proj_pred(n);

  if ( get_irn_op(a) == op_Tuple) {
    /* Remove the Tuple/Proj combination. */
    if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
      n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
    } else {
      assert(0); /* This should not happen! */
      n = new_Bad();
    }
  } else if (get_irn_mode(n) == mode_X &&
             is_Bad(get_nodes_block(n))) {
    /* Remove dead control flow -- early gigo. */
    n = new_Bad();
  }

  return n;
}
/**
 * Remove Id's.
 */
static ir_node *equivalent_node_Id(ir_node *n)
{
  ir_node *oldn = n;

  n = follow_Id(n); DBG_OPT_ID;
  return n;
}
/**
 * equivalent_node() returns a node equivalent to input n.  It skips all nodes that
 * perform no actual computation, as, e.g., the Id nodes.  It does not create
 * new nodes.  It is therefore safe to free n if the node returned is not n.
 * If a node returns a Tuple we can not just skip it.  If the size of the
 * in array fits, we transform n into a tuple (e.g., Div).
 */
ir_node *
equivalent_node(ir_node *n)
{
  if (n->op->equivalent_node)
    return n->op->equivalent_node(n);
  return n;
}
/**
 * Set the default equivalent node operation.
 */
static ir_op *firm_set_default_equivalent_node(ir_op *op)
{
#define CASE(a)                                 \
  case iro_##a:                                 \
    op->equivalent_node = equivalent_node_##a;  \
    break

  switch (op->code) {
  /* CASE(Block); CASE(Jmp); ... one CASE per handler above (full list elided) */
  default:
    op->equivalent_node = NULL;
  }

  return op;
#undef CASE
}
/**
 * Do node specific optimizations of a node's predecessors.
 */
static void
optimize_preds(ir_node *n) {
  ir_node *a = NULL, *b = NULL;

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  switch (get_irn_opcode(n)) {
  case iro_Cmp:
    /* We don't want Cast as input to Cmp. */
    if (get_irn_op(a) == op_Cast) {
      a = get_Cast_op(a);
      set_Cmp_left(n, a);
    }
    if (get_irn_op(b) == op_Cast) {
      b = get_Cast_op(b);
      set_Cmp_right(n, b);
    }
    break;

  default: break;
  }
}
static ir_node *transform_node_Div(ir_node *n)
{
  tarval *tv = computed_value(n);

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception (a might be 0) */

  if (tv != tarval_bad) {
    /* Turn Div into a tuple (mem, bad, value) */
    ir_node *mem = get_Div_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());
    set_Tuple_pred(n, pn_Div_res, new_Const(get_tarval_mode(tv), tv));
  }
  return n;
}
static ir_node *transform_node_Mod(ir_node *n)
{
  tarval *tv = computed_value(n);

  /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause an exception (a might be 0) */

  if (tv != tarval_bad) {
    /* Turn Mod into a tuple (mem, bad, value) */
    ir_node *mem = get_Mod_mem(n);
    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Mod_M, mem);
    set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
    set_Tuple_pred(n, pn_Mod_res, new_Const(get_tarval_mode(tv), tv));
  }
  return n;
}
static ir_node *transform_node_DivMod(ir_node *n)
{
  int evaluated = 0;

  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);
  ir_mode *mode = get_irn_mode(a);
  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
    return n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception (a might be 0) */

  if (tb != tarval_bad) {
    if (tb == get_mode_one(get_tarval_mode(tb))) {
      /* division by 1: quotient is a, remainder is 0 */
      b = new_Const (mode, get_mode_null(mode));
      evaluated = 1;
    } else if (ta != tarval_bad) {
      tarval *resa, *resb;
      resa = tarval_div (ta, tb);
      if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                           Jmp for X result!? */
      resb = tarval_mod (ta, tb);
      if (resb == tarval_bad) return n; /* Causes exception! */
      a = new_Const (mode, resa);
      b = new_Const (mode, resb);
      evaluated = 1;
    }
  } else if (ta == get_mode_null(mode)) {
    /* 0 / b == 0 and 0 % b == 0 */
    b = a;
    evaluated = 1;
  }

  if (evaluated) { /* replace by tuple */
    ir_node *mem = get_DivMod_mem(n);
    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M, mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div, a);
    set_Tuple_pred(n, pn_DivMod_res_mod, b);
    assert(get_nodes_block(n));
  }

  return n;
}
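
/* Illustration: a DivMod of the constants 7 and 2 is turned into
   Tuple(mem, Bad, Const 3, Const 1).  The Projs that selected the DivMod
   results now select the constants, and the exception Proj selects Bad,
   as a folded division cannot trap. */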
static ir_node *transform_node_Cond(ir_node *n)
{
  /* Replace the Cond by a Jmp if it branches on a constant
     condition. */
  ir_node *jmp;
  ir_node *a = get_Cond_selector(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) &&
      (get_irn_mode(a) == mode_b) &&
      (get_opt_unreachable_code())) {
    /* It's a boolean Cond, branching on a boolean constant.
       Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
    jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n));
    turn_into_tuple(n, 2);
    if (ta == tarval_b_true) {
      set_Tuple_pred(n, pn_Cond_false, new_Bad());
      set_Tuple_pred(n, pn_Cond_true, jmp);
    } else {
      set_Tuple_pred(n, pn_Cond_false, jmp);
      set_Tuple_pred(n, pn_Cond_true, new_Bad());
    }
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((ta != tarval_bad) &&
             (get_irn_mode(a) == mode_Iu) &&
             (get_Cond_kind(n) == dense) &&
             (get_opt_unreachable_code())) {
    /* I don't want to allow Tuples smaller than the biggest Proj.
       Also this tuple might get really big...
       I generate the Jmp here, and remember it in link.  Link is used
       when optimizing Proj. */
    set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_block(n)));
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
  } else if ((get_irn_op(a) == op_Eor)
             && (get_irn_mode(a) == mode_b)
             && (classify_tarval(computed_value(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
    /* The Eor is a negate.  Generate a new Cond without the negate,
       simulate the negate by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Eor_left(a)));
  } else if ((get_irn_op(a) == op_Not)
             && (get_irn_mode(a) == mode_b)) {
    /* A Not before the Cond.  Generate a new Cond without the Not,
       simulate the Not by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
                               get_Not_op(a)));
  }
  return n;
}
static ir_node *transform_node_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  if ((get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (classify_tarval (computed_value (b)) == TV_CLASSIFY_ONE)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
    /* The Eor negates a Cmp.  The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));
  else if ((get_irn_mode(n) == mode_b)
           && (classify_tarval (computed_value (b)) == TV_CLASSIFY_ONE))
    /* The Eor is a Not.  Replace it by a Not. */
    /* ????!!!Extend to bitfield 1111111. */
    n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b);

  return n;
}
/**
 * Transform a boolean Not.
 */
static ir_node *transform_node_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);

  if (   (get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
    /* We negate a Cmp.  The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));

  return n;
}
/**
 * Transform a Div/Mod/DivMod with a non-zero constant.  Must be
 * done here instead of equivalent_node() because it creates new
 * nodes.
 */
static ir_node *transform_node_Proj(ir_node *proj)
{
  ir_node *n = get_Proj_pred(proj);
  ir_node *b;
  tarval *tb;
  long proj_nr;

  switch (get_irn_opcode(n)) {
  case iro_Div:
    b = get_Div_right(n);
    tb = computed_value(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      if (proj_nr == pn_Div_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      }
      else if (proj_nr == pn_Div_M) {
        /* the memory Proj can be removed */
        ir_node *res = get_Div_mem(n);
        set_Div_mem(n, get_irg_initial_mem(current_ir_graph));
        return res;
      }
    }
    break;

  case iro_Mod:
    b = get_Mod_right(n);
    tb = computed_value(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      if (proj_nr == pn_Mod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      }
      else if (proj_nr == pn_Mod_M) {
        /* the memory Proj can be removed */
        ir_node *res = get_Mod_mem(n);
        set_Mod_mem(n, get_irg_initial_mem(current_ir_graph));
        return res;
      }
    }
    break;

  case iro_DivMod:
    b = get_DivMod_right(n);
    tb = computed_value(b);

    if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
      proj_nr = get_Proj_proj(proj);

      if (proj_nr == pn_DivMod_X_except) {
        /* we found an exception handler, remove it */
        return new_Bad();
      }
      else if (proj_nr == pn_DivMod_M) {
        /* the memory Proj can be removed */
        ir_node *res = get_DivMod_mem(n);
        set_DivMod_mem(n, get_irg_initial_mem(current_ir_graph));
        return res;
      }
    }
    break;

  case iro_Cond:
    if (get_opt_unreachable_code()) {
      b = get_Cond_selector(n);
      tb = computed_value(b);

      if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
        /* we have a constant switch */
        long num = get_Proj_proj(proj);

        if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Projs yet */
          if (get_tarval_long(tb) == num) {
            /* Do NOT create a jump here, or we will have 2 control flow ops
             * in a block. This case is optimized away in optimize_cf(). */
            return proj;
          }
          else
            /* this case never triggers, kill it */
            return new_Bad();
        }
      }
    }
    return proj;

  case iro_Tuple:
    /* should not happen, but if it does will be optimized away */
    break;

  default:
    /* do nothing */
    return proj;
  }

  /* we have added a Tuple, optimize it for the current Proj away */
  return equivalent_node_Proj(proj);
}
/**
 * Returns the operands of a commutative bin-op.  If one operand is
 * a constant, it is returned as the second one.
 */
static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
{
  ir_node *op_a = get_binop_left(binop);
  ir_node *op_b = get_binop_right(binop);

  assert(is_op_commutative(get_irn_op(binop)));

  if (get_irn_op(op_a) == op_Const) {
    *a = op_b;
    *c = op_a;
  } else {
    *a = op_a;
    *c = op_b;
  }
}
/**
 * Optimize a Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
 * Such a pattern may arise in bitfield stores.
 *
 * value  c4                  value      c4 & c2
 *    AND     c3                    AND           c1 | c3
 *        OR     c2      ===>          OR
 *           AND    c1
 *               OR
 */
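
/* Worked example (with hypothetical 8-bit masks): for c4 = 0x0F,
   c3 = 0xF0, c2 = 0xF3 and c1 = 0x0C the conditions checked below hold
   (c4|c2 == 0xFF, c3 & ~c4 == c3, c1 & ~c2 == c1), so
   ((((v & 0x0F) | 0xF0) & 0xF3) | 0x0C) becomes (v & 0x03) | 0xFC,
   i.e. one And with c4 & c2 followed by one Or with c1 | c3. */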
static ir_node *transform_node_Or(ir_node *or)
{
  ir_node *and, *c1;
  ir_node *or_l, *c2;
  ir_node *and_l, *c3;
  ir_node *value, *c4;
  ir_node *new_and, *new_const, *block;
  ir_mode *mode = get_irn_mode(or);

  tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;

  get_comm_Binop_Ops(or, &and, &c1);
  if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
    return or;

  get_comm_Binop_Ops(and, &or_l, &c2);
  if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
    return or;

  get_comm_Binop_Ops(or_l, &and_l, &c3);
  if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
    return or;

  get_comm_Binop_Ops(and_l, &value, &c4);
  if (get_irn_op(c4) != op_Const)
    return or;

  /* ok, found the pattern, check for conditions */
  assert(mode == get_irn_mode(and));
  assert(mode == get_irn_mode(or_l));
  assert(mode == get_irn_mode(and_l));

  tv1 = get_Const_tarval(c1);
  tv2 = get_Const_tarval(c2);
  tv3 = get_Const_tarval(c3);
  tv4 = get_Const_tarval(c4);

  tv = tarval_or(tv4, tv2);
  if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
    /* have at least one 0 at the same bit position */
    return or;
  }

  n_tv4 = tarval_not(tv4);
  if (tv3 != tarval_and(tv3, n_tv4)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  n_tv2 = tarval_not(tv2);
  if (tv1 != tarval_and(tv1, n_tv2)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  /* ok, all conditions met */
  block = get_nodes_block(or);

  new_and = new_r_And(current_ir_graph, block,
                      value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);

  new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));

  set_Or_left(or, new_and);
  set_Or_right(or, new_const);

  /* check for more */
  return transform_node_Or(or);
}
/**
 * Tries several [inplace] [optimizing] transformations and returns an
 * equivalent node.  The difference to equivalent_node() is that these
 * transformations _do_ generate new nodes, and thus the old node must
 * not be freed even if the equivalent node isn't the old one.
 */
static ir_node *transform_node(ir_node *n)
{
  if (n->op->transform_node)
    n = n->op->transform_node(n);
  return n;
}
/**
 * Set the default transform node operation.
 */
static ir_op *firm_set_default_transform_node(ir_op *op)
{
#define CASE(a)                               \
  case iro_##a:                               \
    op->transform_node = transform_node_##a;  \
    break

  switch (op->code) {
  /* CASE(Div); CASE(Mod); ... one CASE per transformation above (full list elided) */
  default:
    op->transform_node = NULL;
  }

  return op;
#undef CASE
}
/* **************** Common Subexpression Elimination **************** */

/** The size of the hash table used, should estimate the number of nodes
    in a graph. */
#define N_IR_NODES 512
static int node_cmp_attr_Const(ir_node *a, ir_node *b)
{
  return (get_Const_tarval(a) != get_Const_tarval(b))
      || (get_Const_type(a) != get_Const_type(b));
}

static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
{
  return get_irn_proj_attr (a) != get_irn_proj_attr (b);
}

static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
{
  return get_Filter_proj(a) != get_Filter_proj(b);
}

static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
{
  return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
      || (get_irn_alloc_attr(a).type  != get_irn_alloc_attr(b).type);
}

static int node_cmp_attr_Free(ir_node *a, ir_node *b)
{
  return (get_irn_free_attr(a) != get_irn_free_attr(b));
}

static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
{
  return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
      || (get_irn_symconst_attr(a).sym.type_p != get_irn_symconst_attr(b).sym.type_p);
}

static int node_cmp_attr_Call(ir_node *a, ir_node *b)
{
  return (get_irn_call_attr(a) != get_irn_call_attr(b));
}

static int node_cmp_attr_FuncCall(ir_node *a, ir_node *b)
{
  return (get_irn_funccall_attr(a) != get_irn_funccall_attr(b));
}

static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
{
  return (get_irn_sel_attr(a).ent->kind    != get_irn_sel_attr(b).ent->kind)
      || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
      || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
      || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
      || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);
}

static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
{
  return get_irn_phi_attr (a) != get_irn_phi_attr (b);
}

static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
{
  return get_Cast_type(a) != get_Cast_type(b);
}

static int node_cmp_attr_Load(ir_node *a, ir_node *b)
{
  if (get_Load_volatility(a) == volatility_is_volatile ||
      get_Load_volatility(b) == volatility_is_volatile)
    /* NEVER do CSE on volatile Loads */
    return 1;

  return get_Load_mode(a) != get_Load_mode(b);
}

static int node_cmp_attr_Store(ir_node *a, ir_node *b)
{
  /* NEVER do CSE on volatile Stores */
  return (get_Store_volatility(a) == volatility_is_volatile ||
          get_Store_volatility(b) == volatility_is_volatile);
}
/**
 * Set the default node attribute compare operation.
 */
static ir_op *firm_set_default_node_cmp_attr(ir_op *op)
{
#define CASE(a)                              \
  case iro_##a:                              \
    op->node_cmp_attr = node_cmp_attr_##a;   \
    break

  switch (op->code) {
  /* CASE(Const); CASE(Proj); ... one CASE per compare function above (full list elided) */
  default:
    op->node_cmp_attr = NULL;
  }

  return op;
#undef CASE
}
/**
 * Compare function for two nodes in the hash table.  Gets two
 * nodes as parameters.  Returns 0 if the nodes are a cse.
 */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i, irn_arity_a;

  a = (void *)elt;
  b = (void *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are equal */
  irn_arity_a = get_irn_arity (a);
  if (irn_arity_a != get_irn_arity(b))
    return 1;

  /* for block-local cse and op_pin_state_pinned nodes: */
  if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == op_pin_state_pinned)) {
    if (get_irn_n(a, -1) != get_irn_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < irn_arity_a; i++)
    if (get_irn_n(a, i) != get_irn_n(b, i))
      return 1;

  /*
   * here, we already know that the nodes are identical except their
   * attributes
   */
  if (a->op->node_cmp_attr)
    return a->op->node_cmp_attr(a, b);

  return 0;
}
/**
 * Calculate a hash value of a node.
 */
unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i, irn_arity;

  if (node->op == op_Const) {
    /* special value for const, as they only differ in their tarval. */
    h = ((unsigned) node->attr.con.tv) >> 3;
    h = 9*h + (unsigned)get_irn_mode(node);
  } else if (node->op == op_SymConst) {
    /* special value for const, as they only differ in their symbol. */
    h = ((unsigned) node->attr.i.sym.type_p) >> 3;
    h = 9*h + (unsigned)get_irn_mode(node);
  } else {
    /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
    h = irn_arity = get_irn_arity(node);

    /* consider all in nodes... except the block. */
    for (i = 0; i < irn_arity; i++) {
      h = 9*h + (unsigned)get_irn_n(node, i);
    }

    /* ...mode,... */
    h = 9*h + (unsigned) get_irn_mode (node);
    /* ...and code */
    h = 9*h + (unsigned) get_irn_op (node);
  }

  return h;
}
pset *
new_identities (void)
{
  return new_pset (vt_cmp, N_IR_NODES);
}

void
del_identities (pset *value_table)
{
  del_pset (value_table);
}
/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table.
 *
 * For Const nodes this is performed in the constructor, too.  Const
 * nodes are extremely time critical because of their frequent use in
 * constant string arrays.
 */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  /* TODO: use a generic commutative attribute */
  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      ir_node *l = get_binop_left(n);
      ir_node *r = get_binop_right(n);

      /* for commutative operators perform  a OP b == b OP a */
      if (l > r) {
        set_binop_left(n, r);
        set_binop_right(n, l);
      }
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
}
/**
 * During construction we set the op_pin_state_pinned flag in the graph right when the
 * optimization is performed.  The flag turning on procedure global cse could
 * be changed between two allocations.  This way we are safe.
 */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  return n;
}
/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table, enters it in the table
 * if it isn't there yet.
 */
static ir_node *
identify_remember (pset *value_table, ir_node *node)
{
  ir_node *o = NULL;

  if (!value_table) return node;

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, node, ir_node_hash (node));

  if (o == node) return node;

  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  identify_remember (value_table, node);
}
/**
 * Garbage in, garbage out.  If a node has a dead input, i.e., the
 * Bad node is input to the node, return the Bad node.
 */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i, irn_arity;
  ir_op* op = get_irn_op(node);

  /* remove garbage blocks by looking at control flow that leaves the block
     and replacing the control flow by Bad. */
  if (get_irn_mode(node) == mode_X) {
    ir_node *block = get_nodes_block(node);
    if (op == op_End) return node;     /* Don't optimize End, may have Bads. */
    if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
      irn_arity = get_irn_arity(block);
      for (i = 0; i < irn_arity; i++) {
        if (!is_Bad(get_irn_n(block, i))) break;
      }
      if (i == irn_arity) return new_Bad();
    }
  }

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     blocks predecessors is dead. */
  if ( op != op_Block && op != op_Phi && op != op_Tuple) {
    irn_arity = get_irn_arity(node);
    for (i = -1; i < irn_arity; i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* With this code we violate the agreement that local_optimize
     only leaves Bads in Block, Phi and Tuple nodes. */
  /* If Block has only Bads as predecessors it's garbage. */
  /* If Phi has only Bads as predecessors it's garbage. */
  if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
    irn_arity = get_irn_arity(node);
    for (i = 0; i < irn_arity; i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == irn_arity) node = new_Bad();
  }

  return node;
}
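
/* Example: once gigo() runs, an Add with one Bad operand is itself replaced
   by Bad, so dead code collapses, while a Block, Phi or Tuple keeps single
   Bad inputs and is only replaced once all of its inputs are Bad. */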
/**
 * These optimizations deallocate nodes from the obstack.
 * It can only be called if it is guaranteed that no other nodes
 * reference this one, i.e., right after construction of a node.
 */
ir_node *
optimize_node (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /*
         * we MUST copy the node here temporarily, because it's still needed
         * for DBG_OPT_ALGSIM0
         */
        int node_size = offsetof(ir_node, attr) + n->op->attr_size;
        ir_node *x = alloca(node_size);

        memcpy(x, n, node_size);
        oldn = x;

        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        n = new_Const (get_tarval_mode (tv), tv);
        DBG_OPT_ALGSIM0;
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||
      (iro == iro_Proj) ||
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of nodes predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != oldn) {
    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, oldn);

    return n;
  }

  /* Some more constant expression evaluation that does not allow to
     free the node. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we have a legal, useful node. Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}
/**
 * These optimizations never deallocate nodes.  This can cause dead
 * nodes lying on the obstack.  Remove these by a dead node elimination,
 * i.e., a copying garbage collection.
 */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;

  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible.  Why? */
    return n;
  }

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);
        DBG_OPT_ALGSIM0;
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||   /* ... */
      (iro == iro_Proj) ||   /* ... */
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of nodes predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }

  /* Some more constant expression evaluation. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node. Enter it in hash table for cse.
     Blocks should be unique anyways.  (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/**
 * Wrapper for external use, set proper status bits after optimization.
 */
ir_node *
optimize_in_place (ir_node *n)
{
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, op_pin_state_floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
}
/**
 * Set the default ir op operations.
 */
ir_op *firm_set_default_operations(ir_op *op)
{
  op = firm_set_default_computed_value(op);
  op = firm_set_default_equivalent_node(op);
  op = firm_set_default_transform_node(op);
  op = firm_set_default_node_cmp_attr(op);

  return op;
}