/*
 * File name:   ir/ir/iropt.c
 * Purpose:     iropt --- optimizations intertwined with IR construction.
 * Author:      Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
 */
# include "irnode_t.h"
# include "irgraph_t.h"
# include "irmode_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
# include "firmstat.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/**
 * Trivial INLINEable routine for copy propagation.
 * Does follow Ids, needed to optimize INLINEd code.
 */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}
/**
 * Returns the tarval of a Const node or tarval_bad for all other nodes.
 */
static INLINE tarval *
value_of (ir_node *n)
{
  if ((n != NULL) && (get_irn_op(n) == op_Const))
    return get_Const_tarval(n); /* might return tarval_bad */
  else
    return tarval_bad;
}
static tarval *computed_value_Const(ir_node *n)
{
  return get_Const_tarval(n);
}

static tarval *computed_value_SymConst(ir_node *n)
{
  if ((get_SymConst_kind(n) == size) &&
      (get_type_state(get_SymConst_type(n)) == layout_fixed))
    return new_tarval_from_long(get_type_size_bytes(get_SymConst_type(n)), mode_Is);
  return tarval_bad;
}
static tarval *computed_value_Add(ir_node *n)
{
  ir_node *a = get_Add_left(n);
  ir_node *b = get_Add_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)
      && (get_irn_mode(a) == get_irn_mode(b))
      && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
    return tarval_add(ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Sub(ir_node *n)
{
  ir_node *a = get_Sub_left(n);
  ir_node *b = get_Sub_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)
      && (get_irn_mode(a) == get_irn_mode(b))
      && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
    return tarval_sub(ta, tb);
  }
  return tarval_bad;
}
static tarval *computed_value_Minus(ir_node *n)
{
  ir_node *a = get_Minus_op(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
    return tarval_neg(ta);
  return tarval_bad;
}
static tarval *computed_value_Mul(ir_node *n)
{
  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    return tarval_mul(ta, tb);
  } else {
    /* a*0 = 0 or 0*b = 0:
       calls computed_value recursively and returns the 0 with proper
       mode. */
    tarval *v;

    if ( ( ((v = ta) != tarval_bad)
           && (v == get_mode_null(get_tarval_mode(v))) )
      || ( ((v = tb) != tarval_bad)
           && (v == get_mode_null(get_tarval_mode(v))) )) {
      return v;
    }
  }
  return tarval_bad;
}
static tarval *computed_value_Quot(ir_node *n)
{
  ir_node *a = get_Quot_left(n);
  ir_node *b = get_Quot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
      return tarval_quo(ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Div(ir_node *n)
{
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
      return tarval_div(ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Mod(ir_node *n)
{
  ir_node *a = get_Mod_left(n);
  ir_node *b = get_Mod_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  /* This was missing in original implementation. Why? */
  if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
    if (tb != get_mode_null(get_tarval_mode(tb)))  /* mod by zero: return tarval_bad */
      return tarval_mod(ta, tb);
  }
  return tarval_bad;
}
static tarval *computed_value_Abs(ir_node *n)
{
  ir_node *a = get_Abs_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_abs(ta);
  return tarval_bad;
}
static tarval *computed_value_And(ir_node *n)
{
  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_and (ta, tb);
  } else {
    tarval *v;

    if (   (tarval_classify ((v = computed_value (a))) == TV_CLASSIFY_NULL)
        || (tarval_classify ((v = computed_value (b))) == TV_CLASSIFY_NULL)) {
      return v;
    }
  }
  return tarval_bad;
}

static tarval *computed_value_Or(ir_node *n)
{
  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_or (ta, tb);
  } else {
    tarval *v;

    if (   (tarval_classify ((v = computed_value (a))) == TV_CLASSIFY_ALL_ONE)
        || (tarval_classify ((v = computed_value (b))) == TV_CLASSIFY_ALL_ONE)) {
      return v;
    }
  }
  return tarval_bad;
}
static tarval *computed_value_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_eor (ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_not(ta);
  return tarval_bad;
}
static tarval *computed_value_Shl(ir_node *n)
{
  ir_node *a = get_Shl_left(n);
  ir_node *b = get_Shl_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shl (ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Shr(ir_node *n)
{
  ir_node *a = get_Shr_left(n);
  ir_node *b = get_Shr_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shr (ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Shrs(ir_node *n)
{
  ir_node *a = get_Shrs_left(n);
  ir_node *b = get_Shrs_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    return tarval_shrs (ta, tb);
  }
  return tarval_bad;
}

static tarval *computed_value_Rot(ir_node *n)
{
  ir_node *a = get_Rot_left(n);
  ir_node *b = get_Rot_right(n);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if ((ta != tarval_bad) && (tb != tarval_bad)) {
    /* return tarval_rot (ta, tb); */   /* not implemented yet */
  }
  return tarval_bad;
}
static tarval *computed_value_Conv(ir_node *n)
{
  ir_node *a = get_Conv_op(n);
  tarval *ta = value_of(a);

  if (ta != tarval_bad)
    return tarval_convert_to(ta, get_irn_mode(n));
  return tarval_bad;
}
static tarval *computed_value_Proj(ir_node *n)
{
  ir_node *a = get_Proj_pred(n), *b;
  ir_node *aa, *ab;

  /* Optimize Cmp nodes.
     This performs a first step of unreachable code elimination.
     Proj can not be computed, but folding a Cmp above the Proj here is
     not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
     only 1 is used.

     There are several cases where we can evaluate a Cmp node:

     1. The nodes compared are both the same.  If we compare for
        equal, greater equal, ... this will return true, else it
        will return false.  This step relies on cse.
     2. The predecessors of Cmp are target values.  We can evaluate
        the Cmp.
     3. The predecessors are Allocs or void* constants.  Allocs never
        return NULL, they raise an exception.  Therefore we can predict
        the Cmp result. */
  if (get_irn_op(a) == op_Cmp) {
    aa = get_Cmp_left(a);
    ab = get_Cmp_right(a);

    if (aa == ab) { /* 1.: */
      /* This is a trick with the bits used for encoding the Cmp
         Proj numbers, the following statement is not the same:
         return new_tarval_from_long ((get_Proj_proj(n) == Eq), mode_b) */
      return new_tarval_from_long ((get_Proj_proj(n) & Eq), mode_b);
    } else {
      tarval *taa = computed_value (aa);
      tarval *tab = computed_value (ab);

      if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
        /* strange checks... */
        pnc_number flags = tarval_cmp (taa, tab);
        if (flags != False) {
          return new_tarval_from_long (get_Proj_proj(n) & flags, mode_b);
        }
      } else {  /* check for 3.: */
        ir_node *aaa = skip_nop(skip_Proj(aa));
        ir_node *aba = skip_nop(skip_Proj(ab));

        if (   (   (/* aa is ProjP and aaa is Alloc */
                       (get_irn_op(aa) == op_Proj)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_irn_op(aaa) == op_Alloc))
                && (   (/* ab is constant void */
                           (get_irn_op(ab) == op_Const)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                    || (/* ab is other Alloc */
                           (get_irn_op(ab) == op_Proj)
                        && (mode_is_reference(get_irn_mode(ab)))
                        && (get_irn_op(aba) == op_Alloc)
                        && (aaa != aba))))
            || (/* aa is void and aba is Alloc */
                   (get_irn_op(aa) == op_Const)
                && (mode_is_reference(get_irn_mode(aa)))
                && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                && (get_irn_op(ab) == op_Proj)
                && (mode_is_reference(get_irn_mode(ab)))
                && (get_irn_op(aba) == op_Alloc)))
          /* 3.: */
          return new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
      }
    }
  } else if (get_irn_op(a) == op_DivMod) {
    tarval *tb = value_of(b = get_DivMod_right(a));
    tarval *ta = value_of(a = get_DivMod_left(a));

    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb)))  /* div by zero: return tarval_bad */
        return tarval_bad;
      if (get_Proj_proj(n) == 0)  /* Div */
        return tarval_div(ta, tb);
      else                        /* Mod */
        return tarval_mod(ta, tb);
    }
  }
  return tarval_bad;
}
/**
 * If the parameter n can be computed, return its value, else tarval_bad.
 * Performs constant folding.
 *
 * GL: Only if n is arithmetic operator?
 */
tarval *computed_value(ir_node *n)
{
  if (n->op->computed_value)
    return n->op->computed_value(n);
  return tarval_bad;
}
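
/* A minimal sketch of how constant folding builds on computed_value(),
   using the usual Firm constructors (new_Add, new_Const); illustrative
   only, not compiled: */
#if 0
{
  ir_node *c2  = new_Const(mode_Is, new_tarval_from_long(2, mode_Is));
  ir_node *c3  = new_Const(mode_Is, new_tarval_from_long(3, mode_Is));
  ir_node *add = new_Add(c2, c3, mode_Is);
  tarval  *tv  = computed_value(add);         /* the tarval for 5 */

  if (tv != tarval_bad)                       /* fold: replace the Add */
    add = new_Const(get_tarval_mode(tv), tv);
}
#endif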
/**
 * set the default computed_value evaluator
 */
static ir_op *firm_set_default_computed_value(ir_op *op)
{
#define CASE(a)                               \
  case iro_##a:                               \
    op->computed_value = computed_value_##a;  \
    break

  switch (op->code) {
  CASE(Const);  CASE(SymConst); CASE(Add);  CASE(Sub);  CASE(Minus);
  CASE(Mul);    CASE(Quot);     CASE(Div);  CASE(Mod);  CASE(Abs);
  CASE(And);    CASE(Or);       CASE(Eor);  CASE(Not);  CASE(Shl);
  CASE(Shr);    CASE(Shrs);     CASE(Rot);  CASE(Conv); CASE(Proj);
  default:
    op->computed_value = NULL;
  }

  return op;
#undef CASE
}
/* returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (mode_is_reference(get_irn_mode (a))
       && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;  /* different allocated objects never alias */
  }
  return 0;
}
static ir_node *equivalent_node_Block(ir_node *n)
{
  ir_node *oldn = n;

  /* The Block constructor does not call optimize, but mature_block
     calls the optimization. */
  assert(get_Block_matured(n));

  /* Straightening: a single entry Block following a single exit Block
     can be merged, if it is not the Start block. */
  /* !!! Beware, all Phi-nodes of n must have been optimized away.
     This should be true, as the block is matured before optimize is called.
     But what about Phi-cycles with the Phi0/Id that could not be resolved?
     Remaining Phi nodes are just Ids. */
  if ((get_Block_n_cfgpreds(n) == 1) &&
      (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
    ir_node *predblock = get_nodes_Block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* Jmp jumps into the block it is in -- deal with the self cycle. */
      n = new_Bad();                                      DBG_OPT_DEAD;
    } else if (get_opt_control_flow_straightening()) {
      n = predblock;                                      DBG_OPT_STG;
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 1) &&
           (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
    ir_node *predblock = get_nodes_Block(get_Block_cfgpred(n, 0));
    if (predblock == oldn) {
      /* The Cond's Proj jumps into the block it is in -- deal with the
         self cycle. */
      n = new_Bad();                                      DBG_OPT_DEAD;
    }
  }
  else if ((get_Block_n_cfgpreds(n) == 2) &&
           (get_opt_control_flow_weak_simplification())) {
    /* Test whether Cond jumps twice to this block
       @@@ we could do this also with two loops finding two preds from several ones. */
    ir_node *a = get_Block_cfgpred(n, 0);
    ir_node *b = get_Block_cfgpred(n, 1);

    if ((get_irn_op(a) == op_Proj) &&
        (get_irn_op(b) == op_Proj) &&
        (get_Proj_pred(a) == get_Proj_pred(b)) &&
        (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
        (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
      /* Also a single entry Block following a single exit Block.  Phis have
         twice the same operand and will be optimized away. */
      n = get_nodes_Block(a);                             DBG_OPT_IFSIM;
    }
  } else if (get_opt_unreachable_code() &&
             (n != current_ir_graph->start_block) &&
             (n != current_ir_graph->end_block)) {
    int i;

    /* If all inputs are dead, this block is dead too, except if it is
       the start or end block.  This is a step of unreachable code
       elimination. */
    for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
      if (!is_Bad(get_Block_cfgpred(n, i))) break;
    }
    if (i == get_Block_n_cfgpreds(n))
      n = new_Bad();
  }

  return n;
}
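
/* Example of the straightening case above (a sketch): in the chain

     BlockA:  ...  Jmp
     BlockB:  single predecessor = the Jmp of BlockA

   BlockB is replaced by BlockA, merging the two blocks.  This is legal
   because a matured single-entry block cannot hold meaningful Phi
   nodes -- they were already folded to their unique operand. */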
static ir_node *equivalent_node_Jmp(ir_node *n)
{
  /* GL: Why not same for op_Raise?? */
  /* unreachable code elimination */
  if (is_Bad(get_nodes_Block(n)))
    n = new_Bad();

  return n;
}

static ir_node *equivalent_node_Cond(ir_node *n)
{
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */
  return n;
}
static ir_node *equivalent_node_Or(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Or_left(n);
  ir_node *b = get_Or_right(n);

  /* remove the redundant a | a */
  if (a == b) {
    n = a;                                                DBG_OPT_ALGSIM1;
  }

  return n;
}
/**
 * optimize operations that are commutative and have neutral 0.
 */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  tarval *tv;
  ir_node *on;

  /* After running compute_node there is only one constant predecessor.
     Find this predecessor's value and remember the other node: */
  if ((tv = computed_value (a)) != tarval_bad) {
    on = b;
  } else if ((tv = computed_value (b)) != tarval_bad) {
    on = a;
  } else
    return n;

  /* If this predecessor's constant value is zero, the operation is
     unnecessary. Remove it: */
  if (tarval_classify (tv) == TV_CLASSIFY_NULL) {
    n = on;                                               DBG_OPT_ALGSIM1;
  }

  return n;
}
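
/* Worked example for the routine above (a sketch): for n = Add(x, Const 0),
   computed_value() succeeds on the Const operand, so tv is the zero tarval
   and on = x; the TV_CLASSIFY_NULL test then replaces the whole Add by x. */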
static ir_node *equivalent_node_Add(ir_node *n)
{
  return equivalent_node_neutral_zero(n);
}

static ir_node *equivalent_node_Eor(ir_node *n)
{
  return equivalent_node_neutral_zero(n);
}
/**
 * optimize operations that are not commutative but have a neutral 0 on
 * the right, so a op 0 = a; the left operand is the result.
 * Test only one predecessor.
 */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_binop_left(n);
  ir_node *b = get_binop_right(n);

  if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) {
    n = a;                                                DBG_OPT_ALGSIM1;
  }

  return n;
}

static ir_node *equivalent_node_Sub(ir_node *n)
{
  return equivalent_node_left_zero(n);
}

static ir_node *equivalent_node_Shl(ir_node *n)
{
  return equivalent_node_left_zero(n);
}

static ir_node *equivalent_node_Shr(ir_node *n)
{
  return equivalent_node_left_zero(n);
}

static ir_node *equivalent_node_Shrs(ir_node *n)
{
  return equivalent_node_left_zero(n);
}

static ir_node *equivalent_node_Rot(ir_node *n)
{
  return equivalent_node_left_zero(n);
}
static ir_node *equivalent_node_symmetric_unop(ir_node *n)
{
  ir_node *oldn = n;

  /* optimize symmetric unop: the unop is its own inverse */
  if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
    n = get_unop_op(get_unop_op(n));                      DBG_OPT_ALGSIM2;
  }
  return n;
}

static ir_node *equivalent_node_Not(ir_node *n)
{
  /* Not(Not(x)) == x */
  return equivalent_node_symmetric_unop(n);
}

static ir_node *equivalent_node_Minus(ir_node *n)
{
  /* --x == x */  /* ??? Is this possible or can --x raise an
                     out of bounds exception if min != max? */
  return equivalent_node_symmetric_unop(n);
}
static ir_node *equivalent_node_Mul(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Mul_left(n);
  ir_node *b = get_Mul_right(n);

  /* Mul is commutative and has 1 as its neutral element. */
  if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ONE) {
    n = b;                                                DBG_OPT_ALGSIM1;
  } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) {
    n = a;                                                DBG_OPT_ALGSIM1;
  }

  return n;
}
static ir_node *equivalent_node_Div(ir_node *n)
{
  ir_node *a = get_Div_left(n);
  ir_node *b = get_Div_right(n);

  /* Div is not commutative. */
  if (tarval_classify(computed_value(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
    /* Turn Div into a tuple (mem, bad, a) */
    ir_node *mem = get_Div_mem(n);
    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M,        mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());        /* no exception */
    set_Tuple_pred(n, pn_Div_res,      a);
  }
  return n;
}
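
/* How the Tuple trick above works (a sketch): a Div delivers its results
   only through Proj nodes (memory, exception, result).  turn_into_tuple()
   rewrites the Div in place into a Tuple with the given predecessors, so
   every existing Proj(n, pn_Div_res) now selects 'a' directly; the
   Tuple/Proj pair is then removed by equivalent_node_Proj() below.  This
   way div(x, 1) collapses to x without visiting the Div's users. */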
static ir_node *equivalent_node_And(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_And_left(n);
  ir_node *b = get_And_right(n);

  if (a == b) {
    n = a;    /* idempotence: a & a == a */
  } else if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ALL_ONE) {
    n = b;
  } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ALL_ONE) {
    n = a;
  }
  if (n != oldn)                                          DBG_OPT_ALGSIM1;
  return n;
}
static ir_node *equivalent_node_Conv(ir_node *n)
{
  ir_node *oldn = n;
  ir_node *a = get_Conv_op(n);
  ir_node *b;

  ir_mode *n_mode = get_irn_mode(n);
  ir_mode *a_mode = get_irn_mode(a);

  if (n_mode == a_mode) { /* No Conv necessary */
    n = a;                                                DBG_OPT_ALGSIM3;
  } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
    ir_mode *b_mode;

    b = get_Conv_op(a);
    n_mode = get_irn_mode(n);
    b_mode = get_irn_mode(b);

    if (n_mode == b_mode) {
      if (n_mode == mode_b) {
        n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM1;
      }
      else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
        if (smaller_mode(b_mode, a_mode)) {
          n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ DBG_OPT_ALGSIM1;
        }
      }
    }
  }
  return n;
}
/**
 * Several optimizations:
 * - no Phi in start block.
 * - remove Id operators that are inputs to Phi
 * - fold Phi-nodes, iff they have only one predecessor except
 *   themselves.
 */
static ir_node *equivalent_node_Phi(ir_node *n)
{
  int i, n_preds;

  ir_node *oldn = n;
  ir_node *block = NULL;     /* to shut up gcc */
  ir_node *first_val = NULL; /* to shut up gcc */
  ir_node *scnd_val = NULL;  /* to shut up gcc */

  if (!get_opt_normalize()) return n;

  n_preds = get_Phi_n_preds(n);

  block = get_nodes_block(n);
  /* @@@ fliegt 'raus, sollte aber doch immer wahr sein!!!
     assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
  if ((is_Bad(block)) ||                         /* Control dead */
      (block == current_ir_graph->start_block))  /* There should be no Phi nodes */
    return new_Bad();                            /* in the Start Block. */

  if (n_preds == 0) return n;  /* Phi of dead Region without predecessors. */

  /* first we test for a special case: */
  /* Confirm is a special node fixing additional information for a
     value that is known at a certain point.  This is useful for
     dataflow analysis. */
  if (n_preds == 2) {
    ir_node *a = get_Phi_pred(n, 0);
    ir_node *b = get_Phi_pred(n, 1);
    if (   (get_irn_op(a) == op_Confirm)
        && (get_irn_op(b) == op_Confirm)
        && (follow_Id (get_irn_n(a, 0)) == follow_Id (get_irn_n(b, 0)))
        && (get_irn_n(a, 1) == get_irn_n (b, 1))
        && (a->data.num == (~b->data.num & irpn_True) )) {
      return get_irn_n(a, 0);
    }
  }

  /* If the Block has a Bad pred, we also have one. */
  for (i = 0; i < n_preds; ++i)
    if (is_Bad (get_Block_cfgpred(block, i)))
      set_Phi_pred(n, i, new_Bad());

  /* Find first non-self-referencing input */
  for (i = 0; i < n_preds; ++i) {
    first_val = get_Phi_pred(n, i);
    if (   (first_val != n)                   /* not self pointer */
        && (get_irn_op(first_val) != op_Bad)
       ) {        /* value not dead */
      break;      /* then found first value. */
    }
  }

  /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
  if (i >= n_preds) { return new_Bad(); }

  scnd_val = NULL;

  /* follow_Id () for rest of inputs, determine if any of these
     are non-self-referencing */
  while (++i < n_preds) {
    scnd_val = get_Phi_pred(n, i);
    if (   (scnd_val != n)
        && (scnd_val != first_val)
        && (get_irn_op(scnd_val) != op_Bad)
       ) {
      break;
    }
  }

  /* Fold, if no multiple distinct non-self-referencing inputs */
  if (i >= n_preds) {
    n = first_val;                                        DBG_OPT_PHI;
  } else {
    /* skip the remaining Ids (done in get_Phi_pred). */
    /* superfluous, since we walk all to propagate Block's Bads.
       while (++i < n_preds) get_Phi_pred(n, i); */
  }

  return n;
}
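
/* Example of the Phi folding above (a sketch): a loop header Phi of the
   form  x' = Phi(x, x')  has one self-reference and exactly one distinct
   live input.  The first loop stops at first_val = x, the second loop
   finds no further distinct value, and the Phi folds to x. */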
static ir_node *equivalent_node_Load(ir_node *n)
{
#if 0  /* Is an illegal transformation: different nodes can
          represent the same pointer value!! */
  ir_node *a = skip_Proj(get_Load_mem(n));
  ir_node *b = get_Load_ptr(n);

  if (get_irn_op(a) == op_Store) {
    if ( different_identity (b, get_Store_ptr(a))) {
      /* load and store use different pointers, therefore load
         needs not take store's memory but the state before. */
      set_Load_mem (n, get_Store_mem(a));
    } else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
    }
  }
#endif
  return n;
}
static ir_node *equivalent_node_Store(ir_node *n)
{
  ir_node *oldn = n;

  /* remove unnecessary store. */
  ir_node *a = skip_Proj(get_Store_mem(n));
  ir_node *b = get_Store_ptr(n);
  ir_node *c = skip_Proj(get_Store_value(n));

  if (get_irn_op(a) == op_Store
      && get_Store_ptr(a) == b
      && skip_Proj(get_Store_value(a)) == c) {
    /* We have twice exactly the same store -- a write after write. */
    n = a;                                                DBG_OPT_WAW;
  } else if (get_irn_op(c) == op_Load
             && (a == c || skip_Proj(get_Load_mem(c)) == a)
             && get_Load_ptr(c) == b) {
    /* We just loaded the value from the same memory, i.e., the store
       doesn't change the memory -- a write after read. */
    a = get_Store_mem(n);
    turn_into_tuple(n, 2);
    set_Tuple_pred(n, pn_Store_M,        a);
    set_Tuple_pred(n, pn_Store_X_except, new_Bad());      DBG_OPT_WAR;
  }
  return n;
}
static ir_node *equivalent_node_Proj(ir_node *n)
{
  ir_node *oldn = n;

  ir_node *a = get_Proj_pred(n);

  if ( get_irn_op(a) == op_Tuple) {
    /* Remove the Tuple/Proj combination. */
    if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {  /* valid indices are 0..n_preds-1 */
      n = get_Tuple_pred(a, get_Proj_proj(n));            DBG_OPT_TUPLE;
    } else {
      assert(0); /* This should not happen! */
      n = new_Bad();
    }
  } else if (get_irn_mode(n) == mode_X &&
             is_Bad(get_nodes_Block(n))) {
    /* Remove dead control flow -- early gigo. */
    n = new_Bad();
  }
  return n;
}
static ir_node *equivalent_node_Id(ir_node *n)
{
  ir_node *oldn = n;

  n = follow_Id (n);                                      DBG_OPT_ID;
  return n;
}

/*
  case iro_Mod, Quot, DivMod:
    DivMod allocates new nodes --> it's treated in transform_node.
    What about Quot, DivMod?
*/
/**
 * equivalent_node() returns a node equivalent to input n. It skips all nodes that
 * perform no actual computation, as, e.g., the Id nodes.  It does not create
 * new nodes.  It is therefore safe to free n if the node returned is not n.
 * If a node returns a Tuple we can not just skip it.  If the size of the
 * in array fits, we transform n into a tuple (e.g., Div).
 */
ir_node *
equivalent_node (ir_node *n)
{
  if (n->op->equivalent_node)
    return n->op->equivalent_node(n);
  return n;
}
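
/* Example use (a sketch): since equivalent_node() never allocates, a
   caller may reclaim a node that turned out to be redundant:

     e = equivalent_node(add);    (add is Add(x, Const 0), e is x)
     if (e != add)
       obstack_free(current_ir_graph->obst, add);

   This is exactly the pattern used in optimize_node() below. */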
/**
 * set the default equivalent node operation
 */
static ir_op *firm_set_default_equivalent_node(ir_op *op)
{
#define CASE(a)                                 \
  case iro_##a:                                 \
    op->equivalent_node = equivalent_node_##a;  \
    break

  switch (op->code) {
  CASE(Block); CASE(Jmp);  CASE(Cond);  CASE(Or);   CASE(Add);
  CASE(Eor);   CASE(Sub);  CASE(Shl);   CASE(Shr);  CASE(Shrs);
  CASE(Rot);   CASE(Not);  CASE(Minus); CASE(Mul);  CASE(Div);
  CASE(And);   CASE(Conv); CASE(Phi);   CASE(Load); CASE(Store);
  CASE(Proj);  CASE(Id);
  default:
    op->equivalent_node = NULL;
  }

  return op;
#undef CASE
}
/**
 * Do node specific optimizations of a node's predecessors.
 */
static void
optimize_preds(ir_node *n) {
  ir_node *a = NULL, *b = NULL;

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  switch (get_irn_opcode(n)) {

  case iro_Cmp:
    /* We don't want Cast as input to Cmp. */
    if (get_irn_op(a) == op_Cast) {
      a = get_Cast_op(a);
      set_Cmp_left(n, a);
    }
    if (get_irn_op(b) == op_Cast) {
      b = get_Cast_op(b);
      set_Cmp_right(n, b);
    }
    break;

  default: break;
  } /* end switch */
}
static ir_node *transform_node_Div(ir_node *n)
{
  tarval *tv = computed_value(n);
  ir_node *b = get_Div_right(n);
  tarval *tb = computed_value(b);

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tv != tarval_bad) {
    /* Turn Div into a tuple (mem, bad, value) */
    ir_node *mem = get_Div_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());
    set_Tuple_pred(n, pn_Div_res, new_Const(get_tarval_mode(tv), tv));
  }
  else if (tb != tarval_bad && tarval_classify(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
    ir_node *div, *proj;
    ir_node *a = get_Div_left(n);
    ir_node *mem = get_Div_mem(n);
    int rem = get_optimize();

    /* Build a new Div that cannot cause an exception; turn the
       optimizations off while constructing it, so it is not folded away. */
    set_optimize(0);
    {
      div = new_rd_Div(get_irn_dbg_info(n), current_ir_graph,
        get_nodes_Block(n), get_irg_initial_mem(current_ir_graph), a, b);

      proj = new_r_Proj(current_ir_graph, get_nodes_Block(n), div, get_irn_mode(a), pn_Div_res);
    }
    set_optimize(rem);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Div_M, mem);
    set_Tuple_pred(n, pn_Div_X_except, new_Bad());
    set_Tuple_pred(n, pn_Div_res, proj);
  }
  return n;
}
static ir_node *transform_node_Mod(ir_node *n)
{
  tarval *tv = computed_value(n);
  ir_node *b = get_Mod_right(n);
  tarval *tb = computed_value(b);

  /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause an exception */

  if (tv != tarval_bad) {
    /* Turn Mod into a tuple (mem, bad, value) */
    ir_node *mem = get_Mod_mem(n);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Mod_M, mem);
    set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
    set_Tuple_pred(n, pn_Mod_res, new_Const(get_tarval_mode(tv), tv));
  }
  else if (tb != tarval_bad && tarval_classify(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
    ir_node *mod, *proj;
    ir_node *a = get_Mod_left(n);
    ir_node *mem = get_Mod_mem(n);
    int rem = get_optimize();

    /* Build a new Mod that cannot cause an exception; turn the
       optimizations off while constructing it, so it is not folded away. */
    set_optimize(0);
    {
      mod = new_rd_Mod(get_irn_dbg_info(n), current_ir_graph,
        get_nodes_Block(n), get_irg_initial_mem(current_ir_graph), a, b);

      proj = new_r_Proj(current_ir_graph, get_nodes_Block(n), mod, get_irn_mode(a), pn_Mod_res);
    }
    set_optimize(rem);

    turn_into_tuple(n, 3);
    set_Tuple_pred(n, pn_Mod_M, mem);
    set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
    set_Tuple_pred(n, pn_Mod_res, proj);
  }
  return n;
}
static ir_node *transform_node_DivMod(ir_node *n)
{
  int evaluated = 0;

  ir_node *a = get_DivMod_left(n);
  ir_node *b = get_DivMod_right(n);
  ir_mode *mode = get_irn_mode(a);

  tarval *ta = value_of(a);
  tarval *tb = value_of(b);

  if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
    return n;

  /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */

  if (tb != tarval_bad) {
    if (tb == get_mode_one(get_tarval_mode(tb))) {
      /* DivMod by 1: div result is a, mod result is 0 */
      b = new_Const (mode, get_mode_null(mode));
      evaluated = 1;
    } else if (ta != tarval_bad) {
      tarval *resa, *resb;
      resa = tarval_div (ta, tb);
      if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                           Jmp for X result!? */
      resb = tarval_mod (ta, tb);
      if (resb == tarval_bad) return n; /* Causes exception! */
      a = new_Const (mode, resa);
      b = new_Const (mode, resb);
      evaluated = 1;
    }
  } else if (ta == get_mode_null(mode)) {
    /* 0 / non-Const: both results are 0; a already is the Const 0 */
    b = a;
    evaluated = 1;
  }

  if (evaluated) { /* replace by tuple */
    ir_node *mem = get_DivMod_mem(n);
    turn_into_tuple(n, 4);
    set_Tuple_pred(n, pn_DivMod_M,        mem);
    set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
    set_Tuple_pred(n, pn_DivMod_res_div,  a);
    set_Tuple_pred(n, pn_DivMod_res_mod,  b);
    assert(get_nodes_Block(n));
  }

  return n;
}
static ir_node *transform_node_Cond(ir_node *n)
{
  /* Replace the Cond by a Jmp if it branches on a constant
     condition. */
  ir_node *jmp;
  ir_node *a = get_Cond_selector(n);
  tarval *ta = value_of(a);

  if ((ta != tarval_bad) &&
      (get_irn_mode(a) == mode_b) &&
      (get_opt_unreachable_code())) {
    /* It's a boolean Cond, branching on a boolean constant.
       Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
    jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
    turn_into_tuple(n, 2);
    if (ta == tarval_b_true) {
      set_Tuple_pred(n, pn_Cond_false, new_Bad());
      set_Tuple_pred(n, pn_Cond_true, jmp);
    } else {
      set_Tuple_pred(n, pn_Cond_false, jmp);
      set_Tuple_pred(n, pn_Cond_true, new_Bad());
    }
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
  } else if ((ta != tarval_bad) &&
             (get_irn_mode(a) == mode_Iu) &&
             (get_Cond_kind(n) == dense) &&
             (get_opt_unreachable_code())) {
    /* I don't want to allow Tuples smaller than the biggest Proj.
       Also this tuple might get really big...
       I generate the Jmp here, and remember it in link.  Link is used
       when optimizing Proj. */
    set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
    /* We might generate an endless loop, so keep it alive. */
    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
  } else if ((get_irn_op(a) == op_Eor)
             && (get_irn_mode(a) == mode_b)
             && (tarval_classify(computed_value(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
    /* The Eor is a negate.  Generate a new Cond without the negate,
       simulate the negate by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                               get_Eor_left(a)));
  } else if ((get_irn_op(a) == op_Not)
             && (get_irn_mode(a) == mode_b)) {
    /* A Not before the Cond.  Generate a new Cond without the Not,
       simulate the Not by exchanging the results. */
    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                               get_Not_op(a)));
  }
  return n;
}
static ir_node *transform_node_Eor(ir_node *n)
{
  ir_node *a = get_Eor_left(n);
  ir_node *b = get_Eor_right(n);

  if ((get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
    /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));
  else if ((get_irn_mode(n) == mode_b)
           && (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE))
    /* The Eor is a Not. Replace it by a Not. */
    /* ????!!!Extend to bitfield 1111111. */
    n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);

  return n;
}
static ir_node *transform_node_Not(ir_node *n)
{
  ir_node *a = get_Not_op(n);

  if (   (get_irn_mode(n) == mode_b)
      && (get_irn_op(a) == op_Proj)
      && (get_irn_mode(a) == mode_b)
      && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
    /* We negate a Cmp. The Cmp has the negated result anyways! */
    n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                   mode_b, get_negated_pnc(get_Proj_proj(a)));

  return n;
}
/**
 * Tries several [inplace] [optimizing] transformations and returns an
 * equivalent node.  The difference to equivalent_node() is that these
 * transformations _do_ generate new nodes, and thus the old node must
 * not be freed even if the equivalent node isn't the old one.
 */
static ir_node *transform_node(ir_node *n)
{
  if (n->op->transform_node)
    n = n->op->transform_node(n);
  return n;
}
/**
 * set the default transform node operation
 */
static ir_op *firm_set_default_transform_node(ir_op *op)
{
#define CASE(a)                               \
  case iro_##a:                               \
    op->transform_node = transform_node_##a;  \
    break

  switch (op->code) {
  CASE(Div);  CASE(Mod);  CASE(DivMod);
  CASE(Cond); CASE(Eor);  CASE(Not);
  default:
    op->transform_node = NULL;
  }

  return op;
#undef CASE
}
/* **************** Common Subexpression Elimination **************** */

/** The size of the hash table used, should estimate the number of nodes
    in a graph. */
#define N_IR_NODES 512
static int node_cmp_attr_Const(ir_node *a, ir_node *b)
{
  return (get_Const_tarval(a) != get_Const_tarval(b))
      || (get_Const_type(a) != get_Const_type(b));
}

static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
{
  return get_irn_proj_attr (a) != get_irn_proj_attr (b);
}

static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
{
  return get_Filter_proj(a) != get_Filter_proj(b);
}

static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
{
  return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
      || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
}

static int node_cmp_attr_Free(ir_node *a, ir_node *b)
{
  return (get_irn_free_attr(a) != get_irn_free_attr(b));
}

static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
{
  return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
      || (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
}

static int node_cmp_attr_Call(ir_node *a, ir_node *b)
{
  return (get_irn_call_attr(a) != get_irn_call_attr(b));
}

static int node_cmp_attr_FuncCall(ir_node *a, ir_node *b)
{
  return (get_irn_funccall_attr(a) != get_irn_funccall_attr(b));
}

static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
{
  return (get_irn_sel_attr(a).ent->kind    != get_irn_sel_attr(b).ent->kind)
      || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
      || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
      || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
      || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);
}

static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
{
  return get_irn_phi_attr (a) != get_irn_phi_attr (b);
}

static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
{
  return get_Cast_type(a) != get_Cast_type(b);
}
/**
 * set the default node attribute compare operation
 */
static ir_op *firm_set_default_node_cmp_attr(ir_op *op)
{
#define CASE(a)                             \
  case iro_##a:                             \
    op->node_cmp_attr = node_cmp_attr_##a;  \
    break

  switch (op->code) {
  CASE(Const);    CASE(Proj); CASE(Filter); CASE(Alloc);
  CASE(Free);     CASE(SymConst); CASE(Call);
  CASE(FuncCall); CASE(Sel);  CASE(Phi);    CASE(Cast);
  default:
    op->node_cmp_attr = NULL;
  }

  return op;
#undef CASE
}
/**
 * Compare function for two nodes in the hash table.  Gets two
 * nodes as parameters.  Returns 0 if the nodes are a cse.
 */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i, irn_arity_a;

  a = (void *)elt;
  b = (void *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are equal */
  irn_arity_a = get_irn_arity (a);
  if (irn_arity_a != get_irn_arity(b))
    return 1;

  /* for block-local cse and pinned nodes: */
  if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
    if (get_irn_n(a, -1) != get_irn_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < irn_arity_a; i++)
    if (get_irn_n(a, i) != get_irn_n(b, i))
      return 1;

  /*
   * here, we already know that the nodes are identical except their
   * attributes
   */
  if (a->op->node_cmp_attr)
    return a->op->node_cmp_attr(a, b);

  return 0;
}
/**
 * Calculate a hash value of a node.
 */
unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i, irn_arity;

  /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
  h = irn_arity = get_irn_arity(node);

  /* consider all in nodes... except the block. */
  for (i = 0; i < irn_arity; i++) {
    h = 9*h + (unsigned long)get_irn_n(node, i);
  }

  /* ...mode,... */
  h = 9*h + (unsigned long) get_irn_mode (node);
  /* ...and code */
  h = 9*h + (unsigned long) get_irn_op (node);

  return h;
}
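
/* An equivalent standalone sketch of the hash scheme above (assumed
   helper for illustration only, not part of this file's interface): */
#if 0
static unsigned example_node_hash(void **ins, int arity, void *mode, void *op)
{
  unsigned h = arity;
  int i;

  for (i = 0; i < arity; i++)
    h = 9*h + (unsigned long)ins[i];  /* fold in each data predecessor */
  h = 9*h + (unsigned long)mode;      /* ...then the mode... */
  h = 9*h + (unsigned long)op;        /* ...and finally the opcode */
  return h;
}
#endif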
pset *
new_identities (void)
{
  return new_pset (vt_cmp, N_IR_NODES);
}

void
del_identities (pset *value_table)
{
  del_pset (value_table);
}
/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table.
 */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  /* TODO: use a generic commutative attribute */
  if (get_opt_reassociation()) {
    if (is_op_commutative(get_irn_op(n))) {
      /* for commutative operators perform  a OP b == b OP a */
      if (get_binop_left(n) > get_binop_right(n)) {
        ir_node *h = get_binop_left(n);
        set_binop_left(n, get_binop_right(n));
        set_binop_right(n, h);
      }
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
}
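
/* Why the operand swap above helps (a sketch): with the operands of a
   commutative node ordered by pointer value, Add(a, b) and Add(b, a) are
   stored identically, so ir_node_hash() and vt_cmp() treat them as the
   same node and the pset lookup finds the earlier of the two. */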
/**
 * During construction we set the pinned flag in the graph right when the
 * optimization is performed.  The flag turning on procedure global cse could
 * be changed between two allocations.  This way we are safe.
 */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, floats);
  return n;
}
/**
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table, enters it in the table
 * if it isn't there yet.
 */
static ir_node *
identify_remember (pset *value_table, ir_node *node)
{
  ir_node *o = NULL;

  if (!value_table) return node;

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, node, ir_node_hash (node));

  if (o == node) return node;

  return o;
}
void
add_identities (pset *value_table, ir_node *node) {
  identify_remember (value_table, node);
}
/**
 * garbage in, garbage out. If a node has a dead input, i.e., the
 * Bad node is input to the node, return the Bad node.
 */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i, irn_arity;
  ir_op* op = get_irn_op(node);

  /* remove garbage blocks by looking at control flow that leaves the block
     and replacing the control flow by Bad. */
  if (get_irn_mode(node) == mode_X) {
    ir_node *block = get_nodes_block(node);
    if (op == op_End) return node;     /* Don't optimize End, may have Bads. */
    if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
      irn_arity = get_irn_arity(block);
      for (i = 0; i < irn_arity; i++) {
        if (!is_Bad(get_irn_n(block, i))) break;
      }
      if (i == irn_arity) return new_Bad();
    }
  }

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if ( op != op_Block && op != op_Phi && op != op_Tuple) {
    irn_arity = get_irn_arity(node);
    for (i = -1; i < irn_arity; i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }
#if 0
  /* With this code we violate the agreement that local_optimize
     only leaves Bads in Block, Phi and Tuple nodes. */
  /* If Block has only Bads as predecessors it's garbage. */
  /* If Phi has only Bads as predecessors it's garbage. */
  if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
    irn_arity = get_irn_arity(node);
    for (i = 0; i < irn_arity; i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == irn_arity) node = new_Bad();
  }
#endif
  return node;
}
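
/* Example (a sketch): if unreachable code elimination turned one input of
   an Add into Bad, gigo() replaces the whole Add by Bad.  Only Block, Phi
   and Tuple nodes may keep single Bad inputs, because they merge values
   from several control flow paths of which some may be dead. */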
/**
 * These optimizations deallocate nodes from the obstack.
 * It can only be called if it is guaranteed that no other nodes
 * reference this one, i.e., right after construction of a node.
 */
ir_node *
optimize_node (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /*
         * we MUST copy the node here temporarily, because it's still needed
         * for DBG_OPT_ALGSIM0
         */
        int node_size = offsetof(ir_node, attr) + n->op->attr_size;
        ir_node *x = alloca(node_size);

        memcpy(x, n, node_size);
        oldn = x;

        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        n = new_Const (get_tarval_mode (tv), tv);
                                                          DBG_OPT_ALGSIM0;
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||
      (iro == iro_Proj) ||
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of the node's predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != oldn) {
    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, oldn);

    return n;
  }

  /* Some more constant expression evaluation that does not allow to
     free the node. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we have a legal, useful node. Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}
/**
 * These optimizations never deallocate nodes.  This can cause dead
 * nodes lying on the obstack.  Remove these by a dead node elimination,
 * i.e., a copying garbage collection.
 */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *oldn = n;
  opcode iro = get_irn_opcode(n);

  if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;

  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible.  Why? */
    return n;
  }

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (iro != iro_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);
                                                          DBG_OPT_ALGSIM0;
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  /*if (get_opt_constant_folding()) */
  if (get_opt_constant_folding() ||
      (iro == iro_Phi)  ||   /* always optimize these nodes. */
      (iro == iro_Id)   ||   /* ... */
      (iro == iro_Proj) ||   /* ... */
      (iro == iro_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);

  optimize_preds(n);         /* do node specific optimizations of the node's predecessors. */

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }

  /* Some more constant expression evaluation. */
  iro = get_irn_opcode(n);
  if (get_opt_constant_folding() ||
      (iro == iro_Cond) ||
      (iro == iro_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node. Enter it in hash table for cse.
     Blocks should be unique anyways.  (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/**
 * Wrapper for external use, set proper status bits after optimization.
 */
ir_node *
optimize_in_place (ir_node *n)
{
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
}
/**
 * set the default ir op operations
 */
ir_op *firm_set_default_operations(ir_op *op)
{
  op = firm_set_default_computed_value(op);
  op = firm_set_default_equivalent_node(op);
  op = firm_set_default_transform_node(op);
  op = firm_set_default_node_cmp_attr(op);

  return op;
}
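
/* A sketch of the intended use (illustrative only; the new_ir_op
   constructor arguments are elided): a freshly created opcode gets all
   four default callbacks in one call, so the optimizers above can
   dispatch through the ir_op. */
#if 0
{
  ir_op *op = new_ir_op(iro_Add, "Add", /* ... */);
  firm_set_default_operations(op);
}
#endif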