 * File name:   ir/ir/iropt.c
 * Purpose:     iropt --- optimizations intertwined with IR construction.
 * Author:      Christian Schaefer
 * Modified by: Goetz Lindenmaier
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * License:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/* Trivial INLINEable routine for copy propagation.
   Follows Ids, needed to optimize INLINEd code. */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}

static INLINE tarval *
value_of (ir_node *n)
{
  if ((n != NULL) && (get_irn_op(n) == op_Const))
    return get_Const_tarval(n); /* might return tarval_bad */
  return tarval_bad;
}
/* If n can be computed, return the value, else tarval_bad. Performs
   constant folding. GL: Only if n is an arithmetic operator? */
static tarval *
computed_value (ir_node *n)
{
  tarval *res = tarval_bad;

  ir_node *a = NULL, *b = NULL;  /* initialized to shut up gcc */
  /* initialized to uniformly filter invalid constants */
  tarval *ta = tarval_bad, *tb = tarval_bad;

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  /* If the operands are constants, get their target values; otherwise
     they stay tarval_bad. (a and b may be NULL if we treat a node that
     is not a computation.) */
  ta = value_of (a);
  tb = value_of (b);

  /* Perform the constant evaluation / computation. */
  switch (get_irn_opcode(n)) {
  case iro_Const:
    res = get_Const_tarval(n);
    break;
  case iro_SymConst:
    if ((get_SymConst_kind(n) == size) &&
        (get_type_state(get_SymConst_type(n)) == layout_fixed))
      res = new_tarval_from_long (get_type_size(get_SymConst_type(n)), mode_Is);
    break;
  case iro_Add:
    if ((ta != tarval_bad) && (tb != tarval_bad)
        && (get_irn_mode(a) == get_irn_mode(b))
        && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
      res = tarval_add (ta, tb);
    }
    break;
  case iro_Sub:
    if ((ta != tarval_bad) && (tb != tarval_bad)
        && (get_irn_mode(a) == get_irn_mode(b))
        && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
      res = tarval_sub (ta, tb);
    }
    break;
  case iro_Minus:
    if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
      res = tarval_neg (ta);
    break;
  case iro_Mul:
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      res = tarval_mul (ta, tb);
    } else {
      /* a*0 = 0 or 0*b = 0:
         calls computed_value recursively and returns the 0 with proper
         mode. */
      tarval *v;
      if ( ( ((v = computed_value (a)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )
        || ( ((v = computed_value (b)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )) {
        res = v;
      }
    }
    break;
  case iro_Quot:
    /* This was missing in the original implementation. Why? */
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb))) break;  /* div by zero: return tarval_bad */
      res = tarval_quo(ta, tb);
    }
    break;
  case iro_Div:
    /* This was missing in the original implementation. Why? */
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb))) break;  /* div by zero: return tarval_bad */
      res = tarval_div(ta, tb);
    }
    break;
  case iro_Mod:
    /* This was missing in the original implementation. Why? */
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb))) break;  /* div by zero: return tarval_bad */
      res = tarval_mod(ta, tb);
    }
    break;
  /* for iro_DivMod see iro_Proj */
  case iro_Abs:
    if (ta != tarval_bad)
      res = tarval_abs (ta);
    break;
  case iro_And:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_and (ta, tb);
    } else {
      tarval *v;
      if ( ( ((v = computed_value (a)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )
        || ( ((v = computed_value (b)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )) {
        res = v;
      }
    }
    break;
  case iro_Or:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_or (ta, tb);
    } else {
      tarval *v;
      if ( (tarval_classify ((v = computed_value (a))) == -1)
        || (tarval_classify ((v = computed_value (b))) == -1)) {
        res = v;
      }
    }
    break;
  case iro_Eor:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_eor (ta, tb);
    }
    break;
  case iro_Not:
    if (ta != tarval_bad) {
      res = tarval_not (ta);
    }
    break;
  case iro_Shl:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_shl (ta, tb);
    }
    break;
  case iro_Shr:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_shr (ta, tb);
    }
    break;
  case iro_Shrs:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_shrs (ta, tb);
    }
    break;
  case iro_Rot:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      /* res = tarval_rot (ta, tb); -- not implemented yet */
    }
    break;
  case iro_Conv:
    if (ta != tarval_bad) {
      res = tarval_convert_to (ta, get_irn_mode (n));
    }
    break;

  case iro_Proj:  /* iro_Cmp */
    {
      ir_node *aa, *ab;

      a = get_Proj_pred(n);
      /* Optimize Cmp nodes.
         This performs a first step of unreachable code elimination.
         Proj can not be computed, but folding a Cmp above the Proj here is
         not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
         only one is used.
         There are several cases where we can evaluate a Cmp node:
         1. The nodes compared are both the same. If we compare for
            equal, greater equal, ... this will return true, else it
            will return false. This step relies on cse.
         2. The predecessors of Cmp are target values. We can evaluate
            the Cmp.
         3. The predecessors are Allocs or void* constants. Allocs never
            return NULL, they raise an exception. Therefore we can predict
            the Cmp result. */
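      /* Example for 1.: after cse both operands of Cmp(x, x) are the same
         node, so the Eq Proj folds to true and the Lt Proj to false --
         purely from the Proj number bits tested below. */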
      if (get_irn_op(a) == op_Cmp) {
        aa = get_Cmp_left(a);
        ab = get_Cmp_right(a);
        if (aa == ab) { /* 1.: */
          /* This is a trick with the bits used for encoding the Cmp
             Proj numbers, the following statement is not the same:
             res = new_tarval_from_long ((get_Proj_proj(n) == Eq), mode_b) */
          res = new_tarval_from_long ((get_Proj_proj(n) & Eq), mode_b);
        } else {
          tarval *taa = computed_value (aa);
          tarval *tab = computed_value (ab);
          if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
            /* strange checks... */
            pnc_number flags = tarval_cmp (taa, tab);
            if (flags != False) {
              res = new_tarval_from_long (get_Proj_proj(n) & flags, mode_b);
            }
          } else { /* check for 3.: */
            ir_node *aaa = skip_nop(skip_Proj(aa));
            ir_node *aba = skip_nop(skip_Proj(ab));
            if (   (   (/* aa is ProjP and aaa is Alloc */
                           (get_irn_op(aa) == op_Proj)
                        && (mode_is_reference(get_irn_mode(aa)))
                        && (get_irn_op(aaa) == op_Alloc))
                    && (   (/* ab is constant void */
                               (get_irn_op(ab) == op_Const)
                            && (mode_is_reference(get_irn_mode(ab)))
                            && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                        || (/* ab is other Alloc */
                               (get_irn_op(ab) == op_Proj)
                            && (mode_is_reference(get_irn_mode(ab)))
                            && (get_irn_op(aba) == op_Alloc)
                            && (aaa != aba))))
                || (/* aa is void and aba is Alloc */
                       (get_irn_op(aa) == op_Const)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                    && (get_irn_op(ab) == op_Proj)
                    && (mode_is_reference(get_irn_mode(ab)))
                    && (get_irn_op(aba) == op_Alloc)))
              /* 3.: */
              res = new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
          }
        }
      } else if (get_irn_op(a) == op_DivMod) {
        ta = value_of(get_DivMod_left(a));
        tb = value_of(get_DivMod_right(a));
        if ((ta != tarval_bad) && (tb != tarval_bad)
            && (get_tarval_mode(ta) == get_tarval_mode(tb))) {
          if (tb == get_mode_null(get_tarval_mode(tb))) break;  /* div by zero: return tarval_bad */
          if (get_Proj_proj(n) == 0) /* Div */
            res = tarval_div(ta, tb);
          else /* Mod */
            res = tarval_mod(ta, tb);
        }
      }
    }
    break;

  default:
    break;
  }

  return res;
}
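
/* A minimal usage sketch for computed_value() (illustration only; the
   new_Add() constructor from the construction interface is assumed,
   everything else is used as above): folding Add(Const 2, Const 3)
   yields the tarval 5. */
#if 0
static void computed_value_example (void)
{
  ir_node *l  = new_Const (mode_Is, new_tarval_from_long (2, mode_Is));
  ir_node *r  = new_Const (mode_Is, new_tarval_from_long (3, mode_Is));
  tarval  *tv = computed_value (new_Add (l, r, mode_Is));
  assert (tv == new_tarval_from_long (5, mode_Is)); /* tarvals are unified */
}
#endif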
/* Returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (mode_is_reference(get_irn_mode (a))
          && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;  /* different allocations, hence different locations */
  }
  return 0;
}
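
/* A sketch of the intended use (illustration only; new_Proj() is assumed
   from the construction interface, and the result Proj number of Alloc is
   an assumption of this sketch): the results of two distinct Allocs can
   never alias. */
#if 0
static int example_different_identity (ir_node *alloc1, ir_node *alloc2)
{
  ir_node *p1 = new_Proj (alloc1, mode_P, 2); /* 2: assumed Alloc result pn */
  ir_node *p2 = new_Proj (alloc2, mode_P, 2);
  return (alloc1 != alloc2) && different_identity (p1, p2); /* 1 */
}
#endif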
/* equivalent_node() returns a node equivalent to n. It skips all nodes that
   perform no actual computation, as, e.g., the Id nodes. It does not create
   new nodes. It is therefore safe to free n if the node returned is not n.
   If a node returns a Tuple we can not just skip it. If the size of the
   in array fits, we transform n into a tuple (e.g., Div). */
static ir_node *
equivalent_node (ir_node *n)
{
  int ins;
  ir_node *oldn = n;
  ir_node *a = NULL; /* to shut up gcc */
  ir_node *b = NULL; /* to shut up gcc */
  ir_node *c = NULL; /* to shut up gcc */

  ins = get_irn_arity (n);

  /* get the operands we will work on */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }
  /* skip unnecessary nodes. */
  switch (get_irn_opcode (n)) {
  case iro_Block:
    {
      /* The Block constructor does not call optimize, but mature_block()
         calls the optimization. */
      assert(get_Block_matured(n));

      /* Straightening: a single entry Block following a single exit Block
         can be merged, if it is not the Start block. */
      /* !!! Beware, all Phi-nodes of n must have been optimized away.
         This should be true, as the block is matured before optimize is called.
         But what about Phi-cycles with the Phi0/Id that could not be resolved?
         Remaining Phi nodes are just Ids. */
      if ((get_Block_n_cfgpreds(n) == 1) &&
          (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
          (get_opt_control_flow_straightening())) {
        n = get_nodes_Block(get_Block_cfgpred(n, 0));  DBG_OPT_STG;

      } else if ((get_Block_n_cfgpreds(n) == 2) &&
                 (get_opt_control_flow_weak_simplification())) {
        /* Test whether a Cond jumps twice to this block.
           @@@ We could do this also with two loops finding two preds from several ones. */
        a = get_Block_cfgpred(n, 0);
        b = get_Block_cfgpred(n, 1);
        if ((get_irn_op(a) == op_Proj) &&
            (get_irn_op(b) == op_Proj) &&
            (get_Proj_pred(a) == get_Proj_pred(b)) &&
            (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
            (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
          /* Also a single entry Block following a single exit Block. Phis have
             twice the same operand and will be optimized away. */
          n = get_nodes_Block(a);  DBG_OPT_IFSIM;
        }
      } else if (get_opt_unreachable_code() &&
                 (n != current_ir_graph->start_block) &&
                 (n != current_ir_graph->end_block)) {
        int i;
        /* If all inputs are dead, this block is dead too, except if it is
           the start or end block. This is a step of unreachable code
           elimination. */
        for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
          if (!is_Bad(get_Block_cfgpred(n, i))) break;
        }
        if (i == get_Block_n_cfgpreds(n))
          n = new_Bad();
      }
    }
    break;
  case iro_Jmp:  /* GL: Why not the same for op_Raise?? */
    /* unreachable code elimination */
    if (is_Bad(get_nodes_Block(n)))  n = new_Bad();
    break;

  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node(). */

  /** remove stuff like x+0, x*1, x&true ... constant expression evaluation **/
  case iro_Or:   if (a == b) { n = a; break; }
  case iro_Add:
  case iro_Eor:
    {
      tarval *tv;
      ir_node *on;
      /* After running computed_value() there is only one constant predecessor.
         Find this predecessor's value and remember the other node: */
      if ((tv = computed_value (a)) != tarval_bad) {
        on = b;
      } else if ((tv = computed_value (b)) != tarval_bad) {
        on = a;
      } else break;

      /* If this predecessor's constant value is zero, the operation is
         unnecessary. Remove it: */
      if (tarval_classify (tv) == 0) {
        n = on;  DBG_OPT_ALGSIM1;
      }
    }
    break;
  case iro_Sub:
  case iro_Shl:
  case iro_Shr:
  case iro_Shrs:
  case iro_Rot:
    /* these operations are not commutative. Test only one predecessor. */
    if (tarval_classify (computed_value (b)) == 0) {
      n = a;  DBG_OPT_ALGSIM1;
      /* Test if b > #bits of a ==> return 0 / divide b by #bits
         --> transform node? */
    }
    break;

  case iro_Not:   /* NotNot x == x */
  case iro_Minus: /* --x == x */  /* ??? Is this possible or can --x raise an
                                     out of bounds exception if min != max? */
    if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
      n = get_unop_op(get_unop_op(n));  DBG_OPT_ALGSIM2;
    }
    break;

  case iro_Mul:
    /* Mul is commutative and has again another neutral element. */
    if (tarval_classify (computed_value (a)) == 1) {
      n = b;  DBG_OPT_ALGSIM1;
    } else if (tarval_classify (computed_value (b)) == 1) {
      n = a;  DBG_OPT_ALGSIM1;
    }
    break;

  case iro_Div:
    /* Div is not commutative. */
    if (tarval_classify (computed_value (b)) == 1) { /* div(x, 1) == x */
      /* Turn Div into a tuple (mem, bad, a) */
      ir_node *mem = get_Div_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, a);
    }
    break;

  /* case iro_Mod, iro_Quot, iro_DivMod:
     DivMod allocates new nodes --> it's treated in transform_node().
     What about Quot, DivMod? */

  case iro_And:
    if (a == b) {
      n = a;  /* And has its own neutral element */
    } else if (tarval_classify (computed_value (a)) == -1) {
      n = b;
    } else if (tarval_classify (computed_value (b)) == -1) {
      n = a;
    }
    if (n != oldn)  DBG_OPT_ALGSIM1;
    break;

  case iro_Conv:
    if (get_irn_mode(n) == get_irn_mode(a)) { /* No Conv necessary */
      n = a;  DBG_OPT_ALGSIM3;
    } else if (get_irn_mode(n) == mode_b) {
      if (get_irn_op(a) == op_Conv &&
          get_irn_mode (get_Conv_op(a)) == mode_b) {
        n = get_Conv_op(a);  /* Convb(Conv*(xxxb(...))) == xxxb(...) */  DBG_OPT_ALGSIM2;
      }
    }
    break;
  case iro_Phi:
    {
      /* Several optimizations:
         - no Phi in start block.
         - remove Id operators that are inputs to Phi
         - fold Phi-nodes, iff they have only one predecessor except
           themselves. */
      int i, n_preds;

      ir_node *block = NULL;     /* to shut up gcc */
      ir_node *first_val = NULL; /* to shut up gcc */
      ir_node *scnd_val = NULL;  /* to shut up gcc */

      if (!get_opt_normalize()) return n;

      n_preds = get_Phi_n_preds(n);

      block = get_nodes_Block(n);
      /* @@@ taken out, but it should always be true anyway!!!
         assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
      if ((is_Bad(block)) ||                        /* Control dead */
          (block == current_ir_graph->start_block)) /* There should be no Phi nodes */
        return new_Bad();                           /* in the Start Block. */

      if (n_preds == 0) break;  /* Phi of dead Region without predecessors. */

      /* first we test for a special case: */
      /* Confirm is a special node fixing additional information for a
         value that is known at a certain point. This is useful for
         dataflow analysis. */
      if (n_preds == 2) {
        ir_node *a = follow_Id (get_Phi_pred(n, 0));
        ir_node *b = follow_Id (get_Phi_pred(n, 1));
        if (   (get_irn_op(a) == op_Confirm)
            && (get_irn_op(b) == op_Confirm)
            && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
            && (get_irn_n(a, 1) == get_irn_n (b, 1))
            && (a->data.num == (~b->data.num & irpn_True))) {
          n = follow_Id (get_irn_n(a, 0));
          break;
        }
      }

      /* Find first non-self-referencing input */
      for (i = 0; i < n_preds; ++i) {
        first_val = follow_Id(get_Phi_pred(n, i));
        /* skip Id nodes */
        set_Phi_pred(n, i, first_val);
        if (   (first_val != n)                           /* not self pointer */
            && (get_irn_op(first_val) != op_Bad)          /* value not dead */
            && !(is_Bad (get_Block_cfgpred(block, i)))) { /* not dead control flow */
          break;  /* then found first value. */
        }
      }

      /* A totally Bad or self-referencing Phi (we didn't break out of the loop above) */
      if (i >= n_preds) { n = new_Bad(); break; }

      /* follow_Id () for rest of inputs, determine if any of these
         are non-self-referencing */
      while (++i < n_preds) {
        scnd_val = follow_Id(get_Phi_pred(n, i));
        /* skip Id nodes */
        set_Phi_pred(n, i, scnd_val);
        if (   (scnd_val != n)
            && (scnd_val != first_val)
            && (get_irn_op(scnd_val) != op_Bad)
            && !(is_Bad (get_Block_cfgpred(block, i)))) {
          break;
        }
      }

      /* Fold, if no multiple distinct non-self-referencing inputs */
      if (i >= n_preds) {
        n = first_val;  DBG_OPT_PHI;
      } else {
        /* skip the remaining Ids. */
        while (++i < n_preds) {
          set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
        }
      }
    }
    break;
  case iro_Load:
    {
#if 0  /* Is an illegal transformation: different nodes can
          represent the same pointer value!! */
      a = skip_Proj(get_Load_mem(n));
      b = get_Load_ptr(n);

      if (get_irn_op(a) == op_Store) {
        if (different_identity (b, get_Store_ptr(a))) {
          /* load and store use different pointers, therefore load
             needs not take store's memory but the state before. */
          set_Load_mem (n, get_Store_mem(a));
        } else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
        }
      }
#endif
    }
    break;

  case iro_Store:
    /* remove unnecessary store. */
    {
      a = skip_Proj(get_Store_mem(n));
      b = get_Store_ptr(n);
      c = skip_Proj(get_Store_value(n));

      if (get_irn_op(a) == op_Store
          && get_Store_ptr(a) == b
          && skip_Proj(get_Store_value(a)) == c) {
        /* We have twice exactly the same store -- a write after write. */
        n = a;
      } else if (get_irn_op(c) == op_Load
                 && (a == c || skip_Proj(get_Load_mem(c)) == a)
                 && get_Load_ptr(c) == b) {
        /* We just loaded the value from the same memory, i.e., the store
           doesn't change the memory -- a write after read. */
        a = get_Store_mem(n);
        turn_into_tuple(n, 2);
        set_Tuple_pred(n, 0, a);
        set_Tuple_pred(n, 1, new_Bad());  DBG_OPT_WAR;
      }
    }
    break;

  case iro_Proj:
    {
      a = get_Proj_pred(n);

      if (get_irn_op(a) == op_Tuple) {
        /* Remove the Tuple/Proj combination. */
        if (get_Proj_proj(n) < get_Tuple_n_preds(a)) {
          n = get_Tuple_pred(a, get_Proj_proj(n));  DBG_OPT_TUPLE;
        } else {
          assert(0);  /* This should not happen! */
          n = new_Bad();
        }
      } else if (get_irn_mode(n) == mode_X &&
                 is_Bad(get_nodes_Block(n))) {
        /* Remove dead control flow -- early gigo. */
        n = new_Bad();
      }
    }
    break;

  case iro_Id:
    n = follow_Id (n);  DBG_OPT_ID;
    break;

  default: break;
  }

  return n;
} /* end equivalent_node() */
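
/* Sketch of the algebraic simplification above (illustration only;
   new_Add() is assumed from the construction interface, and x is assumed
   to have mode_Is): an Add of x with the constant 0 is equivalent to x
   itself, no new node is created. */
#if 0
static ir_node *example_add_zero (ir_node *x)
{
  ir_node *zero = new_Const (mode_Is, get_mode_null (mode_Is));
  return equivalent_node (new_Add (x, zero, mode_Is)); /* == x */
}
#endif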
/* Tries several [inplace] [optimizing] transformations and returns an
   equivalent node. The difference to equivalent_node() is that these
   transformations _do_ generate new nodes, and thus the old node must
   not be freed even if the equivalent node isn't the old one. */
static ir_node *
transform_node (ir_node *n)
{
  ir_node *a = NULL, *b;
  tarval *ta, *tb;
  switch (get_irn_opcode(n)) {
  case iro_Div:
    {
      ta = computed_value(n);
      if (ta != tarval_bad) {
        /* Turn Div into a tuple (mem, bad, value) */
        ir_node *mem = get_Div_mem(n);
        turn_into_tuple(n, 3);
        set_Tuple_pred(n, 0, mem);
        set_Tuple_pred(n, 1, new_Bad());
        set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
      }
    }
    break;

  case iro_Mod:
    {
      ta = computed_value(n);
      if (ta != tarval_bad) {
        /* Turn Mod into a tuple (mem, bad, value) */
        ir_node *mem = get_Mod_mem(n);
        turn_into_tuple(n, 3);
        set_Tuple_pred(n, 0, mem);
        set_Tuple_pred(n, 1, new_Bad());
        set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
      }
    }
    break;

  case iro_DivMod:
    {
      int evaluated = 0;
      ir_mode *mode;

      a = get_DivMod_left(n);
      b = get_DivMod_right(n);
      mode = get_irn_mode(a);

      if (!(mode_is_int(get_irn_mode(a)) &&
            mode_is_int(get_irn_mode(b))))
        break;

      if (a == b) {
        a = new_Const (mode, get_mode_one(mode));
        b = new_Const (mode, get_mode_null(mode));
        evaluated = 1;
      } else {
        ta = value_of(a);
        tb = value_of(b);

        if (tb != tarval_bad) {
          if (tb == get_mode_one(get_tarval_mode(tb))) {
            b = new_Const (mode, get_mode_null(mode));
            evaluated = 1;
          } else if (ta != tarval_bad) {
            tarval *resa, *resb;
            resa = tarval_div (ta, tb);
            if (resa == tarval_bad) break; /* Causes exception!!! Model by replacing through
                                              Jmp for X result!? */
            resb = tarval_mod (ta, tb);
            if (resb == tarval_bad) break; /* Causes exception! */
            a = new_Const (mode, resa);
            b = new_Const (mode, resb);
            evaluated = 1;
          }
        } else if (ta == get_mode_null(get_tarval_mode(ta))) {
          /* ta is zero: both the Div and the Mod result are zero. */
          b = a;
          evaluated = 1;
        }
      }
      if (evaluated) { /* replace by tuple */
        ir_node *mem = get_DivMod_mem(n);
        turn_into_tuple(n, 4);
        set_Tuple_pred(n, 0, mem);
        set_Tuple_pred(n, 1, new_Bad());  /* no exception */
        set_Tuple_pred(n, 2, a);
        set_Tuple_pred(n, 3, b);
        assert(get_nodes_Block(n));
      }
    }
    break;
  case iro_Cond:
    {
      /* Replace the Cond by a Jmp if it branches on a constant
         condition. */
      ir_node *jmp;
      a = get_Cond_selector(n);
      ta = value_of(a);

      if ((ta != tarval_bad) &&
          (get_irn_mode(a) == mode_b) &&
          (get_opt_unreachable_code())) {
        /* It's a boolean Cond, branching on a boolean constant.
           Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
        jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
        turn_into_tuple(n, 2);
        if (ta == tarval_b_true) {
          set_Tuple_pred(n, 0, new_Bad());
          set_Tuple_pred(n, 1, jmp);
        } else {
          set_Tuple_pred(n, 0, jmp);
          set_Tuple_pred(n, 1, new_Bad());
        }
        /* We might generate an endless loop, so keep it alive. */
        add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
      } else if ((ta != tarval_bad) &&
                 (get_irn_mode(a) == mode_Iu) &&
                 (get_Cond_kind(n) == dense) &&
                 (get_opt_unreachable_code())) {
        /* I don't want to allow Tuples smaller than the biggest Proj.
           Also this tuple might get really big...
           I generate the Jmp here, and remember it in link. Link is used
           when optimizing Proj. */
        set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
        /* We might generate an endless loop, so keep it alive. */
        add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
      } else if ((get_irn_op(get_Cond_selector(n)) == op_Eor)
                 && (get_irn_mode(get_Cond_selector(n)) == mode_b)
                 && (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
        /* The Eor is a negate. Generate a new Cond without the negate,
           simulate the negate by exchanging the results. */
        set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                   get_Eor_left(a)));
      } else if ((get_irn_op(get_Cond_selector(n)) == op_Not)
                 && (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
        /* A Not before the Cond. Generate a new Cond without the Not,
           simulate the Not by exchanging the results. */
        set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                   get_Not_op(a)));
      }
    }
    break;
  case iro_Proj:
    {
      a = get_Proj_pred(n);

      if ((get_irn_op(a) == op_Cond)
          && get_irn_link(a)
          && get_irn_op(get_irn_link(a)) == op_Cond) {
        /* Use the better Cond if the Proj projs from a Cond which gets
           its result from an Eor/Not. */
        assert (   (   (get_irn_op(get_Cond_selector(a)) == op_Eor)
                    || (get_irn_op(get_Cond_selector(a)) == op_Not))
                && (get_irn_mode(get_Cond_selector(a)) == mode_b)
                && (get_irn_op(get_irn_link(a)) == op_Cond)
                && (get_Cond_selector(get_irn_link(a)) == get_Eor_left(get_Cond_selector(a))));
        set_Proj_pred(n, get_irn_link(a));
        if (get_Proj_proj(n) == 0)
          set_Proj_proj(n, 1);
        else
          set_Proj_proj(n, 0);
      } else if ((get_irn_op(a) == op_Cond)
                 && (get_irn_mode(get_Cond_selector(a)) == mode_Iu)
                 && (get_irn_op(get_Cond_selector(a)) == op_Const)
                 && (get_Cond_kind(a) == dense)
                 && (get_opt_unreachable_code())) {
        /* The Cond is a Switch on a Constant */
        if (get_Proj_proj(n) == tarval_to_long(value_of(get_Cond_selector(a)))) {
          /* The always taken branch, reuse the existing Jmp. */
          if (!get_irn_link(a)) /* well, if it exists ;-> */
            set_irn_link(a, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
          assert(get_irn_op(get_irn_link(a)) == op_Jmp);
          n = get_irn_link(a);
        } else { /* Not taken control flow, but be careful with the default! */
          if (get_Proj_proj(n) < a->attr.c.default_proj) {
            /* a never taken branch */
            n = new_Bad();
          } else {
            a->attr.c.default_proj = get_Proj_proj(n);
          }
        }
      }
    }
    break;
  case iro_Eor:  /* @@@ not tested as boolean Eor not allowed any more. */
    {
      a = get_Eor_left(n);
      b = get_Eor_right(n);

      if ((get_irn_mode(n) == mode_b)
          && (get_irn_op(a) == op_Proj)
          && (get_irn_mode(a) == mode_b)
          && (tarval_classify (computed_value (b)) == 1)
          && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
        /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
        n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                       mode_b, get_negated_pnc(get_Proj_proj(a)));
      else if ((get_irn_mode(n) == mode_b)
               && (tarval_classify (computed_value (b)) == 1))
        /* The Eor is a Not. Replace it by a Not. */
        /* ????!!! Extend to bitfield 1111111. */
        n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);
    }
    break;

  case iro_Not:
    {
      a = get_Not_op(n);

      if ((get_irn_mode(n) == mode_b)
          && (get_irn_op(a) == op_Proj)
          && (get_irn_mode(a) == mode_b)
          && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
        /* We negate a Cmp. The Cmp has the negated result anyways! */
        n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                       mode_b, get_negated_pnc(get_Proj_proj(a)));
    }
    break;

  default: break;
  }

  return n;
}
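
/* Sketch of the iro_Cond case above (illustration only; new_Cond(),
   new_Const() and new_Proj() are assumed from the construction
   interface): a boolean Cond on the constant true becomes a tuple whose
   false output is Bad and whose true output is a plain Jmp. */
#if 0
static void example_const_cond (void)
{
  ir_node *c = new_Cond (new_Const (mode_b, tarval_b_true));
  ir_node *taken, *not_taken;
  c = transform_node (c);              /* now Tuple (Bad, Jmp) */
  taken     = new_Proj (c, mode_X, 1); /* the Jmp */
  not_taken = new_Proj (c, mode_X, 0); /* folds to Bad */
}
#endif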
/* **************** Common Subexpression Elimination **************** */

/** The size of the hash table used, should estimate the number of nodes
    in a graph. */
#define N_IR_NODES 512

/* Compare function for two nodes in the hash table. Gets two   */
/* nodes as parameters. Returns 0 if the nodes are a cse.       */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i;

  a = (void *)elt;
  b = (void *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are equal */
  if (get_irn_arity (a) != get_irn_arity(b))
    return 1;

  /* for block-local cse and pinned nodes: */
  if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
    if (get_irn_n(a, -1) != get_irn_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < get_irn_arity(a); i++)
    if (get_irn_n(a, i) != get_irn_n(b, i))
      return 1;

  /* check for operation-specific attributes */
  switch (get_irn_opcode(a)) {
  case iro_Const:
    return (get_Const_tarval(a) != get_Const_tarval(b))
        || (get_Const_type(a) != get_Const_type(b));
  case iro_Proj:
    return get_irn_proj_attr (a) != get_irn_proj_attr (b);
  case iro_Filter:
    return get_Filter_proj(a) != get_Filter_proj(b);
  case iro_Alloc:
    return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
        || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
  case iro_Free:
    return (get_irn_free_attr(a) != get_irn_free_attr(b));
  case iro_SymConst:
    return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
        || (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
  case iro_Call:
    return (get_irn_call_attr(a) != get_irn_call_attr(b));
  case iro_Sel:
    return (get_irn_sel_attr(a).ent->kind != get_irn_sel_attr(b).ent->kind)
        || (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)
        || (get_irn_sel_attr(a).ent->owner != get_irn_sel_attr(b).ent->owner)
        || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
        || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type);
  case iro_Phi:
    return get_irn_phi_attr (a) != get_irn_phi_attr (b);
  case iro_Cast:
    return get_Cast_type(a) != get_Cast_type(b);
  default: break;
  }

  return 0;
}
static unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i;

  /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
  h = get_irn_arity(node);
  /* consider all in nodes... except the block. */
  for (i = 0; i < get_irn_arity(node); i++) {
    h = 9*h + (unsigned long)get_irn_n(node, i);
  }
  /* ...mode,... */
  h = 9*h + (unsigned long) get_irn_mode (node);
  /* ...and code */
  h = 9*h + (unsigned long) get_irn_op (node);
  return h;
}
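
/* Note on the hash/compare contract: the pset relies on
   vt_cmp(a, b) == 0 implying ir_node_hash(a) == ir_node_hash(b).
   This holds because ir_node_hash() mixes exactly the fields vt_cmp()
   tests first (arity, ins, mode, op); the attribute checks in vt_cmp()
   only make equality stricter, never looser. Schematically:

     assert (vt_cmp (a, b) != 0 || ir_node_hash (a) == ir_node_hash (b));
*/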
static pset *new_identities (void) {
  return new_pset (vt_cmp, N_IR_NODES);
}

static void del_identities (pset *value_table) {
  del_pset (value_table);
}

/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table. */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    switch (get_irn_opcode (n)) {
    case iro_Add: case iro_Mul: case iro_Or: case iro_And: case iro_Eor:
      /* for commutative operators perform a OP b == b OP a */
      if (get_binop_left(n) > get_binop_right(n)) {
        ir_node *h = get_binop_left(n);
        set_binop_left(n, get_binop_right(n));
        set_binop_right(n, h);
      }
      break;
    default: break;
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
}
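
/* Sketch of the effect of the normalization above (illustration only;
   new_Add() is assumed from the construction interface, cse and
   reassociation are assumed enabled): the operands of commutative nodes
   are ordered by pointer value before hashing, so Add(a,b) and Add(b,a)
   become the same node. */
#if 0
static void example_commutative_cse (ir_node *a, ir_node *b)
{
  ir_node *s1 = new_Add (a, b, mode_Is);
  ir_node *s2 = new_Add (b, a, mode_Is); /* construction runs the optimization */
  assert (s1 == s2);
}
#endif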
/* During construction we set the pinned flag in the graph right when the
   optimization is performed. The flag turning on procedure global cse could
   be changed between two allocations. This way we are safe. */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, floats);
  return n;
}

/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table, enters it in the table
   if it isn't there yet. */
static ir_node *
identify_remember (pset *value_table, ir_node *node)
{
  ir_node *o;

  if (!value_table) return node;

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, node, ir_node_hash (node));

  if (o == node) return node;

  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  identify_remember (value_table, node);
}
/* garbage in, garbage out. If a node has a dead input, i.e., the
   Bad node is input to the node, return the Bad node. */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i;
  ir_op* op = get_irn_op(node);

  /* remove garbage blocks by looking at control flow that leaves the block
     and replacing the control flow by Bad. */
  if (get_irn_mode(node) == mode_X) {
    ir_node *block = get_nodes_block(node);
    if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
      for (i = 0; i < get_irn_arity(block); i++) {
        if (!is_Bad(get_irn_n(block, i))) break;
      }
      if (i == get_irn_arity(block)) return new_Bad();
    }
  }

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
    for (i = -1; i < get_irn_arity(node); i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* With this code we violate the agreement that local_optimize
     only leaves Bads in Block, Phi and Tuple nodes. */
  /* If Block has only Bads as predecessors it's garbage. */
  /* If Phi has only Bads as predecessors it's garbage. */
  if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
    for (i = 0; i < get_irn_arity(node); i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == get_irn_arity(node)) node = new_Bad();
  }

  return node;
}
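
/* Sketch (illustration only; new_Add() is assumed from the construction
   interface and optimizations are assumed enabled): a node with a Bad
   operand is garbage itself and comes back as Bad. */
#if 0
static void example_gigo (ir_node *x)
{
  ir_node *add = new_Add (x, new_Bad (), mode_Is);
  assert (is_Bad (gigo (add)));
}
#endif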
/* These optimizations deallocate nodes from the obstack.
   They can only be called if it is guaranteed that no other nodes
   reference this one, i.e., right after construction of a node. */
ir_node *
optimize_node (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_optimize()) && (get_irn_op(n) != op_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        return new_Const (get_tarval_mode (tv), tv);
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Phi)  ||  /* always optimize these nodes. */
      (get_irn_op(n) == op_Id)   ||
      (get_irn_op(n) == op_Proj) ||
      (get_irn_op(n) == op_Block))   /* Flags tested locally. */
    n = equivalent_node (n);

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != old_n) {
    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, old_n);
  }

  /* Some more constant expression evaluation that does not allow us to
     free the node. */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Cond) ||
      (get_irn_op(n) == op_Proj))    /* Flags tested locally. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node. Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}
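
/* Sketch of the whole pipeline (illustration only; new_Add() is assumed
   from the construction interface, cse is assumed enabled): the
   constructors call optimize_node() right after allocation, so building
   the same expression twice yields the very same node. */
#if 0
static void example_optimize_node (ir_node *a, ir_node *b)
{
  ir_node *s1 = new_Add (a, b, mode_Is);
  ir_node *s2 = new_Add (a, b, mode_Is);
  assert (s1 == s2); /* cse via identify_cons()/identify_remember() */
}
#endif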
/* These optimizations never deallocate nodes. This can cause dead
   nodes lying on the obstack. Remove these by a dead node elimination,
   i.e., a copying garbage collection. */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  if (!get_optimize() && (get_irn_op(n) != op_Phi)) return n;

  /* if not optimize return n */
  if (n == NULL) {
    /* Here this is possible. Why? */
    return n;
  }

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);
        __dbg_info_merge_pair(n, old_n, dbg_const_eval);
        return n;
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Phi)  ||  /* always optimize these nodes. */
      (get_irn_op(n) == op_Id)   ||  /* ... */
      (get_irn_op(n) == op_Proj) ||  /* ... */
      (get_irn_op(n) == op_Block))   /* Flags tested locally. */
    n = equivalent_node (n);

  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }

  /* Some more constant expression evaluation. */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Cond) ||
      (get_irn_op(n) == op_Proj))    /* Flags tested locally. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node. Enter it in hash table for cse.
     Blocks should be unique anyways. (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/* Wrapper for external use: sets proper status bits after optimization. */
ir_node *
optimize_in_place (ir_node *n) {
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
}
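
/* Typical external use (illustration only; irg_walk_graph() and
   exchange() from the public interface are assumed): rewire each node
   to its optimized version from inside a graph walker. */
#if 0
static void opt_walker (ir_node *n, void *env)
{
  ir_node *opt = optimize_in_place (n);
  if (opt != n) exchange (n, opt);
}
/* ... irg_walk_graph (irg, NULL, opt_walker, NULL); ... */
#endif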