/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
 * Authors: Christian Schaefer, Goetz Lindenmaier
 *
 * iropt --- optimizations intertwined with IR construction.
 */
# include "irnode_t.h"
# include "irgraph_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/* Trivial INLINEable routine for copy propagation.
   Follows Ids, needed to optimize INLINEd code. */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}
static INLINE tarval *
value_of (ir_node *n)
{
  if ((n != NULL) && (get_irn_op(n) == op_Const))
    return get_Const_tarval(n); /* might return tarval_bad */
  else
    return tarval_bad;
}
/* If n can be computed, return the value, else tarval_bad. Performs
   constant folding. GL: Only if n is an arithmetic operator? */
tarval *
computed_value (ir_node *n)
{
  tarval *res = tarval_bad;

  ir_node *a = NULL, *b = NULL;  /* initialized to shut up gcc */
  /* initialized to uniformly filter invalid constants */
  tarval *ta = tarval_bad, *tb = tarval_bad;
  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  /* if the operands are constants, get the target values; otherwise they
     stay tarval_bad. (a and b may be NULL if we treat a node that is no
     computation.) */
  ta = value_of (a);
  tb = value_of (b);
  /* Perform the constant evaluation / computation. */
  switch (get_irn_opcode(n)) {
  case iro_Const:
    res = get_Const_tarval(n);
    break;
  case iro_SymConst:
    if ((get_SymConst_kind(n) == size) &&
        (get_type_state(get_SymConst_type(n)) == layout_fixed))
      res = new_tarval_from_long (get_type_size(get_SymConst_type(n)), mode_Is);
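    /* I.e., a size SymConst folds to a plain integer constant as soon as
       the type's layout is fixed; before that the size is still unknown. */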
    break;

  case iro_Add:
    if ((ta != tarval_bad) && (tb != tarval_bad)
        && (get_irn_mode(a) == get_irn_mode(b))
        && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
      res = tarval_add (ta, tb);
    }
    break;
  case iro_Sub:
    if ((ta != tarval_bad) && (tb != tarval_bad)
        && (get_irn_mode(a) == get_irn_mode(b))
        && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
      res = tarval_sub (ta, tb);
    }
    break;
  case iro_Minus:
    if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
      res = tarval_neg (ta);
    break;
  case iro_Mul:
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      res = tarval_mul (ta, tb);
    } else {
      /* a*0 = 0 or 0*b = 0:
         calls computed_value recursively and returns the 0 with proper
         mode. */
      tarval *v;
      if ( ( ((v = computed_value (a)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )
        || ( ((v = computed_value (b)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )) {
        res = v;
      }
    }
    break;
  case iro_Quot:
    /* This was missing in the original implementation. Why? */
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
      res = tarval_quo(ta, tb);
    }
    break;
  case iro_Div:
    /* This was missing in the original implementation. Why? */
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
      res = tarval_div(ta, tb);
    }
    break;
  case iro_Mod:
    /* This was missing in the original implementation. Why? */
    if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
      res = tarval_mod(ta, tb);
    }
    break;
  /* for iro_DivMod see iro_Proj */
  case iro_Abs:
    if (ta != tarval_bad)
      res = tarval_abs (ta);
    break;
  case iro_And:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_and (ta, tb);
    } else {
      tarval *v;
      if ( ( ((v = computed_value (a)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )
        || ( ((v = computed_value (b)) != tarval_bad)
             && (v == get_mode_null(get_tarval_mode(v))) )) {
        res = v;
      }
    }
    break;
  case iro_Or:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_or (ta, tb);
    } else {
      tarval *v;
      if ( (tarval_classify ((v = computed_value (a))) == -1)
        || (tarval_classify ((v = computed_value (b))) == -1)) {
        res = v;
      }
    }
    break;
  case iro_Eor:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_eor (ta, tb);
    }
    break;
  case iro_Not:
    if (ta != tarval_bad) {
      res = tarval_not (ta);
    }
    break;
  case iro_Shl:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_shl (ta, tb);
    }
    break;
  case iro_Shr:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_shr (ta, tb);
    }
    break;
  case iro_Shrs:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      res = tarval_shrs (ta, tb);
    }
    break;
  case iro_Rot:
    if ((ta != tarval_bad) && (tb != tarval_bad)) {
      /*res = tarval_rot (ta, tb)*/;
    }
    break;
  case iro_Conv:
    if (ta != tarval_bad) {
      res = tarval_convert_to (ta, get_irn_mode (n));
    }
    break;
  case iro_Proj:  /* iro_Cmp */
    {
      ir_node *aa, *ab;

      a = get_Proj_pred(n);
      /* Optimize Cmp nodes.
         This performs a first step of unreachable code elimination.
         Proj can not be computed, but folding a Cmp above the Proj here is
         not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
         only one is used.
         There are several cases where we can evaluate a Cmp node:
         1. The nodes compared are both the same. If we compare for
            equal, greater equal, ... this will return true, else it
            will return false. This step relies on cse.
         2. The predecessors of Cmp are target values. We can evaluate
            the Cmp.
         3. The predecessors are Allocs or void* constants. Allocs never
            return NULL, they raise an exception. Therefore we can predict
            the Cmp result. */
      if (get_irn_op(a) == op_Cmp) {
        aa = get_Cmp_left(a);
        ab = get_Cmp_right(a);
        if (aa == ab) { /* 1.: */
          /* This is a trick with the bits used for encoding the Cmp
             Proj numbers, the following statement is not the same:
             res = new_tarval_from_long ((get_Proj_proj(n) == Eq), mode_b) */
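          /* The Cmp Proj numbers encode the relations Lt, Eq, Gt as single
             bits. A node compared with itself satisfies exactly the
             relations that contain Eq, so masking with the Eq bit yields
             the correct boolean for every possible Proj number. */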
          res = new_tarval_from_long ((get_Proj_proj(n) & Eq), mode_b);
        } else {
          tarval *taa = computed_value (aa);
          tarval *tab = computed_value (ab);
          if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
            /* strange checks... */
            pnc_number flags = tarval_cmp (taa, tab);
            if (flags != False) {
              res = new_tarval_from_long (get_Proj_proj(n) & flags, mode_b);
            }
          } else { /* check for 3.: */
            ir_node *aaa = skip_nop(skip_Proj(aa));
            ir_node *aba = skip_nop(skip_Proj(ab));
            if ( ( (/* aa is ProjP and aaa is Alloc */
                    (get_irn_op(aa) == op_Proj)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_irn_op(aaa) == op_Alloc))
                   && ( (/* ab is constant void */
                         (get_irn_op(ab) == op_Const)
                         && (mode_is_reference(get_irn_mode(ab)))
                         && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
                     || (/* ab is other Alloc */
                         (get_irn_op(ab) == op_Proj)
                         && (mode_is_reference(get_irn_mode(ab)))
                         && (get_irn_op(aba) == op_Alloc)
                         && (aaa != aba))))
                || (/* aa is void and aba is Alloc */
                    (get_irn_op(aa) == op_Const)
                    && (mode_is_reference(get_irn_mode(aa)))
                    && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
                    && (get_irn_op(ab) == op_Proj)
                    && (mode_is_reference(get_irn_mode(ab)))
                    && (get_irn_op(aba) == op_Alloc)))
              /* 3.: */
              res = new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
          }
        }
      } else if (get_irn_op(a) == op_DivMod) {
        ta = value_of(get_DivMod_left(a));
        tb = value_of(get_DivMod_right(a));
        if ((ta != tarval_bad) && (tb != tarval_bad)
            && (get_irn_mode(get_DivMod_left(a)) == get_irn_mode(get_DivMod_right(a)))) {
          if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
          if (get_Proj_proj(n) == 0) /* Div */
            res = tarval_div(ta, tb);
          else /* Mod */
            res = tarval_mod(ta, tb);
        }
      }
    }
    break;

  default:
    break;
  }

  return res;
}
/* Returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (mode_is_reference(get_irn_mode (a))
          && mode_is_reference(get_irn_mode (b)));

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;
  }
  return 0;
}
/* equivalent_node returns a node equivalent to n. It skips all nodes that
   perform no actual computation, as, e.g., the Id nodes. It does not create
   new nodes. It is therefore safe to free n if the node returned is not n.
   If a node returns a Tuple we can not just skip it. If the size of the
   in array fits, we transform n into a tuple (e.g., Div). */
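/* A sketch of the typical call-site pattern this enables (cf. optimize_node
   below): since no new nodes are created, the old node may be released when
   a different one comes back:
     ir_node *m = equivalent_node (n);
     if (m != n) obstack_free (current_ir_graph->obst, n);
*/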
ir_node *
equivalent_node (ir_node *n)
{
  int ins;
  ir_node *a = NULL; /* to shut up gcc */
  ir_node *b = NULL; /* to shut up gcc */
  ir_node *c = NULL; /* to shut up gcc */
  ir_node *oldn = n;

  ins = get_irn_arity (n);
  /* get the operands we will work on */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }
  /* skip unnecessary nodes. */
  switch (get_irn_opcode (n)) {
  case iro_Block:
    {
      /* The Block constructor does not call optimize, but mature_block
         calls the optimization. */
      assert(get_Block_matured(n));
      /* Straightening: a single entry Block following a single exit Block
         can be merged, if it is not the Start block. */
      /* !!! Beware, all Phi-nodes of n must have been optimized away.
         This should be true, as the block is matured before optimize is called.
         But what about Phi-cycles with the Phi0/Id that could not be resolved?
         Remaining Phi nodes are just Ids. */
      if ((get_Block_n_cfgpreds(n) == 1) &&
          (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
          (get_opt_control_flow_straightening())) {
        n = get_nodes_Block(get_Block_cfgpred(n, 0));  DBG_OPT_STG;
      } else if ((get_Block_n_cfgpreds(n) == 2) &&
                 (get_opt_control_flow_weak_simplification())) {
        /* Test whether Cond jumps twice to this block
           @@@ we could do this also with two loops finding two preds from several ones. */
        a = get_Block_cfgpred(n, 0);
        b = get_Block_cfgpred(n, 1);
        if ((get_irn_op(a) == op_Proj) &&
            (get_irn_op(b) == op_Proj) &&
            (get_Proj_pred(a) == get_Proj_pred(b)) &&
            (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
            (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
          /* Also a single entry Block following a single exit Block. Phis have
             twice the same operand and will be optimized away. */
          n = get_nodes_Block(a);  DBG_OPT_IFSIM;
        }
      } else if (get_opt_unreachable_code() &&
                 (n != current_ir_graph->start_block) &&
                 (n != current_ir_graph->end_block)) {
        int i;
        /* If all inputs are dead, this block is dead too, except if it is
           the start or end block. This is a step of unreachable code
           elimination. */
        for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
          if (!is_Bad(get_Block_cfgpred(n, i))) break;
        }
        if (i == get_Block_n_cfgpreds(n))
          n = new_Bad();
      }
    }
    break;
  case iro_Jmp:  /* GL: Why not the same for op_Raise?? */
    /* unreachable code elimination */
    if (is_Bad(get_nodes_Block(n)))  n = new_Bad();
    break;
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */
  /** remove stuff like x+0, x*1, x&true ... constant expression evaluation **/
  case iro_Or:  if (a == b) { n = a; break; }
  case iro_Add:
  case iro_Eor: {
    tarval *tv;
    ir_node *on;
    /* After running compute_node there is only one constant predecessor.
       Find this predecessor's value and remember the other node: */
    if ((tv = computed_value (a)) != tarval_bad) {
      on = b;
    } else if ((tv = computed_value (b)) != tarval_bad) {
      on = a;
    } else break;

    /* If this predecessor's constant value is zero, the operation is
       unnecessary. Remove it: */
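    /* As used throughout this file, tarval_classify() evidently yields 0
       for the mode's null value, 1 for the one value and -1 for the
       all-bits-set value; the neutral-element tests rely on this. */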
    if (tarval_classify (tv) == 0) {
      n = on;  DBG_OPT_ALGSIM1;
    }
  } break;

  case iro_Sub:
  case iro_Shl:
  case iro_Shr:
  case iro_Shrs:
  case iro_Rot:
    /* these operations are not commutative. Test only one predecessor. */
    if (tarval_classify (computed_value (b)) == 0) {
      n = a;  DBG_OPT_ALGSIM1;
      /* Test if b > #bits of a ==> return 0 / divide b by #bits
         --> transform node? */
    }
    break;
  case iro_Not:   /* NotNot x == x */
  case iro_Minus: /* --x == x */  /* ??? Is this possible or can --x raise an
                                     out of bounds exception if min != max? */
    if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
      n = get_unop_op(get_unop_op(n));  DBG_OPT_ALGSIM2;
    }
    break;
  case iro_Mul:
    /* Mul is commutative and again has another neutral element. */
    if (tarval_classify (computed_value (a)) == 1) {
      n = b;  DBG_OPT_ALGSIM1;
    } else if (tarval_classify (computed_value (b)) == 1) {
      n = a;  DBG_OPT_ALGSIM1;
    }
    break;
  case iro_Div:
    /* Div is not commutative. */
    if (tarval_classify (computed_value (b)) == 1) { /* div(x, 1) == x */
      /* Turn Div into a tuple (mem, bad, a) */
      ir_node *mem = get_Div_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, a);
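      /* The Tuple preds mirror the Div's Projs: 0 the memory result, 1 the
         exception control flow (Bad here, as dividing by 1 cannot trap),
         2 the data result -- the left operand itself. */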
    }
    break;

  /*
  case iro_Mod, Quot, DivMod
    DivMod allocates new nodes --> it's treated in transform_node.
    What about Quot, DivMod?
  */
  case iro_And:
    if (a == b) {
      n = a;    /* And has its own neutral element */
    } else if (tarval_classify (computed_value (a)) == -1) {
      n = b;
    } else if (tarval_classify (computed_value (b)) == -1) {
      n = a;
    }
    if (n != oldn)  DBG_OPT_ALGSIM1;
    break;
  case iro_Conv:
    if (get_irn_mode(n) == get_irn_mode(a)) { /* No Conv necessary */
      n = a;  DBG_OPT_ALGSIM3;
    } else if (get_irn_mode(n) == mode_b) {
      if (get_irn_op(a) == op_Conv &&
          get_irn_mode (get_Conv_op(a)) == mode_b) {
        n = get_Conv_op(a); /* Convb(Conv*(xxxb(...))) == xxxb(...) */  DBG_OPT_ALGSIM2;
      }
    }
    break;
  case iro_Phi:
    {
      /* Several optimizations:
         - no Phi in start block.
         - remove Id operators that are inputs to Phi
         - fold Phi-nodes, iff they have only one predecessor except
           themselves. */
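      /* Example: a loop-header Phi  x' = Phi(x, x')  has only one
         predecessor besides itself and therefore folds to x. */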
      int i, n_preds;
      ir_node *block = NULL;     /* to shut up gcc */
      ir_node *first_val = NULL; /* to shut up gcc */
      ir_node *scnd_val = NULL;  /* to shut up gcc */
      if (!get_opt_normalize()) return n;

      n_preds = get_Phi_n_preds(n);
      block = get_nodes_Block(n);
      /* @@@ Remove this; it should always hold anyway:
         assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
      if ((is_Bad(block)) ||                        /* Control dead */
          (block == current_ir_graph->start_block)) /* There should be no Phi nodes */
        return new_Bad();                           /* in the Start Block. */

      if (n_preds == 0) break; /* Phi of dead Region without predecessors. */
      /* first we test for a special case: */
      /* Confirm is a special node fixing additional information for a
         value that is known at a certain point. This is useful for
         dataflow analysis. */
      if (n_preds == 2) {
        ir_node *a = follow_Id (get_Phi_pred(n, 0));
        ir_node *b = follow_Id (get_Phi_pred(n, 1));
        if ( (get_irn_op(a) == op_Confirm)
             && (get_irn_op(b) == op_Confirm)
             && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
             && (get_irn_n(a, 1) == get_irn_n (b, 1))
             && (a->data.num == (~b->data.num & irpn_True)) ) {
          n = follow_Id (get_irn_n(a, 0));
          break;
        }
      }
      /* Find first non-self-referencing input */
      for (i = 0; i < n_preds; ++i) {
        first_val = follow_Id(get_Phi_pred(n, i));
        /* skip Ids */
        set_Phi_pred(n, i, first_val);
        if ( (first_val != n)                      /* not self pointer */
             && (get_irn_op(first_val) != op_Bad)  /* value not dead */
             && !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */
          break;                                   /* then found first value. */
        }
      }

      /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
      if (i >= n_preds) { n = new_Bad(); break; }
      /* follow_Id () for rest of inputs, determine if any of these
         are non-self-referencing */
      while (++i < n_preds) {
        scnd_val = follow_Id(get_Phi_pred(n, i));
        /* skip Ids */
        set_Phi_pred(n, i, scnd_val);
        if ( (scnd_val != n)
             && (scnd_val != first_val)
             && (get_irn_op(scnd_val) != op_Bad)
             && !(is_Bad (get_Block_cfgpred(block, i))) ) {
          break;
        }
      }
      /* Fold, if no multiple distinct non-self-referencing inputs */
      if (i >= n_preds) {
        n = first_val;  DBG_OPT_PHI;
      } else {
        /* skip the remaining Ids. */
        while (++i < n_preds) {
          set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
        }
      }
    }
    break;

  case iro_Load:
    {
#if 0  /* Is an illegal transformation: different nodes can
          represent the same pointer value!! */
      a = skip_Proj(get_Load_mem(n));
      b = get_Load_ptr(n);

      if (get_irn_op(a) == op_Store) {
        if ( different_identity (b, get_Store_ptr(a))) {
          /* load and store use different pointers, therefore load
             need not take store's memory but the state before. */
          set_Load_mem (n, get_Store_mem(a));
        } else if (( 0 /* ??? didn't get cryptic test that returns 0 */ )) {
        }
      }
#endif
    }
    break;
  case iro_Store:
    /* remove unnecessary store. */
    {
      a = skip_Proj(get_Store_mem(n));
      b = get_Store_ptr(n);
      c = skip_Proj(get_Store_value(n));

      if (get_irn_op(a) == op_Store
          && get_Store_ptr(a) == b
          && skip_Proj(get_Store_value(a)) == c) {
        /* We have twice exactly the same store -- a write after write. */
        n = a;  DBG_OPT_WAW;
      } else if (get_irn_op(c) == op_Load
                 && (a == c || skip_Proj(get_Load_mem(c)) == a)
                 && get_Load_ptr(c) == b) {
        /* We just loaded the value from the same memory, i.e., the store
           doesn't change the memory -- a write after read. */
        a = get_Store_mem(n);
        turn_into_tuple(n, 2);
        set_Tuple_pred(n, 0, a);
        set_Tuple_pred(n, 1, new_Bad());  DBG_OPT_WAR;
      }
    }
    break;

  case iro_Proj:
    {
      a = get_Proj_pred(n);

      if ( get_irn_op(a) == op_Tuple) {
        /* Remove the Tuple/Proj combination. */
        if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
          n = get_Tuple_pred(a, get_Proj_proj(n));  DBG_OPT_TUPLE;
        } else {
          assert(0); /* This should not happen! */
          n = new_Bad();
        }
      } else if (get_irn_mode(n) == mode_X &&
                 is_Bad(get_nodes_Block(n))) {
        /* Remove dead control flow -- early gigo. */
        n = new_Bad();
      }
    }
    break;

  case iro_Id:
    n = follow_Id (n);  DBG_OPT_ID;
    break;

  default: break;
  }

  return n;
} /* end equivalent_node() */
/* Tries several [inplace] [optimizing] transformations and returns an
   equivalent node. The difference to equivalent_node is that these
   transformations _do_ generate new nodes, and thus the old node must
   not be freed even if the equivalent node isn't the old one. */
static ir_node *
transform_node (ir_node *n)
{
  ir_node *a = NULL, *b;
  tarval *ta, *tb;
  switch (get_irn_opcode(n)) {
  case iro_Div:
    ta = computed_value(n);
    if (ta != tarval_bad) {
      /* Turn Div into a tuple (mem, bad, value) */
      ir_node *mem = get_Div_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
    }
    break;
  case iro_Mod:
    ta = computed_value(n);
    if (ta != tarval_bad) {
      /* Turn Mod into a tuple (mem, bad, value) */
      ir_node *mem = get_Mod_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
    }
    break;
  case iro_DivMod: {
    int evaluated = 0;
    ir_mode *mode;

    a = get_DivMod_left(n);
    b = get_DivMod_right(n);
    mode = get_irn_mode(a);

    if (!(mode_is_int(get_irn_mode(a)) &&
          mode_is_int(get_irn_mode(b))))
      break;

    if (a == b) {
      a = new_Const (mode, get_mode_one(mode));
      b = new_Const (mode, get_mode_null(mode));
      evaluated = 1;
    } else {
      ta = value_of(a);
      tb = value_of(b);
      if (tb != tarval_bad) {
        if (tb == get_mode_one(get_tarval_mode(tb))) {
          b = new_Const (mode, get_mode_null(mode));
          evaluated = 1;
        } else if (ta != tarval_bad) {
          tarval *resa, *resb;
          resa = tarval_div (ta, tb);
          if (resa == tarval_bad) break; /* Causes exception!!! Model by replacing through
                                            Jmp for X result!? */
          resb = tarval_mod (ta, tb);
          if (resb == tarval_bad) break; /* Causes exception! */
          a = new_Const (mode, resa);
          b = new_Const (mode, resb);
          evaluated = 1;
        }
      } else if ((ta != tarval_bad) &&
                 (ta == get_mode_null(get_tarval_mode(ta)))) {
        /* 0 / b == 0 and 0 % b == 0 */
        b = a;
        evaluated = 1;
      }
    }
    if (evaluated) { /* replace by tuple */
      ir_node *mem = get_DivMod_mem(n);
      turn_into_tuple(n, 4);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());  /* no exception */
      set_Tuple_pred(n, 2, a);
      set_Tuple_pred(n, 3, b);
      assert(get_nodes_Block(n));
    }
  }
  break;
  case iro_Cond: {
    /* Replace the Cond by a Jmp if it branches on a constant
       condition. */
    ir_node *jmp;
    a = get_Cond_selector(n);
    ta = value_of(a);

    if ((ta != tarval_bad) &&
        (get_irn_mode(a) == mode_b) &&
        (get_opt_unreachable_code())) {
      /* It's a boolean Cond, branching on a boolean constant.
         Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
      jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
      turn_into_tuple(n, 2);
      if (ta == tarval_b_true) {
        set_Tuple_pred(n, 0, new_Bad());
        set_Tuple_pred(n, 1, jmp);
      } else {
        set_Tuple_pred(n, 0, jmp);
        set_Tuple_pred(n, 1, new_Bad());
      }
      /* We might generate an endless loop, so keep it alive. */
      add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
    } else if ((ta != tarval_bad) &&
               (get_irn_mode(a) == mode_Iu) &&
               (get_Cond_kind(n) == dense) &&
               (get_opt_unreachable_code())) {
      /* I don't want to allow Tuples smaller than the biggest Proj.
         Also this tuple might get really big...
         I generate the Jmp here, and remember it in link. Link is used
         when optimizing Proj. */
      set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
      /* We might generate an endless loop, so keep it alive. */
      add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
    } else if ((get_irn_op(get_Cond_selector(n)) == op_Eor)
               && (get_irn_mode(get_Cond_selector(n)) == mode_b)
               && (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
      /* The Eor is a negate. Generate a new Cond without the negate,
         simulate the negate by exchanging the results. */
      set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                 get_Eor_left(a)));
    } else if ((get_irn_op(get_Cond_selector(n)) == op_Not)
               && (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
      /* A Not before the Cond. Generate a new Cond without the Not,
         simulate the Not by exchanging the results. */
      set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                 get_Not_op(a)));
    }
  }
  break;
  case iro_Proj:
    a = get_Proj_pred(n);

    if ((get_irn_op(a) == op_Cond)
        && get_irn_link(a)
        && get_irn_op(get_irn_link(a)) == op_Cond) {
      /* Use the better Cond if the Proj projs from a Cond which gets
         its result from an Eor/Not. */
      assert ( ((get_irn_op(get_Cond_selector(a)) == op_Eor)
                || (get_irn_op(get_Cond_selector(a)) == op_Not))
               && (get_irn_mode(get_Cond_selector(a)) == mode_b)
               && (get_irn_op(get_irn_link(a)) == op_Cond)
               && (get_Cond_selector(get_irn_link(a)) == get_Eor_left(get_Cond_selector(a))));
      set_Proj_pred(n, get_irn_link(a));
      if (get_Proj_proj(n) == 0)
        set_Proj_proj(n, 1);
      else
        set_Proj_proj(n, 0);
    } else if ((get_irn_op(a) == op_Cond)
               && (get_irn_mode(get_Cond_selector(a)) == mode_Iu)
               && (get_irn_op(get_Cond_selector(a)) == op_Const)
               && (get_Cond_kind(a) == dense)
               && (get_opt_unreachable_code())) {
      /* The Cond is a Switch on a Constant */
      if (get_Proj_proj(n) == tarval_to_long(value_of(get_Cond_selector(a)))) {
        /* The always taken branch, reuse the existing Jmp. */
        if (!get_irn_link(a)) /* well, if it exists ;-> */
          set_irn_link(a, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
        assert(get_irn_op(get_irn_link(a)) == op_Jmp);
        n = get_irn_link(a);
      } else { /* Not taken control flow, but be careful with the default! */
        if (get_Proj_proj(n) < a->attr.c.default_proj) {
          /* a never taken branch */
          n = new_Bad();
        } else {
          a->attr.c.default_proj = get_Proj_proj(n);
        }
      }
    }
    break;
  case iro_Eor: { /* @@@ not tested as boolean Eor not allowed any more. */
    a = get_Eor_left(n);
    b = get_Eor_right(n);

    if ((get_irn_mode(n) == mode_b)
        && (get_irn_op(a) == op_Proj)
        && (get_irn_mode(a) == mode_b)
        && (tarval_classify (computed_value (b)) == 1)
        && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
      /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
      n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                     mode_b, get_negated_pnc(get_Proj_proj(a)));
    else if ((get_irn_mode(n) == mode_b)
             && (tarval_classify (computed_value (b)) == 1))
      /* The Eor is a Not. Replace it by a Not. */
      /* ????!!! Extend to bitfield 1111111. */
      n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);
  }
  break;

  case iro_Not: {
    a = get_Not_op(n);

    if ( (get_irn_mode(n) == mode_b)
         && (get_irn_op(a) == op_Proj)
         && (get_irn_mode(a) == mode_b)
         && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
      /* We negate a Cmp. The Cmp has the negated result anyways! */
      n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                     mode_b, get_negated_pnc(get_Proj_proj(a)));
  }
  break;

  default: break;
  }

  return n;
}
/* **************** Common Subexpression Elimination **************** */

/* Compare function for two nodes in the hash table. Gets two nodes as
   parameters. Returns 0 if the nodes are a cse. */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i;

  a = (ir_node *)elt;
  b = (ir_node *)key;

  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;
  /* compare if a's in and b's in are equal */
  if (get_irn_arity (a) != get_irn_arity(b))
    return 1;

  /* for block-local cse and pinned nodes: */
  if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
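    /* get_irn_n(x, -1) yields the block of x, so pinned nodes are only
       considered equal if they live in the same block. */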
    if (get_irn_n(a, -1) != get_irn_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < get_irn_arity(a); i++)
    if (get_irn_n(a, i) != get_irn_n(b, i))
      return 1;
  switch (get_irn_opcode(a)) {
  case iro_Const:
    return get_irn_const_attr (a) != get_irn_const_attr (b);
  case iro_Proj:
    return get_irn_proj_attr (a) != get_irn_proj_attr (b);
  case iro_Filter:
    return get_Filter_proj(a) != get_Filter_proj(b);
  case iro_Alloc:
    return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
        || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
  case iro_Free:
    return (get_irn_free_attr(a) != get_irn_free_attr(b));
  case iro_SymConst:
    return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
        || (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
  case iro_Call:
    return (get_irn_call_attr(a) != get_irn_call_attr(b));
  case iro_Sel:
    return (get_irn_sel_attr(a).ent->kind != get_irn_sel_attr(b).ent->kind)
        || (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)
        || (get_irn_sel_attr(a).ent->owner != get_irn_sel_attr(b).ent->owner)
        || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
        || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type);
  case iro_Phi:
    return get_irn_phi_attr (a) != get_irn_phi_attr (b);
  case iro_Cast:
    return get_Cast_type(a) != get_Cast_type(b);
  default: break;
  }

  return 0;
}
static unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i;

  /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
  h = get_irn_arity(node);

  /* consider all in nodes... except the block. */
  for (i = 0; i < get_irn_arity(node); i++) {
    h = 9*h + (unsigned long)get_irn_n(node, i);
  }

  h = 9*h + (unsigned long) get_irn_mode (node);
  h = 9*h + (unsigned long) get_irn_op (node);
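  /* Note that predecessors, mode and op enter the hash by their addresses;
     this matches vt_cmp() above, which compares them with '=='. */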
  return h;
}

pset *
new_identities (void)
{
  return new_pset (vt_cmp, TUNE_NIR_NODES);
}

void
del_identities (pset *value_table)
{
  del_pset (value_table);
}
/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table. */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;
  if (get_opt_reassociation()) {
    switch (get_irn_opcode (n)) {
    case iro_Add:
    case iro_Mul:
    case iro_Or:
    case iro_And:
    case iro_Eor:
      {
        /* for commutative operators perform a OP b == b OP a */
        if (get_binop_left(n) > get_binop_right(n)) {
          ir_node *h = get_binop_left(n);
          set_binop_left(n, get_binop_right(n));
          set_binop_right(n, h);
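          /* Ordering the two operands by address makes a OP b and b OP a
             structurally identical, so hashing and vt_cmp() treat them as
             the same expression. */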
        }
      }
      break;
    default: break;
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
}
/* During construction we set the pinned flag in the graph right when the
   optimization is performed. The flag turning on procedure global cse could
   be changed between two allocations. This way we are safe. */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, floats);
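  /* (The canonical node may lie in another block; if so, the graph's
     pinned property no longer holds and is downgraded to floats.) */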
  return n;
}

/* Return the canonical node computing the same value as node.
   Looks up the node in a hash table, enters it in the table
   if it isn't there yet. */
static ir_node *
identify_remember (pset *value_table, ir_node *node)
{
  ir_node *o = NULL;

  if (!value_table) return node;

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, node, ir_node_hash (node));

  if (o == node) return node;
  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  identify_remember (value_table, node);
}
/* garbage in, garbage out. If a node has a dead input, i.e., the
   Bad node is input to the node, return the Bad node. */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i;
  ir_op *op = get_irn_op(node);
  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
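    /* i starts at -1 so that the block input is checked as well. */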
    for (i = -1; i < get_irn_arity(node); i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* If a Block or a Phi has only Bads as predecessors, it's garbage. */
  if (op == op_Block || op == op_Phi) {
    for (i = 0; i < get_irn_arity(node); i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == get_irn_arity(node)) node = new_Bad();
  }

  return node;
}
/* These optimizations deallocate nodes from the obstack. They can only be
   called if it is guaranteed that no other nodes reference this one, i.e.,
   right after construction of a node. */
ir_node *
optimize_node (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;
  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_optimize()) && (get_irn_op(n) != op_Phi)) return n;
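  /* The pipeline below: constant folding, equivalent_node, cse,
     transformations that may build new nodes, removal of dead inputs,
     verification, and finally entering the result in the cse table. */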
  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        return new_Const (get_tarval_mode (tv), tv);
      }
    }
  }
  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Phi)  ||   /* always optimize these nodes. */
      (get_irn_op(n) == op_Id)   ||
      (get_irn_op(n) == op_Proj) ||
      (get_irn_op(n) == op_Block))    /* Flags tested local. */
    n = equivalent_node (n);
  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != old_n) {
    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, old_n);
  }
  /* Some more constant expression evaluation that does not allow the
     node to be freed. */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Cond) ||
      (get_irn_op(n) == op_Proj))     /* Flags tested local. */
    n = transform_node (n);
  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy (n);

  /* Now we have a legal, useful node. Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}
/* These optimizations never deallocate nodes. This can cause dead
   nodes lying on the obstack. Remove these by a dead node elimination,
   i.e., a copying garbage collection. */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  if (!get_optimize() && (get_irn_op(n) != op_Phi)) return n;
  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible. Why? */
    return n;
  }
  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tarval_mode (tv), tv);
        __dbg_info_merge_pair(n, old_n, dbg_const_eval);
        return n;
      }
    }
  }
  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Phi)  ||   /* always optimize these nodes. */
      (get_irn_op(n) == op_Id)   ||
      (get_irn_op(n) == op_Proj) ||
      (get_irn_op(n) == op_Block))    /* Flags tested local. */
    n = equivalent_node (n);
  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions. Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }
  /* Some more constant expression evaluation. */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Cond) ||
      (get_irn_op(n) == op_Proj))     /* Flags tested local. */
    n = transform_node (n);
  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy (n);

  /* Now we have a legal, useful node. Enter it in hash table for cse.
     Blocks should be unique anyways. (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/* Wrapper for external use, sets proper status bits after optimization. */
ir_node *
optimize_in_place (ir_node *n) {
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
}