/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
** All rights reserved.
**
** Authors: Christian Schaefer, Goetz Lindenmaier
**
** iropt --- optimizations intertwined with IR construction.
*/

# include "irnode_t.h"
# include "irgraph_t.h"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/* Trivial inlineable routine for copy propagation.
   Does follow Ids, needed to optimize inlined code. */
static inline ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}
static inline tarval *
value_of (ir_node *n)
{
  if ((n != NULL) && (get_irn_op(n) == op_Const))
    return get_Const_tarval(n);
  else
    return NULL;
}
/* If n can be computed, return the value, else NULL.  Performs
   constant folding.  GL: Only if n is an arithmetic operator? */
tarval *
computed_value (ir_node *n)
{
  tarval *res = NULL;
  tarval *v;
  ir_node *a = NULL, *b = NULL;  /* initialized to shut up gcc */
  tarval *ta = NULL, *tb = NULL; /* initialized to shut up gcc */

  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }

  /* if the operands are constants, get the target value, else set it NULL.
     (a and b may be NULL if we treat a node that is not a computation.) */
  ta = value_of (a);
  tb = value_of (b);
  /* Perform the constant evaluation / computation. */
  switch (get_irn_opcode(n)) {
  case iro_Const:
    res = get_Const_tarval(n);
    break;
  case iro_Add:
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
        && (get_irn_mode(a) != mode_p)) {
      res = tarval_add (ta, tb);
    }
    break;
  case iro_Sub:
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
        && (get_irn_mode(a) != mode_p)) {
      res = tarval_sub (ta, tb);
    } else if (a == b) {
      res = tarval_mode_null [get_irn_modecode (n)];
    }
    break;
  case iro_Minus:
    if (ta && mode_is_float(get_irn_mode(a)))
      res = tarval_neg (ta);
    break;
  case iro_Mul:
    if (ta && tb) /* tarval_mul tests for equivalent modes itself */ {
      res = tarval_mul (ta, tb);
    } else {
      /* a*0 = 0 or 0*b = 0:
         calls computed_value recursively and returns the 0 with proper
         mode. */
      if (   (tarval_classify ((v = computed_value (a))) == 0)
          || (tarval_classify ((v = computed_value (b))) == 0)) {
        res = v;
      }
    }
    break;
  case iro_Quot:
    /* This was missing in the original implementation.  Why? */
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tarval_classify(tb) == 0) { res = NULL; break; }
      res = tarval_quo(ta, tb);
    }
    break;
  case iro_Div:
    /* This was missing in the original implementation.  Why? */
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tarval_classify(tb) == 0) { res = NULL; break; }
      res = tarval_div(ta, tb);
    }
    break;
  case iro_Mod:
    /* This was missing in the original implementation.  Why? */
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tarval_classify(tb) == 0) { res = NULL; break; }
      res = tarval_mod(ta, tb);
    }
    break;
  /* for iro_DivMod see iro_Proj */
  case iro_Abs:
    if (ta)
      res = tarval_abs (ta);
    break;
  case iro_And:
    if (ta && tb) {
      res = tarval_and (ta, tb);
    } else {
      /* a&0 = 0 or 0&b = 0 */
      if (   (tarval_classify ((v = computed_value (a))) == 0)
          || (tarval_classify ((v = computed_value (b))) == 0)) {
        res = v;
      }
    }
    break;
  case iro_Or:
    if (ta && tb) {
      res = tarval_or (ta, tb);
    } else {
      /* a|~0 = ~0 or ~0|b = ~0 */
      if (   (tarval_classify ((v = computed_value (a))) == -1)
          || (tarval_classify ((v = computed_value (b))) == -1)) {
        res = v;
      }
    }
    break;
  case iro_Eor:  if (ta && tb) { res = tarval_eor (ta, tb); } break;
  case iro_Not:  if (ta)       { res = tarval_neg (ta); } break;
  case iro_Shl:  if (ta && tb) { res = tarval_shl (ta, tb); } break;
  /* tarval_shr is faulty !! */
  case iro_Shr:  if (ta && tb) { res = tarval_shr (ta, tb); } break;
  case iro_Shrs: if (ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
  case iro_Rot:  if (ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
  case iro_Conv: if (ta)       { res = tarval_convert_to (ta, get_irn_mode (n)); }
    break;
  case iro_Proj:  /* iro_Cmp */
    {
      ir_node *aa, *ab;

      a = get_Proj_pred(n);
      /* Optimize Cmp nodes.
         This performs a first step of unreachable code elimination.
         Proj can not be computed, but folding a Cmp above the Proj here is
         not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
         only one is used.
         There are several cases where we can evaluate a Cmp node:
         1. The nodes compared are both the same.  If we compare for
            equal, this will return true, else it will return false.
            This step relies on cse.
         2. The predecessors of Cmp are target values.  We can evaluate
            the Cmp.
         3. The predecessors are Allocs or void* constants.  Allocs never
            return NULL, they raise an exception.  Therefore we can predict
            the Cmp result. */
      if (get_irn_op(a) == op_Cmp) {
        aa = get_Cmp_left(a);
        ab = get_Cmp_right(a);
        if (aa == ab) { /* 1.: */
          /* This is a trick with the bits used for encoding the Cmp
             Proj numbers, the following statement is not the same:
             res = tarval_from_long (mode_b, (get_Proj_proj(n) == Eq)): */
          res = tarval_from_long (mode_b, (get_Proj_proj(n) & irpn_Eq));
        } else {
          tarval *taa = computed_value (aa);
          tarval *tab = computed_value (ab);
          if (taa && tab) { /* 2.: */
            /* strange checks... */
            ir_pncmp flags = tarval_comp (taa, tab);
            if (flags != irpn_False) {
              res = tarval_from_long (mode_b, get_Proj_proj(n) & flags);
            }
          } else {  /* check for 3.: */
            ir_node *aaa = skip_nop(skip_Proj(aa));
            ir_node *aba = skip_nop(skip_Proj(ab));
            if (   (   (/* aa is ProjP and aaa is Alloc */
                           (get_irn_op(aa) == op_Proj)
                        && (get_irn_mode(aa) == mode_p)
                        && (get_irn_op(aaa) == op_Alloc))
                    && (   (/* ab is constant void */
                               (get_irn_op(ab) == op_Const)
                            && (get_irn_mode(ab) == mode_p)
                            && (get_Const_tarval(ab) == tarval_p_void))
                        || (/* ab is another Alloc */
                               (get_irn_op(ab) == op_Proj)
                            && (get_irn_mode(ab) == mode_p)
                            && (get_irn_op(aba) == op_Alloc)
                            && (aaa != aba))))
                || (/* aa is void and aba is Alloc */
                       (get_irn_op(aa) == op_Const)
                    && (get_irn_mode(aa) == mode_p)
                    && (get_Const_tarval(aa) == tarval_p_void)
                    && (get_irn_op(ab) == op_Proj)
                    && (get_irn_mode(ab) == mode_p)
                    && (get_irn_op(aba) == op_Alloc)))
              /* 3.: */
              res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
          }
        }
      } else if (get_irn_op(a) == op_DivMod) {
        ta = value_of(get_DivMod_left(a));
        tb = value_of(get_DivMod_right(a));
        if (ta && tb
            && (get_irn_mode(get_DivMod_left(a)) ==
                get_irn_mode(get_DivMod_right(a)))) {
          if (tarval_classify(tb) == 0) { res = NULL; break; }
          if (get_Proj_proj(n) == 0) /* Div */
            res = tarval_div(ta, tb);
          else /* Mod */
            res = tarval_mod(ta, tb);
        }
      }
      /* printf(" # comp_val: Proj node, not optimized\n"); */
    }
    break;
  default:
    break;
  }

  return res;
}
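
/* Usage sketch (illustration only, not part of the original file): this is
   how optimize() further below employs computed_value -- if the whole node
   folds to a target value, it is replaced by a single Const.  Kept in
   #if 0 since it merely restates the pattern used there. */
#if 0
static ir_node *
fold_to_const_example (ir_node *n)
{
  tarval *tv = computed_value (n);      /* NULL if n is not computable */
  if (tv)
    return new_Const (get_tv_mode (tv), tv);  /* e.g. Add(2, 3) -> Const 5 */
  return n;                             /* not computable: unchanged */
}
#endif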
/* Returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (get_irn_mode (a) == mode_p
          && get_irn_mode (b) == mode_p);

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (a1 != b1 && get_irn_op (a1) == op_Alloc
                 && get_irn_op (b1) == op_Alloc)
      return 1;  /* two different allocations never alias */
  }
  return 0;
}
/* equivalent_node returns a node equivalent to n.  It skips all nodes that
   perform no actual computation, as, e.g., the Id nodes.  It does not create
   new nodes.  It is therefore safe to free n if the node returned is not n.
   If a node returns a Tuple we can not just skip it.  If the size of the
   in array fits, we transform n into a tuple (e.g., Div). */
ir_node *
equivalent_node (ir_node *n)
{
  int ins;
  ir_node *a = NULL; /* to shut up gcc */
  ir_node *b = NULL; /* to shut up gcc */
  ir_node *c = NULL; /* to shut up gcc */
  ins = get_irn_arity (n);

  /* get the operands we will work on */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }
  /* skip unnecessary nodes. */
  switch (get_irn_opcode (n)) {
  case iro_Block:
    {
      /* The Block constructor does not call optimize, but mature_block
         calls the optimization. */
      assert(get_Block_matured(n));

      /* A single entry Block following a single exit Block can be merged,
         if it is not the Start block. */
      /* !!! Beware, all Phi-nodes of n must have been optimized away.
         This should be true, as the block is matured before optimize is called.
         But what about Phi-cycles with the Phi0/Id that could not be resolved?
         Remaining Phi nodes are just Ids. */
      if (get_Block_n_cfgpreds(n) == 1
          && get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) {
        n = get_nodes_Block(get_Block_cfgpred(n, 0));
      } else if ((n != current_ir_graph->start_block) &&
                 (n != current_ir_graph->end_block)) {
        int i;
        /* If all inputs are dead, this block is dead too, except if it is
           the start or end block.  This is a step of unreachable code
           elimination. */
        for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
          if (!is_Bad(get_Block_cfgpred(n, i))) break;
        }
        if (i == get_Block_n_cfgpreds(n))
          n = new_Bad();
      }
    }
    break;
  case iro_Jmp:  /* GL: Why not the same for op_Raise?? */
    /* unreachable code elimination */
    if (is_Bad(get_nodes_Block(n))) n = new_Bad();
    break;
  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */
  /** remove stuff like x+0, x*1, x&true ... constant expression evaluation **/
  case iro_Or:  if (a == b) { n = a; break; }
    /* fall through: a|0 is handled like a+0 below */
  case iro_Add:
  case iro_Eor:
    {
      tarval *tv;
      ir_node *on;
      /* After running compute_node there is only one constant predecessor.
         Find this predecessor's value and remember the other node: */
      if ((tv = computed_value (a))) {
        on = b;
      } else if ((tv = computed_value (b))) {
        on = a;
      } else break;

      /* If this predecessor's constant value is zero, the operation is
         unnecessary.  Remove it: */
      if (tarval_classify (tv) == 0) {
        n = on;
      }
    }
    break;
  case iro_Shl:
  case iro_Shr:
  case iro_Shrs:
  case iro_Rot:
    /* these operations are not commutative.  Test only one predecessor. */
    if (tarval_classify (computed_value (b)) == 0) {
      n = a;
      /* Test if b > #bits of a ==> return 0 / divide b by #bits
         --> transform node? */
    }
    break;
  case iro_Not:   /* NotNot x == x */
  case iro_Minus: /* --x == x */  /* ??? Is this possible or can --x raise an
                                     out of bounds exception if min != max? */
    if (get_irn_op(get_unop_op(n)) == get_irn_op(n))
      n = get_unop_op(get_unop_op(n));
    break;
  case iro_Mul:
    /* Mul is commutative and again has another neutral element. */
    if (tarval_classify (computed_value (a)) == 1) {
      n = b;
    } else if (tarval_classify (computed_value (b)) == 1) {
      n = a;
    }
    break;
  case iro_Div:
    /* Div is not commutative. */
    if (tarval_classify (computed_value (b)) == 1) { /* div(x, 1) == x */
      /* Turn Div into a tuple (mem, bad, a) */
      ir_node *mem = get_Div_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, a);
    }
    break;
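  /* Illustration (not in the original): after the replacement above, a
     consumer Proj selecting the result of Div(mem, x, Const 1) now hangs
     off the Tuple and picks pred 2, i.e. x itself, via the Proj/Tuple case
     below; the exception Proj folds to Bad the same way. */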
  /* GL: Why are they skipped?  DivMod allocates new nodes --> it's
     treated in transform_node.
     case iro_Mod, Quot, DivMod
  */
  case iro_And:
    if (a == b) {
      n = a;
    }
    /* And has its own neutral element */
    else if (tarval_classify (computed_value (a)) == -1) {
      n = b;
    } else if (tarval_classify (computed_value (b)) == -1) {
      n = a;
    }
    break;
  case iro_Conv:
    if (get_irn_mode(n) == get_irn_mode(a)) { /* No Conv necessary */
      n = a;
    } else if (get_irn_mode(n) == mode_b) {
      if (get_irn_op(a) == op_Conv &&
          get_irn_mode (get_Conv_op(a)) == mode_b) {
        n = get_Conv_op(a); /* Convb(Conv*(xxxb(...))) == xxxb(...) */
      }
    }
    break;
  case iro_Phi:
    {
      /* Several optimizations:
         - no Phi in start block.
         - remove Id operators that are inputs to Phi
         - fold Phi-nodes, iff they have only one predecessor except
           themselves.
      */
      int i, n_preds;

      ir_node *block = NULL;     /* to shut up gcc */
      ir_node *first_val = NULL; /* to shut up gcc */
      ir_node *scnd_val = NULL;  /* to shut up gcc */

      n_preds = get_Phi_n_preds(n);

      block = get_nodes_Block(n);
      assert(get_irn_op (block) == op_Block);

      /* there should be no Phi nodes in the Start region. */
      if (block == current_ir_graph->start_block) {
        n = new_Bad();
        break;
      }

      if (n_preds == 0) { /* Phi of dead Region without predecessors. */
        /* GL: why not return new_Bad? */
        break;
      }
      /* first we test for a special case: */
      /* Confirm is a special node fixing additional information for a
         value that is known at a certain point.  This is useful for
         dataflow analysis. */
      if (n_preds == 2) {
        ir_node *a = follow_Id (get_Phi_pred(n, 0));
        ir_node *b = follow_Id (get_Phi_pred(n, 1));
        if (   (get_irn_op(a) == op_Confirm)
            && (get_irn_op(b) == op_Confirm)
            && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
            && (get_irn_n(a, 1) == get_irn_n (b, 1))
            && (a->data.num == (~b->data.num & irpn_True))) {
          n = follow_Id (get_irn_n(a, 0));
          break;
        }
      }
      /* Find first non-self-referencing input */
      for (i = 0; i < n_preds; ++i) {
        first_val = follow_Id(get_Phi_pred(n, i));
        /* skip Id */
        set_Phi_pred(n, i, first_val);
        if (   (first_val != n)                   /* not self pointer */
            && (get_irn_op(first_val) != op_Bad)  /* value not dead */
            && !(is_Bad (get_Block_cfgpred(block, i)))) { /* not dead control flow */
          break;                                  /* then found first value. */
        }
      }

      /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
      if (i >= n_preds) { n = new_Bad(); break; }

      /* follow_Id () for rest of inputs, determine if any of these
         are non-self-referencing */
      while (++i < n_preds) {
        scnd_val = follow_Id(get_Phi_pred(n, i));
        /* skip Id */
        set_Phi_pred(n, i, scnd_val);
        if (   (scnd_val != n)
            && (scnd_val != first_val)
            && (get_irn_op(scnd_val) != op_Bad)
            && !(is_Bad (get_Block_cfgpred(block, i)))) {
          break;
        }
      }

      /* Fold, if no multiple distinct non-self-referencing inputs */
      if (i >= n_preds) {
        n = first_val;
      } else {
        /* skip the remaining Ids. */
        while (++i < n_preds) {
          set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
        }
      }
    }
    break;
  case iro_Load:
    {
#if 0  /* Is an illegal transformation: different nodes can
          represent the same pointer value!! */
      a = skip_Proj(get_Load_mem(n));
      b = get_Load_ptr(n);

      if (get_irn_op(a) == op_Store) {
        if (different_identity (b, get_Store_ptr(a))) {
          /* load and store use different pointers, therefore load
             needs not take store's memory but the state before. */
          set_Load_mem (n, get_Store_mem(a));
        } else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
        }
      }
#endif
    }
    break;
  case iro_Store:
    {
      /* remove unnecessary store. */
      a = skip_Proj(get_Store_mem(n));
      b = get_Store_ptr(n);
      c = skip_Proj(get_Store_value(n));

      if (get_irn_op(a) == op_Store
          && get_Store_ptr(a) == b
          && skip_Proj(get_Store_value(a)) == c) {
        /* We have twice exactly the same store -- a write after write. */
        n = a;
      } else if (get_irn_op(c) == op_Load
                 && (a == c || skip_Proj(get_Load_mem(c)) == a)
                 && get_Load_ptr(c) == b)
                 /* !!!??? and a cryptic test */ {
        /* We just loaded the value from the same memory, i.e., the store
           doesn't change the memory -- a write after read. */
        a = get_Store_mem(n);
        turn_into_tuple(n, 2);
        set_Tuple_pred(n, 0, a);
        set_Tuple_pred(n, 1, new_Bad());
      }
    }
    break;
  case iro_Proj:
    {
      a = get_Proj_pred(n);

      if (get_irn_op(a) == op_Tuple) {
        /* Remove the Tuple/Proj combination. */
        if (get_Proj_proj(n) < get_Tuple_n_preds(a)) {
          n = get_Tuple_pred(a, get_Proj_proj(n));
        } else {
          assert(0); /* This should not happen! */
          n = new_Bad();
        }
      } else if (get_irn_mode(n) == mode_X &&
                 is_Bad(get_nodes_Block(n))) {
        /* Remove dead control flow. */
        n = new_Bad();
      }
    }
    break;
  default:
    break;
  }

  return n;
} /* end equivalent_node() */
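
/* Minimal usage sketch (illustration only, not part of the original file):
   equivalent_node never allocates, so -- per the comment above -- the
   caller may free the argument if some other node comes back. */
#if 0
static ir_node *
simplify_example (ir_node *n)
{
  ir_node *eq = equivalent_node (n);  /* e.g. Add(x, Const 0) yields x */
  if (eq != n)
    obstack_free (current_ir_graph->obst, n);  /* n performs no computation */
  return eq;
}
#endif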
/* Tries several [inplace] [optimizing] transformations and returns an
   equivalent node.  The difference to equivalent_node is that these
   transformations _do_ generate new nodes, and thus the old node must
   not be freed even if the equivalent node isn't the old one. */
static ir_node *
transform_node (ir_node *n)
{
  ir_node *a = NULL, *b;
  tarval *ta, *tb;

  switch (get_irn_opcode(n)) {
  case iro_DivMod:
    {
      int evaluated = 0;
      ir_mode *mode;

      a = get_DivMod_left(n);
      b = get_DivMod_right(n);
      mode = get_irn_mode(a);

      if (!(   mode_is_int(get_irn_mode(a))
            && mode_is_int(get_irn_mode(b))))
        break;

      if (a == b) {
        a = new_Const (mode, tarval_from_long (mode, 1));
        b = new_Const (mode, tarval_from_long (mode, 0));
        evaluated = 1;
      } else {
        ta = value_of(a);
        tb = value_of(b);

        if (tb) {
          if (tarval_classify(tb) == 1) {
            b = new_Const (mode, tarval_from_long (mode, 0));
            evaluated = 1;
          } else if (ta) {
            tarval *resa, *resb;
            resa = tarval_div (ta, tb);
            if (!resa) break; /* Causes exception!!! Model by replacing through
                                 Jmp for X result!? */
            resb = tarval_mod (ta, tb);
            if (!resb) break; /* Causes exception! */
            a = new_Const (mode, resa);
            b = new_Const (mode, resb);
            evaluated = 1;
          }
        } else if (tarval_classify (ta) == 0) {
          b = a;
          evaluated = 1;
        }
      }
      if (evaluated) { /* replace by tuple */
        ir_node *mem = get_DivMod_mem(n);
        turn_into_tuple(n, 4);
        set_Tuple_pred(n, 0, mem);
        set_Tuple_pred(n, 1, new_Bad()); /* no exception */
        set_Tuple_pred(n, 2, a);
        set_Tuple_pred(n, 3, b);
        assert(get_nodes_Block(n));
      }
    }
    break;
  case iro_Cond:
    {
      /* Replace the Cond by a Jmp if it branches on a constant
         condition. */
      ir_node *jmp;

      a = get_Cond_selector(n);
      ta = value_of(a);

      if (ta && (get_irn_mode(a) == mode_b)) {
        /* It's a boolean Cond, branching on a boolean constant.
           Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
        jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
        turn_into_tuple(n, 2);
        if (tv_val_b(ta) == 1) /* GL: I hope this returns 1 if true */ {
          set_Tuple_pred(n, 0, new_Bad());
          set_Tuple_pred(n, 1, jmp);
        } else {
          set_Tuple_pred(n, 0, jmp);
          set_Tuple_pred(n, 1, new_Bad());
        }
        /* We might generate an endless loop, so keep it alive. */
        add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
      } else if (ta && (get_irn_mode(a) == mode_I) && (get_Cond_kind(n) == dense)) {
        /* I don't want to allow Tuples smaller than the biggest Proj.
           Also this tuple might get really big...
           I generate the Jmp here, and remember it in link.  Link is used
           when optimizing Proj. */
        set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
        /* We might generate an endless loop, so keep it alive. */
        add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
      } else if (   (get_irn_op(get_Cond_selector(n)) == op_Eor)
                 && (get_irn_mode(get_Cond_selector(n)) == mode_b)
                 && (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
        /* The Eor is a negate.  Generate a new Cond without the negate,
           simulate the negate by exchanging the results. */
        set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                   get_Eor_left(a)));
      } else if (   (get_irn_op(get_Cond_selector(n)) == op_Not)
                 && (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
        /* A Not before the Cond.  Generate a new Cond without the Not,
           simulate the Not by exchanging the results. */
        set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                   get_Not_op(a)));
      }
    }
    break;
  case iro_Proj:
    {
      a = get_Proj_pred(n);

      if (   (get_irn_op(a) == op_Cond)
          && get_irn_link(a)
          && (get_irn_op(get_irn_link(a)) == op_Cond)) {
        /* Use the better Cond if the Proj projs from a Cond which gets
           its result from an Eor/Not. */
        assert (   (   (get_irn_op(get_Cond_selector(a)) == op_Eor)
                    || (get_irn_op(get_Cond_selector(a)) == op_Not))
                && (get_irn_mode(get_Cond_selector(a)) == mode_b)
                && (get_irn_op(get_irn_link(a)) == op_Cond)
                && (get_Cond_selector(get_irn_link(a)) ==
                    get_Eor_left(get_Cond_selector(a))));
        set_Proj_pred(n, get_irn_link(a));
        if (get_Proj_proj(n) == 0)
          set_Proj_proj(n, 1);
        else
          set_Proj_proj(n, 0);
      } else if (   (get_irn_op(a) == op_Cond)
                 && (get_irn_mode(get_Cond_selector(a)) == mode_I)
                 && value_of(get_Cond_selector(a))
                 && (get_Cond_kind(a) == dense)) {
        /* The Cond is a Switch on a Constant */
        if (get_Proj_proj(n) == tv_val_CHIL(value_of(get_Cond_selector(a)))) {
          /* The always taken branch, reuse the existing Jmp. */
          if (!get_irn_link(a)) /* well, if it exists ;-> */
            set_irn_link(a, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
          assert(get_irn_op(get_irn_link(a)) == op_Jmp);
          n = get_irn_link(a);
        } else { /* Not taken control flow, but be careful with the default! */
          if (get_Proj_proj(n) < a->attr.c.default_proj) {
            /* a never taken branch */
            n = new_Bad();
          } else {
            a->attr.c.default_proj = get_Proj_proj(n);
          }
        }
      }
    }
    break;
  case iro_Eor:
    { /* @@@ not tested as boolean Eor not allowed any more. */
      a = get_Eor_left(n);
      b = get_Eor_right(n);

      if (   (get_irn_mode(n) == mode_b)
          && (get_irn_op(a) == op_Proj)
          && (get_irn_mode(a) == mode_b)
          && (tarval_classify (computed_value (b)) == 1)
          && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
        /* The Eor negates a Cmp.  The Cmp has the negated result anyways! */
        n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                       mode_b, get_negated_pnc(get_Proj_proj(a)));
      else if (   (get_irn_mode(n) == mode_b)
               && (tarval_classify (computed_value (b)) == 1))
        /* The Eor is a Not.  Replace it by a Not. */
        /* ????!!!Extend to bitfield 1111111. */
        n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);
    }
    break;
  case iro_Not:
    {
      a = get_Not_op(n);

      if (   (get_irn_mode(n) == mode_b)
          && (get_irn_op(a) == op_Proj)
          && (get_irn_mode(a) == mode_b)
          && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
        /* We negate a Cmp.  The Cmp has the negated result anyways! */
        n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                       mode_b, get_negated_pnc(get_Proj_proj(a)));
    }
    break;
  default:
    break;
  }

  return n;
}
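
/* Illustration (not part of the original file): the Eor/Not cases above
   rewrite a negated comparison by re-projecting the same Cmp node -- no new
   computation is needed, only the Proj number changes. */
#if 0
static ir_node *
negate_cmp_example (ir_node *proj)
{
  /* proj is a mode_b Proj of a Cmp; Not(proj) is just the negated Proj: */
  assert (get_irn_op (get_Proj_pred (proj)) == op_Cmp);
  return new_r_Proj (current_ir_graph, get_nodes_Block (proj),
                     get_Proj_pred (proj), mode_b,
                     get_negated_pnc (get_Proj_proj (proj)));
}
#endif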
/* **************** Common Subexpression Elimination **************** */

/* Compare function for two nodes in the hash table.  Gets two     */
/* nodes as parameters.  Returns 0 if the nodes are a cse.         */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i;

  a = (ir_node *)elt;
  b = (ir_node *)key;

  if (a == b) return 0;
  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are equal */
  /* GL: we optimize only nodes with in arrays of fixed sizes.
     if (get_irn_arity (a) != -2) {
       ins = get_irn_arity (a);
       if (ins != get_irn_arity (b)) return 1;
       ain = get_irn_in (a);
       bin = get_irn_in (b);
     }
  */
  if (get_irn_arity (a) != get_irn_arity(b))
    return 1;
  /* for block-local cse and pinned nodes: */
  if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
    if (get_irn_n(a, -1) != get_irn_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < get_irn_arity(a); i++)
    if (get_irn_n(a, i) != get_irn_n(b, i))
      return 1;
  switch (get_irn_opcode(a)) {
  case iro_Const:
    return get_irn_const_attr (a) != get_irn_const_attr (b);
  case iro_Proj:
    return get_irn_proj_attr (a) != get_irn_proj_attr (b);
  case iro_Alloc:
    return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
        || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
  case iro_Free:
    return (get_irn_free_attr(a) != get_irn_free_attr(b));
  case iro_SymConst:
    return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
        || (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
  case iro_Call:
    return (get_irn_call_attr(a) != get_irn_call_attr(b));
  case iro_Sel:
    return (get_irn_sel_attr(a).ent->kind != get_irn_sel_attr(b).ent->kind)
        || (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)
        || (get_irn_sel_attr(a).ent->owner != get_irn_sel_attr(b).ent->owner)
        || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
        || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type)
        || (get_irn_sel_attr(a).ltyp != get_irn_sel_attr(b).ltyp);
  case iro_Phi:
    return get_irn_phi_attr (a) != get_irn_phi_attr (b);
  default:
    break;
  }

  return 0;
}
/* Calculates a hash value for a node. */
static unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i;

  /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
  h = get_irn_arity(node);

  /* consider all in nodes... except the block. */
  for (i = 0; i < get_irn_arity(node); i++) {
    h = 9*h + (unsigned long)get_irn_n(node, i);
  }

  /* ...mode,... */
  h = 9*h + (unsigned long) get_irn_mode (node);
  /* ...and code */
  h = 9*h + (unsigned long) get_irn_op (node);

  return h;
}
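
/* Worked example (not in the original): for a node with two predecessors
   p0, p1 the recurrence above expands to
     h = 9*(9*(9*(9*2 + (unsigned long)p0) + (unsigned long)p1)
            + (unsigned long)mode) + (unsigned long)op
   -- the pointer values themselves serve as the hashed integers, matching
   the formula in the comment above. */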
pset *
new_identities (void)
{
  return new_pset (vt_cmp, TUNE_NIR_NODES);
}

void
del_identities (pset *value_table)
{
  del_pset (value_table);
}
/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table. */
static inline ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    switch (get_irn_opcode (n)) {
    case iro_Add:
    case iro_Mul:
    case iro_Or:
    case iro_And:
    case iro_Eor:
      /* for commutative operators perform  a OP b == b OP a */
      if (get_binop_left(n) > get_binop_right(n)) {
        ir_node *h = get_binop_left(n);
        set_binop_left(n, get_binop_right(n));
        set_binop_right(n, h);
      }
      break;
    default:
      break;
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
}
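
/* Illustration (not in the original): thanks to the operand swap above,
   Add(x, y) and Add(y, x) are normalized to the same operand order before
   hashing, so the pset lookup identifies them as a cse pair. */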
/* During construction we set the pinned flag in the graph right when the
   optimization is performed.  The flag turning on procedure global cse could
   be changed between two allocations.  This way we are safe. */
static inline ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;
  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, floats);
  return n;
}
/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table, enters it in the table
   if it isn't there yet. */
static ir_node *
identify_remember (pset *value_table, ir_node *node)
{
  ir_node *o = NULL;

  if (!value_table) return node;

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, node, ir_node_hash (node));

  if (o == node) return node;

  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  identify_remember (value_table, node);
}
/* Garbage in, garbage out.  If a node has a dead input, i.e., the
   Bad node is input to the node, return the Bad node. */
static inline ir_node *
gigo (ir_node *node)
{
  int i;
  ir_op* op = get_irn_op(node);

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
    for (i = -1; i < get_irn_arity(node); i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* If a Block has only Bads as predecessors it's garbage. */
  /* If a Phi has only Bads as predecessors it's garbage. */
  if (op == op_Block || op == op_Phi) {
    for (i = 0; i < get_irn_arity(node); i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == get_irn_arity(node)) node = new_Bad();
  }
  return node;
}
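
/* Illustration (not in the original): gigo turns, e.g., Add(x, Bad) into
   Bad via the first loop (which also checks the block input at index -1),
   while a Block or Phi becomes Bad only if *all* its predecessors are Bad. */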
/* These optimizations deallocate nodes from the obstack.
   It can only be called if it is guaranteed that no other nodes
   reference this one, i.e., right after construction of a node. */
ir_node *
optimize (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_optimize()) && (get_irn_op(n) != op_Phi)) return n;

  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if (tv) {
        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        return new_Const (get_tv_mode (tv), tv);
      }
    }
  }

  /* remove unnecessary nodes */
  if (get_opt_constant_folding() || get_irn_op(n) == op_Phi)
    n = equivalent_node (n);
  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  /* identify found a cse, so deallocate the old node. */
  if (n != old_n) {
    obstack_free (current_ir_graph->obst, old_n);
  }
  /* The AmRoq fiasco returns n here.  Martin's version doesn't. */

  /* Some more constant expression evaluation that does not allow to
     free the node. */
  if (get_opt_constant_folding())
    n = transform_node (n);
  /* Remove nodes with dead (Bad) input. */
  if (get_opt_unreachable_code())
    n = gigo (n);
  /* Now we can verify the node, as it has no dead inputs any more. */

  /* Now we have a legal, useful node.  Enter it in the hash table for cse */
  if (get_opt_cse()) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

#if 0  /* GL: what's the use of this?? */
  if ((current_ir_graph->state & irgs_building) && IR_KEEP_ALIVE (n)) {
    assert (~current_ir_graph->state & irgs_keep_alives_in_arr);
    pdeq_putr (current_ir_graph->keep.living, n);
  }
#endif

  return n;
}
/* These optimizations never deallocate nodes.  This can cause dead
   nodes lying on the obstack.  Remove these by a dead node elimination,
   i.e., a copying garbage collection. */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  if (!get_optimize()) return n;

  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible.  Why? */
    return n;
  }
  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if (tv) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tv_mode (tv), tv);
        deb_info_copy(n, old_n, id_from_str("const_eval", 10));
        return n;
        /* xprintf("* optimize: computed node %I\n", n->op->name);*/
      }
    }
  }

  /* remove unnecessary nodes */
  /*if (get_opt_constant_folding()) */
  if (get_opt_constant_folding() || get_irn_op(n) == op_Phi)
    n = equivalent_node (n);
  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }

  /* identify found a cse, so deallocate the old node. */
  /* The AmRoq fiasco returns n here.  Martin's version doesn't. */

  /* Some more constant expression evaluation. */
  if (get_opt_constant_folding())
    n = transform_node (n);
  /* Remove nodes with dead (Bad) input. */
  if (get_opt_unreachable_code())
    n = gigo (n);
  /* Now we can verify the node, as it has no dead inputs any more. */

  /* Now we have a legal, useful node.  Enter it in the hash table for cse.
     Blocks should be unique anyways.  (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/* Wrapper for external use.  Sets proper status bits after optimization. */
ir_node *
optimize_in_place (ir_node *n) {
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  return optimize_in_place_2 (n);
}
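
/* Usage sketch (illustration only, not part of the original file;
   exchange() is assumed from the surrounding Firm API and is not used in
   this file): re-optimizing a single node of a finished graph through the
   wrapper above. */
#if 0
static void
reoptimize_node_example (ir_node *n)
{
  ir_node *better = optimize_in_place (n);
  if (better != n)
    exchange (n, better);  /* rewire all users of n to the better node */
}
#endif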