1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Authors: Christian Schaefer, Goetz Lindenmaier
6 ** iropt --- optimizations intertwined with IR construction.
15 # include "irnode_t.h"
16 # include "irgraph_t.h"
23 # include "dbginfo_t.h"
24 # include "iropt_dbg.c"
26 /* Make types visible to allow most efficient access */
27 # include "entity_t.h"
29 /* Trivial INLINEable routine for copy propagation.
Follows Ids; needed to optimize INLINEd code. */
31 static INLINE ir_node *
32 follow_Id (ir_node *n)
34 while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
38 static INLINE tarval *
41 if ((n != NULL) && (get_irn_op(n) == op_Const))
42 return get_Const_tarval(n);
47 /* if n can be computed, return the value, else NULL. Performs
48 constant folding. GL: Only if n is arithmetic operator? */
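/* Illustration (a sketch, not from the original source): for
n = Add(Const 2, Const 3) with matching, non-pointer integer modes,
computed_value returns the tarval for 5; for n = Add(x, Const 3) with
non-constant x it returns NULL. */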
50 computed_value (ir_node *n)
54 ir_node *a = NULL, *b = NULL; /* initialized to shut up gcc */
55 tarval *ta = NULL, *tb = NULL; /* initialized to shut up gcc */
59 /* get the operands we will work on for simple cases. */
61 a = get_binop_left(n);
62 b = get_binop_right(n);
63 } else if (is_unop(n)) {
/* if the operands are constants, get the target value, else set it to NULL.
(a and b may be NULL if we treat a node that is not a computation.) */
72 /* Perform the constant evaluation / computation. */
73 switch (get_irn_opcode(n)) {
75 res = get_Const_tarval(n);
if ((get_SymConst_kind(n) == size) &&
(get_type_state(get_SymConst_type(n)) == layout_fixed))
80 res = tarval_from_long (mode_i, get_type_size(get_SymConst_type(n)));
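/* E.g., assuming a 4-byte int type: once the type's layout is fixed, its
SymConst(size) folds to the tarval 4 (in whatever unit get_type_size
uses). */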
83 if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
84 && (get_irn_mode(a) != mode_p)) {
85 res = tarval_add (ta, tb);
89 if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
90 && (get_irn_mode(a) != mode_p)) {
91 res = tarval_sub (ta, tb);
93 res = tarval_mode_null [get_irn_modecode (n)];
97 if (ta && mode_is_float(get_irn_mode(a)))
98 res = tarval_neg (ta);
101 if (ta && tb) /* tarval_mul tests for equivalent modes itself */ {
102 res = tarval_mul (ta, tb);
104 /* a*0 = 0 or 0*b = 0:
calls computed_value recursively and returns the 0 with the proper
108 if ( (tarval_classify ((v = computed_value (a))) == 0)
109 || (tarval_classify ((v = computed_value (b))) == 0)) {
/* This was missing in the original implementation. Why? */
116 if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
117 if (tarval_classify(tb) == 0) {res = NULL; break;}
118 res = tarval_quo(ta, tb);
/* This was missing in the original implementation. Why? */
123 if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
124 if (tarval_classify(tb) == 0) {res = NULL; break;}
125 res = tarval_div(ta, tb);
/* This was missing in the original implementation. Why? */
130 if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
131 if (tarval_classify(tb) == 0) {res = NULL; break;}
132 res = tarval_mod(ta, tb);
135 /* for iro_DivMod see iro_Proj */
138 res = tarval_abs (ta);
142 res = tarval_and (ta, tb);
145 if ( (tarval_classify ((v = computed_value (a))) == 0)
146 || (tarval_classify ((v = computed_value (b))) == 0)) {
153 res = tarval_or (ta, tb);
156 if ( (tarval_classify ((v = computed_value (a))) == -1)
157 || (tarval_classify ((v = computed_value (b))) == -1)) {
162 case iro_Eor: if (ta && tb) { res = tarval_eor (ta, tb); } break;
163 case iro_Not: if (ta) { res = tarval_neg (ta); } break;
164 case iro_Shl: if (ta && tb) { res = tarval_shl (ta, tb); } break;
165 /* tarval_shr is faulty !! */
166 case iro_Shr: if (ta && tb) { res = tarval_shr (ta, tb); } break;
167 case iro_Shrs:if (ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
168 case iro_Rot: if (ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
169 case iro_Conv:if (ta) { res = tarval_convert_to (ta, get_irn_mode (n)); }
171 case iro_Proj: /* iro_Cmp */
175 a = get_Proj_pred(n);
176 /* Optimize Cmp nodes.
177 This performs a first step of unreachable code elimination.
Proj cannot be computed, but folding a Cmp above the Proj here is
179 not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
There are several cases where we can evaluate a Cmp node:
182 1. The nodes compared are both the same. If we compare for
183 equal, greater equal, ... this will return true, else it
184 will return false. This step relies on cse.
185 2. The predecessors of Cmp are target values. We can evaluate
187 3. The predecessors are Allocs or void* constants. Allocs never
188 return NULL, they raise an exception. Therefore we can predict
190 if (get_irn_op(a) == op_Cmp) {
191 aa = get_Cmp_left(a);
192 ab = get_Cmp_right(a);
193 if (aa == ab) { /* 1.: */
/* This is a trick with the bits used for encoding the Cmp
Proj numbers; the following statement is not the same as
res = tarval_from_long (mode_b, (get_Proj_proj(n) == Eq)). */
197 res = tarval_from_long (mode_b, (get_Proj_proj(n) & irpn_Eq));
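/* Example (assuming irpn_Lt, irpn_Eq, irpn_Gt are disjoint bits): for
a <= a the proj number irpn_Le == (irpn_Lt|irpn_Eq) has the irpn_Eq bit
set, so the result is true; for a < a it is clear, so the result is
false. */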
199 tarval *taa = computed_value (aa);
200 tarval *tab = computed_value (ab);
201 if (taa && tab) { /* 2.: */
202 /* strange checks... */
203 ir_pncmp flags = tarval_comp (taa, tab);
204 if (flags != irpn_False) {
205 res = tarval_from_long (mode_b, get_Proj_proj(n) & flags);
207 } else { /* check for 3.: */
208 ir_node *aaa = skip_nop(skip_Proj(aa));
209 ir_node *aba = skip_nop(skip_Proj(ab));
210 if ( ( (/* aa is ProjP and aaa is Alloc */
211 (get_irn_op(aa) == op_Proj)
212 && (get_irn_mode(aa) == mode_p)
213 && (get_irn_op(aaa) == op_Alloc))
214 && ( (/* ab is constant void */
215 (get_irn_op(ab) == op_Const)
216 && (get_irn_mode(ab) == mode_p)
217 && (get_Const_tarval(ab) == tarval_p_void))
218 || (/* ab is other Alloc */
219 (get_irn_op(ab) == op_Proj)
220 && (get_irn_mode(ab) == mode_p)
221 && (get_irn_op(aba) == op_Alloc)
223 || (/* aa is void and aba is Alloc */
224 (get_irn_op(aa) == op_Const)
225 && (get_irn_mode(aa) == mode_p)
226 && (get_Const_tarval(aa) == tarval_p_void)
227 && (get_irn_op(ab) == op_Proj)
228 && (get_irn_mode(ab) == mode_p)
229 && (get_irn_op(aba) == op_Alloc)))
231 res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
234 } else if (get_irn_op(a) == op_DivMod) {
235 ta = value_of(get_DivMod_left(a));
236 tb = value_of(get_DivMod_right(a));
/* a is the DivMod node here: compare the modes of its operands, not its own mode_T. */
if (ta && tb && (get_irn_mode(get_DivMod_left(a)) == get_irn_mode(get_DivMod_right(a)))) {
238 if (tarval_classify(tb) == 0) {res = NULL; break;}
239 if (get_Proj_proj(n)== 0) /* Div */
240 res = tarval_div(ta, tb);
242 res = tarval_mod(ta, tb);
/* returns 1 if a and b are pointers to different locations. */
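/* E.g., the Projs of two distinct Alloc nodes can never alias, as each
Alloc returns a fresh location; see the test below. */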
257 different_identity (ir_node *a, ir_node *b)
259 assert (get_irn_mode (a) == mode_p
260 && get_irn_mode (b) == mode_p);
262 if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
263 ir_node *a1 = get_Proj_pred (a);
264 ir_node *b1 = get_Proj_pred (b);
265 if (a1 != b1 && get_irn_op (a1) == op_Alloc
266 && get_irn_op (b1) == op_Alloc)
273 /* equivalent_node returns a node equivalent to N. It skips all nodes that
274 perform no actual computation, as, e.g., the Id nodes. It does not create
275 new nodes. It is therefore safe to free N if the node returned is not N.
If a node returns a Tuple we cannot just skip it. If the size of the
277 in array fits, we transform n into a tuple (e.g., Div). */
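/* Example (from the iro_Div case below): Div(mem, x, 1) computes x, but
since Div produces a Tuple (memory, exception, result) the node is turned
into Tuple(mem, Bad, x) in place instead of being replaced by x. */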
279 equivalent_node (ir_node *n)
ir_node *a = NULL; /* to shut up gcc */
ir_node *b = NULL; /* to shut up gcc */
ir_node *c = NULL; /* to shut up gcc */
287 ins = get_irn_arity (n);
289 /* get the operands we will work on */
291 a = get_binop_left(n);
292 b = get_binop_right(n);
293 } else if (is_unop(n)) {
297 /* skip unnecessary nodes. */
298 switch (get_irn_opcode (n)) {
301 /* The Block constructor does not call optimize, but mature_block
302 calls the optimization. */
303 assert(get_Block_matured(n));
305 /* Straightening: a single entry Block following a single exit Block
306 can be merged, if it is not the Start block. */
307 /* !!! Beware, all Phi-nodes of n must have been optimized away.
308 This should be true, as the block is matured before optimize is called.
309 But what about Phi-cycles with the Phi0/Id that could not be resolved?
310 Remaining Phi nodes are just Ids. */
311 if ((get_Block_n_cfgpreds(n) == 1) &&
312 (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
313 (get_opt_control_flow())) {
314 n = get_nodes_Block(get_Block_cfgpred(n, 0)); DBG_OPT_STG;
316 } else if ((get_Block_n_cfgpreds(n) == 2) &&
317 (get_opt_control_flow())) {
318 /* Test whether Cond jumps twice to this block
319 @@@ we could do this also with two loops finding two preds from several ones. */
320 a = get_Block_cfgpred(n, 0);
321 b = get_Block_cfgpred(n, 1);
322 if ((get_irn_op(a) == op_Proj) &&
323 (get_irn_op(b) == op_Proj) &&
324 (get_Proj_pred(a) == get_Proj_pred(b)) &&
325 (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
326 (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
327 /* Also a single entry Block following a single exit Block. Phis have
328 twice the same operand and will be optimized away. */
329 n = get_nodes_Block(a); DBG_OPT_IFSIM;
331 } else if (get_opt_unreachable_code() &&
332 (n != current_ir_graph->start_block) &&
333 (n != current_ir_graph->end_block) ) {
335 /* If all inputs are dead, this block is dead too, except if it is
336 the start or end block. This is a step of unreachable code
338 for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
339 if (!is_Bad(get_Block_cfgpred(n, i))) break;
341 if (i == get_Block_n_cfgpreds(n))
347 case iro_Jmp: /* GL: Why not same for op_Raise?? */
348 /* unreachable code elimination */
349 if (is_Bad(get_nodes_Block(n))) n = new_Bad();
351 /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
352 See cases for iro_Cond and iro_Proj in transform_node. */
/** remove stuff such as x+0, x*1, x&true ... constant expression evaluation **/
354 case iro_Or: if (a == b) {n = a; break;}
/* After running computed_value there is at most one constant predecessor.
Find this predecessor's value and remember the other node: */
361 if ((tv = computed_value (a))) {
363 } else if ((tv = computed_value (b))) {
/* If this predecessor's constant value is zero, the operation is
368 unnecessary. Remove it: */
369 if (tarval_classify (tv) == 0) {
370 n = on; DBG_OPT_ALGSIM1;
378 /* these operations are not commutative. Test only one predecessor. */
379 if (tarval_classify (computed_value (b)) == 0) {
380 n = a; DBG_OPT_ALGSIM1;
381 /* Test if b > #bits of a ==> return 0 / divide b by #bits
382 --> transform node? */
385 case iro_Not: /* NotNot x == x */
386 case iro_Minus: /* --x == x */ /* ??? Is this possible or can --x raise an
out of bounds exception if min != max? */
388 if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
/* Mul is commutative, and its neutral element is 1 (rather than 0). */
if (tarval_classify (computed_value (a)) == 1) {
n = b; DBG_OPT_ALGSIM1;
} else if (tarval_classify (computed_value (b)) == 1) {
n = a; DBG_OPT_ALGSIM1;
401 /* Div is not commutative. */
402 if (tarval_classify (computed_value (b)) == 1) { /* div(x, 1) == x */
403 /* Turn Div into a tuple (mem, bad, a) */
404 ir_node *mem = get_Div_mem(n);
405 turn_into_tuple(n, 3);
406 set_Tuple_pred(n, 0, mem);
407 set_Tuple_pred(n, 1, new_Bad());
408 set_Tuple_pred(n, 2, a);
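/* A later Proj(n, 2) now yields a directly, Proj(n, 0) the unchanged
memory, and the exception output (pred 1) is dead. */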
/* case iro_Mod, iro_Quot, iro_DivMod:
DivMod allocates new nodes --> it's treated in transform_node.
What about Quot, DivMod? */
n = a; /* And has its own neutral element */
419 } else if (tarval_classify (computed_value (a)) == -1) {
421 } else if (tarval_classify (computed_value (b)) == -1) {
424 if (n != oldn) DBG_OPT_ALGSIM1;
427 if (get_irn_mode(n) == get_irn_mode(a)) { /* No Conv necessary */
428 n = a; DBG_OPT_ALGSIM3;
429 } else if (get_irn_mode(n) == mode_b) {
430 if (get_irn_op(a) == op_Conv &&
431 get_irn_mode (get_Conv_op(a)) == mode_b) {
432 n = get_Conv_op(a); /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM2;
439 /* Several optimizations:
440 - no Phi in start block.
441 - remove Id operators that are inputs to Phi
442 - fold Phi-nodes, iff they have only one predecessor except
ir_node *block = NULL; /* to shut up gcc */
ir_node *first_val = NULL; /* to shut up gcc */
ir_node *scnd_val = NULL; /* to shut up gcc */
451 n_preds = get_Phi_n_preds(n);
453 block = get_nodes_Block(n);
454 if ((is_Bad(block)) || /* Control dead */
455 (block == current_ir_graph->start_block)) /* There should be no Phi nodes */
456 return new_Bad(); /* in the Start Block. */
458 if (n_preds == 0) break; /* Phi of dead Region without predecessors. */
461 /* first we test for a special case: */
462 /* Confirm is a special node fixing additional information for a
463 value that is known at a certain point. This is useful for
464 dataflow analysis. */
466 ir_node *a = follow_Id (get_Phi_pred(n, 0));
467 ir_node *b = follow_Id (get_Phi_pred(n, 1));
468 if ( (get_irn_op(a) == op_Confirm)
469 && (get_irn_op(b) == op_Confirm)
470 && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
471 && (get_irn_n(a, 1) == get_irn_n (b, 1))
472 && (a->data.num == (~b->data.num & irpn_True) )) {
473 n = follow_Id (get_irn_n(a, 0));
479 /* Find first non-self-referencing input */
480 for (i = 0; i < n_preds; ++i) {
481 first_val = follow_Id(get_Phi_pred(n, i));
483 set_Phi_pred(n, i, first_val);
484 if ( (first_val != n) /* not self pointer */
485 && (get_irn_op(first_val) != op_Bad) /* value not dead */
486 && !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */
487 break; /* then found first value. */
491 /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
492 if (i >= n_preds) { n = new_Bad(); break; }
496 /* follow_Id () for rest of inputs, determine if any of these
497 are non-self-referencing */
498 while (++i < n_preds) {
499 scnd_val = follow_Id(get_Phi_pred(n, i));
501 set_Phi_pred(n, i, scnd_val);
503 && (scnd_val != first_val)
504 && (get_irn_op(scnd_val) != op_Bad)
505 && !(is_Bad (get_Block_cfgpred(block, i))) ) {
510 /* Fold, if no multiple distinct non-self-referencing inputs */
512 n = first_val; DBG_OPT_PHI;
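/* E.g., Phi(x, x, Bad) folds to x: Bad inputs, dead control flow and
self references are skipped, and all remaining inputs are the same
value. */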
514 /* skip the remaining Ids. */
515 while (++i < n_preds) {
516 set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
524 #if 0 /* Is an illegal transformation: different nodes can
525 represent the same pointer value!! */
526 a = skip_Proj(get_Load_mem(n));
529 if (get_irn_op(a) == op_Store) {
530 if ( different_identity (b, get_Store_ptr(a))) {
/* load and store use different pointers, therefore the load
need not take the store's memory but the state before it. */
533 set_Load_mem (n, get_Store_mem(a));
534 } else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
541 /* remove unnecessary store. */
543 a = skip_Proj(get_Store_mem(n));
544 b = get_Store_ptr(n);
545 c = skip_Proj(get_Store_value(n));
547 if (get_irn_op(a) == op_Store
548 && get_Store_ptr(a) == b
549 && skip_Proj(get_Store_value(a)) == c) {
550 /* We have twice exactly the same store -- a write after write. */
552 } else if (get_irn_op(c) == op_Load
553 && (a == c || skip_Proj(get_Load_mem(c)) == a)
554 && get_Load_ptr(c) == b ) {
555 /* We just loaded the value from the same memory, i.e., the store
556 doesn't change the memory -- a write after read. */
557 a = get_Store_mem(n);
558 turn_into_tuple(n, 2);
559 set_Tuple_pred(n, 0, a);
560 set_Tuple_pred(n, 1, new_Bad()); DBG_OPT_WAR;
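/* E.g., for v = Load(M, p) followed by Store(M', p, v), where M' is the
Load's memory result, the Store is redundant: its memory result is just
its memory input. */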
567 a = get_Proj_pred(n);
569 if ( get_irn_op(a) == op_Tuple) {
570 /* Remove the Tuple/Proj combination. */
if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) { /* proj numbers are 0-based */
572 n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
574 assert(0); /* This should not happen! */
577 } else if (get_irn_mode(n) == mode_X &&
578 is_Bad(get_nodes_Block(n))) {
579 /* Remove dead control flow -- early gigo. */
586 n = follow_Id (n); DBG_OPT_ID;
593 } /* end equivalent_node() */
596 /* tries several [inplace] [optimizing] transformations and returns an
equivalent node. The difference from equivalent_node is that these
598 transformations _do_ generate new nodes, and thus the old node must
599 not be freed even if the equivalent node isn't the old one. */
601 transform_node (ir_node *n)
604 ir_node *a = NULL, *b;
607 switch (get_irn_opcode(n)) {
609 ta = computed_value(n);
611 /* Turn Div into a tuple (mem, bad, value) */
612 ir_node *mem = get_Div_mem(n);
613 turn_into_tuple(n, 3);
614 set_Tuple_pred(n, 0, mem);
615 set_Tuple_pred(n, 1, new_Bad());
616 set_Tuple_pred(n, 2, new_Const(get_tv_mode(ta), ta));
620 ta = computed_value(n);
/* Turn Mod into a tuple (mem, bad, value) */
623 ir_node *mem = get_Mod_mem(n);
624 turn_into_tuple(n, 3);
625 set_Tuple_pred(n, 0, mem);
626 set_Tuple_pred(n, 1, new_Bad());
627 set_Tuple_pred(n, 2, new_Const(get_tv_mode(ta), ta));
635 a = get_DivMod_left(n);
636 b = get_DivMod_right(n);
637 mode = get_irn_mode(a);
639 if (!(mode_is_int(get_irn_mode(a)) &&
640 mode_is_int(get_irn_mode(b))))
644 a = new_Const (mode, tarval_from_long (mode, 1));
645 b = new_Const (mode, tarval_from_long (mode, 0));
652 if (tarval_classify(tb) == 1) {
653 b = new_Const (mode, tarval_from_long (mode, 0));
657 resa = tarval_div (ta, tb);
658 if (!resa) break; /* Causes exception!!! Model by replacing through
659 Jmp for X result!? */
660 resb = tarval_mod (ta, tb);
661 if (!resb) break; /* Causes exception! */
662 a = new_Const (mode, resa);
663 b = new_Const (mode, resb);
666 } else if (tarval_classify (ta) == 0) {
671 if (evaluated) { /* replace by tuple */
672 ir_node *mem = get_DivMod_mem(n);
673 turn_into_tuple(n, 4);
674 set_Tuple_pred(n, 0, mem);
675 set_Tuple_pred(n, 1, new_Bad()); /* no exception */
676 set_Tuple_pred(n, 2, a);
677 set_Tuple_pred(n, 3, b);
678 assert(get_nodes_Block(n));
684 /* Replace the Cond by a Jmp if it branches on a constant
687 a = get_Cond_selector(n);
691 (get_irn_mode(a) == mode_b) &&
692 (get_opt_unreachable_code())) {
693 /* It's a boolean Cond, branching on a boolean constant.
694 Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
695 jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
696 turn_into_tuple(n, 2);
697 if (tv_val_b(ta) == 1) /* GL: I hope this returns 1 if true */ {
698 set_Tuple_pred(n, 0, new_Bad());
699 set_Tuple_pred(n, 1, jmp);
701 set_Tuple_pred(n, 0, jmp);
702 set_Tuple_pred(n, 1, new_Bad());
704 /* We might generate an endless loop, so keep it alive. */
705 add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
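/* Sketch: for Cond(true) the tuple is (Bad, Jmp), so the Proj for the
taken (true) branch yields the Jmp and the other Proj dies. */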
707 (get_irn_mode(a) == mode_I) &&
708 (get_Cond_kind(n) == dense) &&
709 (get_opt_unreachable_code())) {
710 /* I don't want to allow Tuples smaller than the biggest Proj.
711 Also this tuple might get really big...
712 I generate the Jmp here, and remember it in link. Link is used
713 when optimizing Proj. */
714 set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
715 /* We might generate an endless loop, so keep it alive. */
716 add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
717 } else if ((get_irn_op(get_Cond_selector(n)) == op_Eor)
718 && (get_irn_mode(get_Cond_selector(n)) == mode_b)
719 && (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
720 /* The Eor is a negate. Generate a new Cond without the negate,
721 simulate the negate by exchanging the results. */
722 set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
724 } else if ((get_irn_op(get_Cond_selector(n)) == op_Not)
725 && (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
726 /* A Not before the Cond. Generate a new Cond without the Not,
727 simulate the Not by exchanging the results. */
728 set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
735 a = get_Proj_pred(n);
737 if ((get_irn_op(a) == op_Cond)
739 && get_irn_op(get_irn_link(a)) == op_Cond) {
/* Use the better Cond if the Proj projs from a Cond which gets
741 its result from an Eor/Not. */
742 assert (((get_irn_op(get_Cond_selector(a)) == op_Eor)
743 || (get_irn_op(get_Cond_selector(a)) == op_Not))
744 && (get_irn_mode(get_Cond_selector(a)) == mode_b)
745 && (get_irn_op(get_irn_link(a)) == op_Cond)
746 && (get_Cond_selector(get_irn_link(a)) == get_Eor_left(get_Cond_selector(a))));
747 set_Proj_pred(n, get_irn_link(a));
748 if (get_Proj_proj(n) == 0)
752 } else if ((get_irn_op(a) == op_Cond)
753 && (get_irn_mode(get_Cond_selector(a)) == mode_I)
755 && (get_Cond_kind(a) == dense)
756 && (get_opt_unreachable_code())) {
757 /* The Cond is a Switch on a Constant */
758 if (get_Proj_proj(n) == tv_val_CHIL(value_of(a))) {
759 /* The always taken branch, reuse the existing Jmp. */
760 if (!get_irn_link(a)) /* well, if it exists ;-> */
761 set_irn_link(a, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
762 assert(get_irn_op(get_irn_link(a)) == op_Jmp);
764 } else {/* Not taken control flow, but be careful with the default! */
765 if (get_Proj_proj(n) < a->attr.c.default_proj){
766 /* a never taken branch */
769 a->attr.c.default_proj = get_Proj_proj(n);
774 case iro_Eor: { /* @@@ not tested as boolean Eor not allowed any more. */
776 b = get_Eor_right(n);
778 if ((get_irn_mode(n) == mode_b)
779 && (get_irn_op(a) == op_Proj)
780 && (get_irn_mode(a) == mode_b)
781 && (tarval_classify (computed_value (b)) == 1)
782 && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
783 /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
784 n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
785 mode_b, get_negated_pnc(get_Proj_proj(a)));
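/* E.g. (assuming get_negated_pnc maps Lt to Ge for this mode):
Eor(Proj_Lt(Cmp(x,y)), true) becomes Proj_Ge(Cmp(x,y)) -- no new
comparison is computed. */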
786 else if ((get_irn_mode(n) == mode_b)
787 && (tarval_classify (computed_value (b)) == 1))
788 /* The Eor is a Not. Replace it by a Not. */
789 /* ????!!!Extend to bitfield 1111111. */
790 n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);
796 if ( (get_irn_mode(n) == mode_b)
797 && (get_irn_op(a) == op_Proj)
798 && (get_irn_mode(a) == mode_b)
799 && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
800 /* We negate a Cmp. The Cmp has the negated result anyways! */
801 n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
802 mode_b, get_negated_pnc(get_Proj_proj(a)));
810 /* **************** Common Subexpression Elimination **************** */
812 /* Compare function for two nodes in the hash table. Gets two */
813 /* nodes as parameters. Returns 0 if the nodes are a cse. */
815 vt_cmp (const void *elt, const void *key)
823 if (a == b) return 0;
825 if ((get_irn_op(a) != get_irn_op(b)) ||
826 (get_irn_mode(a) != get_irn_mode(b))) return 1;
/* compare if the in arrays of a and b are equal */
829 if (get_irn_arity (a) != get_irn_arity(b))
832 /* for block-local cse and pinned nodes: */
833 if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
834 if (get_irn_n(a, -1) != get_irn_n(b, -1))
838 /* compare a->in[0..ins] with b->in[0..ins] */
839 for (i = 0; i < get_irn_arity(a); i++)
840 if (get_irn_n(a, i) != get_irn_n(b, i))
843 switch (get_irn_opcode(a)) {
845 return get_irn_const_attr (a) != get_irn_const_attr (b);
847 return get_irn_proj_attr (a) != get_irn_proj_attr (b);
849 return get_Filter_proj(a) != get_Filter_proj(b);
851 return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
852 || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
854 return (get_irn_free_attr(a) != get_irn_free_attr(b));
856 return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
857 || (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
859 return (get_irn_call_attr(a) != get_irn_call_attr(b));
861 return (get_irn_sel_attr(a).ent->kind != get_irn_sel_attr(b).ent->kind)
862 || (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)
863 || (get_irn_sel_attr(a).ent->owner != get_irn_sel_attr(b).ent->owner)
864 || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
865 || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type);
867 return get_irn_phi_attr (a) != get_irn_phi_attr (b);
875 ir_node_hash (ir_node *node)
880 /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
881 h = get_irn_arity(node);
883 /* consider all in nodes... except the block. */
884 for (i = 0; i < get_irn_arity(node); i++) {
885 h = 9*h + (unsigned long)get_irn_n(node, i);
889 h = 9*h + (unsigned long) get_irn_mode (node);
891 h = 9*h + (unsigned long) get_irn_op (node);
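/* Worked example (a sketch): a node with ins (p, q), mode m and op o
hashes to 9*(9*(9*(9*2 + (long)p) + (long)q) + (long)m) + (long)o, so
nodes with equal ins, mode and op end up in the same bucket. */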
897 new_identities (void)
899 return new_pset (vt_cmp, TUNE_NIR_NODES);
903 del_identities (pset *value_table)
905 del_pset (value_table);
908 /* Return the canonical node computing the same value as n.
909 Looks up the node in a hash table. */
910 static INLINE ir_node *
911 identify (pset *value_table, ir_node *n)
915 if (!value_table) return n;
917 if (get_opt_reassociation()) {
918 switch (get_irn_opcode (n)) {
925 /* for commutative operators perform a OP b == b OP a */
926 if (get_binop_left(n) > get_binop_right(n)) {
927 ir_node *h = get_binop_left(n);
928 set_binop_left(n, get_binop_right(n));
929 set_binop_right(n, h);
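/* Now Add(a, b) and Add(b, a) have identical in arrays, hash alike and
are found as a cse pair by vt_cmp. */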
937 o = pset_find (value_table, n, ir_node_hash (n));
943 /* During construction we set the pinned flag in the graph right when the
optimization is performed. The flag turning on procedure global cse could
945 be changed between two allocations. This way we are safe. */
946 static INLINE ir_node *
947 identify_cons (pset *value_table, ir_node *n) {
949 n = identify(value_table, n);
950 if (get_irn_n(old, -1) != get_irn_n(n, -1))
951 set_irg_pinned(current_ir_graph, floats);
955 /* Return the canonical node computing the same value as n.
956 Looks up the node in a hash table, enters it in the table
957 if it isn't there yet. */
959 identify_remember (pset *value_table, ir_node *node)
963 if (!value_table) return node;
965 /* lookup or insert in hash table with given hash key. */
966 o = pset_insert (value_table, node, ir_node_hash (node));
968 if (o == node) return node;
974 add_identities (pset *value_table, ir_node *node) {
975 identify_remember (value_table, node);
978 /* garbage in, garbage out. If a node has a dead input, i.e., the
979 Bad node is input to the node, return the Bad node. */
980 static INLINE ir_node *
984 ir_op* op = get_irn_op(node);
986 /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
block's predecessors is dead. */
988 if ( op != op_Block && op != op_Phi && op != op_Tuple) {
989 for (i = -1; i < get_irn_arity(node); i++) {
990 if (is_Bad(get_irn_n(node, i))) {
996 /* If Block has only Bads as predecessors it's garbage. */
997 /* If Phi has only Bads as predecessors it's garbage. */
998 if (op == op_Block || op == op_Phi) {
999 for (i = 0; i < get_irn_arity(node); i++) {
1000 if (!is_Bad(get_irn_n(node, i))) break;
if (i == get_irn_arity(node)) node = new_Bad();
/* These optimizations deallocate nodes from the obstack.
They may only be applied if it is guaranteed that no other nodes
reference this one, i.e., right after construction of a node. */
1013 optimize (ir_node *n)
/* Always optimize Phi nodes: part of the construction. */
1019 if ((!get_optimize()) && (get_irn_op(n) != op_Phi)) return n;
1021 /* constant expression evaluation / constant folding */
1022 if (get_opt_constant_folding()) {
/* constants cannot be evaluated any further */
1024 if (get_irn_op(n) != op_Const) {
1025 /* try to evaluate */
1026 tv = computed_value (n);
1027 if ((get_irn_mode(n) != mode_T) && (tv != NULL)) {
/* evaluation was successful -- replace the node. */
1029 obstack_free (current_ir_graph->obst, n);
1030 return new_Const (get_tv_mode (tv), tv);
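/* E.g., n = Add(Const 2, Const 3): the Add is freed from the obstack
and the caller receives a Const 5 instead. */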
1035 /* remove unnecessary nodes */
1036 if (get_opt_constant_folding() ||
1037 (get_irn_op(n) == op_Phi) || /* always optimize these nodes. */
1038 (get_irn_op(n) == op_Id) ||
1039 (get_irn_op(n) == op_Proj) ||
1040 (get_irn_op(n) == op_Block) ) /* Flags tested local. */
1041 n = equivalent_node (n);
1043 /** common subexpression elimination **/
1044 /* Checks whether n is already available. */
1045 /* The block input is used to distinguish different subexpressions. Right
1046 now all nodes are pinned to blocks, i.e., the cse only finds common
1047 subexpressions within a block. */
1049 n = identify_cons (current_ir_graph->value_table, n);
1052 /* We found an existing, better node, so we can deallocate the old node. */
1053 obstack_free (current_ir_graph->obst, old_n);
1056 /* Some more constant expression evaluation that does not allow to
1058 if (get_opt_constant_folding() ||
1059 (get_irn_op(n) == op_Cond) ||
1060 (get_irn_op(n) == op_Proj)) /* Flags tested local. */
1061 n = transform_node (n);
1063 /* Remove nodes with dead (Bad) input.
Always run, for transformation-induced Bads. */
1067 /* Now we can verify the node, as it has no dead inputs any more. */
/* Now we have a legal, useful node. Enter it in the hash table for cse */
1071 if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
1072 n = identify_remember (current_ir_graph->value_table, n);
/* These optimizations never deallocate nodes. This can leave dead
1080 nodes lying on the obstack. Remove these by a dead node elimination,
1081 i.e., a copying garbage collection. */
1083 optimize_in_place_2 (ir_node *n)
1088 if (!get_optimize() && (get_irn_op(n) != op_Phi)) return n;
1090 /* if not optimize return n */
1093 /* Here this is possible. Why? */
1098 /* constant expression evaluation / constant folding */
1099 if (get_opt_constant_folding()) {
/* constants cannot be evaluated any further */
1101 if (get_irn_op(n) != op_Const) {
1102 /* try to evaluate */
1103 tv = computed_value (n);
1104 if ((get_irn_mode(n) != mode_T) && (tv != NULL)) {
/* evaluation was successful -- replace the node. */
1106 n = new_Const (get_tv_mode (tv), tv);
1107 __dbg_info_merge_pair(n, old_n, dbg_const_eval);
1113 /* remove unnecessary nodes */
1114 /*if (get_opt_constant_folding()) */
1115 if (get_opt_constant_folding() ||
1116 (get_irn_op(n) == op_Phi) || /* always optimize these nodes. */
1117 (get_irn_op(n) == op_Id) || /* ... */
1118 (get_irn_op(n) == op_Proj) || /* ... */
1119 (get_irn_op(n) == op_Block) ) /* Flags tested local. */
1120 n = equivalent_node (n);
1122 /** common subexpression elimination **/
1123 /* Checks whether n is already available. */
1124 /* The block input is used to distinguish different subexpressions. Right
1125 now all nodes are pinned to blocks, i.e., the cse only finds common
1126 subexpressions within a block. */
1127 if (get_opt_cse()) {
1128 n = identify (current_ir_graph->value_table, n);
1131 /* Some more constant expression evaluation. */
1132 if (get_opt_constant_folding() ||
1133 (get_irn_op(n) == op_Cond) ||
1134 (get_irn_op(n) == op_Proj)) /* Flags tested local. */
1135 n = transform_node (n);
1137 /* Remove nodes with dead (Bad) input.
Always run, for transformation-induced Bads. */
1141 /* Now we can verify the node, as it has no dead inputs any more. */
/* Now we have a legal, useful node. Enter it in the hash table for cse.
Blocks should be unique anyway. (Except the successor of start:
it is cse'd with the start block!) */
1147 if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
1148 n = identify_remember (current_ir_graph->value_table, n);
/* Wrapper for external use: sets the proper status bits after optimization. */
1155 optimize_in_place (ir_node *n) {
1156 /* Handle graph state */
1157 assert(get_irg_phase_state(current_ir_graph) != phase_building);
1158 if (get_opt_global_cse())
1159 set_irg_pinned(current_ir_graph, floats);
1160 if (get_irg_outs_state(current_ir_graph) == outs_consistent)
1161 set_irg_outs_inconsistent(current_ir_graph);
1162 /* Maybe we could also test whether optimizing the node can
1163 change the control graph. */
1164 if (get_irg_dom_state(current_ir_graph) == dom_consistent)
1165 set_irg_dom_inconsistent(current_ir_graph);
1166 return optimize_in_place_2 (n);