/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
** All rights reserved.
**
** Authors: Christian Schaefer, Goetz Lindenmaier
**
** iropt --- optimizations intertwined with IR construction.
*/
# include "irnode_t.h"
# include "irgraph_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.c"

/* Make types visible to allow most efficient access */
# include "entity_t.h"
/* Trivial INLINEable routine for copy propagation.
   Follows Ids, needed to optimize INLINEd code. */
static INLINE ir_node *
follow_Id (ir_node *n)
{
  while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
  return n;
}
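/* Illustrative note (not in the original sources): if inlining left a
   chain  x -> Id -> Id -> y,  follow_Id(x) returns y, the node that
   actually computes the value. */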
/* Returns the tarval of a Const node, else NULL. */
static INLINE tarval *
value_of (ir_node *n)
{
  if ((n != NULL) && (get_irn_op(n) == op_Const))
    return get_Const_tarval(n);
  else
    return NULL;
}
/* If n can be computed, return the value, else NULL.  Performs
   constant folding.  GL: Only if n is an arithmetic operator? */
tarval *
computed_value (ir_node *n)
{
  tarval *res = NULL;
  ir_node *a = NULL, *b = NULL;  /* initialized to shut up gcc */
  tarval *ta = NULL, *tb = NULL; /* initialized to shut up gcc */
  tarval *v;
  /* get the operands we will work on for simple cases. */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }
  /* if the operands are constants, get the target value, else set it NULL.
     (a and b may be NULL if we treat a node that is no computation.) */
  ta = value_of (a);
  tb = value_of (b);
  /* Perform the constant evaluation / computation. */
  switch (get_irn_opcode(n)) {
  case iro_Const:
    res = get_Const_tarval(n);
    break;
  case iro_SymConst:
    /* The size of a fixed-layout type is a compile time constant. */
    if ((get_SymConst_kind(n) == size) &&
        (get_type_state(get_SymConst_type(n)) == layout_fixed))
      res = tarval_from_long (mode_i, get_type_size(get_SymConst_type(n)));
    break;
  case iro_Add:
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
        && (get_irn_mode(a) != mode_p)) {
      res = tarval_add (ta, tb);
    }
    break;
  case iro_Sub:
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
        && (get_irn_mode(a) != mode_p)) {
      res = tarval_sub (ta, tb);
    } else if (a == b) {
      /* x - x yields the zero of the node's mode. */
      res = tarval_mode_null [get_irn_modecode (n)];
    }
    break;
  case iro_Minus:
    if (ta && mode_is_float(get_irn_mode(a)))
      res = tarval_neg (ta);
    break;
  case iro_Mul:
    if (ta && tb) /* tarval_mul tests for equivalent modes itself */ {
      res = tarval_mul (ta, tb);
    } else {
      /* a*0 = 0 or 0*b = 0:
         calls computed_value recursively and returns the 0 with proper
         mode. */
      if (   (tarval_classify ((v = computed_value (a))) == 0)
          || (tarval_classify ((v = computed_value (b))) == 0)) {
        res = v;
      }
    }
    break;
  case iro_Quot:
    /* This was missing in the original implementation. Why? */
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tarval_classify(tb) == 0) { res = NULL; break; }  /* don't fold x/0 */
      res = tarval_quo(ta, tb);
    }
    break;
  case iro_Div:
    /* This was missing in the original implementation. Why? */
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tarval_classify(tb) == 0) { res = NULL; break; }
      res = tarval_div(ta, tb);
    }
    break;
  case iro_Mod:
    /* This was missing in the original implementation. Why? */
    if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
      if (tarval_classify(tb) == 0) { res = NULL; break; }
      res = tarval_mod(ta, tb);
    }
    break;
  /* for iro_DivMod see iro_Proj */
  case iro_Abs:
    if (ta)
      res = tarval_abs (ta);
    break;
  case iro_And:
    if (ta && tb) {
      res = tarval_and (ta, tb);
    } else {
      /* a & 0 = 0 or 0 & b = 0 */
      if (   (tarval_classify ((v = computed_value (a))) == 0)
          || (tarval_classify ((v = computed_value (b))) == 0)) {
        res = v;
      }
    }
    break;
  case iro_Or:
    if (ta && tb) {
      res = tarval_or (ta, tb);
    } else {
      /* a | -1 = -1 or -1 | b = -1  (all bits set) */
      if (   (tarval_classify ((v = computed_value (a))) == -1)
          || (tarval_classify ((v = computed_value (b))) == -1)) {
        res = v;
      }
    }
    break;
  case iro_Eor: if (ta && tb) { res = tarval_eor (ta, tb); } break;
  case iro_Not: if (ta)       { res = tarval_neg (ta); } break;
  case iro_Shl: if (ta && tb) { res = tarval_shl (ta, tb); } break;
  /* tarval_shr is faulty !! */
  case iro_Shr: if (ta && tb) { res = tarval_shr (ta, tb); } break;
  case iro_Shrs:if (ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
  case iro_Rot: if (ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
  case iro_Conv:if (ta)       { res = tarval_convert_to (ta, get_irn_mode (n)); }
    break;
  case iro_Proj:  /* iro_Cmp */
    {
      ir_node *aa, *ab;

      a = get_Proj_pred(n);

      /* Optimize Cmp nodes.
         This performs a first step of unreachable code elimination.
         Proj can not be computed, but folding a Cmp above the Proj here is
         not as wasteful as folding a Cmp into a Tuple of 16 Consts of
         which only one is used.

         There are several cases where we can evaluate a Cmp node:

         1. The nodes compared are both the same.  If we compare for
            equal, this will return true, else it will return false.
            This step relies on cse.
         2. The predecessors of Cmp are target values.  We can evaluate
            the Cmp.
         3. The predecessors are Allocs or void* constants.  Allocs never
            return NULL, they raise an exception.  Therefore we can predict
            the Cmp result. */
      if (get_irn_op(a) == op_Cmp) {
        aa = get_Cmp_left(a);
        ab = get_Cmp_right(a);
        if (aa == ab) { /* 1.: */
          /* This is a trick with the bits used for encoding the Cmp
             Proj numbers, the following statement is not the same:
             res = tarval_from_long (mode_b, (get_Proj_proj(n) == Eq)): */
          res = tarval_from_long (mode_b, (get_Proj_proj(n) & irpn_Eq));
        } else {
          tarval *taa = computed_value (aa);
          tarval *tab = computed_value (ab);
          if (taa && tab) { /* 2.: */
            /* strange checks... */
            ir_pncmp flags = tarval_comp (taa, tab);
            if (flags != irpn_False) {
              res = tarval_from_long (mode_b, get_Proj_proj(n) & flags);
            }
          } else {  /* check for 3.: */
            ir_node *aaa = skip_nop(skip_Proj(aa));
            ir_node *aba = skip_nop(skip_Proj(ab));
            if (   (   ( /* aa is ProjP and aaa is Alloc */
                            (get_irn_op(aa) == op_Proj)
                         && (get_irn_mode(aa) == mode_p)
                         && (get_irn_op(aaa) == op_Alloc))
                    && (   ( /* ab is constant void */
                                (get_irn_op(ab) == op_Const)
                             && (get_irn_mode(ab) == mode_p)
                             && (get_Const_tarval(ab) == tarval_p_void))
                        || ( /* ab is another Alloc */
                                (get_irn_op(ab) == op_Proj)
                             && (get_irn_mode(ab) == mode_p)
                             && (get_irn_op(aba) == op_Alloc)
                             && (aaa != aba))))
                || ( /* aa is void and aba is Alloc */
                        (get_irn_op(aa) == op_Const)
                     && (get_irn_mode(aa) == mode_p)
                     && (get_Const_tarval(aa) == tarval_p_void)
                     && (get_irn_op(ab) == op_Proj)
                     && (get_irn_mode(ab) == mode_p)
                     && (get_irn_op(aba) == op_Alloc)))
              /* 3.: the pointers differ, so only Ne can hold. */
              res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
          }
        }
      } else if (get_irn_op(a) == op_DivMod) {
        ta = value_of(get_DivMod_left(a));
        tb = value_of(get_DivMod_right(a));
        /* compare the modes of the DivMod operands, not of the DivMod
           itself (it has mode T). */
        if (ta && tb
            && (get_irn_mode(get_DivMod_left(a)) == get_irn_mode(get_DivMod_right(a)))) {
          if (tarval_classify(tb) == 0) { res = NULL; break; }  /* don't fold x/0 */
          if (get_Proj_proj(n) == 0)  /* Div */
            res = tarval_div(ta, tb);
          else                        /* Mod */
            res = tarval_mod(ta, tb);
        }
      }
    }
    break;
  default:
    break;
  }

  return res;
}
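
#if 0
/* Illustrative sketch, not part of the original file: how computed_value
   folds during construction.  Assumes the usual construction interfaces
   new_Const/new_Add and a current_ir_graph set up by the front end. */
static void computed_value_example (void)
{
  ir_node *c2  = new_Const (mode_i, tarval_from_long (mode_i, 2));
  ir_node *c3  = new_Const (mode_i, tarval_from_long (mode_i, 3));
  ir_node *sum = new_Add (c2, c3, mode_i);
  tarval  *tv  = computed_value (sum);  /* the tarval for 5 */
  /* optimize() below uses such a tv to replace sum by a Const. */
}
#endif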
/* Returns 1 if a and b are pointers to different locations. */
static int
different_identity (ir_node *a, ir_node *b)
{
  assert (get_irn_mode (a) == mode_p
          && get_irn_mode (b) == mode_p);

  if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
    ir_node *a1 = get_Proj_pred (a);
    ir_node *b1 = get_Proj_pred (b);
    if (   a1 != b1
        && get_irn_op (a1) == op_Alloc
        && get_irn_op (b1) == op_Alloc)
      return 1;   /* different allocations => different pointers */
  }
  return 0;
}
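/* Example: for  p = Proj(AllocA)  and  q = Proj(AllocB)  with distinct
   Alloc nodes this returns 1.  For anything else it conservatively
   returns 0, since different nodes can still compute the same pointer
   (see the disabled Load transformation below). */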
/* equivalent_node returns a node equivalent to n.  It skips all nodes that
   perform no actual computation, as, e.g., the Id nodes.  It does not create
   new nodes.  It is therefore safe to free n if the node returned is not n.
   If a node returns a Tuple we can not just skip it.  If the size of the
   in array fits, we transform n into a tuple (e.g., Div). */
ir_node *
equivalent_node (ir_node *n)
{
  int ins;
  ir_node *oldn = n;
  ir_node *a = NULL; /* to shut up gcc */
  ir_node *b = NULL; /* to shut up gcc */
  ir_node *c = NULL; /* to shut up gcc */
  ins = get_irn_arity (n);

  /* get the operands we will work on */
  if (is_binop(n)) {
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
    a = get_unop_op(n);
  }
  /* skip unnecessary nodes. */
  switch (get_irn_opcode (n)) {

  case iro_Block:
    {
      /* The Block constructor does not call optimize, but mature_block
         calls the optimization. */
      assert(get_Block_matured(n));

      /* Straightening: a single entry Block following a single exit Block
         can be merged, if it is not the Start block. */
      /* !!! Beware, all Phi-nodes of n must have been optimized away.
         This should be true, as the block is matured before optimize is called.
         But what about Phi-cycles with the Phi0/Id that could not be resolved?
         Remaining Phi nodes are just Ids. */
      if ((get_Block_n_cfgpreds(n) == 1) &&
          (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
          (get_opt_control_flow())) {
        n = get_nodes_Block(get_Block_cfgpred(n, 0));          DBG_OPT_STG;

      } else if ((get_Block_n_cfgpreds(n) == 2) &&
                 (get_opt_control_flow())) {
        /* Test whether Cond jumps twice to this block
           @@@ we could do this also with two loops finding two preds from several ones. */
        a = get_Block_cfgpred(n, 0);
        b = get_Block_cfgpred(n, 1);
        if ((get_irn_op(a) == op_Proj) &&
            (get_irn_op(b) == op_Proj) &&
            (get_Proj_pred(a) == get_Proj_pred(b)) &&
            (get_irn_op(get_Proj_pred(a)) == op_Cond)) {
          /* Also a single entry Block following a single exit Block.  Phis have
             twice the same operand and will be optimized away. */
          n = get_nodes_Block(a);                              DBG_OPT_IFSIM;
        }
      } else if (get_opt_unreachable_code() &&
                 (n != current_ir_graph->start_block) &&
                 (n != current_ir_graph->end_block)     ) {
        int i;
        /* If all inputs are dead, this block is dead too, except if it is
           the start or end block.  This is a step of unreachable code
           elimination. */
        for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
          if (!is_Bad(get_Block_cfgpred(n, i))) break;
        }
        if (i == get_Block_n_cfgpreds(n))
          n = new_Bad();
      }
    }
    break;
  case iro_Jmp:  /* GL: Why not the same for op_Raise?? */
    /* unreachable code elimination */
    if (is_Bad(get_nodes_Block(n)))  n = new_Bad();
    break;

  /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
     See cases for iro_Cond and iro_Proj in transform_node. */

  /** remove stuff like x+0, x*1, x&true ... constant expression evaluation **/
  case iro_Or:  if (a == b) { n = a; break; }
    /* fall through: 0 is also the neutral element of Or */
  case iro_Add:
  case iro_Eor: {
    tarval *tv;
    ir_node *on;
    /* After running compute_node there is only one constant predecessor.
       Find this predecessor's value and remember the other node: */
    if ((tv = computed_value (a))) {
      on = b;
    } else if ((tv = computed_value (b))) {
      on = a;
    } else break;

    /* If this predecessor's constant value is zero, the operation is
       unnecessary.  Remove it: */
    if (tarval_classify (tv) == 0) {
      n = on;                                                  DBG_OPT_ALGSIM1;
    }
  } break;

  case iro_Sub:
  case iro_Shl:
  case iro_Shr:
  case iro_Shrs:
  case iro_Rot:
    /* these operations are not commutative.  Test only one predecessor. */
    if (tarval_classify (computed_value (b)) == 0) {
      n = a;                                                   DBG_OPT_ALGSIM1;
      /* Test if b > #bits of a ==> return 0 / divide b by #bits
         --> transform node? */
    }
    break;
  case iro_Not:    /* NotNot x == x */
  case iro_Minus:  /* --x == x */  /* ??? Is this possible or can --x raise an
                                      out of bounds exception if min != max? */
    if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
      n = get_unop_op(get_unop_op(n));                         DBG_OPT_ALGSIM2;
    }
    break;
  case iro_Mul:
    /* Mul is commutative and has again another neutral element. */
    if (tarval_classify (computed_value (a)) == 1) {
      n = b;                                                   DBG_OPT_ALGSIM1;
    } else if (tarval_classify (computed_value (b)) == 1) {
      n = a;                                                   DBG_OPT_ALGSIM1;
    }
    break;

  case iro_Div:
    /* Div is not commutative. */
    if (tarval_classify (computed_value (b)) == 1) { /* div(x, 1) == x */
      /* Turn Div into a tuple (mem, bad, a) */
      ir_node *mem = get_Div_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, a);
    }
    break;
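  /* After turn_into_tuple the Projs that selected the Div's memory,
     exception and result now select Tuple preds 0, 1 and 2; the Proj
     case below folds each Proj(Tuple) to the corresponding pred. */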
  /* GL: Why are they skipped?  DivMod allocates new nodes --> it's
     treated in transform_node.
     case iro_Mod, Quot, DivMod
  */

  case iro_And:
    if (a == b) {
      n = a;    /* And has its own neutral element: x & x == x */
    } else if (tarval_classify (computed_value (a)) == -1) {
      n = b;    /* -1 & x == x */
    } else if (tarval_classify (computed_value (b)) == -1) {
      n = a;    /* x & -1 == x */
    }
    if (n != oldn)                                             DBG_OPT_ALGSIM1;
    break;
  case iro_Conv:
    if (get_irn_mode(n) == get_irn_mode(a)) { /* No Conv necessary */
      n = a;                                                   DBG_OPT_ALGSIM3;
    } else if (get_irn_mode(n) == mode_b) {
      if (get_irn_op(a) == op_Conv &&
          get_irn_mode (get_Conv_op(a)) == mode_b) {
        /* Convb(Conv*(xxxb(...))) == xxxb(...) */
        n = get_Conv_op(a);                                    DBG_OPT_ALGSIM2;
      }
    }
    break;
  case iro_Phi:
    {
      /* Several optimizations:
         - no Phi in start block.
         - remove Id operators that are inputs to Phi
         - fold Phi-nodes, iff they have only one predecessor except
           themselves.
      */
      int i, n_preds;

      ir_node *block = NULL;     /* to shut up gcc */
      ir_node *first_val = NULL; /* to shut up gcc */
      ir_node *scnd_val = NULL;  /* to shut up gcc */

      n_preds = get_Phi_n_preds(n);

      block = get_nodes_Block(n);
      if ((is_Bad(block)) ||                         /* Control dead */
          (block == current_ir_graph->start_block))  /* There should be no Phi nodes */
        return new_Bad();                            /* in the Start Block. */

      if (n_preds == 0) break;  /* Phi of dead Region without predecessors. */
      /* first we test for a special case: */
      /* Confirm is a special node fixing additional information for a
         value that is known at a certain point.  This is useful for
         dataflow analysis. */
      if (n_preds == 2) {
        ir_node *a = follow_Id (get_Phi_pred(n, 0));
        ir_node *b = follow_Id (get_Phi_pred(n, 1));
        if (   (get_irn_op(a) == op_Confirm)
            && (get_irn_op(b) == op_Confirm)
            && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
            && (get_irn_n(a, 1) == get_irn_n (b, 1))
            && (a->data.num == (~b->data.num & irpn_True))) {
          n = follow_Id (get_irn_n(a, 0));
          break;
        }
      }
      /* Find first non-self-referencing input */
      for (i = 0; i < n_preds; ++i) {
        first_val = follow_Id(get_Phi_pred(n, i));
        /* skip Id nodes */
        set_Phi_pred(n, i, first_val);
        if (   (first_val != n)                             /* not self pointer */
            && (get_irn_op(first_val) != op_Bad)            /* value not dead */
            && !(is_Bad (get_Block_cfgpred(block, i))) ) {  /* not dead control flow */
          break;  /* then found first value. */
        }
      }

      /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
      if (i >= n_preds) { n = new_Bad(); break; }
      /* follow_Id () for rest of inputs, determine if any of these
         are non-self-referencing */
      while (++i < n_preds) {
        scnd_val = follow_Id(get_Phi_pred(n, i));
        /* skip Id nodes */
        set_Phi_pred(n, i, scnd_val);
        if (   (scnd_val != n)
            && (scnd_val != first_val)
            && (get_irn_op(scnd_val) != op_Bad)
            && !(is_Bad (get_Block_cfgpred(block, i))) ) {
          break;
        }
      }

      /* Fold, if no multiple distinct non-self-referencing inputs */
      if (i >= n_preds) {
        n = first_val;                                         DBG_OPT_PHI;
      } else {
        /* skip the remaining Ids. */
        while (++i < n_preds) {
          set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
        }
      }
    }
    break;
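  /* Example: Phi(x, x, n) -- where the third input is the Phi itself --
     still folds to x: self references and repetitions of the same value
     do not count as distinct inputs. */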
  case iro_Load:
    {
#if 0  /* Is an illegal transformation: different nodes can
          represent the same pointer value!! */
      a = skip_Proj(get_Load_mem(n));
      b = get_Load_ptr(n);

      if (get_irn_op(a) == op_Store) {
        if ( different_identity (b, get_Store_ptr(a))) {
          /* load and store use different pointers, therefore load
             needs not take store's memory but the state before. */
          set_Load_mem (n, get_Store_mem(a));
        } else if (( 0 /* ???didn't get cryptic test that returns 0 */ )) {
        }
      }
#endif
    }
    break;
  case iro_Store:
    /* remove unnecessary store. */
    {
      a = skip_Proj(get_Store_mem(n));
      b = get_Store_ptr(n);
      c = skip_Proj(get_Store_value(n));

      if (get_irn_op(a) == op_Store
          && get_Store_ptr(a) == b
          && skip_Proj(get_Store_value(a)) == c) {
        /* We have twice exactly the same store -- a write after write. */
        n = a;                                                 DBG_OPT_WAW;
      } else if (get_irn_op(c) == op_Load
                 && (a == c || skip_Proj(get_Load_mem(c)) == a)
                 && get_Load_ptr(c) == b ) {
        /* We just loaded the value from the same memory, i.e., the store
           doesn't change the memory -- a write after read. */
        a = get_Store_mem(n);
        turn_into_tuple(n, 2);
        set_Tuple_pred(n, 0, a);
        set_Tuple_pred(n, 1, new_Bad());                       DBG_OPT_WAR;
      }
    }
    break;
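  /* Example: in  v = Load(M, p); M' = Store(M, p, v)  the Store writes
     back what was just read, so it becomes Tuple(M, Bad): later Projs
     see the unchanged memory M and no exception. */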
  case iro_Proj:
    {
      a = get_Proj_pred(n);

      if ( get_irn_op(a) == op_Tuple) {
        /* Remove the Tuple/Proj combination. */
        if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
          n = get_Tuple_pred(a, get_Proj_proj(n));             DBG_OPT_TUPLE;
        } else {
          assert(0); /* This should not happen! */
          n = new_Bad();
        }
      } else if (get_irn_mode(n) == mode_X &&
                 is_Bad(get_nodes_Block(n))) {
        /* Remove dead control flow. */
        n = new_Bad();
      }
    }
    break;

  case iro_Id:
    n = follow_Id (n);                                         DBG_OPT_ID;
    break;

  default:
    break;
  }

  return n;
} /* end equivalent_node() */
/* Tries several [inplace] [optimizing] transformations and returns an
   equivalent node.  The difference to equivalent_node is that these
   transformations _do_ generate new nodes, and thus the old node must
   not be freed even if the equivalent node isn't the old one. */
static ir_node *
transform_node (ir_node *n)
{
  ir_node *a = NULL, *b;
  tarval *ta, *tb;

  switch (get_irn_opcode(n)) {
  case iro_Div:
    ta = computed_value(n);
    if (ta) {
      /* Turn Div into a tuple (mem, bad, value) */
      ir_node *mem = get_Div_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, new_Const(get_tv_mode(ta), ta));
    }
    break;
  case iro_Mod:
    ta = computed_value(n);
    if (ta) {
      /* Turn Mod into a tuple (mem, bad, value) */
      ir_node *mem = get_Mod_mem(n);
      turn_into_tuple(n, 3);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());
      set_Tuple_pred(n, 2, new_Const(get_tv_mode(ta), ta));
    }
    break;
  case iro_DivMod: {
    int evaluated = 0;
    ir_mode *mode;

    a = get_DivMod_left(n);
    b = get_DivMod_right(n);
    mode = get_irn_mode(a);

    if (!(mode_is_int(get_irn_mode(a)) &&
          mode_is_int(get_irn_mode(b))))
      break;

    if (a == b) {
      /* x / x == 1, x % x == 0 */
      a = new_Const (mode, tarval_from_long (mode, 1));
      b = new_Const (mode, tarval_from_long (mode, 0));
      evaluated = 1;
    } else {
      ta = value_of(a);
      tb = value_of(b);

      if (tb) {
        if (tarval_classify(tb) == 1) {
          /* x / 1 == x, x % 1 == 0 */
          b = new_Const (mode, tarval_from_long (mode, 0));
          evaluated = 1;
        } else if (ta) {
          tarval *resa, *resb;
          resa = tarval_div (ta, tb);
          if (!resa) break; /* Causes exception!!! Model by replacing through
                               Jmp for X result!? */
          resb = tarval_mod (ta, tb);
          if (!resb) break; /* Causes exception! */
          a = new_Const (mode, resa);
          b = new_Const (mode, resb);
          evaluated = 1;
        }
      } else if (tarval_classify (ta) == 0) {
        /* 0 / b == 0, 0 % b == 0 */
        b = a;
        evaluated = 1;
      }
    }
    if (evaluated) { /* replace by tuple */
      ir_node *mem = get_DivMod_mem(n);
      turn_into_tuple(n, 4);
      set_Tuple_pred(n, 0, mem);
      set_Tuple_pred(n, 1, new_Bad());  /* no exception */
      set_Tuple_pred(n, 2, a);
      set_Tuple_pred(n, 3, b);
      assert(get_nodes_Block(n));
    }
  }
  break;
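  /* The tuple slots mirror the DivMod projections: 0 = memory,
     1 = exception X (here Bad, as no exception can occur),
     2 = division result, 3 = modulo result. */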
  case iro_Cond: {
    /* Replace the Cond by a Jmp if it branches on a constant
       condition. */
    ir_node *jmp;

    a = get_Cond_selector(n);
    ta = value_of(a);

    if (ta &&
        (get_irn_mode(a) == mode_b) &&
        (get_opt_unreachable_code())) {
      /* It's a boolean Cond, branching on a boolean constant.
         Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
      jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
      turn_into_tuple(n, 2);
      if (tv_val_b(ta) == 1) /* GL: I hope this returns 1 if true */ {
        set_Tuple_pred(n, 0, new_Bad());
        set_Tuple_pred(n, 1, jmp);
      } else {
        set_Tuple_pred(n, 0, jmp);
        set_Tuple_pred(n, 1, new_Bad());
      }
      /* We might generate an endless loop, so keep it alive. */
      add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
    } else if (ta &&
               (get_irn_mode(a) == mode_I) &&
               (get_Cond_kind(n) == dense) &&
               (get_opt_unreachable_code())) {
      /* I don't want to allow Tuples smaller than the biggest Proj.
         Also this tuple might get really big...
         I generate the Jmp here, and remember it in link.  Link is used
         when optimizing Proj. */
      set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
      /* We might generate an endless loop, so keep it alive. */
      add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
    } else if ((get_irn_op(get_Cond_selector(n)) == op_Eor)
               && (get_irn_mode(get_Cond_selector(n)) == mode_b)
               && (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
      /* The Eor is a negate.  Generate a new Cond without the negate,
         simulate the negate by exchanging the results. */
      set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                 get_Eor_left(a)));
    } else if ((get_irn_op(get_Cond_selector(n)) == op_Not)
               && (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
      /* A Not before the Cond.  Generate a new Cond without the Not,
         simulate the Not by exchanging the results. */
      set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
                                 get_Not_op(a)));
    }
  }
  break;
  case iro_Proj:
    a = get_Proj_pred(n);

    if ((get_irn_op(a) == op_Cond)
        && get_irn_link(a)
        && get_irn_op(get_irn_link(a)) == op_Cond) {
      /* Use the better Cond if the Proj projs from a Cond which gets
         its result from an Eor/Not. */
      assert (   (   (get_irn_op(get_Cond_selector(a)) == op_Eor)
                  || (get_irn_op(get_Cond_selector(a)) == op_Not))
              && (get_irn_mode(get_Cond_selector(a)) == mode_b)
              && (get_irn_op(get_irn_link(a)) == op_Cond)
              && (get_Cond_selector(get_irn_link(a)) == get_Eor_left(get_Cond_selector(a))));
      set_Proj_pred(n, get_irn_link(a));
      /* exchange the results to simulate the Eor/Not */
      if (get_Proj_proj(n) == 0)
        set_Proj_proj(n, 1);
      else
        set_Proj_proj(n, 0);
    } else if ((get_irn_op(a) == op_Cond)
               && (get_irn_mode(get_Cond_selector(a)) == mode_I)
               && value_of(a)
               && (get_Cond_kind(a) == dense)
               && (get_opt_unreachable_code())) {
      /* The Cond is a Switch on a Constant */
      if (get_Proj_proj(n) == tv_val_CHIL(value_of(a))) {
        /* The always taken branch, reuse the existing Jmp. */
        if (!get_irn_link(a)) /* well, if it exists ;-> */
          set_irn_link(a, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
        assert(get_irn_op(get_irn_link(a)) == op_Jmp);
        n = get_irn_link(a);
      } else { /* Not taken control flow, but be careful with the default! */
        if (get_Proj_proj(n) < a->attr.c.default_proj) {
          /* a never taken branch */
          n = new_Bad();
        } else {
          a->attr.c.default_proj = get_Proj_proj(n);
        }
      }
    }
    break;
  case iro_Eor: {  /* @@@ not tested as boolean Eor not allowed any more. */
    a = get_Eor_left(n);
    b = get_Eor_right(n);

    if (   (get_irn_mode(n) == mode_b)
        && (get_irn_op(a) == op_Proj)
        && (get_irn_mode(a) == mode_b)
        && (tarval_classify (computed_value (b)) == 1)
        && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
      /* The Eor negates a Cmp.  The Cmp has the negated result anyways! */
      n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                     mode_b, get_negated_pnc(get_Proj_proj(a)));
    else if (   (get_irn_mode(n) == mode_b)
             && (tarval_classify (computed_value (b)) == 1))
      /* The Eor is a Not.  Replace it by a Not. */
      /* ????!!!Extend to bitfield 1111111. */
      n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);
  }
  break;
  case iro_Not: {
    a = get_Not_op(n);

    if (   (get_irn_mode(n) == mode_b)
        && (get_irn_op(a) == op_Proj)
        && (get_irn_mode(a) == mode_b)
        && (get_irn_op(get_Proj_pred(a)) == op_Cmp))
      /* We negate a Cmp.  The Cmp has the negated result anyways! */
      n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
                     mode_b, get_negated_pnc(get_Proj_proj(a)));
  }
  break;

  default:
    break;
  }

  return n;
}
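/* Example: Not(ProjEq(Cmp(x, y))) becomes ProjNe(Cmp(x, y)) for integer
   compares -- the Cmp computes all outcomes anyway, only the Proj number
   has to be negated. */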
/* **************** Common Subexpression Elimination **************** */

/* Compare function for two nodes in the hash table.  Gets two  */
/* nodes as parameters.  Returns 0 if the nodes are a cse.      */
static int
vt_cmp (const void *elt, const void *key)
{
  ir_node *a, *b;
  int i;

  a = (ir_node *)elt;
  b = (ir_node *)key;
  if (a == b) return 0;

  if ((get_irn_op(a) != get_irn_op(b)) ||
      (get_irn_mode(a) != get_irn_mode(b))) return 1;

  /* compare if a's in and b's in are equal */
  if (get_irn_arity (a) != get_irn_arity(b))
    return 1;

  /* for block-local cse and pinned nodes: */
  if (!get_opt_global_cse() || (get_op_pinned(get_irn_op(a)) == pinned)) {
    if (get_irn_n(a, -1) != get_irn_n(b, -1))
      return 1;
  }

  /* compare a->in[0..ins] with b->in[0..ins] */
  for (i = 0; i < get_irn_arity(a); i++)
    if (get_irn_n(a, i) != get_irn_n(b, i))
      return 1;
  /* here, the nodes are identical except their attributes. */
  switch (get_irn_opcode(a)) {
  case iro_Const:
    return get_irn_const_attr (a) != get_irn_const_attr (b);
  case iro_Proj:
    return get_irn_proj_attr (a) != get_irn_proj_attr (b);
  case iro_Filter:
    return get_Filter_proj(a) != get_Filter_proj(b);
  case iro_Alloc:
    return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
        || (get_irn_alloc_attr(a).type  != get_irn_alloc_attr(b).type);
  case iro_Free:
    return (get_irn_free_attr(a) != get_irn_free_attr(b));
  case iro_SymConst:
    return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
        || (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
  case iro_Call:
    return (get_irn_call_attr(a) != get_irn_call_attr(b));
  case iro_Sel:
    return (get_irn_sel_attr(a).ent->kind    != get_irn_sel_attr(b).ent->kind)
        || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
        || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
        || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
        || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);
  case iro_Phi:
    return get_irn_phi_attr (a) != get_irn_phi_attr (b);
  default:
    break;
  }

  return 0;
}
static unsigned
ir_node_hash (ir_node *node)
{
  unsigned h;
  int i;

  /* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
  h = get_irn_arity(node);

  /* consider all in nodes... except the block. */
  for (i = 0; i < get_irn_arity(node); i++) {
    h = 9*h + (unsigned long)get_irn_n(node, i);
  }

  /* ...mode,... */
  h = 9*h + (unsigned long) get_irn_mode (node);
  /* ...and opcode */
  h = 9*h + (unsigned long) get_irn_op (node);

  return h;
}
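/* Example: a binary node hashes to
     9*(9*(9*(9*2 + (long)in[0]) + (long)in[1]) + (long)mode) + (long)op
   so nodes with the same opcode, mode and inputs collide by design;
   vt_cmp above then decides true equality. */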
pset *
new_identities (void)
{
  return new_pset (vt_cmp, TUNE_NIR_NODES);
}

void
del_identities (pset *value_table)
{
  del_pset (value_table);
}
/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table. */
static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
  ir_node *o = NULL;

  if (!value_table) return n;

  if (get_opt_reassociation()) {
    switch (get_irn_opcode (n)) {
    case iro_Add:
    case iro_Mul:
    case iro_Or:
    case iro_And:
    case iro_Eor:
      /* for commutative operators perform  a OP b == b OP a */
      if (get_binop_left(n) > get_binop_right(n)) {
        ir_node *h = get_binop_left(n);
        set_binop_left(n, get_binop_right(n));
        set_binop_right(n, h);
      }
      break;
    default:
      break;
    }
  }

  o = pset_find (value_table, n, ir_node_hash (n));
  if (!o) return n;

  return o;
}
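/* Example: with reassociation enabled, Add(x, y) and Add(y, x) are
   normalized to the same operand order (smaller pointer first), so the
   pset lookup recognizes them as one expression. */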
/* During construction we set the pinned flag in the graph right when the
   optimization is performed.  The flag turning on procedure global cse could
   be changed between two allocations.  This way we are safe. */
static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
  ir_node *old = n;

  n = identify(value_table, n);
  if (get_irn_n(old, -1) != get_irn_n(n, -1))
    set_irg_pinned(current_ir_graph, floats);
  return n;
}
/* Return the canonical node computing the same value as n.
   Looks up the node in a hash table, enters it in the table
   if it isn't there yet. */
static ir_node *
identify_remember (pset *value_table, ir_node *node)
{
  ir_node *o = NULL;

  if (!value_table) return node;

  /* lookup or insert in hash table with given hash key. */
  o = pset_insert (value_table, node, ir_node_hash (node));

  if (o == node) return node;

  return o;
}

void
add_identities (pset *value_table, ir_node *node) {
  identify_remember (value_table, node);
}
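/* add_identities only enters the node into the table; it is for callers
   that rebuild the value table and do not care about the canonical
   representative returned by identify_remember. */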
/* garbage in, garbage out.  If a node has a dead input, i.e., the
   Bad node is input to the node, return the Bad node. */
static INLINE ir_node *
gigo (ir_node *node)
{
  int i;
  ir_op *op = get_irn_op(node);

  /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
     block's predecessors is dead. */
  if (op != op_Block && op != op_Phi && op != op_Tuple) {
    for (i = -1; i < get_irn_arity(node); i++) {
      if (is_Bad(get_irn_n(node, i))) {
        return new_Bad();
      }
    }
  }

  /* If Block has only Bads as predecessors it's garbage. */
  /* If Phi has only Bads as predecessors it's garbage. */
  if (op == op_Block || op == op_Phi) {
    for (i = 0; i < get_irn_arity(node); i++) {
      if (!is_Bad(get_irn_n(node, i))) break;
    }
    if (i == get_irn_arity(node)) node = new_Bad();
  }

  return node;
}
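/* Example: Add(x, Bad) is garbage and becomes Bad.  Phi(x, Bad), in
   contrast, survives: only one of its control predecessors is dead, and
   the value x may still flow in over the live one. */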
/* These optimizations deallocate nodes from the obstack.
   optimize() can only be called if it is guaranteed that no other nodes
   reference this one, i.e., right after construction of a node. */
ir_node *
optimize (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  /* Always optimize Phi nodes: part of the construction. */
  if ((!get_optimize()) && (get_irn_op(n) != op_Phi)) return n;
  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != NULL)) {
        /* evaluation was successful -- replace the node. */
        obstack_free (current_ir_graph->obst, n);
        return new_Const (get_tv_mode (tv), tv);
      }
    }
  }
  /* remove unnecessary nodes */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Phi)  ||   /* always optimize these nodes. */
      (get_irn_op(n) == op_Id)   ||
      (get_irn_op(n) == op_Proj) ||
      (get_irn_op(n) == op_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);
  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse())
    n = identify_cons (current_ir_graph->value_table, n);

  if (n != old_n) {
    /* We found an existing, better node, so we can deallocate the old node. */
    obstack_free (current_ir_graph->obst, old_n);
  }
  /* Some more constant expression evaluation that does not allow to
     free the node. */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Cond) ||
      (get_irn_op(n) == op_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node.  Enter it in hash table for cse */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
    n = identify_remember (current_ir_graph->value_table, n);
  }

  return n;
}
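/* The pipeline above: constant folding (computed_value), local
   simplification (equivalent_node), cse lookup (identify_cons),
   node-creating transformations (transform_node), dead input handling
   (gigo), and finally entering the result into the cse table. */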
/* These optimizations never deallocate nodes.  This can cause dead
   nodes lying on the obstack.  Remove these by a dead node elimination,
   i.e., a copying garbage collection. */
ir_node *
optimize_in_place_2 (ir_node *n)
{
  tarval *tv;
  ir_node *old_n = n;

  if (!get_optimize() && (get_irn_op(n) != op_Phi)) return n;
  /* if not optimize return n */
  if (n == NULL) {
    assert(0);
    /* Here this is possible.  Why? */
    return n;
  }
  /* constant expression evaluation / constant folding */
  if (get_opt_constant_folding()) {
    /* constants can not be evaluated */
    if (get_irn_op(n) != op_Const) {
      /* try to evaluate */
      tv = computed_value (n);
      if ((get_irn_mode(n) != mode_T) && (tv != NULL)) {
        /* evaluation was successful -- replace the node. */
        n = new_Const (get_tv_mode (tv), tv);
        __dbg_info_merge_pair(n, old_n, dbg_const_eval);
        return n;
      }
    }
  }
  /* remove unnecessary nodes */
  /*if (get_opt_constant_folding()) */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Phi)  ||   /* always optimize these nodes. */
      (get_irn_op(n) == op_Id)   ||   /* ... */
      (get_irn_op(n) == op_Proj) ||   /* ... */
      (get_irn_op(n) == op_Block)  )  /* Flags tested local. */
    n = equivalent_node (n);
  /** common subexpression elimination **/
  /* Checks whether n is already available. */
  /* The block input is used to distinguish different subexpressions.  Right
     now all nodes are pinned to blocks, i.e., the cse only finds common
     subexpressions within a block. */
  if (get_opt_cse()) {
    n = identify (current_ir_graph->value_table, n);
  }
  /* Some more constant expression evaluation. */
  if (get_opt_constant_folding() ||
      (get_irn_op(n) == op_Cond) ||
      (get_irn_op(n) == op_Proj))     /* Flags tested local. */
    n = transform_node (n);

  /* Remove nodes with dead (Bad) input.
     Run always for transformation induced Bads. */
  n = gigo (n);

  /* Now we can verify the node, as it has no dead inputs any more. */
  irn_vrfy(n);

  /* Now we have a legal, useful node.  Enter it in hash table for cse.
     Blocks should be unique anyways.  (Except the successor of start:
     is cse with the start block!) */
  if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
    n = identify_remember (current_ir_graph->value_table, n);

  return n;
}
/* Wrapper for external use.  Sets proper status bits after optimization. */
ir_node *
optimize_in_place (ir_node *n) {
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  /* Maybe we could also test whether optimizing the node can
     change the control graph. */
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  return optimize_in_place_2 (n);
}