2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Data-flow driven minimal fixpoint value range propagation
23 * @author Christoph Mallon
33 #include "adt/xmalloc.h"
42 #include "iroptimize.h"
49 * - Implement cleared/set bit calculation for Add, Sub, Minus, Mul, Div, Mod, Shl, Shr, Shrs, Rotl
50 * - Implement min/max calculation for And, Eor, Or, Not, Conv, Shl, Shr, Shrs, Rotl, Mux
51 * - Implement min/max calculation for Add, Sub, Minus, Mul, Div, Mod, Conv, Shl, Shr, Shrs, Rotl, Mux
54 /* Tables of the cleared/set bit lattice
56 * Encoding of the lattice
59 * 01 - impossible state, is zero /and/ one
60 * 10 - top, may be either zero or one
74 * Half adder, half subtractor, and, xor, or, Mux
86 * Full adder, full subtractor
117 * Assume: Xmin <= Xmax and no overflow
118 * A + B = (Amin + Bmin, Amax + Bmax)
119 * -A = (-Amax, -Amin)
120 * A - B = A + -B = (Amin (-B)min, Amax + (-B)max) = (Amin - Bmax, Amax - Bmin)
/* Debug module handle for this pass (present only in debug builds). */
123 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* Obstack holding all bitinfo records; freed wholesale when the pass ends. */
125 static struct obstack obst;
/* Per-node lattice element of the bit analysis (see lattice table above). */
127 typedef struct bitinfo
129 ir_tarval* z; // safe zeroes, 0 = bit is zero, 1 = bit maybe is 1
130 ir_tarval* o; // safe ones, 0 = bit maybe is zero, 1 = bit is 1
/* Walker environment: records whether apply_result() changed the graph. */
133 typedef struct environment_t {
134 unsigned modified:1; /**< Set, if the graph was modified. */
137 static bool is_undefined(bitinfo const* const b)
139 return tarval_is_null(b->z) && tarval_is_all_one(b->o);
142 static inline bitinfo* get_bitinfo(ir_node const* const irn)
144 return (bitinfo*)get_irn_link(irn);
/* Store the lattice value (z, o) for irn, allocating the bitinfo record on
 * the pass obstack at the first visit. Presumably returns nonzero iff the
 * stored value changed (drives the fixpoint iteration) — the return
 * statements are not visible in this chunk; confirm against full source. */
147 static int set_bitinfo(ir_node* const irn, ir_tarval* const z, ir_tarval* const o)
149 bitinfo* b = get_bitinfo(irn);
151 b = OALLOCZ(&obst, bitinfo);
152 set_irn_link(irn, b);
153 } else if (z == b->z && o == b->o) {
156 /* Assert monotonicity: per the asserts below, z may only gain bits
 * (b->z subset of z) and o may only lose bits (o subset of b->o). */
157 assert(tarval_is_null(tarval_andnot(b->z, z)));
158 assert(tarval_is_null(tarval_andnot(o, b->o)));
162 DB((dbg, LEVEL_3, "%+F: 0:%T 1:%T\n", irn, z, o));
166 static int mode_is_intb(ir_mode const* const m)
168 return mode_is_int(m) || m == mode_b;
/* Transfer function of the data-flow analysis: recompute the lattice value
 * (z = bits that may be one, o = bits that are surely one) of irn from the
 * current values of its operands and the reachability of its block, then
 * store it via set_bitinfo(). For mode_X nodes z==true means "may execute". */
171 static int transfer(ir_node* const irn)
173 ir_tarval* const f = get_tarval_b_false();
174 ir_tarval* const t = get_tarval_b_true();
175 ir_mode* const m = get_irn_mode(irn);
/* --- control-flow (mode_X) nodes --- */
180 bitinfo* const b = get_bitinfo(get_nodes_block(irn));
182 DB((dbg, LEVEL_3, "transfer %+F\n", irn));
184 /* Unreachable blocks might have no bitinfo. */
185 if (b == NULL || b->z == f) {
189 } else switch (get_irn_opcode(irn)) {
/* Proj X: decide whether this particular control-flow exit may be taken. */
191 ir_node* const pred = get_Proj_pred(irn);
192 if (is_Start(pred)) {
193 goto result_unknown_X;
194 } else if (is_Cond(pred)) {
195 ir_node* const selector = get_Cond_selector(pred);
196 bitinfo* const b = get_bitinfo(selector);
197 if (is_undefined(b)) {
199 } else if (get_irn_mode(selector) == mode_b) {
/* Boolean Cond: exit is taken iff selector's constant value matches the
 * Proj number. */
201 if ((b->z == t) == get_Proj_proj(irn)) {
207 goto result_unknown_X;
/* Switch Cond: compare the case value bitwise against the selector info. */
210 long const val = get_Proj_proj(irn);
211 if (val != get_Cond_default_proj(pred)) {
212 ir_tarval* const tv = new_tarval_from_long(val, get_irn_mode(selector));
213 if (!tarval_is_null(tarval_andnot(tv, b->z)) ||
214 !tarval_is_null(tarval_andnot(b->o, tv))) {
215 // At least one bit differs.
217 #if 0 // TODO must handle default Proj
218 } else if (b->z == b->o && b->z == tv) {
222 goto result_unknown_X;
225 goto cannot_analyse_X;
229 goto cannot_analyse_X;
235 goto result_unknown_X;
239 DB((dbg, LEVEL_4, "cannot analyse %+F\n", irn));
/* --- blocks: reachable iff any CFG predecessor may execute --- */
245 } else if (is_Block(irn)) {
247 int const arity = get_Block_n_cfgpreds(irn);
250 DB((dbg, LEVEL_3, "transfer %+F\n", irn));
251 for (i = 0; i != arity; ++i) {
252 bitinfo* const b = get_bitinfo(get_Block_cfgpred(irn, i));
253 if (b != NULL && b->z == t) {
/* Start and end block are always considered reachable. */
260 ir_graph *const irg = get_Block_irg(irn);
262 irn == get_irg_start_block(irg) ||
263 irn == get_irg_end_block(irg);
/* --- integer/boolean valued nodes --- */
273 } else if (mode_is_intb(m)) {
274 bitinfo* const b = get_bitinfo(get_nodes_block(irn));
276 DB((dbg, LEVEL_3, "transfer %+F\n", irn));
/* Node in an unreachable block: keep the undefined (bottom) value. */
278 if (b == NULL || b->z == f) {
280 z = get_tarval_null(m);
281 o = get_tarval_all_one(m);
282 } else if (is_Phi(irn)) {
/* Phi: join over inputs from feasible predecessors — union of possible
 * ones (z), intersection of sure ones (o). */
283 ir_node* const block = get_nodes_block(irn);
284 int const arity = get_Phi_n_preds(irn);
287 z = get_tarval_null(m);
288 o = get_tarval_all_one(m);
289 for (i = 0; i != arity; ++i) {
290 bitinfo* const b_cfg = get_bitinfo(get_Block_cfgpred(block, i));
291 if (b_cfg != NULL && b_cfg->z != f) {
292 bitinfo* const b = get_bitinfo(get_Phi_pred(irn, i));
293 /* Only use input if it's not undefined. */
294 if (!is_undefined(b)) {
295 z = tarval_or( z, b->z);
296 o = tarval_and(o, b->o);
301 int const arity = get_irn_arity(irn);
304 /* Undefined if any input is undefined. */
305 for (i = 0; i != arity; ++i) {
306 ir_node* const pred = get_irn_n(irn, i);
307 bitinfo* const pred_b = get_bitinfo(pred);
308 if (pred_b != NULL && is_undefined(pred_b))
/* Per-opcode transfer functions. */
312 switch (get_irn_opcode(irn)) {
314 z = o = get_Const_tarval(irn);
319 ir_node* const v = get_Confirm_value(irn);
320 bitinfo* const b = get_bitinfo(v);
321 /* TODO Use bound and relation. */
/* Confirmed equal to the bound: intersect with the bound's info. */
324 if ((get_Confirm_relation(irn) & ~ir_relation_unordered) == ir_relation_equal) {
325 bitinfo* const bound_b = get_bitinfo(get_Confirm_bound(irn));
326 z = tarval_and(z, bound_b->z);
327 o = tarval_or( o, bound_b->o);
/* Shifts/rotation: exact only when the shift amount is a constant (r->z). */
333 bitinfo* const l = get_bitinfo(get_Shl_left(irn));
334 bitinfo* const r = get_bitinfo(get_Shl_right(irn));
335 ir_tarval* const rz = r->z;
337 z = tarval_shl(l->z, rz);
338 o = tarval_shl(l->o, rz);
346 bitinfo* const l = get_bitinfo(get_Shr_left(irn));
347 bitinfo* const r = get_bitinfo(get_Shr_right(irn));
348 ir_tarval* const rz = r->z;
350 z = tarval_shr(l->z, rz);
351 o = tarval_shr(l->o, rz);
359 bitinfo* const l = get_bitinfo(get_Shrs_left(irn));
360 bitinfo* const r = get_bitinfo(get_Shrs_right(irn));
361 ir_tarval* const rz = r->z;
363 z = tarval_shrs(l->z, rz);
364 o = tarval_shrs(l->o, rz);
372 bitinfo* const l = get_bitinfo(get_Rotl_left(irn));
373 bitinfo* const r = get_bitinfo(get_Rotl_right(irn));
374 ir_tarval* const rz = r->z;
376 z = tarval_rotl(l->z, rz);
377 o = tarval_rotl(l->o, rz);
385 bitinfo* const l = get_bitinfo(get_Add_left(irn));
386 bitinfo* const r = get_bitinfo(get_Add_right(irn));
387 ir_tarval* const lz = l->z;
388 ir_tarval* const lo = l->o;
389 ir_tarval* const rz = r->z;
390 ir_tarval* const ro = r->o;
/* Both operands constant: fold the addition. */
391 if (lz == lo && rz == ro) {
392 z = o = tarval_add(lz, rz);
394 // TODO improve: can only do lower disjoint bits
395 /* Determine where any of the operands has zero bits, i.e. where no
396 * carry out is generated if there is not carry in */
397 ir_tarval* const no_c_in_no_c_out = tarval_and(lz, rz);
398 /* Generate a mask of the lower consecutive zeroes: x | -x. In this
399 * range the addition is disjoint and therefore Add behaves like Or.
401 ir_tarval* const low_zero_mask = tarval_or(no_c_in_no_c_out, tarval_neg(no_c_in_no_c_out));
402 ir_tarval* const low_one_mask = tarval_not(low_zero_mask);
403 z = tarval_or( tarval_or(lz, rz), low_zero_mask);
404 o = tarval_and(tarval_or(lo, ro), low_one_mask);
410 bitinfo* const l = get_bitinfo(get_Sub_left(irn));
411 bitinfo* const r = get_bitinfo(get_Sub_right(irn));
412 if (l != NULL && r != NULL) { // Sub might subtract pointers.
413 ir_tarval* const lz = l->z;
414 ir_tarval* const lo = l->o;
415 ir_tarval* const rz = r->z;
416 ir_tarval* const ro = r->o;
417 if (lz == lo && rz == ro) {
418 z = o = tarval_sub(lz, rz, NULL);
419 } else if (tarval_is_null(tarval_andnot(rz, lo))) {
420 /* Every possible one of the subtrahend is backed by a safe one of the
421 * minuend, i.e. there are no borrows. */
422 // TODO extend no-borrow like carry for Add above
423 z = tarval_andnot(lz, ro);
424 o = tarval_andnot(lo, rz);
435 bitinfo* const l = get_bitinfo(get_Mul_left(irn));
436 bitinfo* const r = get_bitinfo(get_Mul_right(irn));
437 ir_tarval* const lz = l->z;
438 ir_tarval* const lo = l->o;
439 ir_tarval* const rz = r->z;
440 ir_tarval* const ro = r->o;
441 if (lz == lo && rz == ro) {
442 z = o = tarval_mul(lz, rz);
445 // Determine safe lower zeroes: x | -x.
446 ir_tarval* const lzn = tarval_or(lz, tarval_neg(lz));
447 ir_tarval* const rzn = tarval_or(rz, tarval_neg(rz));
448 // Concatenate safe lower zeroes.
449 if (tarval_cmp(lzn, rzn) == ir_relation_less) {
450 z = tarval_mul(tarval_eor(lzn, tarval_shl(lzn, get_tarval_one(m))), rzn);
452 z = tarval_mul(tarval_eor(rzn, tarval_shl(rzn, get_tarval_one(m))), lzn);
454 o = get_tarval_null(m);
/* Minus: exact only for a constant operand (handled here via b->z). */
460 bitinfo* const b = get_bitinfo(get_Minus_op(irn));
462 z = o = tarval_neg(b->z);
/* Bitwise operations transfer bit-per-bit. */
470 bitinfo* const l = get_bitinfo(get_And_left(irn));
471 bitinfo* const r = get_bitinfo(get_And_right(irn));
472 z = tarval_and(l->z, r->z);
473 o = tarval_and(l->o, r->o);
478 bitinfo* const l = get_bitinfo(get_Or_left(irn));
479 bitinfo* const r = get_bitinfo(get_Or_right(irn));
480 z = tarval_or(l->z, r->z);
481 o = tarval_or(l->o, r->o);
486 bitinfo* const l = get_bitinfo(get_Eor_left(irn));
487 bitinfo* const r = get_bitinfo(get_Eor_right(irn));
488 ir_tarval* const lz = l->z;
489 ir_tarval* const lo = l->o;
490 ir_tarval* const rz = r->z;
491 ir_tarval* const ro = r->o;
492 z = tarval_or(tarval_andnot(lz, ro), tarval_andnot(rz, lo));
493 o = tarval_or(tarval_andnot(ro, lz), tarval_andnot(lo, rz));
/* Not swaps the roles of z and o. */
498 bitinfo* const b = get_bitinfo(get_Not_op(irn));
499 z = tarval_not(b->o);
500 o = tarval_not(b->z);
505 bitinfo* const b = get_bitinfo(get_Conv_op(irn));
506 if (b == NULL) // Happens when converting from float values.
508 z = tarval_convert_to(b->z, m);
509 o = tarval_convert_to(b->o, m);
/* Mux: pick one side if the selector is constant, otherwise join both. */
514 bitinfo* const bf = get_bitinfo(get_Mux_false(irn));
515 bitinfo* const bt = get_bitinfo(get_Mux_true(irn));
516 bitinfo* const c = get_bitinfo(get_Mux_sel(irn));
520 } else if (c->z == f) {
524 z = tarval_or( bf->z, bt->z);
525 o = tarval_and(bf->o, bt->o);
/* Cmp: try to decide the relation from the operand bit info. */
531 bitinfo* const l = get_bitinfo(get_Cmp_left(irn));
532 bitinfo* const r = get_bitinfo(get_Cmp_right(irn));
533 if (l == NULL || r == NULL) {
534 goto result_unknown; // Cmp compares something we cannot evaluate.
536 ir_tarval* const lz = l->z;
537 ir_tarval* const lo = l->o;
538 ir_tarval* const rz = r->z;
539 ir_tarval* const ro = r->o;
540 ir_relation const relation = get_Cmp_relation(irn);
542 case ir_relation_less_greater:
543 if (!tarval_is_null(tarval_andnot(ro, lz)) ||
544 !tarval_is_null(tarval_andnot(lo, rz))) {
545 // At least one bit differs.
547 } else if (lz == lo && rz == ro && lz == rz) {
554 case ir_relation_equal:
555 if (!tarval_is_null(tarval_andnot(ro, lz)) ||
556 !tarval_is_null(tarval_andnot(lo, rz))) {
557 // At least one bit differs.
559 } else if (lz == lo && rz == ro && lz == rz) {
566 case ir_relation_less_equal:
567 case ir_relation_less:
568 /* TODO handle negative values */
569 if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
570 tarval_is_negative(rz) || tarval_is_negative(ro))
/* For non-negative ranges [o, z]: compare interval bounds. */
573 if (tarval_cmp(lz, ro) & relation) {
574 /* Left upper bound is smaller(/equal) than right lower bound. */
576 } else if (!(tarval_cmp(lo, rz) & relation)) {
577 /* Left lower bound is not smaller(/equal) than right upper bound. */
584 case ir_relation_greater_equal:
585 case ir_relation_greater:
586 /* TODO handle negative values */
587 if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
588 tarval_is_negative(rz) || tarval_is_negative(ro))
591 if (!(tarval_cmp(lz, ro) & relation)) {
592 /* Left upper bound is not greater(/equal) than right lower bound. */
594 } else if (tarval_cmp(lo, rz) & relation) {
595 /* Left lower bound is greater(/equal) than right upper bound. */
/* Fallback: no information — top value (z all-one, o null). */
611 DB((dbg, LEVEL_4, "cannot analyse %+F\n", irn));
613 z = get_tarval_all_one(m);
614 o = get_tarval_null(m);
623 return set_bitinfo(irn, z, o);
/* Walker callback for the initial pass: run the transfer function once for
 * every node and queue Phis/Blocks for re-evaluation, since in loops their
 * inputs may not all have been available during this first visit. */
626 static void first_round(ir_node* const irn, void* const env)
628 pdeq* const q = (pdeq*)env;
631 if (is_Phi(irn) || is_Block(irn)) {
632 /* Only Phis (and their users) need another round, if we did not have
633 * information about all their inputs in the first round, i.e. in loops. */
634 /* TODO inserts all Phis, should only insert Phis, which did no have all
635 * predecessors available */
/* Create (or reuse) a Bad block node for irg and mark its bitinfo as the
 * undefined lattice value, so replacing unreachable blocks with it keeps
 * the analysis information consistent. */
640 static ir_node *make_bad_block(ir_graph *irg)
642 ir_node *bad = new_r_Bad(irg, mode_BB);
643 bitinfo *bb = get_bitinfo(bad);
/* Presumably only initialised when no info is attached yet — the guard is
 * not visible in this chunk; confirm against the full source. */
645 ir_tarval* const f = get_tarval_b_false();
646 ir_tarval* const t = get_tarval_b_true();
647 set_bitinfo(bad, f, t); /* Undefined. */
/* Post-analysis walker: exploit the computed bit info by replacing nodes
 * with constant lattice values by Const/Jmp/Bad, turning unreachable code
 * into Bad, and removing superfluous And/Or operands. Sets env->modified
 * whenever the graph changes. */
652 static void apply_result(ir_node* const irn, void* ctx)
654 environment_t* env = (environment_t*)ctx;
/* Unreachable block: replace by a Bad block. */
662 block_b = get_bitinfo(irn);
663 /* Trivially unreachable blocks have no info. */
664 if (block_b == NULL || block_b->z == get_tarval_b_false()) {
665 ir_node *bad = make_bad_block(get_irn_irg(irn));
/* Node inside an unreachable block: replace by Bad of its mode. */
672 block = get_nodes_block(irn);
673 block_b = get_bitinfo(block);
674 /* Trivially unreachable blocks have no info. */
675 if (block_b == NULL || block_b->z == get_tarval_b_false()) {
676 /* Unreachable blocks might be replaced before the nodes in them. */
677 ir_mode *mode = get_irn_mode(irn);
678 ir_graph *irg = get_irn_irg(irn);
679 ir_node *bad = new_r_Bad(irg, mode);
685 b = get_bitinfo(irn);
687 if (is_Const(irn)) return; // It cannot get any better than a Const.
691 // Only display information if we could find out anything about the value.
692 DEBUG_ONLY(if (!tarval_is_all_one(z) || !tarval_is_null(o)))
693 DB((dbg, LEVEL_2, "%+F: 0:%T 1:%T%s\n", irn, z, o, z == o ? " --- constant" : ""));
695 // Replace node with constant value by Const.
697 ir_mode* const m = get_irn_mode(irn);
699 if (mode_is_intb(m)) {
700 ir_graph *irg = get_irn_irg(irn);
701 n = new_r_Const(irg, z);
702 } else if (m == mode_X) {
703 ir_graph* const irg = get_Block_irg(block);
704 if (z == get_tarval_b_true()) {
705 // Might produce an endless loop, so keep the block.
706 add_End_keepalive(get_irg_end(irg), block);
707 n = new_r_Jmp(block);
709 n = new_r_Bad(irg, mode_X);
710 /* Transferring analysis information to the bad node makes it a
711 * candidate for replacement. */
/* Non-constant nodes: strength-reduce And/Or with a redundant operand. */
723 switch (get_irn_opcode(irn)) {
725 ir_node* const l = get_And_left(irn);
726 ir_node* const r = get_And_right(irn);
727 bitinfo const* const bl = get_bitinfo(l);
728 bitinfo const* const br = get_bitinfo(r);
/* And with a constant whose mask covers all possible ones of the other
 * operand is the identity on that operand. */
729 if (bl->z == bl->o) {
730 if (tarval_is_null(tarval_andnot(br->z, bl->z))) {
731 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
735 } else if (br->z == br->o) {
736 if (tarval_is_null(tarval_andnot(bl->z, br->z))) {
737 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
/* Or with a constant already contained in the sure ones of the other
 * operand is the identity on that operand. */
746 ir_node* const l = get_Or_left(irn);
747 ir_node* const r = get_Or_right(irn);
748 bitinfo const* const bl = get_bitinfo(l);
749 bitinfo const* const br = get_bitinfo(r);
750 if (bl->z == bl->o) {
751 if (tarval_is_null(tarval_andnot(bl->o, br->o))) {
752 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
756 } else if (br->z == br->o) {
757 if (tarval_is_null(tarval_andnot(br->o, bl->o))) {
758 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
/* Put all users of n that must be re-evaluated into the work queue q.
 * For data nodes this is the direct users (plus users of a mode_T user's
 * Projs); for control-flow nodes the successor blocks and their Phis. */
768 static void queue_users(pdeq* const q, ir_node* const n)
770 if (get_irn_mode(n) == mode_X) {
771 /* When the state of a control flow node changes, not only queue its
772 * successor blocks, but also the Phis in these blocks, because the Phis
773 * must reconsider this input path. */
775 foreach_out_edge(n, e) {
776 ir_node* const src = get_edge_src_irn(e);
778 /* should always be a block */
781 for (phi = get_Block_phis(src); phi; phi = get_Phi_next(phi))
787 foreach_out_edge(n, e) {
788 ir_node* const src = get_edge_src_irn(e);
/* mode_T user: its value is consumed through Projs, so queue those too. */
789 if (get_irn_mode(src) == mode_T) {
/* Pre-walker: reset every node's link field (used to hold bitinfo) and
 * clear the per-block Phi list before the analysis starts. */
798 static void clear_links(ir_node *irn, void *env)
801 set_irn_link(irn, NULL);
803 set_Block_phis(irn, NULL);
/* Post-walker: link each Phi into the Phi list of its block so queue_users()
 * can find them quickly when a control-flow input changes. */
806 static void build_phi_lists(ir_node *irn, void *env)
810 add_Block_phi(get_nodes_block(irn), irn);
/* Entry point of the pass: run the bitwise fixpoint analysis on irg and
 * apply its results. Phases: (1) clear links and build Phi lists,
 * (2) seed the end block as reachable, (3) first full round plus worklist
 * iteration until a fixpoint, (4) apply_result rewrites the graph,
 * (5) invalidate analyses affected by control-flow changes and clean up. */
813 void fixpoint_vrp(ir_graph* const irg)
817 FIRM_DBG_REGISTER(dbg, "firm.opt.fp-vrp");
818 DB((dbg, LEVEL_1, "===> Performing constant propagation on %+F\n", irg));
822 /* HACK: to avoid finding dead code */
823 edges_deactivate(irg);
829 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
832 pdeq* const q = new_pdeq();
834 /* We need this extra step because the dom tree does not contain
835 * unreachable blocks in Firm. Moreover build phi list. */
836 irg_walk_anchors(irg, clear_links, build_phi_lists, NULL);
839 ir_tarval* const f = get_tarval_b_false();
840 ir_tarval* const t = get_tarval_b_true();
841 set_bitinfo(get_irg_end_block(irg), t, f); /* Reachable. */
844 /* TODO Improve iteration order. Best is reverse postorder in data flow
845 * direction and respecting loop nesting for fastest convergence. */
846 irg_walk_blkwise_dom_top_down(irg, NULL, first_round, q);
/* Worklist loop: re-run transfer() until no value changes any more. */
848 while (!pdeq_empty(q)) {
849 ir_node* const n = (ir_node*)pdeq_getl(q);
857 DB((dbg, LEVEL_2, "---> Applying analysis results\n"));
859 irg_walk_graph(irg, NULL, apply_result, &env);
862 /* control flow might have changed */
863 set_irg_outs_inconsistent(irg);
864 set_irg_extblk_inconsistent(irg);
865 set_irg_doms_inconsistent(irg);
866 set_irg_loopinfo_inconsistent(irg);
867 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
870 ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
872 obstack_free(&obst, NULL);
875 ir_graph_pass_t *fixpoint_vrp_irg_pass(const char *name)
877 return def_graph_pass(name ? name : "fixpoint_vrp", fixpoint_vrp);