2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Data-flow driven minimal fixpoint value range propagation
23 * @author Christoph Mallon
30 #include "adt/xmalloc.h"
39 #include "iroptimize.h"
46 * - Implement cleared/set bit calculation for Add, Sub, Minus, Mul, Div, Mod, Shl, Shr, Shrs, Rotl
47 * - Implement min/max calculation for And, Eor, Or, Not, Conv, Shl, Shr, Shrs, Rotl, Mux
48 * - Implement min/max calculation for Add, Sub, Minus, Mul, Div, Mod, Conv, Shl, Shr, Shrs, Rotl, Mux
51 /* Tables of the cleared/set bit lattice
53 * Encoding of the lattice
56 * 01 - impossible state, is zero /and/ one
57 * 10 T top, may be either zero or one
71 * Half adder, half subtractor, and, xor, or, Mux
83 * Full adder, full subtractor
114 * Assume: Xmin <= Xmax and no overflow
115 * A + B = (Amin + Bmin, Amax + Bmax)
116 * -A = (-Amax, -Amin)
117 * A - B = A + -B = (Amin + (-B)min, Amax + (-B)max) = (Amin - Bmax, Amax - Bmin)
120 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
122 static struct obstack obst;
124 typedef struct bitinfo
126 ir_tarval* z; // safe zeroes, 0 = bit is zero, 1 = bit maybe is 1
127 ir_tarval* o; // safe ones, 0 = bit maybe is zero, 1 = bit is 1
130 typedef struct environment_t {
131 unsigned modified:1; /**< Set, if the graph was modified. */
134 static inline bitinfo* get_bitinfo(ir_node const* const irn)
136 return (bitinfo*)get_irn_link(irn);
139 static int set_bitinfo(ir_node* const irn, ir_tarval* const z, ir_tarval* const o)
141 bitinfo* b = get_bitinfo(irn);
143 b = OALLOCZ(&obst, bitinfo);
144 set_irn_link(irn, b);
145 } else if (z == b->z && o == b->o) {
150 DB((dbg, LEVEL_3, "%+F: 0:%T 1:%T\n", irn, z, o));
154 static int mode_is_intb(ir_mode const* const m)
156 return mode_is_int(m) || m == mode_b;
159 static int transfer(ir_node* const irn)
161 ir_mode* const m = get_irn_mode(irn);
166 DB((dbg, LEVEL_3, "transfer %+F\n", irn));
167 switch (get_irn_opcode(irn)) {
169 ir_node* const pred = get_Proj_pred(irn);
170 if (is_Start(pred)) {
171 z = get_tarval_b_true();
172 o = get_tarval_b_false();
173 } else if (is_Cond(pred)) {
174 ir_node* const selector = get_Cond_selector(pred);
175 bitinfo* const b = get_bitinfo(selector);
176 ir_tarval* const bz = b->z;
177 ir_tarval* const bo = b->o;
178 if (get_irn_mode(selector) == mode_b) {
180 if ((bz == get_tarval_b_true()) == get_Proj_proj(irn)) {
181 z = o = get_tarval_b_true();
183 z = o = get_tarval_b_false();
186 goto result_unknown_X;
189 long const val = get_Proj_proj(irn);
190 if (val != get_Cond_default_proj(pred)) {
191 ir_tarval* const tv = new_tarval_from_long(val, get_irn_mode(selector));
192 if (!tarval_is_null(tarval_andnot(tv, bz)) ||
193 !tarval_is_null(tarval_andnot(bo, tv))) {
194 // At least one bit differs.
195 z = o = get_tarval_b_false();
196 #if 0 // TODO must handle default Proj
197 } else if (bz == bo && bz == tv) {
198 z = o = get_tarval_b_true();
201 goto result_unknown_X;
204 goto cannot_analyse_X;
208 goto cannot_analyse_X;
214 bitinfo* const b = get_bitinfo(get_nodes_block(irn));
222 DB((dbg, LEVEL_4, "cannot analyse %+F\n", irn));
224 z = get_tarval_b_true();
225 o = get_tarval_b_false();
228 } else if (is_Block(irn)) {
230 int const arity = get_Block_n_cfgpreds(irn);
233 DB((dbg, LEVEL_3, "transfer %+F\n", irn));
234 for (i = 0; i != arity; ++i) {
235 bitinfo* const b = get_bitinfo(get_Block_cfgpred(irn, i));
236 if (b != NULL && b->z == get_tarval_b_true()) {
242 o = get_tarval_b_false();
243 z = reachable || irn == get_irg_start_block(get_irn_irg(irn)) ? get_tarval_b_true() : o;
244 } else if (mode_is_intb(m)) {
245 DB((dbg, LEVEL_3, "transfer %+F\n", irn));
246 switch (get_irn_opcode(irn)) {
248 z = o = get_Const_tarval(irn);
253 bitinfo* const l = get_bitinfo(get_Shl_left(irn));
254 bitinfo* const r = get_bitinfo(get_Shl_right(irn));
255 ir_tarval* const rz = r->z;
257 z = tarval_shl(l->z, rz);
258 o = tarval_shl(l->o, rz);
266 bitinfo* const l = get_bitinfo(get_Shr_left(irn));
267 bitinfo* const r = get_bitinfo(get_Shr_right(irn));
268 ir_tarval* const rz = r->z;
270 z = tarval_shr(l->z, rz);
271 o = tarval_shr(l->o, rz);
279 bitinfo* const l = get_bitinfo(get_Shrs_left(irn));
280 bitinfo* const r = get_bitinfo(get_Shrs_right(irn));
281 ir_tarval* const rz = r->z;
283 z = tarval_shrs(l->z, rz);
284 o = tarval_shrs(l->o, rz);
292 bitinfo* const l = get_bitinfo(get_Rotl_left(irn));
293 bitinfo* const r = get_bitinfo(get_Rotl_right(irn));
294 ir_tarval* const rz = r->z;
296 z = tarval_rotl(l->z, rz);
297 o = tarval_rotl(l->o, rz);
305 bitinfo* const l = get_bitinfo(get_Add_left(irn));
306 bitinfo* const r = get_bitinfo(get_Add_right(irn));
307 ir_tarval* const lz = l->z;
308 ir_tarval* const lo = l->o;
309 ir_tarval* const rz = r->z;
310 ir_tarval* const ro = r->o;
311 if (lz == lo && rz == ro) {
312 z = o = tarval_add(lz, rz);
314 // TODO improve: can only do lower disjoint bits
315 /* Determine where any of the operands has zero bits, i.e. where no
316 * carry out is generated if there is not carry in */
317 ir_tarval* const no_c_in_no_c_out = tarval_and(lz, rz);
318 /* Generate a mask of the lower consecutive zeroes: x | -x. In this
319 * range the addition is disjoint and therefore Add behaves like Or.
321 ir_tarval* const low_zero_mask = tarval_or(no_c_in_no_c_out, tarval_neg(no_c_in_no_c_out));
322 ir_tarval* const low_one_mask = tarval_not(low_zero_mask);
323 z = tarval_or( tarval_or(lz, rz), low_zero_mask);
324 o = tarval_and(tarval_or(lo, ro), low_one_mask);
330 bitinfo* const l = get_bitinfo(get_Sub_left(irn));
331 bitinfo* const r = get_bitinfo(get_Sub_right(irn));
332 if (l != NULL && r != NULL) { // Sub might subtract pointers.
333 ir_tarval* const lz = l->z;
334 ir_tarval* const lo = l->o;
335 ir_tarval* const rz = r->z;
336 ir_tarval* const ro = r->o;
337 if (lz == lo && rz == ro) {
338 z = o = tarval_sub(lz, rz, NULL);
339 } else if (tarval_is_null(tarval_andnot(rz, lo))) {
340 /* Every possible one of the subtrahend is backed by a safe one of the
341 * minuend, i.e. there are no borrows. */
342 // TODO extend no-borrow like carry for Add above
343 z = tarval_andnot(lz, ro);
344 o = tarval_andnot(lo, rz);
355 bitinfo* const l = get_bitinfo(get_Mul_left(irn));
356 bitinfo* const r = get_bitinfo(get_Mul_right(irn));
357 ir_tarval* const lz = l->z;
358 ir_tarval* const lo = l->o;
359 ir_tarval* const rz = r->z;
360 ir_tarval* const ro = r->o;
361 if (lz == lo && rz == ro) {
362 z = o = tarval_mul(lz, rz);
365 // Determine safe lower zeroes: x | -x.
366 ir_tarval* const lzn = tarval_or(lz, tarval_neg(lz));
367 ir_tarval* const rzn = tarval_or(rz, tarval_neg(rz));
368 // Concatenate safe lower zeroes.
369 if (tarval_cmp(lzn, rzn) == ir_relation_less) {
370 z = tarval_mul(tarval_eor(lzn, tarval_shl(lzn, get_tarval_one(m))), rzn);
372 z = tarval_mul(tarval_eor(rzn, tarval_shl(rzn, get_tarval_one(m))), lzn);
374 o = get_tarval_null(m);
380 bitinfo* const b = get_bitinfo(get_Minus_op(irn));
382 z = o = tarval_neg(b->z);
390 bitinfo* const l = get_bitinfo(get_And_left(irn));
391 bitinfo* const r = get_bitinfo(get_And_right(irn));
392 z = tarval_and(l->z, r->z);
393 o = tarval_and(l->o, r->o);
398 bitinfo* const l = get_bitinfo(get_Or_left(irn));
399 bitinfo* const r = get_bitinfo(get_Or_right(irn));
400 z = tarval_or(l->z, r->z);
401 o = tarval_or(l->o, r->o);
406 bitinfo* const l = get_bitinfo(get_Eor_left(irn));
407 bitinfo* const r = get_bitinfo(get_Eor_right(irn));
408 ir_tarval* const lz = l->z;
409 ir_tarval* const lo = l->o;
410 ir_tarval* const rz = r->z;
411 ir_tarval* const ro = r->o;
412 z = tarval_or(tarval_andnot(lz, ro), tarval_andnot(rz, lo));
413 o = tarval_or(tarval_andnot(ro, lz), tarval_andnot(lo, rz));
418 bitinfo* const b = get_bitinfo(get_Not_op(irn));
419 z = tarval_not(b->o);
420 o = tarval_not(b->z);
425 bitinfo* const b = get_bitinfo(get_Conv_op(irn));
426 if (b == NULL) // Happens when converting from float values.
428 z = tarval_convert_to(b->z, m);
429 o = tarval_convert_to(b->o, m);
434 bitinfo* const f = get_bitinfo(get_Mux_false(irn));
435 bitinfo* const t = get_bitinfo(get_Mux_true(irn));
436 bitinfo* const c = get_bitinfo(get_Mux_sel(irn));
437 if (c->o == get_tarval_b_true()) {
440 } else if (c->z == get_tarval_b_false()) {
444 z = tarval_or( f->z, t->z);
445 o = tarval_and(f->o, t->o);
451 ir_node* const block = get_nodes_block(irn);
452 int const arity = get_Phi_n_preds(irn);
455 z = get_tarval_null(m);
456 o = get_tarval_all_one(m);
457 for (i = 0; i != arity; ++i) {
458 bitinfo* const b_cfg = get_bitinfo(get_Block_cfgpred(block, i));
459 if (b_cfg != NULL && b_cfg->z != get_tarval_b_false()) {
460 bitinfo* const b = get_bitinfo(get_Phi_pred(irn, i));
461 z = tarval_or( z, b->z);
462 o = tarval_and(o, b->o);
469 bitinfo* const l = get_bitinfo(get_Cmp_left(irn));
470 bitinfo* const r = get_bitinfo(get_Cmp_right(irn));
471 if (l == NULL || r == NULL) {
472 goto result_unknown; // Cmp compares something we cannot evaluate.
474 ir_tarval* const lz = l->z;
475 ir_tarval* const lo = l->o;
476 ir_tarval* const rz = r->z;
477 ir_tarval* const ro = r->o;
478 ir_relation const relation = get_Cmp_relation(irn);
480 case ir_relation_less_greater:
481 if (!tarval_is_null(tarval_andnot(ro, lz)) ||
482 !tarval_is_null(tarval_andnot(lo, rz))) {
483 // At least one bit differs.
484 z = o = get_tarval_b_true();
485 } else if (lz == lo && rz == ro && lz == rz) {
486 z = o = get_tarval_b_false();
492 case ir_relation_equal:
493 if (!tarval_is_null(tarval_andnot(ro, lz)) ||
494 !tarval_is_null(tarval_andnot(lo, rz))) {
495 // At least one bit differs.
496 z = o = get_tarval_b_false();
497 } else if (lz == lo && rz == ro && lz == rz) {
498 z = o = get_tarval_b_true();
504 case ir_relation_less_equal:
505 case ir_relation_less:
506 /* TODO handle negative values */
507 if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
508 tarval_is_negative(rz) || tarval_is_negative(ro))
511 if (tarval_cmp(lz, ro) & relation) {
512 /* Left upper bound is smaller(/equal) than right lower bound. */
513 z = o = get_tarval_b_true();
514 } else if (!(tarval_cmp(lo, rz) & relation)) {
515 /* Left lower bound is not smaller(/equal) than right upper bound. */
516 z = o = get_tarval_b_false();
522 case ir_relation_greater_equal:
523 case ir_relation_greater:
524 /* TODO handle negative values */
525 if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
526 tarval_is_negative(rz) || tarval_is_negative(ro))
529 if (!(tarval_cmp(lz, ro) & relation)) {
530 /* Left upper bound is not greater(/equal) than right lower bound. */
531 z = o = get_tarval_b_false();
532 } else if (tarval_cmp(lo, rz) & relation) {
533 /* Left lower bound is greater(/equal) than right upper bound. */
534 z = o = get_tarval_b_true();
549 DB((dbg, LEVEL_4, "cannot analyse %+F\n", irn));
551 z = get_tarval_all_one(m);
552 o = get_tarval_null(m);
560 return set_bitinfo(irn, z, o);
563 static void first_round(ir_node* const irn, void* const env)
565 pdeq* const q = (pdeq*)env;
568 if (is_Phi(irn) || is_Block(irn)) {
569 /* Only Phis (and their users) need another round, if we did not have
570 * information about all their inputs in the first round, i.e. in loops. */
571 /* TODO inserts all Phis, should only insert Phis, which did no have all
572 * predecessors available */
577 static void apply_result(ir_node* const irn, void* ctx)
579 environment_t* env = (environment_t*)ctx;
580 bitinfo* const b = get_bitinfo(irn);
585 if (is_Const(irn)) return; // It cannot get any better than a Const.
589 // Only display information if we could find out anything about the value.
590 DEBUG_ONLY(if (!tarval_is_all_one(z) || !tarval_is_null(o)))
591 DB((dbg, LEVEL_2, "%+F: 0:%T 1:%T%s\n", irn, z, o, z == o ? " --- constant" : ""));
593 // Replace node with constant value by Const.
595 ir_mode* const m = get_irn_mode(irn);
597 if (mode_is_intb(m)) {
598 ir_graph *irg = get_irn_irg(irn);
599 n = new_r_Const(irg, z);
600 } else if (m == mode_X) {
601 ir_node* const block = get_nodes_block(irn);
602 ir_graph* const irg = get_Block_irg(block);
603 if (z == get_tarval_b_true()) {
604 // Might produce an endless loop, so keep the block.
605 add_End_keepalive(get_irg_end(irg), block);
606 n = new_r_Jmp(block);
609 /* Transferring analysis information to the bad node makes it a
610 * candidate for replacement. */
622 switch (get_irn_opcode(irn)) {
624 ir_node* const l = get_And_left(irn);
625 ir_node* const r = get_And_right(irn);
626 bitinfo const* const bl = get_bitinfo(l);
627 bitinfo const* const br = get_bitinfo(r);
628 if (bl->z == bl->o) {
629 if (tarval_is_null(tarval_andnot(br->z, bl->z))) {
630 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
634 } else if (br->z == br->o) {
635 if (tarval_is_null(tarval_andnot(bl->z, br->z))) {
636 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
645 ir_node* const l = get_Or_left(irn);
646 ir_node* const r = get_Or_right(irn);
647 bitinfo const* const bl = get_bitinfo(l);
648 bitinfo const* const br = get_bitinfo(r);
649 if (bl->z == bl->o) {
650 if (tarval_is_null(tarval_andnot(bl->o, br->o))) {
651 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
655 } else if (br->z == br->o) {
656 if (tarval_is_null(tarval_andnot(br->o, bl->o))) {
657 DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
667 static void queue_users(pdeq* const q, ir_node* const n)
669 if (get_irn_mode(n) == mode_X) {
670 /* When the state of a control flow node changes, not only queue its
671 * successor blocks, but also the Phis in these blocks, because the Phis
672 * must reconsider this input path. */
674 foreach_out_edge(n, e) {
675 ir_node* const src = get_edge_src_irn(e);
677 /* should always be a block */
680 for (phi = get_Block_phis(src); phi; phi = get_Phi_next(phi))
686 foreach_out_edge(n, e) {
687 ir_node* const src = get_edge_src_irn(e);
688 if (get_irn_mode(src) == mode_T) {
697 static void clear_links(ir_node *irn, void *env)
700 set_irn_link(irn, NULL);
702 set_Block_phis(irn, NULL);
705 static void build_phi_lists(ir_node *irn, void *env)
709 add_Block_phi(get_nodes_block(irn), irn);
712 void fixpoint_vrp(ir_graph* const irg)
716 FIRM_DBG_REGISTER(dbg, "firm.opt.fp-vrp");
717 DB((dbg, LEVEL_1, "===> Performing constant propagation on %+F\n", irg));
721 /* HACK: to avoid finding dead code */
722 edges_deactivate(irg);
728 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
731 pdeq* const q = new_pdeq();
733 /* We need this extra step because the dom tree does not contain unreachable
734 blocks in Firm. Moreover build phi list. */
735 irg_walk_graph(irg, clear_links, build_phi_lists, NULL);
737 /* TODO Improve iteration order. Best is reverse postorder in data flow
738 * direction and respecting loop nesting for fastest convergence. */
739 irg_walk_blkwise_dom_top_down(irg, firm_clear_link, first_round, q);
741 while (!pdeq_empty(q)) {
742 ir_node* const n = (ir_node*)pdeq_getl(q);
750 DB((dbg, LEVEL_2, "---> Applying analysis results\n"));
752 irg_walk_graph(irg, NULL, apply_result, &env);
755 /* control flow might changed */
756 set_irg_outs_inconsistent(irg);
757 set_irg_extblk_inconsistent(irg);
758 set_irg_doms_inconsistent(irg);
759 set_irg_loopinfo_inconsistent(irg);
760 set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
763 ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
765 obstack_free(&obst, NULL);
768 ir_graph_pass_t *fixpoint_vrp_irg_pass(const char *name)
770 return def_graph_pass(name ? name : "fixpoint_vrp", fixpoint_vrp);