* @author Christoph Mallon
* @version $Id$
*/
-
#include "config.h"
#include "adt/pdeq.h"
#include "irtools.h"
#include "tv.h"
#include "irpass.h"
+#include "irmemory.h"
/* TODO:
* - Implement cleared/set bit calculation for Add, Sub, Minus, Mul, Div, Mod, Shl, Shr, Shrs, Rotl
typedef struct bitinfo
{
- tarval* z; // safe zeroes, 0 = bit is zero, 1 = bit maybe is 1
- tarval* o; // safe ones, 0 = bit maybe is zero, 1 = bit is 1
+ ir_tarval* z; // safe zeroes, 0 = bit is zero, 1 = bit maybe is 1
+ ir_tarval* o; // safe ones, 0 = bit maybe is zero, 1 = bit is 1
} bitinfo;
+/* Walker environment for the rewrite phase (apply_result): records
+ * whether any node was exchanged, so fixpoint_vrp() can invalidate
+ * derived graph information (outs, doms, loop info) afterwards. */
+typedef struct environment_t {
+	unsigned modified:1;   /**< Set, if the graph was modified. */
+} environment_t;
+
+/* Fetch the per-node bit analysis record stashed in the node's link field
+ * (link use is reserved via IR_RESOURCE_IRN_LINK); NULL if not yet computed. */
static inline bitinfo* get_bitinfo(ir_node const* const irn)
{
-	return get_irn_link(irn);
+	return (bitinfo*)get_irn_link(irn);
}
-static int set_bitinfo(ir_node* const irn, tarval* const z, tarval* const o)
+static int set_bitinfo(ir_node* const irn, ir_tarval* const z, ir_tarval* const o)
{
bitinfo* b = get_bitinfo(irn);
if (b == NULL) {
static int transfer(ir_node* const irn)
{
ir_mode* const m = get_irn_mode(irn);
- tarval* z;
- tarval* o;
+ ir_tarval* z;
+ ir_tarval* o;
if (m == mode_X) {
DB((dbg, LEVEL_3, "transfer %+F\n", irn));
z = get_tarval_b_true();
o = get_tarval_b_false();
} else if (is_Cond(pred)) {
- ir_node* const selector = get_Cond_selector(pred);
- bitinfo* const b = get_bitinfo(selector);
- tarval* const bz = b->z;
- tarval* const bo = b->o;
+ ir_node* const selector = get_Cond_selector(pred);
+ bitinfo* const b = get_bitinfo(selector);
+ ir_tarval* const bz = b->z;
+ ir_tarval* const bo = b->o;
if (get_irn_mode(selector) == mode_b) {
if (bz == bo) {
if ((bz == get_tarval_b_true()) == get_Proj_proj(irn)) {
} else {
long const val = get_Proj_proj(irn);
if (val != get_Cond_default_proj(pred)) {
- tarval* const tv = new_tarval_from_long(val, get_irn_mode(selector));
+ ir_tarval* const tv = new_tarval_from_long(val, get_irn_mode(selector));
if (!tarval_is_null(tarval_andnot(tv, bz)) ||
!tarval_is_null(tarval_andnot(bo, tv))) {
// At least one bit differs.
}
case iro_Shl: {
- bitinfo* const l = get_bitinfo(get_Shl_left(irn));
- bitinfo* const r = get_bitinfo(get_Shl_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Shl_left(irn));
+ bitinfo* const r = get_bitinfo(get_Shl_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_shl(l->z, rz);
o = tarval_shl(l->o, rz);
}
case iro_Shr: {
- bitinfo* const l = get_bitinfo(get_Shr_left(irn));
- bitinfo* const r = get_bitinfo(get_Shr_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Shr_left(irn));
+ bitinfo* const r = get_bitinfo(get_Shr_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_shr(l->z, rz);
o = tarval_shr(l->o, rz);
}
case iro_Shrs: {
- bitinfo* const l = get_bitinfo(get_Shrs_left(irn));
- bitinfo* const r = get_bitinfo(get_Shrs_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Shrs_left(irn));
+ bitinfo* const r = get_bitinfo(get_Shrs_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_shrs(l->z, rz);
o = tarval_shrs(l->o, rz);
}
case iro_Rotl: {
- bitinfo* const l = get_bitinfo(get_Rotl_left(irn));
- bitinfo* const r = get_bitinfo(get_Rotl_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Rotl_left(irn));
+ bitinfo* const r = get_bitinfo(get_Rotl_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_rotl(l->z, rz);
o = tarval_rotl(l->o, rz);
}
case iro_Add: {
- bitinfo* const l = get_bitinfo(get_Add_left(irn));
- bitinfo* const r = get_bitinfo(get_Add_right(irn));
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ bitinfo* const l = get_bitinfo(get_Add_left(irn));
+ bitinfo* const r = get_bitinfo(get_Add_right(irn));
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
if (lz == lo && rz == ro) {
z = o = tarval_add(lz, rz);
} else {
// TODO improve: can only do lower disjoint bits
/* Determine where any of the operands has zero bits, i.e. where no
* carry out is generated if there is not carry in */
- tarval* const no_c_in_no_c_out = tarval_and(lz, rz);
+ ir_tarval* const no_c_in_no_c_out = tarval_and(lz, rz);
/* Generate a mask of the lower consecutive zeroes: x | -x. In this
* range the addition is disjoint and therefore Add behaves like Or.
*/
- tarval* const low_zero_mask = tarval_or(no_c_in_no_c_out, tarval_neg(no_c_in_no_c_out));
- tarval* const low_one_mask = tarval_not(low_zero_mask);
+ ir_tarval* const low_zero_mask = tarval_or(no_c_in_no_c_out, tarval_neg(no_c_in_no_c_out));
+ ir_tarval* const low_one_mask = tarval_not(low_zero_mask);
z = tarval_or( tarval_or(lz, rz), low_zero_mask);
o = tarval_and(tarval_or(lo, ro), low_one_mask);
}
bitinfo* const l = get_bitinfo(get_Sub_left(irn));
bitinfo* const r = get_bitinfo(get_Sub_right(irn));
if (l != NULL && r != NULL) { // Sub might subtract pointers.
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
if (lz == lo && rz == ro) {
z = o = tarval_sub(lz, rz, NULL);
} else if (tarval_is_null(tarval_andnot(rz, lo))) {
}
case iro_Mul: {
- bitinfo* const l = get_bitinfo(get_Mul_left(irn));
- bitinfo* const r = get_bitinfo(get_Mul_right(irn));
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ bitinfo* const l = get_bitinfo(get_Mul_left(irn));
+ bitinfo* const r = get_bitinfo(get_Mul_right(irn));
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
if (lz == lo && rz == ro) {
z = o = tarval_mul(lz, rz);
} else {
// TODO improve
// Determine safe lower zeroes: x | -x.
- tarval* const lzn = tarval_or(lz, tarval_neg(lz));
- tarval* const rzn = tarval_or(rz, tarval_neg(rz));
+ ir_tarval* const lzn = tarval_or(lz, tarval_neg(lz));
+ ir_tarval* const rzn = tarval_or(rz, tarval_neg(rz));
// Concatenate safe lower zeroes.
if (tarval_cmp(lzn, rzn) == pn_Cmp_Lt) {
z = tarval_mul(tarval_eor(lzn, tarval_shl(lzn, get_tarval_one(m))), rzn);
}
case iro_Eor: {
- bitinfo* const l = get_bitinfo(get_Eor_left(irn));
- bitinfo* const r = get_bitinfo(get_Eor_right(irn));
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ bitinfo* const l = get_bitinfo(get_Eor_left(irn));
+ bitinfo* const r = get_bitinfo(get_Eor_right(irn));
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
z = tarval_or(tarval_andnot(lz, ro), tarval_andnot(rz, lo));
o = tarval_or(tarval_andnot(ro, lz), tarval_andnot(lo, rz));
break;
z = tarval_or( f->z, t->z);
o = tarval_and(f->o, t->o);
}
+ break;
}
case iro_Phi: {
if (is_Cmp(pred)) { // TODO generalize
bitinfo* const l = get_bitinfo(get_Cmp_left(pred));
bitinfo* const r = get_bitinfo(get_Cmp_right(pred));
- if (l == NULL || r == NULL)
+ if (l == NULL || r == NULL) {
goto result_unknown; // Cmp compares something we cannot evaluate.
- switch (get_Proj_proj(irn)) {
- case pn_Cmp_Lg: {
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
- if (!tarval_is_null(tarval_andnot(ro, lz)) ||
- !tarval_is_null(tarval_andnot(lo, rz))) {
- // At least one bit differs.
- z = o = get_tarval_b_true();
- } else if (lz == lo && rz == ro && lz == rz) {
- z = o = get_tarval_b_false();
- } else {
- goto result_unknown;
- }
- break;
- }
-
- case pn_Cmp_Eq: {
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
- if (!tarval_is_null(tarval_andnot(ro, lz)) ||
- !tarval_is_null(tarval_andnot(lo, rz))) {
- // At least one bit differs.
- z = o = get_tarval_b_false();
- } else if (lz == lo && rz == ro && lz == rz) {
- z = o = get_tarval_b_true();
- } else {
- goto result_unknown;
- }
- break;
+			} else {
+				ir_tarval* const lz = l->z;
+				ir_tarval* const lo = l->o;
+				ir_tarval* const rz = r->z;
+				ir_tarval* const ro = r->o;
+				pn_Cmp const pn = get_Proj_proj(irn);
+				switch (pn) {
+					/* Lg: the operands definitely differ if one side has a
+					 * known-1 bit where the other side can never be 1. */
+					case pn_Cmp_Lg:
+						if (!tarval_is_null(tarval_andnot(ro, lz)) ||
+								!tarval_is_null(tarval_andnot(lo, rz))) {
+							// At least one bit differs.
+							z = o = get_tarval_b_true();
+						} else if (lz == lo && rz == ro && lz == rz) {
+							z = o = get_tarval_b_false();
+						} else {
+							goto result_unknown;
+						}
+						break;
+
+					case pn_Cmp_Eq:
+						if (!tarval_is_null(tarval_andnot(ro, lz)) ||
+								!tarval_is_null(tarval_andnot(lo, rz))) {
+							// At least one bit differs.
+							z = o = get_tarval_b_false();
+						} else if (lz == lo && rz == ro && lz == rz) {
+							z = o = get_tarval_b_true();
+						} else {
+							goto result_unknown;
+						}
+						break;
+
+					/* For non-negative values the pair (o, z) bounds the value:
+					 * o <= value <= z (o has only known-1 bits, z all maybe-1
+					 * bits), so interval endpoints can be compared.  tarval_cmp()
+					 * yields a pn_Cmp relation; '& pn' tests whether that relation
+					 * satisfies the requested predicate — TODO confirm for all
+					 * integer modes. */
+					case pn_Cmp_Le:
+					case pn_Cmp_Lt:
+						/* TODO handle negative values */
+						if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
+								tarval_is_negative(rz) || tarval_is_negative(ro))
+							goto result_unknown;
+
+						if (tarval_cmp(lz, ro) & pn) {
+							/* Left upper bound is smaller(/equal) than right lower bound. */
+							z = o = get_tarval_b_true();
+						} else if (!(tarval_cmp(lo, rz) & pn)) {
+							/* Left lower bound is not smaller(/equal) than right upper bound. */
+							z = o = get_tarval_b_false();
+						} else {
+							goto result_unknown;
+						}
+						break;
+
+					case pn_Cmp_Ge:
+					case pn_Cmp_Gt:
+						/* TODO handle negative values */
+						if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
+								tarval_is_negative(rz) || tarval_is_negative(ro))
+							goto result_unknown;
+
+						if (!(tarval_cmp(lz, ro) & pn)) {
+							/* Left upper bound is not greater(/equal) than right lower bound. */
+							z = o = get_tarval_b_false();
+						} else if (tarval_cmp(lo, rz) & pn) {
+							/* Left lower bound is greater(/equal) than right upper bound. */
+							z = o = get_tarval_b_true();
+						} else {
+							goto result_unknown;
+						}
+						break;
+
+					default:
+						goto cannot_analyse;
				}
-
-				default:
-					goto cannot_analyse;
			}
} else {
goto cannot_analyse;
static void first_round(ir_node* const irn, void* const env)
{
- pdeq* const q = env;
+ pdeq* const q = (pdeq*)env;
transfer(irn);
if (is_Phi(irn) || is_Block(irn)) {
}
}
-static void apply_result(ir_node* const irn, void* const env)
+static void apply_result(ir_node* const irn, void* ctx)
{
- bitinfo* const b = get_bitinfo(irn);
- tarval* z;
- tarval* o;
- (void)env;
+ environment_t* env = (environment_t*)ctx;
+ bitinfo* const b = get_bitinfo(irn);
+ ir_tarval* z;
+ ir_tarval* o;
if (!b) return;
if (is_Const(irn)) return; // It cannot get any better than a Const.
ir_mode* const m = get_irn_mode(irn);
ir_node* n;
if (mode_is_intb(m)) {
- n = new_Const(z);
+ ir_graph *irg = get_irn_irg(irn);
+ n = new_r_Const(irg, z);
} else if (m == mode_X) {
ir_node* const block = get_nodes_block(irn);
ir_graph* const irg = get_Block_irg(block);
set_irn_link(n, b);
exchange_only:
exchange(irn, n);
+ env->modified = 1;
}
switch (get_irn_opcode(irn)) {
if (tarval_is_null(tarval_andnot(br->z, bl->z))) {
DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
exchange(irn, r);
+ env->modified = 1;
}
} else if (br->z == br->o) {
if (tarval_is_null(tarval_andnot(bl->z, br->z))) {
DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
exchange(irn, l);
+ env->modified = 1;
}
}
break;
if (tarval_is_null(tarval_andnot(bl->o, br->o))) {
DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
exchange(irn, r);
+ env->modified = 1;
}
} else if (br->z == br->o) {
if (tarval_is_null(tarval_andnot(br->o, bl->o))) {
DB((dbg, LEVEL_2, "%+F(%+F, %+F) is superfluous\n", irn, l, r));
exchange(irn, l);
+ env->modified = 1;
}
}
break;
		ir_edge_t const* e;
		foreach_out_edge(n, e) {
			ir_node* const src = get_edge_src_irn(e);
-			ir_edge_t const* src_e;
			pdeq_putr(q, src);
+			/* should always be a block */
+			if (is_Block(src)) {
+				/* Re-queue the block's Phis via the Phi list built by
+				 * build_phi_lists() instead of scanning all out edges. */
+				ir_node *phi;
+				for (phi = get_Block_phis(src); phi; phi = get_Phi_next(phi))
+					pdeq_putr(q, phi);
-			foreach_out_edge(src, src_e) {
-				ir_node* const src_src = get_edge_src_irn(src_e);
-				if (is_Phi(src_src))
-					pdeq_putr(q, src_src);
			}
		}
} else {
}
}
+/* Pre-walker: reset every node's link field and give each block an empty
+ * Phi list, as preparation for build_phi_lists(). */
+static void clear_links(ir_node *irn, void *env)
+{
+	(void) env;
+	set_irn_link(irn, NULL);
+	if (is_Block(irn))
+		set_Block_phis(irn, NULL);
+}
+
+/* Post-walker: chain each Phi node into the Phi list of its block
+ * (lists must have been cleared by clear_links() beforehand). */
+static void build_phi_lists(ir_node *irn, void *env)
+{
+	(void) env;
+	if (is_Phi(irn))
+		add_Block_phi(get_nodes_block(irn), irn);
+}
+
+/* Entry point: iterate the bit-value transfer function to a fixpoint over
+ * irg, then rewrite nodes the analysis proved constant; if anything was
+ * rewritten, mark derived graph information as inconsistent. */
void fixpoint_vrp(ir_graph* const irg)
{
+	environment_t env;
+
	FIRM_DBG_REGISTER(dbg, "firm.opt.fp-vrp");
	DB((dbg, LEVEL_1, "===> Performing constant propagation on %+F\n", irg));
	edges_assure(irg);
	assure_doms(irg);
-	{ pdeq* const q = new_pdeq();
+	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
+
+	{
+		pdeq* const q = new_pdeq();
+
+		/* We need this extra step because the dom tree does not contain unreachable
+		   blocks in Firm. Moreover build phi list. */
+		irg_walk_graph(irg, clear_links, build_phi_lists, NULL);
		/* TODO Improve iteration order. Best is reverse postorder in data flow
		 * direction and respecting loop nesting for fastest convergence. */
		irg_walk_blkwise_dom_top_down(irg, firm_clear_link, first_round, q);
		while (!pdeq_empty(q)) {
-			ir_node* const n = pdeq_getl(q);
+			ir_node* const n = (ir_node*)pdeq_getl(q);
			if (transfer(n))
				queue_users(q, n);
		}
	}
	DB((dbg, LEVEL_2, "---> Applying analysis results\n"));
-	irg_walk_graph(irg, NULL, apply_result, NULL);
+	env.modified = 0;
+	irg_walk_graph(irg, NULL, apply_result, &env);
+
+	if (env.modified) {
+		/* control flow might have changed */
+		set_irg_outs_inconsistent(irg);
+		set_irg_extblk_inconsistent(irg);
+		set_irg_doms_inconsistent(irg);
+		set_irg_loopinfo_inconsistent(irg);
+		set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+	}
+
+	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
	obstack_free(&obst, NULL);
}