typedef struct bitinfo
{
- tarval* z; // safe zeroes, 0 = bit is zero, 1 = bit maybe is 1
- tarval* o; // safe ones, 0 = bit maybe is zero, 1 = bit is 1
+ ir_tarval* z; // safe zeroes, 0 = bit is zero, 1 = bit may be 1
+ ir_tarval* o; // safe ones, 0 = bit may be zero, 1 = bit is 1
} bitinfo;
typedef struct environment_t {
static inline bitinfo* get_bitinfo(ir_node const* const irn)
{
- return get_irn_link(irn);
+ return (bitinfo*)get_irn_link(irn);
}
-static int set_bitinfo(ir_node* const irn, tarval* const z, tarval* const o)
+static int set_bitinfo(ir_node* const irn, ir_tarval* const z, ir_tarval* const o)
{
bitinfo* b = get_bitinfo(irn);
if (b == NULL) {
static int transfer(ir_node* const irn)
{
- ir_mode* const m = get_irn_mode(irn);
- tarval* z;
- tarval* o;
+ ir_tarval* const f = get_tarval_b_false();
+ ir_tarval* const t = get_tarval_b_true();
+ ir_mode* const m = get_irn_mode(irn);
+ ir_tarval* z;
+ ir_tarval* o;
if (m == mode_X) {
+ bitinfo* const b = get_bitinfo(get_nodes_block(irn));
+
DB((dbg, LEVEL_3, "transfer %+F\n", irn));
- switch (get_irn_opcode(irn)) {
+
+ if (b->z == f) {
+ z = f;
+ o = t;
+ } else switch (get_irn_opcode(irn)) {
case iro_Proj: {
ir_node* const pred = get_Proj_pred(irn);
if (is_Start(pred)) {
- z = get_tarval_b_true();
- o = get_tarval_b_false();
+ goto result_unknown_X;
} else if (is_Cond(pred)) {
- ir_node* const selector = get_Cond_selector(pred);
- bitinfo* const b = get_bitinfo(selector);
- tarval* const bz = b->z;
- tarval* const bo = b->o;
+ ir_node* const selector = get_Cond_selector(pred);
+ bitinfo* const b = get_bitinfo(selector);
+ ir_tarval* const bz = b->z;
+ ir_tarval* const bo = b->o;
if (get_irn_mode(selector) == mode_b) {
if (bz == bo) {
- if ((bz == get_tarval_b_true()) == get_Proj_proj(irn)) {
- z = o = get_tarval_b_true();
+ if ((bz == t) == get_Proj_proj(irn)) {
+ z = o = t;
} else {
- z = o = get_tarval_b_false();
+ z = o = f;
}
} else {
goto result_unknown_X;
} else {
long const val = get_Proj_proj(irn);
if (val != get_Cond_default_proj(pred)) {
- tarval* const tv = new_tarval_from_long(val, get_irn_mode(selector));
+ ir_tarval* const tv = new_tarval_from_long(val, get_irn_mode(selector));
if (!tarval_is_null(tarval_andnot(tv, bz)) ||
!tarval_is_null(tarval_andnot(bo, tv))) {
// At least one bit differs.
- z = o = get_tarval_b_false();
+ z = o = f;
#if 0 // TODO must handle default Proj
} else if (bz == bo && bz == tv) {
- z = o = get_tarval_b_true();
+ z = o = t;
#endif
} else {
goto result_unknown_X;
break;
}
- case iro_Jmp: {
- bitinfo* const b = get_bitinfo(get_nodes_block(irn));
- z = b->z;
- o = b->o;
- break;
- }
+ case iro_Jmp:
+ goto result_unknown_X;
default:
cannot_analyse_X:
DB((dbg, LEVEL_4, "cannot analyse %+F\n", irn));
result_unknown_X:
- z = get_tarval_b_true();
- o = get_tarval_b_false();
+ z = t;
+ o = f;
break;
}
} else if (is_Block(irn)) {
DB((dbg, LEVEL_3, "transfer %+F\n", irn));
for (i = 0; i != arity; ++i) {
bitinfo* const b = get_bitinfo(get_Block_cfgpred(irn, i));
- if (b != NULL && b->z == get_tarval_b_true()) {
+ if (b != NULL && b->z == t) {
reachable = 1;
break;
}
}
- o = get_tarval_b_false();
- z = reachable || irn == get_irg_start_block(get_irn_irg(irn)) ? get_tarval_b_true() : o;
+ if (!reachable) {
+ ir_graph *const irg = get_Block_irg(irn);
+ reachable =
+ irn == get_irg_start_block(irg) ||
+ irn == get_irg_end_block(irg);
+ }
+
+ if (reachable) {
+ z = t;
+ o = f;
+ } else {
+ z = f;
+ o = t;
+ }
} else if (mode_is_intb(m)) {
DB((dbg, LEVEL_3, "transfer %+F\n", irn));
switch (get_irn_opcode(irn)) {
break;
}
+ case iro_Confirm: {
+ ir_node* const v = get_Confirm_value(irn);
+ bitinfo* const b = get_bitinfo(v);
+ /* TODO Use bound and relation. */
+ z = b->z;
+ o = b->o;
+ break;
+ }
+
case iro_Shl: {
- bitinfo* const l = get_bitinfo(get_Shl_left(irn));
- bitinfo* const r = get_bitinfo(get_Shl_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Shl_left(irn));
+ bitinfo* const r = get_bitinfo(get_Shl_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_shl(l->z, rz);
o = tarval_shl(l->o, rz);
}
case iro_Shr: {
- bitinfo* const l = get_bitinfo(get_Shr_left(irn));
- bitinfo* const r = get_bitinfo(get_Shr_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Shr_left(irn));
+ bitinfo* const r = get_bitinfo(get_Shr_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_shr(l->z, rz);
o = tarval_shr(l->o, rz);
}
case iro_Shrs: {
- bitinfo* const l = get_bitinfo(get_Shrs_left(irn));
- bitinfo* const r = get_bitinfo(get_Shrs_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Shrs_left(irn));
+ bitinfo* const r = get_bitinfo(get_Shrs_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_shrs(l->z, rz);
o = tarval_shrs(l->o, rz);
}
case iro_Rotl: {
- bitinfo* const l = get_bitinfo(get_Rotl_left(irn));
- bitinfo* const r = get_bitinfo(get_Rotl_right(irn));
- tarval* const rz = r->z;
+ bitinfo* const l = get_bitinfo(get_Rotl_left(irn));
+ bitinfo* const r = get_bitinfo(get_Rotl_right(irn));
+ ir_tarval* const rz = r->z;
if (rz == r->o) {
z = tarval_rotl(l->z, rz);
o = tarval_rotl(l->o, rz);
}
case iro_Add: {
- bitinfo* const l = get_bitinfo(get_Add_left(irn));
- bitinfo* const r = get_bitinfo(get_Add_right(irn));
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ bitinfo* const l = get_bitinfo(get_Add_left(irn));
+ bitinfo* const r = get_bitinfo(get_Add_right(irn));
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
if (lz == lo && rz == ro) {
z = o = tarval_add(lz, rz);
} else {
// TODO improve: can only do lower disjoint bits
 /* Determine where any of the operands has zero bits, i.e. where no
 * carry out is generated if there is no carry in */
- tarval* const no_c_in_no_c_out = tarval_and(lz, rz);
+ ir_tarval* const no_c_in_no_c_out = tarval_and(lz, rz);
/* Generate a mask of the lower consecutive zeroes: x | -x. In this
* range the addition is disjoint and therefore Add behaves like Or.
*/
- tarval* const low_zero_mask = tarval_or(no_c_in_no_c_out, tarval_neg(no_c_in_no_c_out));
- tarval* const low_one_mask = tarval_not(low_zero_mask);
+ ir_tarval* const low_zero_mask = tarval_or(no_c_in_no_c_out, tarval_neg(no_c_in_no_c_out));
+ ir_tarval* const low_one_mask = tarval_not(low_zero_mask);
z = tarval_or( tarval_or(lz, rz), low_zero_mask);
o = tarval_and(tarval_or(lo, ro), low_one_mask);
}
bitinfo* const l = get_bitinfo(get_Sub_left(irn));
bitinfo* const r = get_bitinfo(get_Sub_right(irn));
if (l != NULL && r != NULL) { // Sub might subtract pointers.
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
if (lz == lo && rz == ro) {
z = o = tarval_sub(lz, rz, NULL);
} else if (tarval_is_null(tarval_andnot(rz, lo))) {
}
case iro_Mul: {
- bitinfo* const l = get_bitinfo(get_Mul_left(irn));
- bitinfo* const r = get_bitinfo(get_Mul_right(irn));
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ bitinfo* const l = get_bitinfo(get_Mul_left(irn));
+ bitinfo* const r = get_bitinfo(get_Mul_right(irn));
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
if (lz == lo && rz == ro) {
z = o = tarval_mul(lz, rz);
} else {
// TODO improve
// Determine safe lower zeroes: x | -x.
- tarval* const lzn = tarval_or(lz, tarval_neg(lz));
- tarval* const rzn = tarval_or(rz, tarval_neg(rz));
+ ir_tarval* const lzn = tarval_or(lz, tarval_neg(lz));
+ ir_tarval* const rzn = tarval_or(rz, tarval_neg(rz));
// Concatenate safe lower zeroes.
- if (tarval_cmp(lzn, rzn) == pn_Cmp_Lt) {
+ if (tarval_cmp(lzn, rzn) == ir_relation_less) {
z = tarval_mul(tarval_eor(lzn, tarval_shl(lzn, get_tarval_one(m))), rzn);
} else {
z = tarval_mul(tarval_eor(rzn, tarval_shl(rzn, get_tarval_one(m))), lzn);
}
case iro_Eor: {
- bitinfo* const l = get_bitinfo(get_Eor_left(irn));
- bitinfo* const r = get_bitinfo(get_Eor_right(irn));
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ bitinfo* const l = get_bitinfo(get_Eor_left(irn));
+ bitinfo* const r = get_bitinfo(get_Eor_right(irn));
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
z = tarval_or(tarval_andnot(lz, ro), tarval_andnot(rz, lo));
o = tarval_or(tarval_andnot(ro, lz), tarval_andnot(lo, rz));
break;
}
case iro_Mux: {
- bitinfo* const f = get_bitinfo(get_Mux_false(irn));
- bitinfo* const t = get_bitinfo(get_Mux_true(irn));
- bitinfo* const c = get_bitinfo(get_Mux_sel(irn));
- if (c->o == get_tarval_b_true()) {
- z = t->z;
- o = t->o;
- } else if (c->z == get_tarval_b_false()) {
- z = f->z;
- o = f->o;
+ bitinfo* const bf = get_bitinfo(get_Mux_false(irn));
+ bitinfo* const bt = get_bitinfo(get_Mux_true(irn));
+ bitinfo* const c = get_bitinfo(get_Mux_sel(irn));
+ if (c->o == t) {
+ z = bt->z;
+ o = bt->o;
+ } else if (c->z == f) {
+ z = bf->z;
+ o = bf->o;
} else {
- z = tarval_or( f->z, t->z);
- o = tarval_and(f->o, t->o);
+ z = tarval_or( bf->z, bt->z);
+ o = tarval_and(bf->o, bt->o);
}
break;
}
o = get_tarval_all_one(m);
for (i = 0; i != arity; ++i) {
bitinfo* const b_cfg = get_bitinfo(get_Block_cfgpred(block, i));
- if (b_cfg != NULL && b_cfg->z != get_tarval_b_false()) {
+ if (b_cfg != NULL && b_cfg->z != f) {
bitinfo* const b = get_bitinfo(get_Phi_pred(irn, i));
z = tarval_or( z, b->z);
o = tarval_and(o, b->o);
break;
}
- case iro_Proj: {
- ir_node* const pred = get_Proj_pred(irn);
- if (is_Cmp(pred)) { // TODO generalize
- bitinfo* const l = get_bitinfo(get_Cmp_left(pred));
- bitinfo* const r = get_bitinfo(get_Cmp_right(pred));
- if (l == NULL || r == NULL)
- goto result_unknown; // Cmp compares something we cannot evaluate.
- switch (get_Proj_proj(irn)) {
- case pn_Cmp_Lg: {
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ case iro_Cmp: {
+ bitinfo* const l = get_bitinfo(get_Cmp_left(irn));
+ bitinfo* const r = get_bitinfo(get_Cmp_right(irn));
+ if (l == NULL || r == NULL) {
+ goto result_unknown; // Cmp compares something we cannot evaluate.
+ } else {
+ ir_tarval* const lz = l->z;
+ ir_tarval* const lo = l->o;
+ ir_tarval* const rz = r->z;
+ ir_tarval* const ro = r->o;
+ ir_relation const relation = get_Cmp_relation(irn);
+ switch (relation) {
+ case ir_relation_less_greater:
if (!tarval_is_null(tarval_andnot(ro, lz)) ||
!tarval_is_null(tarval_andnot(lo, rz))) {
// At least one bit differs.
- z = o = get_tarval_b_true();
+ z = o = t;
} else if (lz == lo && rz == ro && lz == rz) {
- z = o = get_tarval_b_false();
+ z = o = f;
} else {
goto result_unknown;
}
break;
- }
- case pn_Cmp_Eq: {
- tarval* const lz = l->z;
- tarval* const lo = l->o;
- tarval* const rz = r->z;
- tarval* const ro = r->o;
+ case ir_relation_equal:
if (!tarval_is_null(tarval_andnot(ro, lz)) ||
!tarval_is_null(tarval_andnot(lo, rz))) {
// At least one bit differs.
- z = o = get_tarval_b_false();
+ z = o = f;
} else if (lz == lo && rz == ro && lz == rz) {
- z = o = get_tarval_b_true();
+ z = o = t;
+ } else {
+ goto result_unknown;
+ }
+ break;
+
+ case ir_relation_less_equal:
+ case ir_relation_less:
+ /* TODO handle negative values */
+ if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
+ tarval_is_negative(rz) || tarval_is_negative(ro))
+ goto result_unknown;
+
+ if (tarval_cmp(lz, ro) & relation) {
+ /* Left upper bound is smaller(/equal) than right lower bound. */
+ z = o = t;
+ } else if (!(tarval_cmp(lo, rz) & relation)) {
+ /* Left lower bound is not smaller(/equal) than right upper bound. */
+ z = o = f;
+ } else {
+ goto result_unknown;
+ }
+ break;
+
+ case ir_relation_greater_equal:
+ case ir_relation_greater:
+ /* TODO handle negative values */
+ if (tarval_is_negative(lz) || tarval_is_negative(lo) ||
+ tarval_is_negative(rz) || tarval_is_negative(ro))
+ goto result_unknown;
+
+ if (!(tarval_cmp(lz, ro) & relation)) {
+ /* Left upper bound is not greater(/equal) than right lower bound. */
+ z = o = f;
+ } else if (tarval_cmp(lo, rz) & relation) {
+ /* Left lower bound is greater(/equal) than right upper bound. */
+ z = o = t;
} else {
goto result_unknown;
}
break;
- }
default:
goto cannot_analyse;
}
- } else {
- goto cannot_analyse;
}
break;
}
static void first_round(ir_node* const irn, void* const env)
{
- pdeq* const q = env;
+ pdeq* const q = (pdeq*)env;
transfer(irn);
if (is_Phi(irn) || is_Block(irn)) {
static void apply_result(ir_node* const irn, void* ctx)
{
- bitinfo* const b = get_bitinfo(irn);
- tarval* z;
- tarval* o;
- environment_t* env = ctx;
+ environment_t* env = (environment_t*)ctx;
+ ir_node* block;
+ bitinfo* b;
+ ir_tarval* z;
+ ir_tarval* o;
+
+ if (is_Block(irn)) {
+ bitinfo* const block_b = get_bitinfo(irn);
+ /* Trivially unreachable blocks have no info. */
+ if (block_b == NULL || block_b->z == get_tarval_b_false()) {
+ exchange(irn, get_irg_bad(get_Block_irg(irn)));
+ env->modified = 1;
+ }
+ return;
+ }
+ /* Unreachable blocks are replaced before the nodes in them. */
+ block = get_nodes_block(irn);
+ if (is_Bad(block)) {
+ exchange(irn, block);
+ env->modified = 1;
+ return;
+ }
+
+ b = get_bitinfo(irn);
if (!b) return;
if (is_Const(irn)) return; // It cannot get any better than a Const.
ir_mode* const m = get_irn_mode(irn);
ir_node* n;
if (mode_is_intb(m)) {
- n = new_Const(z);
+ ir_graph *irg = get_irn_irg(irn);
+ n = new_r_Const(irg, z);
} else if (m == mode_X) {
- ir_node* const block = get_nodes_block(irn);
- ir_graph* const irg = get_Block_irg(block);
+ ir_graph* const irg = get_Block_irg(block);
if (z == get_tarval_b_true()) {
// Might produce an endless loop, so keep the block.
add_End_keepalive(get_irg_end(irg), block);
obstack_init(&obst);
+ /* HACK: to avoid finding dead code */
+ edges_deactivate(irg);
+ edges_activate(irg);
+
edges_assure(irg);
assure_doms(irg);
 /* We need this extra step because the dom tree does not contain unreachable
 blocks in Firm. Moreover, build the Phi lists. */
- irg_walk_graph(irg, clear_links, build_phi_lists, NULL);
+ irg_walk_anchors(irg, clear_links, build_phi_lists, NULL);
+
+ { ir_tarval* const f = get_tarval_b_false();
+ ir_tarval* const t = get_tarval_b_true();
+ set_bitinfo(get_irg_bad(irg), f, t); /* Undefined. */
+ set_bitinfo(get_irg_end_block(irg), t, f); /* Reachable. */
+ }
/* TODO Improve iteration order. Best is reverse postorder in data flow
* direction and respecting loop nesting for fastest convergence. */
- irg_walk_blkwise_dom_top_down(irg, firm_clear_link, first_round, q);
+ irg_walk_blkwise_dom_top_down(irg, NULL, first_round, q);
while (!pdeq_empty(q)) {
- ir_node* const n = pdeq_getl(q);
+ ir_node* const n = (ir_node*)pdeq_getl(q);
if (transfer(n))
queue_users(q, n);
}