ASSERT_AND_RET_DBG(
(
(proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) || /* compare */
- (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) || /* switch */
- is_Bad(get_Cond_selector(pred)) /* rare */
+ (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) /* switch */
),
"wrong Proj from Cond", 0,
show_proj_failure(p);
/* We don't test */
break;
- case iro_Bad:
- /* hmm, optimization did not remove it */
- break;
-
default:
/* ASSERT_AND_RET(0, "Unknown opcode", 0); */
break;
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
- /* ignore Bound checks of Bad */
- if (is_Bad(get_Bound_index(n)))
- return 1;
ASSERT_AND_RET_DBG(
(
(proj == pn_Bound_M && mode == mode_M) ||
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
- ASSERT_AND_RET(
- is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ ASSERT_AND_RET(get_irn_mode(pred) == mode_X,
"Block node must have a mode_X predecessor", 0);
ASSERT_AND_RET(is_cfop(skip_Proj(skip_Tuple(pred))), "Block predecessor must be a cfop", 0);
}
/* Phi: BB x dataM^n --> dataM */
for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Phi_pred(n, i);
- if (!is_Bad(pred)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(pred) == mymode,
- "Phi node", 0,
- show_phi_failure(n, pred, i);
- );
- }
+ ASSERT_AND_RET_DBG(get_irn_mode(pred) == mymode,
+ "Phi node", 0, show_phi_failure(n, pred, i);
+ );
}
ASSERT_AND_RET(mode_is_dataM(mymode) || mymode == mode_b, "Phi node", 0 );
+#if 0
if (mymode == mode_M) {
for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
int j;
ir_node *pred_i = get_Phi_pred(n, i);
- if (is_Bad(pred_i))
- continue;
for (j = i - 1; j >= 0; --j) {
ir_node *pred_j = get_Phi_pred(n, j);
- if (is_Bad(pred_j))
- continue;
-#if 0
/* currently this checks fails for blocks with exception
outputs (and these are NOT basic blocks). So it is disabled yet. */
ASSERT_AND_RET_DBG(
0,
ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
);
-#endif
}
}
}
+#endif
return 1;
}
ir_printf("node %+F", n));
}
- /* We don't want to test nodes whose predecessors are Bad,
- as we would have to special case that for each operation. */
- if (op != op_Phi && op != op_Block) {
- int i;
- for (i = get_irn_arity(n) - 1; i >= 0; --i) {
- if (is_Bad(get_irn_n(n, i)))
- return 1;
- }
- }
-
if (op->ops.verify_node)
return op->ops.verify_node(n, irg);
pmap *branch_nodes; /**< map blocks to their branching nodes,
map mode_X nodes to the blocks they branch to */
int res;
- ir_nodeset_t ignore_nodes;
+ ir_nodeset_t reachable_blocks;
ir_nodeset_t kept_nodes;
ir_nodeset_t true_projs;
ir_nodeset_t false_projs;
} check_cfg_env_t;
-static int check_cfg_node(ir_node *node, check_cfg_env_t *env)
+static int check_block_cfg(ir_node *block, check_cfg_env_t *env)
{
- pmap *branch_nodes = env->branch_nodes;
- ir_mode *mode = get_irn_mode(node);
-
- if (ir_nodeset_contains(&env->ignore_nodes, node))
- return 1;
+ pmap *branch_nodes;
+ int n_cfgpreds;
+ int i;
- if (mode == mode_X) {
- ir_node *block = get_nodes_block(node);
- ir_node *former_branch = pmap_get(branch_nodes, block);
- ir_node *skipped = skip_Tuple(node);
- ir_node *branch = skipped;
+ ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->reachable_blocks, block),
+ "Block is not reachable by blockwalker (endless loop with no kept block?)", 0,
+ ir_printf("block %+F\n", block));
+ n_cfgpreds = get_Block_n_cfgpreds(block);
+ branch_nodes = env->branch_nodes;
+ for (i = 0; i < n_cfgpreds; ++i) {
+ /* check that each mode_X node is only connected
+ * to 1 user */
+ ir_node *branch = get_Block_cfgpred(block, i);
+ ir_node *former_dest;
+ ir_node *former_branch;
+ ir_node *branch_proj;
+ ir_node *branch_block;
+ branch = skip_Tuple(branch);
+ if (is_Bad(branch))
+ continue;
+ former_dest = pmap_get(branch_nodes, branch);
+ ASSERT_AND_RET_DBG(former_dest==NULL || is_unknown_jump(skip_Proj(branch)),
+ "Multiple users on mode_X node", 0,
+ ir_printf("node %+F\n", branch));
+ pmap_insert(branch_nodes, branch, block);
+
+ /* check that there's only 1 branching instruction in each block */
+ branch_block = get_nodes_block(branch);
+ branch_proj = branch;
if (is_Proj(branch)) {
branch = skip_Proj(branch);
}
+ former_branch = pmap_get(branch_nodes, branch_block);
ASSERT_AND_RET_DBG(former_branch == NULL || former_branch == branch,
- "Multiple branching nodes in a block", 0,
- ir_printf("nodes %+F,%+F in block %+F\n",
- node, former_branch, block));
- pmap_insert(branch_nodes, block, branch);
+ "Multiple branching nodes in a block", 0,
+ ir_printf("nodes %+F,%+F in block %+F\n",
+ branch, former_branch, branch_block));
+ pmap_insert(branch_nodes, branch_block, branch);
if (is_Cond(branch)) {
- long pn = get_Proj_proj(skipped);
+ long pn = get_Proj_proj(branch_proj);
if (get_irn_mode(get_Cond_selector(branch)) == mode_b) {
if (pn == pn_Cond_true)
ir_nodeset_insert(&env->true_projs, branch);
if (pn == pn_Cond_false)
ir_nodeset_insert(&env->false_projs, branch);
}
}
- } else if (is_Block(node)) {
- int n_cfgpreds = get_Block_n_cfgpreds(node);
- int i;
-
- for (i = 0; i < n_cfgpreds; ++i) {
- ir_node *branch = get_Block_cfgpred(node, i);
- ir_node *former_dest;
- if (is_Bad(branch))
- continue;
- former_dest = pmap_get(branch_nodes, branch);
- ASSERT_AND_RET_DBG(former_dest==NULL || is_unknown_jump(skip_Proj(branch)),
- "Multiple users on mode_X node", 0,
- ir_printf("node %+F\n", node));
- pmap_insert(branch_nodes, branch, node);
- }
- } else if (is_Tuple(node)) {
- int arity = get_irn_arity(node);
- int i;
-
- for (i = 0; i < arity; ++i) {
- ir_node *in = get_irn_n(node, i);
- ir_nodeset_insert(&env->ignore_nodes, in);
- }
}
return 1;
/* Graph-walker callback for the CFG well-formedness check.
 * Only Block nodes are verified (check_block_cfg now inspects a block's
 * cfg predecessors itself, so per-node dispatch on mode_X/Tuple is gone);
 * each block's verdict is AND-ed into env->res. */
static void check_cfg_walk_func(ir_node *node, void *data)
{
check_cfg_env_t *env = (check_cfg_env_t*)data;
- int res = check_cfg_node(node, env);
- env->res &= res;
+ if (!is_Block(node))
+ return;
+ env->res &= check_block_cfg(node, env);
}
/* Checks that a block contains a branching (control-flow) operation.
 * A block with no recorded branch in env->branch_nodes is accepted only
 * when it is kept alive explicitly (member of env->kept_nodes) or — new
 * in this patch — when it is the graph's end block, which legitimately
 * has no outgoing control flow. Returns 1 on success, asserts/0 on
 * failure via ASSERT_AND_RET_DBG. */
static int verify_block_branch(ir_node *block, check_cfg_env_t *env)
{
ir_node *branch = pmap_get(env->branch_nodes, block);
- ASSERT_AND_RET_DBG(branch != NULL || ir_nodeset_contains(&env->kept_nodes, block),
+ ASSERT_AND_RET_DBG(branch != NULL
+ || ir_nodeset_contains(&env->kept_nodes, block)
+ || block == get_irg_end_block(get_irn_irg(block)),
"block contains no cfop", 0,
ir_printf("block %+F\n", block));
return 1;
}
}
+/* Block-walker callback: records every block visited by the block walker
+ * into the ir_nodeset_t passed through 'data'. The resulting set is later
+ * consulted to flag blocks the walker could not reach (e.g. an endless
+ * loop with no kept block). */
+static void collect_reachable_blocks(ir_node *block, void *data)
+{
+ ir_nodeset_t *reachable_blocks = (ir_nodeset_t*) data;
+ ir_nodeset_insert(reachable_blocks, block);
+}
+
/**
* Checks CFG well-formedness
*/
check_cfg_env_t env;
env.branch_nodes = pmap_create(); /**< map blocks to branch nodes */
env.res = 1;
- ir_nodeset_init(&env.ignore_nodes);
+ ir_nodeset_init(&env.reachable_blocks);
ir_nodeset_init(&env.true_projs);
ir_nodeset_init(&env.false_projs);
+ irg_block_walk_graph(irg, collect_reachable_blocks, NULL,
+ &env.reachable_blocks);
+
/* note that we do not use irg_walk_block because it will miss these
* invalid blocks without a jump instruction which we want to detect
* here */
irg_walk_graph(irg, check_cfg_walk_func, NULL, &env);
- ir_nodeset_destroy(&env.ignore_nodes);
ir_nodeset_init(&env.kept_nodes);
{
ir_nodeset_destroy(&env.false_projs);
ir_nodeset_destroy(&env.true_projs);
ir_nodeset_destroy(&env.kept_nodes);
+ ir_nodeset_destroy(&env.reachable_blocks);
pmap_destroy(env.branch_nodes);
return env.res;
}
last_irg_error = NULL;
#endif /* NDEBUG */
- if ((flags & VERIFY_ENFORCE_SSA) && pinned)
+ if (!check_cfg(irg))
+ res = 0;
+
+ if (res == 1 && (flags & VERIFY_ENFORCE_SSA) && pinned)
compute_doms(irg);
irg_walk_anchors(
&res
);
- if (!check_cfg(irg))
- res = 0;
-
if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT && ! res) {
ir_entity *ent = get_irg_entity(irg);