#include "irouts.h"
#include "irflag_t.h"
#include "irpass_t.h"
+#include "irnodeset.h"
/** if this flag is set, verify entity types in Load & Store nodes */
static int verify_entities = 0;
ASSERT_AND_RET(
is_Bad(pred) || (get_irn_mode(pred) == mode_X),
"Block node must have a mode_X predecessor", 0);
+ ASSERT_AND_RET(is_cfop(skip_Proj(skip_Tuple(pred))), "Block predecessor must be a cfop", 0);
}
if (n == get_irg_start_block(irg)) {
ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 0, "Start Block node", 0);
}
- if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
+ if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend) {
/* End block may only have Return, Raise or fragile ops as preds. */
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
),
"End Block node", 0);
}
- /* irg attr must == graph we are in. */
- ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
- return 1;
+ }
+ /* irg attr must == graph we are in. */
+ ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
+ return 1;
}
/**
ir_node *bl = get_nodes_block(use);
for (i = get_irn_arity(use) - 1; i >= 0; --i) {
- ir_node *def = get_irn_n(use, i);
- ir_node *def_bl = get_nodes_block(def);
- ir_node *use_bl = bl;
+ ir_node *def = get_irn_n(use, i);
+ ir_node *def_bl = get_nodes_block(def);
+ ir_node *use_bl = bl;
ir_graph *irg;
- /* ignore dead definition blocks, will be removed */
- if (is_Block_dead(def_bl) || get_Block_dom_depth(def_bl) == -1)
+ /* we have no dominance relation for unreachable blocks, so we can't
+ * check the dominance property there */
+ if (!is_Block(def_bl) || get_Block_dom_depth(def_bl) == -1)
continue;
- if (is_Phi(use))
+ if (is_Phi(use)) {
+ if (is_Bad(def))
+ continue;
use_bl = get_Block_cfgpred_block(bl, i);
+ }
- /* ignore dead use blocks, will be removed */
- if (is_Block_dead(use_bl) || get_Block_dom_depth(use_bl) == -1)
+ if (!is_Block(use_bl) || get_Block_dom_depth(use_bl) == -1)
continue;
irg = get_irn_irg(use);
/* Tests the modes of n and its predecessors. */
int irn_verify_irg(ir_node *n, ir_graph *irg)
{
- int i;
ir_op *op;
if (!get_node_verification_mode())
op = get_irn_op(n);
- /* We don't want to test nodes whose predecessors are Bad,
- as we would have to special case that for each operation. */
- if (op != op_Phi && op != op_Block) {
- for (i = get_irn_arity(n) - 1; i >= 0; --i) {
- if (is_Bad(get_irn_n(n, i)))
- return 1;
- }
- }
-
if (_get_op_pinned(op) >= op_pin_state_exc_pinned) {
op_pin_state state = get_irn_pinned(n);
ASSERT_AND_RET_DBG(
state == op_pin_state_pinned,
"invalid pin state", 0,
ir_printf("node %+F", n));
+ } else if (!is_Block(n) && is_irn_pinned_in_irg(n)
+ && !is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
+ ASSERT_AND_RET_DBG(is_Block(get_nodes_block(n)) || is_Anchor(n),
+ "block input is not a block", 0,
+ ir_printf("node %+F", n));
+ }
+
+ /* We don't want to test nodes whose predecessors are Bad,
+ as we would have to special case that for each operation. */
+ if (op != op_Phi && op != op_Block) {
+ int i;
+ for (i = get_irn_arity(n) - 1; i >= 0; --i) {
+ if (is_Bad(get_irn_n(n, i)))
+ return 1;
+ }
}
if (op->ops.verify_node)
#endif /* DEBUG_libfirm */
+/**
+ * Environment carried through the CFG-checking walkers.
+ * Filled/consumed only by check_cfg() and its helper walkers below.
+ */
+typedef struct check_cfg_env_t {
+	pmap *branch_nodes;       /**< map blocks to their branching nodes,
+	                               map mode_X nodes to the blocks they branch to */
+	int res;                  /**< accumulated result: 1 = ok, 0 = a check failed */
+	ir_nodeset_t kept_nodes;  /**< nodes kept alive as operands of the End node */
+	ir_nodeset_t true_projs;  /**< Cond nodes whose true (or switch-default) Proj was seen */
+	ir_nodeset_t false_projs; /**< Cond nodes whose false Proj was seen */
+} check_cfg_env_t;
+
+/**
+ * Verify CFG invariants around one block:
+ *  - each mode_X control-flow value is consumed by at most one block,
+ *  - each block contains at most one branching instruction.
+ * Additionally records which Proj outputs of Cond nodes were reached
+ * (completeness is checked later by verify_cond_projs()).
+ *
+ * @param block  the Block node being examined
+ * @param env    walker environment (branch_nodes map, proj sets)
+ * @return 1 if all checks on this block passed, 0 otherwise
+ */
+static int check_block_cfg(ir_node *block, check_cfg_env_t *env)
+{
+	pmap *branch_nodes;
+	int n_cfgpreds;
+	int i;
+
+	n_cfgpreds = get_Block_n_cfgpreds(block);
+	branch_nodes = env->branch_nodes;
+	for (i = 0; i < n_cfgpreds; ++i) {
+		/* check that each mode_X node is only connected
+		 * to 1 user */
+		ir_node *branch = get_Block_cfgpred(block, i);
+		ir_node *former_dest;
+		ir_node *former_branch;
+		ir_node *branch_proj;
+		ir_node *branch_block;
+		branch = skip_Tuple(branch);
+		/* Bad predecessors carry no CFG information to verify */
+		if (is_Bad(branch))
+			continue;
+		/* branch_nodes doubles as mode_X -> destination-block map here;
+		 * a second destination for the same mode_X node is only tolerated
+		 * for unknown jumps, which may branch to several blocks */
+		former_dest = pmap_get(branch_nodes, branch);
+		ASSERT_AND_RET_DBG(former_dest==NULL || is_unknown_jump(skip_Proj(branch)),
+						   "Multiple users on mode_X node", 0,
+						   ir_printf("node %+F\n", branch));
+		pmap_insert(branch_nodes, branch, block);
+
+		/* check that there's only 1 branching instruction in each block */
+		branch_block = get_nodes_block(branch);
+		/* remember the (possible) Proj before skipping it: branch becomes
+		 * the branching op itself, branch_proj stays the mode_X result */
+		branch_proj  = branch;
+		if (is_Proj(branch)) {
+			branch = skip_Proj(branch);
+		}
+		/* here branch_nodes is used as block -> branching-node map */
+		former_branch = pmap_get(branch_nodes, branch_block);
+
+		ASSERT_AND_RET_DBG(former_branch == NULL || former_branch == branch,
+						   "Multiple branching nodes in a block", 0,
+						   ir_printf("nodes %+F,%+F in block %+F\n",
+									 branch, former_branch, branch_block));
+		pmap_insert(branch_nodes, branch_block, branch);
+
+		if (is_Cond(branch)) {
+			/* NOTE(review): assumes a Cond result is always reached through
+			 * a Proj, so branch_proj is that Proj here — confirm upstream */
+			long pn = get_Proj_proj(branch_proj);
+			if (get_irn_mode(get_Cond_selector(branch)) == mode_b) {
+				/* boolean Cond: track which of true/false was seen */
+				if (pn == pn_Cond_true)
+					ir_nodeset_insert(&env->true_projs, branch);
+				if (pn == pn_Cond_false)
+					ir_nodeset_insert(&env->false_projs, branch);
+			} else {
+				/* switch-like Cond: only the default Proj is tracked
+				 * (recorded in true_projs) */
+				int default_pn = get_Cond_default_proj(branch);
+				if (pn == default_pn)
+					ir_nodeset_insert(&env->true_projs, branch);
+			}
+		}
+	}
+
+	return 1;
+}
+
+/**
+ * Walker callback: run check_block_cfg() on every Block node and
+ * fold the result into env->res (stays 0 once any check failed).
+ */
+static void check_cfg_walk_func(ir_node *node, void *data)
+{
+	check_cfg_env_t *env = (check_cfg_env_t*)data;
+	/* only Block nodes are of interest for the CFG checks */
+	if (!is_Block(node))
+		return;
+	env->res &= check_block_cfg(node, env);
+}
+
+/**
+ * Check that a block ends in a branching instruction. Exempt are the
+ * end block and blocks explicitly kept alive as operands of End
+ * (collected in env->kept_nodes by check_cfg()).
+ *
+ * @return 1 if the block is fine, 0 otherwise
+ */
+static int verify_block_branch(ir_node *block, check_cfg_env_t *env)
+{
+	/* branch_nodes maps each block to its branching node (see check_block_cfg) */
+	ir_node *branch = pmap_get(env->branch_nodes, block);
+	ASSERT_AND_RET_DBG(branch != NULL
+					   || ir_nodeset_contains(&env->kept_nodes, block)
+					   || block == get_irg_end_block(get_irn_irg(block)),
+					   "block contains no cfop", 0,
+					   ir_printf("block %+F\n", block));
+	return 1;
+}
+
+/**
+ * Check that a Cond node has all required Proj users: both the true and
+ * the false Proj for a boolean (mode_b) selector, or at least the default
+ * Proj for a switch-like Cond. The sets were filled by check_block_cfg().
+ *
+ * @return 1 if the Cond is complete, 0 otherwise
+ */
+static int verify_cond_projs(ir_node *cond, check_cfg_env_t *env)
+{
+	if (get_irn_mode(get_Cond_selector(cond)) == mode_b) {
+		ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->true_projs, cond),
+						   "Cond node lacks true proj", 0,
+						   ir_printf("Cond %+F\n", cond));
+		ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->false_projs, cond),
+						   "Cond node lacks false proj", 0,
+						   ir_printf("Cond %+F\n", cond));
+	} else {
+		/* switch-like Conds record their default Proj in true_projs */
+		ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->true_projs, cond),
+						   "Cond node lacks default Proj", 0,
+						   ir_printf("Cond %+F\n", cond));
+	}
+	return 1;
+}
+
+/**
+ * Walker callback for the second pass of check_cfg(): dispatches Block
+ * nodes to verify_block_branch() and Cond nodes to verify_cond_projs(),
+ * folding the results into env->res.
+ */
+static void assert_branch(ir_node *node, void *data)
+{
+	check_cfg_env_t *env = (check_cfg_env_t*)data;
+	if (is_Block(node)) {
+		env->res &= verify_block_branch(node, env);
+	} else if (is_Cond(node)) {
+		env->res &= verify_cond_projs(node, env);
+	}
+}
+
+/**
+ * Checks CFG well-formedness of a graph, in two walks:
+ *  1. check_cfg_walk_func: per-block mode_X-user and single-branch checks,
+ *     building the branch_nodes map as a side effect;
+ *  2. assert_branch: every block has a branch (verify_block_branch) and
+ *     every Cond has its required Projs (verify_cond_projs).
+ *
+ * @param irg  the graph to check
+ * @return 1 if the CFG is well-formed, 0 otherwise
+ */
+static int check_cfg(ir_graph *irg)
+{
+	check_cfg_env_t env;
+	env.branch_nodes = pmap_create(); /* blocks <-> branch nodes, see check_cfg_env_t */
+	env.res = 1;
+	ir_nodeset_init(&env.true_projs);
+	ir_nodeset_init(&env.false_projs);
+
+	/* note that we do not use a block walker here because it would miss
+	 * invalid blocks without a jump instruction, which we want to detect */
+	irg_walk_graph(irg, check_cfg_walk_func, NULL, &env);
+
+	/* collect nodes kept alive by End so verify_block_branch can
+	 * exempt kept-alive blocks */
+	ir_nodeset_init(&env.kept_nodes);
+	{
+		ir_node *end = get_irg_end(irg);
+		int arity = get_irn_arity(end);
+		int i;
+		for (i = 0; i < arity; ++i) {
+			ir_node *n = get_irn_n(end, i);
+			ir_nodeset_insert(&env.kept_nodes, n);
+		}
+	}
+	irg_walk_graph(irg, assert_branch, NULL, &env);
+
+	ir_nodeset_destroy(&env.false_projs);
+	ir_nodeset_destroy(&env.true_projs);
+	ir_nodeset_destroy(&env.kept_nodes);
+	pmap_destroy(env.branch_nodes);
+	return env.res;
+}
+
/*
* Calls irn_verify for each node in irg.
* Graph must be in state "op_pin_state_pinned".
last_irg_error = NULL;
#endif /* NDEBUG */
- if ((flags & VERIFY_ENFORCE_SSA) && pinned)
+ if (!check_cfg(irg))
+ res = 0;
+
+ if (res == 1 && (flags & VERIFY_ENFORCE_SSA) && pinned)
compute_doms(irg);
irg_walk_anchors(