/*
- * Copyright (C) 1995-2010 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "irouts.h"
#include "irflag_t.h"
#include "irpass_t.h"
+#include "irnodeset.h"
/** if this flag is set, verify entity types in Load & Store nodes */
static int verify_entities = 0;
(proj == pn_Start_X_initial_exec && mode == mode_X) ||
(proj == pn_Start_M && mode == mode_M) ||
(proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
- (proj == pn_Start_P_tls && mode_is_reference(mode)) ||
(proj == pn_Start_T_args && mode == mode_T)
),
"wrong Proj from Start", 0,
ASSERT_AND_RET_DBG(
(
(proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) || /* compare */
- (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) || /* switch */
- is_Bad(get_Cond_selector(pred)) /* rare */
+ (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) /* switch */
),
"wrong Proj from Cond", 0,
show_proj_failure(p);
(proj == pn_Call_M && mode == mode_M) ||
(proj == pn_Call_X_regular && mode == mode_X) ||
(proj == pn_Call_X_except && mode == mode_X) ||
- (proj == pn_Call_T_result && mode == mode_T) ||
- (proj == pn_Call_P_value_res_base && mode_is_reference(mode))
+ (proj == pn_Call_T_result && mode == mode_T)
),
"wrong Proj from Call", 0,
show_proj_failure(p);
(proj == pn_Div_M && mode == mode_M) ||
(proj == pn_Div_X_regular && mode == mode_X) ||
(proj == pn_Div_X_except && mode == mode_X) ||
- (proj == pn_Div_res && mode == get_Div_resmode(n))
+ (proj == pn_Div_res && mode_is_data(mode) && mode == get_Div_resmode(n))
),
"wrong Proj from Div", 0,
show_proj_failure(p);
return 1;
}
-/**
- * verify a Proj(Cmp) node
- */
-static int verify_node_Proj_Cmp(ir_node *n, ir_node *p)
-{
- ir_mode *mode = get_irn_mode(p);
- long proj = get_Proj_proj(p);
- (void) n;
-
- ASSERT_AND_RET_DBG(
- (proj >= 0 && proj <= 15 && mode == mode_b),
- "wrong Proj from Cmp", 0,
- show_proj_failure(p);
- );
- ASSERT_AND_RET_DBG(
- (mode_is_float(get_irn_mode(get_Cmp_left(n))) || !(proj & pn_Cmp_Uo)),
- "unordered Proj for non-float Cmp (Did you use Ne instead of Lg?)", 0,
- show_proj_failure(p);
- );
- return 1;
-}
-
/**
* verify a Proj(Load) node
*/
/* We don't test */
break;
- case iro_Bad:
- /* hmm, optimization did not remove it */
- break;
-
default:
/* ASSERT_AND_RET(0, "Unknown opcode", 0); */
break;
ir_mode *mode = get_irn_mode(p);
long proj = get_Proj_proj(p);
- /* ignore Bound checks of Bad */
- if (is_Bad(get_Bound_index(n)))
- return 1;
ASSERT_AND_RET_DBG(
(
(proj == pn_Bound_M && mode == mode_M) ||
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
- ASSERT_AND_RET(
- is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ ASSERT_AND_RET(get_irn_mode(pred) == mode_X,
"Block node must have a mode_X predecessor", 0);
+ ASSERT_AND_RET(is_cfop(skip_Proj(skip_Tuple(pred))), "Block predecessor must be a cfop", 0);
}
if (n == get_irg_start_block(irg)) {
ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 0, "Start Block node", 0);
}
- if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
+ if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend) {
/* End block may only have Return, Raise or fragile ops as preds. */
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
),
"End Block node", 0);
}
- /* irg attr must == graph we are in. */
- ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
- return 1;
+ }
+ /* irg attr must == graph we are in. */
+ ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
+ return 1;
}
/**
(void) irg;
ASSERT_AND_RET(
- /* Div: BB x M x int x int --> M x X x int */
+ /* Div: BB x M x data x data --> M x X x data */
op1mode == mode_M &&
op2mode == op3mode &&
mode_is_data(op2mode) &&
ASSERT_AND_RET_DBG(
/* And or Or or Eor: BB x int x int --> int */
- (mode_is_int(mymode) || mymode == mode_b) &&
+ (mode_is_int(mymode) || mode_is_reference(mymode) || mymode == mode_b) &&
op2mode == op1mode &&
mymode == op2mode,
"And, Or or Eor node", 0,
/* Cmp: BB x datab x datab --> b16 */
mode_is_datab(op1mode) &&
op2mode == op1mode &&
- mymode == mode_T,
+ mymode == mode_b,
"Cmp node", 0,
show_binop_failure(n, "/* Cmp: BB x datab x datab --> b16 */");
);
/* Phi: BB x dataM^n --> dataM */
for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Phi_pred(n, i);
- if (!is_Bad(pred)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(pred) == mymode,
- "Phi node", 0,
- show_phi_failure(n, pred, i);
- );
- }
+ ASSERT_AND_RET_DBG(get_irn_mode(pred) == mymode,
+ "Phi node", 0, show_phi_failure(n, pred, i);
+ );
}
ASSERT_AND_RET(mode_is_dataM(mymode) || mymode == mode_b, "Phi node", 0 );
+#if 0
if (mymode == mode_M) {
for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
int j;
ir_node *pred_i = get_Phi_pred(n, i);
- if (is_Bad(pred_i))
- continue;
for (j = i - 1; j >= 0; --j) {
ir_node *pred_j = get_Phi_pred(n, j);
- if (is_Bad(pred_j))
- continue;
-#if 0
/* currently this checks fails for blocks with exception
outputs (and these are NOT basic blocks). So it is disabled yet. */
ASSERT_AND_RET_DBG(
0,
ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
);
-#endif
}
}
}
+#endif
return 1;
}
ir_node *bl = get_nodes_block(use);
for (i = get_irn_arity(use) - 1; i >= 0; --i) {
- ir_node *def = get_irn_n(use, i);
- ir_node *def_bl = get_nodes_block(def);
- ir_node *use_bl = bl;
+ ir_node *def = get_irn_n(use, i);
+ ir_node *def_bl = get_nodes_block(def);
+ ir_node *use_bl = bl;
ir_graph *irg;
- /* ignore dead definition blocks, will be removed */
- if (is_Block_dead(def_bl) || get_Block_dom_depth(def_bl) == -1)
+ /* we have no dominance relation for unreachable blocks, so we can't
+ * check the dominance property there */
+ if (!is_Block(def_bl) || get_Block_dom_depth(def_bl) == -1)
continue;
- if (is_Phi(use))
+ if (is_Phi(use)) {
+ if (is_Bad(def))
+ continue;
use_bl = get_Block_cfgpred_block(bl, i);
+ }
- /* ignore dead use blocks, will be removed */
- if (is_Block_dead(use_bl) || get_Block_dom_depth(use_bl) == -1)
+ if (!is_Block(use_bl) || get_Block_dom_depth(use_bl) == -1)
continue;
irg = get_irn_irg(use);
/* Tests the modes of n and its predecessors. */
int irn_verify_irg(ir_node *n, ir_graph *irg)
{
- int i;
ir_op *op;
if (!get_node_verification_mode())
op = get_irn_op(n);
- /* We don't want to test nodes whose predecessors are Bad,
- as we would have to special case that for each operation. */
- if (op != op_Phi && op != op_Block) {
- for (i = get_irn_arity(n) - 1; i >= 0; --i) {
- if (is_Bad(get_irn_n(n, i)))
- return 1;
- }
- }
-
if (_get_op_pinned(op) >= op_pin_state_exc_pinned) {
op_pin_state state = get_irn_pinned(n);
ASSERT_AND_RET_DBG(
state == op_pin_state_pinned,
"invalid pin state", 0,
ir_printf("node %+F", n));
+ } else if (!is_Block(n) && is_irn_pinned_in_irg(n)
+ && !is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
+ ASSERT_AND_RET_DBG(is_Block(get_nodes_block(n)) || is_Anchor(n),
+ "block input is not a block", 0,
+ ir_printf("node %+F", n));
}
if (op->ops.verify_node)
#endif /* DEBUG_libfirm */
+typedef struct check_cfg_env_t {
+ pmap *branch_nodes; /**< map blocks to their branching nodes,
+ map mode_X nodes to the blocks they branch to */
+ int res;
+ ir_nodeset_t reachable_blocks;
+ ir_nodeset_t kept_nodes;
+ ir_nodeset_t true_projs;
+ ir_nodeset_t false_projs;
+} check_cfg_env_t;
+
+static int check_block_cfg(ir_node *block, check_cfg_env_t *env)
+{
+ pmap *branch_nodes;
+ int n_cfgpreds;
+ int i;
+
+ ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->reachable_blocks, block),
+ "Block is not reachable by blockwalker (endless loop with no kept block?)", 0,
+ ir_printf("block %+F\n", block));
+
+ n_cfgpreds = get_Block_n_cfgpreds(block);
+ branch_nodes = env->branch_nodes;
+ for (i = 0; i < n_cfgpreds; ++i) {
+ /* check that each mode_X node is only connected
+ * to 1 user */
+ ir_node *branch = get_Block_cfgpred(block, i);
+ ir_node *former_dest;
+ ir_node *former_branch;
+ ir_node *branch_proj;
+ ir_node *branch_block;
+ branch = skip_Tuple(branch);
+ if (is_Bad(branch))
+ continue;
+ former_dest = pmap_get(branch_nodes, branch);
+ ASSERT_AND_RET_DBG(former_dest==NULL || is_unknown_jump(skip_Proj(branch)),
+ "Multiple users on mode_X node", 0,
+ ir_printf("node %+F\n", branch));
+ pmap_insert(branch_nodes, branch, block);
+
+ /* check that there's only 1 branching instruction in each block */
+ branch_block = get_nodes_block(branch);
+ branch_proj = branch;
+ if (is_Proj(branch)) {
+ branch = skip_Proj(branch);
+ }
+ former_branch = pmap_get(branch_nodes, branch_block);
+
+ ASSERT_AND_RET_DBG(former_branch == NULL || former_branch == branch,
+ "Multiple branching nodes in a block", 0,
+ ir_printf("nodes %+F,%+F in block %+F\n",
+ branch, former_branch, branch_block));
+ pmap_insert(branch_nodes, branch_block, branch);
+
+ if (is_Cond(branch)) {
+ long pn = get_Proj_proj(branch_proj);
+ if (get_irn_mode(get_Cond_selector(branch)) == mode_b) {
+ if (pn == pn_Cond_true)
+ ir_nodeset_insert(&env->true_projs, branch);
+ if (pn == pn_Cond_false)
+ ir_nodeset_insert(&env->false_projs, branch);
+ } else {
+ int default_pn = get_Cond_default_proj(branch);
+ if (pn == default_pn)
+ ir_nodeset_insert(&env->true_projs, branch);
+ }
+ }
+ }
+
+ return 1;
+}
+
+static void check_cfg_walk_func(ir_node *node, void *data)
+{
+ check_cfg_env_t *env = (check_cfg_env_t*)data;
+ if (!is_Block(node))
+ return;
+ env->res &= check_block_cfg(node, env);
+}
+
+static int verify_block_branch(ir_node *block, check_cfg_env_t *env)
+{
+ ir_node *branch = pmap_get(env->branch_nodes, block);
+ ASSERT_AND_RET_DBG(branch != NULL
+ || ir_nodeset_contains(&env->kept_nodes, block)
+ || block == get_irg_end_block(get_irn_irg(block)),
+ "block contains no cfop", 0,
+ ir_printf("block %+F\n", block));
+ return 1;
+}
+
+static int verify_cond_projs(ir_node *cond, check_cfg_env_t *env)
+{
+ if (get_irn_mode(get_Cond_selector(cond)) == mode_b) {
+ ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->true_projs, cond),
+ "Cond node lacks true proj", 0,
+ ir_printf("Cond %+F\n", cond));
+ ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->false_projs, cond),
+ "Cond node lacks false proj", 0,
+ ir_printf("Cond %+F\n", cond));
+ } else {
+ ASSERT_AND_RET_DBG(ir_nodeset_contains(&env->true_projs, cond),
+ "Cond node lacks default Proj", 0,
+ ir_printf("Cond %+F\n", cond));
+ }
+ return 1;
+}
+
+static void assert_branch(ir_node *node, void *data)
+{
+ check_cfg_env_t *env = (check_cfg_env_t*)data;
+ if (is_Block(node)) {
+ env->res &= verify_block_branch(node, env);
+ } else if (is_Cond(node)) {
+ env->res &= verify_cond_projs(node, env);
+ }
+}
+
+static void collect_reachable_blocks(ir_node *block, void *data)
+{
+ ir_nodeset_t *reachable_blocks = (ir_nodeset_t*) data;
+ ir_nodeset_insert(reachable_blocks, block);
+}
+
+/**
+ * Checks CFG well-formedness
+ */
+static int check_cfg(ir_graph *irg)
+{
+ check_cfg_env_t env;
+ env.branch_nodes = pmap_create(); /* map blocks to branch nodes */
+ env.res = 1;
+ ir_nodeset_init(&env.reachable_blocks);
+ ir_nodeset_init(&env.true_projs);
+ ir_nodeset_init(&env.false_projs);
+
+ irg_block_walk_graph(irg, collect_reachable_blocks, NULL,
+ &env.reachable_blocks);
+
+ /* note that we do not use irg_block_walk_graph because it will miss these
+ * invalid blocks without a jump instruction which we want to detect
+ * here */
+ irg_walk_graph(irg, check_cfg_walk_func, NULL, &env);
+
+ ir_nodeset_init(&env.kept_nodes);
+ {
+ ir_node *end = get_irg_end(irg);
+ int arity = get_irn_arity(end);
+ int i;
+ for (i = 0; i < arity; ++i) {
+ ir_node *n = get_irn_n(end, i);
+ ir_nodeset_insert(&env.kept_nodes, n);
+ }
+ }
+ irg_walk_graph(irg, assert_branch, NULL, &env);
+
+ ir_nodeset_destroy(&env.false_projs);
+ ir_nodeset_destroy(&env.true_projs);
+ ir_nodeset_destroy(&env.kept_nodes);
+ ir_nodeset_destroy(&env.reachable_blocks);
+ pmap_destroy(env.branch_nodes);
+ return env.res;
+}
+
/*
* Calls irn_verify for each node in irg.
* Graph must be in state "op_pin_state_pinned".
{
int res = 1;
#ifdef DEBUG_libfirm
+ int pinned = get_irg_pinned(irg) == op_pin_state_pinned;
#ifndef NDEBUG
- last_irg_error = NULL;
+ last_irg_error = NULL;
#endif /* NDEBUG */
- assert(get_irg_pinned(irg) == op_pin_state_pinned && "Verification need pinned graph");
+ if (pinned && !check_cfg(irg))
+ res = 0;
- if (flags & VERIFY_ENFORCE_SSA)
+ if (res == 1 && (flags & VERIFY_ENFORCE_SSA) && pinned)
compute_doms(irg);
irg_walk_anchors(
irg,
- get_irg_dom_state(irg) == dom_consistent &&
- get_irg_pinned(irg) == op_pin_state_pinned ? verify_wrap_ssa : verify_wrap,
- NULL, &res
+ pinned && get_irg_dom_state(irg) == dom_consistent
+ ? verify_wrap_ssa : verify_wrap,
+ NULL,
+ &res
);
if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT && ! res) {
fprintf(stderr, "irg_verify_bads: Block %ld has Bad predecessor\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_graph(irg, "-assert");
+ dump_ir_graph(irg, "assert");
assert(0 && "Bad CF detected");
}
}
fprintf(stderr, "irg_verify_bads: node %ld has Bad Block\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_graph(irg, "-assert");
+ dump_ir_graph(irg, "assert");
assert(0 && "Bad CF detected");
}
}
fprintf(stderr, "irg_verify_bads: node %ld is a Tuple\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_graph(irg, "-assert");
+ dump_ir_graph(irg, "assert");
assert(0 && "Tuple detected");
}
}
fprintf(stderr, "irg_verify_bads: Phi %ld has Bad Input\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_graph(irg, "-assert");
+ dump_ir_graph(irg, "assert");
assert(0 && "Bad CF detected");
}
}
fprintf(stderr, "irg_verify_bads: node %ld has Bad Input\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_graph(irg, "-assert");
+ dump_ir_graph(irg, "assert");
assert(0 && "Bad NON-CF detected");
}
}
/*
* set the default verify operation
*/
-void firm_set_default_verifyer(ir_opcode code, ir_op_ops *ops)
+void firm_set_default_verifier(unsigned code, ir_op_ops *ops)
{
#define CASE(a) \
case iro_##a: \
CASE(Call);
CASE(Div);
CASE(Mod);
- CASE(Cmp);
CASE(Load);
CASE(Store);
CASE(Alloc);