/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
vrfy_entities = enable;
}
+#ifndef NDEBUG
+
/**
* little helper for NULL modes
*/
get_irn_node_nr(block), get_irn_arity(block));
}
-/** If the address is Sel or SymConst, return the entity. */
+#endif /* #ifndef NDEBUG */
+
+/**
+ * If the address is Sel or SymConst, return the entity.
+ *
+ * @param ptr the node representing the address
+ */
static ir_entity *get_ptr_entity(ir_node *ptr) {
if (get_irn_op(ptr) == op_Sel) {
return get_Sel_entity(ptr);
- } else if ((get_irn_op(ptr) == op_SymConst) && (get_SymConst_kind(ptr) == symconst_addr_ent)) {
+ } else if (is_SymConst_addr_ent(ptr)) {
return get_SymConst_entity(ptr);
}
return NULL;
"wrong Proj from Call", 0,
show_proj_failure(p);
);
+ /* If exception control flow is modeled (an X_regular Proj exists), the Call must have a real (non-NoMem) memory input. */
if (proj == pn_Call_X_regular)
ASSERT_AND_RET(
get_irn_op(get_Call_mem(n)) != op_NoMem,
"wrong Proj from Cmp", 0,
show_proj_failure(p);
);
+ ASSERT_AND_RET_DBG(
+ (mode_is_float(get_irn_mode(get_Cmp_left(n))) || !(proj & pn_Cmp_Uo)),
+ "unordered Proj for non-float Cmp", 0,
+ show_proj_failure(p);
+ );
return 1;
}
if ((mode_is_reference(mode)) && is_compound_type(get_method_param_type(mt, proj)))
/* value argument */ break;
+ if (get_irg_phase_state(get_irn_irg(pred)) != phase_backend) {
ASSERT_AND_RET_DBG(
- (mode == get_type_mode(get_method_param_type(mt, proj))),
- "Mode of Proj from Start doesn't match mode of param type.", 0,
- show_proj_mode_failure(p, get_method_param_type(mt, proj));
- );
+ (mode == get_type_mode(get_method_param_type(mt, proj))),
+ "Mode of Proj from Start doesn't match mode of param type.", 0,
+ show_proj_mode_failure(p, get_method_param_type(mt, proj));
+ );
+ }
} else if (nr == pn_Start_P_value_arg_base) {
ASSERT_AND_RET(
(proj >= 0 && mode_is_reference(mode)),
static int verify_node_Proj_EndReg(ir_node *n, ir_node *p) {
(void) n;
(void) p;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET(
(get_irp_ip_view_state() != ip_view_no),
"EndReg may only appear if ip view is constructed.", 0);
+#endif
return 1;
}
static int verify_node_Proj_EndExcept(ir_node *n, ir_node *p) {
(void) n;
(void) p;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET(
(get_irp_ip_view_state() != ip_view_no),
"EndExcept may only appear if ip view is constructed.", 0);
+#endif
return 1;
}
*/
static int verify_node_Block(ir_node *n, ir_graph *irg) {
int i;
+ ir_node *mb = get_Block_MacroBlock(n);
+
+ ASSERT_AND_RET(is_Block(mb) || is_Bad(mb), "Block node with wrong MacroBlock", 0);
+
+ if (is_Block(mb) && mb != n) {
+ /* A partBlock (mb != n) must have exactly one predecessor; only MacroBlock headers may have more. */
+ ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
+ }
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(n, i);
"Block node", 0);
}
- /* End block may only have Return, Raise or fragile ops as preds. */
if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
+ /* End block may only have Return, Raise or fragile ops as preds. */
for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
- if (is_Proj(pred) || get_irn_op(pred) == op_Tuple)
+ if (is_Proj(pred) || is_Tuple(pred))
break; /* We can not test properly. How many tuples are there? */
ASSERT_AND_RET(
(
- is_Return(pred) ||
- is_Bad(pred) ||
- (get_irn_op(pred) == op_Raise) ||
+ is_Return(pred) ||
+ is_Bad(pred) ||
+ is_Raise(pred) ||
is_fragile_op(pred)
),
"End Block node", 0);
ir_mode *mymode = get_irn_mode(n);
(void) irg;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
"Break may only appear if ip view is constructed.", 0);
+#endif
ASSERT_AND_RET(
/* Jmp: BB --> X */
mymode == mode_X, "Break node", 0
for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
ir_type *res_type = get_method_res_type(mt, i);
- if (is_atomic_type(res_type)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
- "Mode of result for Return doesn't match mode of result type.", 0,
- show_return_modes(irg, n, mt, i);
- );
- } else {
- ASSERT_AND_RET_DBG(
- mode_is_reference(get_irn_mode(get_Return_res(n, i))),
- "Mode of result for Return doesn't match mode of result type.", 0,
- show_return_modes(irg, n, mt, i);
- );
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(res_type)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ } else {
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Return_res(n, i))),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ }
}
}
return 1;
get_Call_n_params(n) >= get_method_n_params(mt),
"Number of args for Call doesn't match number of args in variadic type.",
0,
- fprintf(stderr, "Call has %d params, method %s type %d\n",
- get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
+ ir_fprintf(stderr, "Call %+F has %d params, method %s type %d\n",
+ n, get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
);
} else {
- ASSERT_AND_RET(
+ ASSERT_AND_RET_DBG(
get_Call_n_params(n) == get_method_n_params(mt),
"Number of args for Call doesn't match number of args in non variadic type.",
- 0);
+ 0,
+ ir_fprintf(stderr, "Call %+F has %d params, method %s type %d\n",
+ n, get_Call_n_params(n), get_type_name(mt), get_method_n_params(mt));
+ );
}
for (i = 0; i < get_method_n_params(mt); i++) {
ir_type *t = get_method_param_type(mt, i);
- if (is_atomic_type(t)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
- "Mode of arg for Call doesn't match mode of arg type.", 0,
- show_call_param(n, mt);
- );
- } else {
- /* call with a compound type, mode must be reference */
- ASSERT_AND_RET_DBG(
- mode_is_reference(get_irn_mode(get_Call_param(n, i))),
- "Mode of arg for Call doesn't match mode of arg type.", 0,
- show_call_param(n, mt);
- );
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(t)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ } else {
+ /* call with a compound type, mode must be reference */
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Call_param(n, i))),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ }
}
}
(void) irg;
ASSERT_AND_RET_DBG(
- /* Conv: BB x datab1 --> datab2 */
- mode_is_datab(op1mode) && mode_is_datab(mymode),
+ get_irg_phase_state(irg) == phase_backend ||
+ (mode_is_datab(op1mode) && mode_is_data(mymode)),
"Conv node", 0,
- show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
+ show_unop_failure(n, "/* Conv: BB x datab --> data */");
);
return 1;
}
int i;
(void) irg;
- if (! is_Bad(block) && get_irg_phase_state(get_irn_irg(n)) != phase_building) {
+ if (! is_Bad(block) && get_irg_phase_state(get_irn_irg(n)) != phase_building && get_irn_arity(n) > 0) {
/* a Phi node MUST have the same number of inputs as its block */
ASSERT_AND_RET_DBG(
get_irn_arity(n) == get_irn_arity(block),
static int verify_node_Filter(ir_node *n, ir_graph *irg) {
(void) n;
(void) irg;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
"Filter may only appear if ip view is constructed.", 0);
+#endif
/* We should further do tests as for Proj and Phi. */
return 1;
}
* @return non-zero on success, 0 on dominance error
*/
static int check_dominance_for_node(ir_node *use) {
+ if (is_Block(use)) {
+ ir_node *mbh = get_Block_MacroBlock(use);
+
+ if (mbh != use) {
+ /* must be a partBlock */
+ if (is_Block(mbh)) {
+ ASSERT_AND_RET(block_dominates(mbh, use), "MacroBlock header must dominate a partBlock", 0);
+ }
+ }
+ }
/* This won't work for blocks and the end node */
- if (!is_Block(use) && use != get_irg_end(current_ir_graph) && use != current_ir_graph->anchor) {
+ else if (use != get_irg_end(current_ir_graph) && use != current_ir_graph->anchor) {
int i;
ir_node *bl = get_nodes_block(use);
}
int irn_vrfy(ir_node *n) {
- int res = 1;
#ifdef DEBUG_libfirm
- res = irn_vrfy_irg(n, current_ir_graph);
+ return irn_vrfy_irg(n, current_ir_graph);
+#else
+ (void)n;
+ return 1;
#endif
- return res;
}
/*-----------------------------------------------------------------*/
else
fprintf(stderr, "irg_verify: Verifying graph %p failed\n", (void *)current_ir_graph);
}
+#else
+ (void)irg;
+ (void)flags;
#endif /* DEBUG_libfirm */
return res;
fprintf(stderr, "irg_vrfy_bads: Block %ld has Bad predecessor\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_block_graph(current_ir_graph, "-assert");
+ dump_ir_block_graph_sched(current_ir_graph, "-assert");
assert(0 && "Bad CF detected");
}
}
fprintf(stderr, "irg_vrfy_bads: node %ld has Bad Block\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_block_graph(current_ir_graph, "-assert");
+ dump_ir_block_graph_sched(current_ir_graph, "-assert");
assert(0 && "Bad CF detected");
}
}
fprintf(stderr, "irg_vrfy_bads: node %ld is a Tuple\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_block_graph(current_ir_graph, "-assert");
+ dump_ir_block_graph_sched(current_ir_graph, "-assert");
assert(0 && "Tuple detected");
}
}
fprintf(stderr, "irg_vrfy_bads: Phi %ld has Bad Input\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_block_graph(current_ir_graph, "-assert");
+ dump_ir_block_graph_sched(current_ir_graph, "-assert");
assert(0 && "Bad CF detected");
}
}
fprintf(stderr, "irg_vrfy_bads: node %ld has Bad Input\n", get_irn_node_nr(node));
}
if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
- dump_ir_block_graph(current_ir_graph, "-assert");
+ dump_ir_block_graph_sched(current_ir_graph, "-assert");
assert(0 && "Bad NON-CF detected");
}
}