if (nr == pn_Start_T_args) {
ASSERT_AND_RET(
- (proj >= 0 && mode_is_data(mode)),
+ (proj >= 0 && mode_is_datab(mode)),
"wrong Proj from Proj from Start", 0);
ASSERT_AND_RET(
(proj < get_method_n_params(mt)),
if ((mode_is_reference(mode)) && is_compound_type(get_method_param_type(mt, proj)))
/* value argument */ break;
+ if (get_irg_phase_state(get_irn_irg(pred)) != phase_backend) {
ASSERT_AND_RET_DBG(
- (mode == get_type_mode(get_method_param_type(mt, proj))),
- "Mode of Proj from Start doesn't match mode of param type.", 0,
- show_proj_mode_failure(p, get_method_param_type(mt, proj));
- );
+ (mode == get_type_mode(get_method_param_type(mt, proj))),
+ "Mode of Proj from Start doesn't match mode of param type.", 0,
+ show_proj_mode_failure(p, get_method_param_type(mt, proj));
+ );
+ }
} else if (nr == pn_Start_P_value_arg_base) {
ASSERT_AND_RET(
(proj >= 0 && mode_is_reference(mode)),
+/* Check a Proj from an EndReg node.  EndReg nodes are only legal while
+ * the interprocedural (ip) view is constructed; when the library is
+ * built without INTERPROCEDURAL_VIEW the check is compiled out and the
+ * node is accepted unconditionally. */
static int verify_node_Proj_EndReg(ir_node *n, ir_node *p) {
(void) n;
(void) p;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET(
(get_irp_ip_view_state() != ip_view_no),
"EndReg may only appear if ip view is constructed.", 0);
+#endif
return 1;
}
+/* Check a Proj from an EndExcept node.  Like EndReg, EndExcept may only
+ * appear while the interprocedural (ip) view is constructed; without
+ * INTERPROCEDURAL_VIEW the assertion is compiled out. */
static int verify_node_Proj_EndExcept(ir_node *n, ir_node *p) {
(void) n;
(void) p;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET(
(get_irp_ip_view_state() != ip_view_no),
"EndExcept may only appear if ip view is constructed.", 0);
+#endif
return 1;
}
ir_mode *mymode = get_irn_mode(n);
(void) irg;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
"Break may only appear if ip view is constructed.", 0);
+#endif
ASSERT_AND_RET(
/* Jmp: BB --> X */
mymode == mode_X, "Break node", 0
for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
ir_type *res_type = get_method_res_type(mt, i);
- if (is_atomic_type(res_type)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
- "Mode of result for Return doesn't match mode of result type.", 0,
- show_return_modes(irg, n, mt, i);
- );
- } else {
- ASSERT_AND_RET_DBG(
- mode_is_reference(get_irn_mode(get_Return_res(n, i))),
- "Mode of result for Return doesn't match mode of result type.", 0,
- show_return_modes(irg, n, mt, i);
- );
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(res_type)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ } else {
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Return_res(n, i))),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ }
}
}
return 1;
return 1;
}
+/**
+ * Check if the pinned state of a node is right: a node that is NOT
+ * pinned must take its memory input from a NoMem or a Pin node, so a
+ * scheduler cannot move it to a program point with a wrong memory state.
+ *
+ * @param n  the node to check; its memory input is read via get_Call_mem()
+ * @return   non-zero if the pinned state is consistent, 0 otherwise
+ *
+ * NOTE(review): this helper is also used for CopyB nodes below; it
+ * assumes get_Call_mem() addresses the same input slot for both opcodes
+ * -- confirm that the mem operand position matches.
+ */
+static int verify_right_pinned(ir_node *n) {
+ ir_node *mem;
+
+ /* a pinned node is always fine, no matter where its memory comes from */
+ if (get_irn_pinned(n) == op_pin_state_pinned)
+ return 1;
+ mem = get_Call_mem(n);
+
+ /* if it's not pinned, its memory predecessor must be NoMem or Pin */
+ if (is_NoMem(mem) || is_Pin(mem))
+ return 1;
+ return 0;
+}
+
/**
* verify a Call node
*/
ASSERT_AND_RET( op1mode == mode_M && mode_is_reference(op2mode), "Call node", 0 ); /* operand M x ref */
/* NoMem nodes are only allowed as memory input if the Call is NOT pinned */
- ASSERT_AND_RET(
- (get_irn_op(get_Call_mem(n)) == op_NoMem) ||
- (get_irn_op(get_Call_mem(n)) != op_NoMem && get_irn_pinned(n) == op_pin_state_pinned),
- "Call node with wrong memory input", 0 );
+ ASSERT_AND_RET(verify_right_pinned(n),"Call node with wrong memory input", 0 );
mt = get_Call_type(n);
if(get_unknown_type() == mt) {
}
for (i = get_Call_n_params(n) - 1; i >= 0; --i) {
- ASSERT_AND_RET( mode_is_data(get_irn_mode(get_Call_param(n, i))), "Call node", 0 ); /* operand datai */
+ ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Call_param(n, i))), "Call node", 0 ); /* operand datai */
}
ASSERT_AND_RET( mymode == mode_T, "Call result not a tuple", 0 ); /* result T */
for (i = 0; i < get_method_n_params(mt); i++) {
ir_type *t = get_method_param_type(mt, i);
- if (is_atomic_type(t)) {
- ASSERT_AND_RET_DBG(
- get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
- "Mode of arg for Call doesn't match mode of arg type.", 0,
- show_call_param(n, mt);
- );
- } else {
- /* call with a compound type, mode must be reference */
- ASSERT_AND_RET_DBG(
- mode_is_reference(get_irn_mode(get_Call_param(n, i))),
- "Mode of arg for Call doesn't match mode of arg type.", 0,
- show_call_param(n, mt);
- );
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(t)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ } else {
+ /* call with a compound type, mode must be reference */
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Call_param(n, i))),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ }
}
}
ASSERT_AND_RET_DBG(
(
/* common Add: BB x numP x numP --> numP */
- (op1mode == mymode && op2mode == op1mode && mode_is_numP(mymode)) ||
+ (op1mode == mymode && op2mode == op1mode && mode_is_data(mymode)) ||
/* Pointer Add: BB x ref x int --> ref */
(mode_is_reference(op1mode) && mode_is_int(op2mode) && op1mode == mymode) ||
/* Pointer Add: BB x int x ref --> ref */
ASSERT_AND_RET_DBG(
(
/* common Sub: BB x numP x numP --> numP */
- (mymode ==op1mode && mymode == op2mode && mode_is_numP(op1mode)) ||
+ (mymode ==op1mode && mymode == op2mode && mode_is_data(op1mode)) ||
/* Pointer Sub: BB x ref x int --> ref */
(op1mode == mymode && mode_is_int(op2mode) && mode_is_reference(mymode)) ||
/* Pointer Sub: BB x int x ref --> ref */
ASSERT_AND_RET_DBG(
(
- /* Mul: BB x int1 x int1 --> int2 */
- (mode_is_int(op1mode) && op2mode == op1mode && mode_is_int(mymode)) ||
+ /* Mul: BB x int_n x int_n --> int_n|int_2n */
+ (mode_is_int(op1mode) && op2mode == op1mode && mode_is_int(mymode) &&
+ (op1mode == mymode || get_mode_size_bits(op1mode) * 2 == get_mode_size_bits(mymode))) ||
/* Mul: BB x float x float --> float */
(mode_is_float(op1mode) && op2mode == op1mode && mymode == op1mode)
),
"Mul node",0,
- show_binop_failure(n, "/* Mul: BB x int1 x int1 --> int2 */ |\n"
+ show_binop_failure(n, "/* Mul: BB x int_n x int_n --> int_n|int_2n */ |\n"
"/* Mul: BB x float x float --> float */");
);
return 1;
}
+/**
+ * verify a Mulh node
+ *
+ * Mulh yields the high part of a multiplication, so both operands and
+ * the result must all have the same integer mode:
+ *   Mulh: BB x int x int --> int
+ */
+static int verify_node_Mulh(ir_node *n, ir_graph *irg) {
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Mulh_left(n));
+ ir_mode *op2mode = get_irn_mode(get_Mulh_right(n));
+ (void) irg;
+
+ ASSERT_AND_RET_DBG(
+ (
+ /* Mulh: BB x int x int --> int */
+ (mode_is_int(op1mode) && op2mode == op1mode && op1mode == mymode)
+ ),
+ "Mulh node",0,
+ show_binop_failure(n, "/* Mulh: BB x int x int --> int */");
+ );
+ return 1;
+}
+
/**
* verify a Quot node
*/
ASSERT_AND_RET_DBG(
/* Cmp: BB x datab x datab --> b16 */
- mode_is_data (op1mode) &&
+ mode_is_datab(op1mode) &&
op2mode == op1mode &&
mymode == mode_T,
"Cmp node", 0,
(void) irg;
ASSERT_AND_RET_DBG(
- /* Conv: BB x datab1 --> datab2 */
- mode_is_datab(op1mode) && mode_is_datab(mymode),
+ get_irg_phase_state(irg) == phase_backend ||
+ (mode_is_datab(op1mode) && mode_is_data(mymode)),
"Conv node", 0,
- show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
+ show_unop_failure(n, "/* Conv: BB x datab --> data */");
);
return 1;
}
int i;
(void) irg;
- if (! is_Bad(block) && get_irg_phase_state(get_irn_irg(n)) != phase_building) {
+ if (! is_Bad(block) && get_irg_phase_state(get_irn_irg(n)) != phase_building && get_irn_arity(n) > 0) {
/* a Phi node MUST have the same number of inputs as its block */
ASSERT_AND_RET_DBG(
get_irn_arity(n) == get_irn_arity(block),
+/* Check a Filter node.  Filter is only legal while the interprocedural
+ * (ip) view is constructed; without INTERPROCEDURAL_VIEW the check is
+ * compiled out.  The original TODO below still stands: Filter should
+ * additionally get the Proj/Phi-style mode checks. */
static int verify_node_Filter(ir_node *n, ir_graph *irg) {
(void) n;
(void) irg;
+#ifdef INTERPROCEDURAL_VIEW
ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
"Filter may only appear if ip view is constructed.", 0);
+#endif
/* We should further do tests as for Proj and Phi. */
return 1;
}
ir_mode *op2mode = get_irn_mode(get_Store_ptr(n));
ir_mode *op3mode = get_irn_mode(get_Store_value(n));
- ASSERT_AND_RET(op1mode == mode_M && mode_is_data(op3mode), "Store node", 0 );
+ ASSERT_AND_RET(op1mode == mode_M && mode_is_datab(op3mode), "Store node", 0 );
if (get_irg_phase_state(irg) != phase_backend) {
ASSERT_AND_RET(mode_is_reference(op2mode), "Store node", 0 );
} else {
op1mode == mode_b &&
op2mode == mymode &&
op3mode == mymode &&
- mode_is_numP(mymode),
+ mode_is_data(mymode),
"Mux node", 0
);
return 1;
/* NoMem nodes are only allowed as memory input if the CopyB is NOT pinned.
This should happen RARELY, as CopyB COPIES MEMORY */
- ASSERT_AND_RET(
- (get_irn_op(get_CopyB_mem(n)) == op_NoMem) ||
- (get_irn_op(get_CopyB_mem(n)) != op_NoMem && get_irn_pinned(n) == op_pin_state_pinned),
- "CopyB node with wrong memory input", 0 );
+ ASSERT_AND_RET(verify_right_pinned(n), "CopyB node with wrong memory input", 0 );
return 1;
}
*/
static int check_dominance_for_node(ir_node *use) {
/* This won't work for blocks and the end node */
- if (!is_Block(use) && use != get_irg_end(current_ir_graph)) {
+ if (!is_Block(use) && use != get_irg_end(current_ir_graph) && use != current_ir_graph->anchor) {
int i;
ir_node *bl = get_nodes_block(use);
}
+/* Verify a single node against current_ir_graph.  In builds without
+ * DEBUG_libfirm the check is compiled out entirely and always reports
+ * success (returns 1). */
int irn_vrfy(ir_node *n) {
- int res = 1;
#ifdef DEBUG_libfirm
- res = irn_vrfy_irg(n, current_ir_graph);
+ return irn_vrfy_irg(n, current_ir_graph);
+#else
+ (void)n;
+ return 1;
#endif
- return res;
}
/*-----------------------------------------------------------------*/
int *res = env;
*res = irn_vrfy(node);
- if (*res)
+ if (*res) {
*res = check_dominance_for_node(node);
+ }
}
#endif /* DEBUG_libfirm */
if (flags & VRFY_ENFORCE_SSA)
compute_doms(irg);
- irg_walk_graph(
+ irg_walk_anchors(
irg,
get_irg_dom_state(irg) == dom_consistent &&
get_irg_pinned(irg) == op_pin_state_pinned ? vrfy_wrap_ssa : vrfy_wrap,
else
fprintf(stderr, "irg_verify: Verifying graph %p failed\n", (void *)current_ir_graph);
}
+#else
+ (void)irg;
+ (void)flags;
#endif /* DEBUG_libfirm */
return res;
CASE(Sub);
CASE(Minus);
CASE(Mul);
+ CASE(Mulh);
CASE(Quot);
CASE(DivMod);
CASE(Div);