+/**
+ * verify a Proj from a Bound node: the Proj number must pair with the
+ * matching mode (M, X for control flow, or the mode of the checked index
+ * for the result).
+ *
+ * @param n  the Bound node being projected from
+ * @param p  the Proj node to verify
+ * @return non-zero if the Proj is well-formed (also when the check is skipped)
+ */
+static int verify_node_Proj_Bound(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ /* ignore Bound checks of Bad */
+ if (is_Bad(get_Bound_index(n)))
+ return 1;
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Bound_M && mode == mode_M) ||
+ (proj == pn_Bound_X_regular && mode == mode_X) ||
+ (proj == pn_Bound_X_except && mode == mode_X) ||
+ /* the result Proj carries the same mode as the index operand */
+ (proj == pn_Bound_res && mode == get_irn_mode(get_Bound_index(n)))
+ ),
+ "wrong Proj from Bound", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj node: its predecessor must produce a tuple (mode_T), it must
+ * live in the same block as its predecessor (unless the graph floats), and
+ * any op-specific Proj check is delegated to the predecessor's ops table.
+ *
+ * @param p    the Proj node to verify
+ * @param irg  the graph containing the node
+ * @return non-zero if the Proj is well-formed
+ */
+static int verify_node_Proj(ir_node *p, ir_graph *irg)
+{
+ ir_node *pred;
+ ir_op *op;
+
+ /* skip Id nodes so we check the real producer */
+ pred = skip_Id(get_Proj_pred(p));
+ ASSERT_AND_RET(get_irn_mode(pred) == mode_T, "mode of a 'projed' node is not Tuple", 0);
+ ASSERT_AND_RET(get_irg_pinned(irg) == op_pin_state_floats || get_nodes_block(pred) == get_nodes_block(p), "Proj must be in same block as its predecessor", 0);
+
+ op = get_irn_op(pred);
+
+ /* dispatch to the opcode-specific Proj verifier, if one is registered */
+ if (op->ops.verify_proj_node)
+ return op->ops.verify_proj_node(pred, p);
+
+ /* all went ok */
+ return 1;
+}
+
+/**
+ * verify a Block node: checks the MacroBlock attribute, partBlock
+ * predecessor rules, the mode of all control-flow predecessors, the
+ * allowed predecessors of the end block, and the irg attribute.
+ *
+ * @param n    the Block node to verify
+ * @param irg  the graph the block is expected to belong to
+ * @return non-zero if the Block is well-formed
+ */
+static int verify_node_Block(ir_node *n, ir_graph *irg)
+{
+ int i;
+ ir_node *mb = get_Block_MacroBlock(n);
+
+ ASSERT_AND_RET(is_Block(mb) || is_Bad(mb), "Block node with wrong MacroBlock", 0);
+
+ /* n is a partBlock: a block whose MacroBlock is some other block */
+ if (is_Block(mb) && mb != n) {
+ ir_node *pred;
+
+ /* Blocks with more than one predecessor must be header blocks */
+ ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
+ if (get_irg_phase_state(irg) != phase_backend) {
+ pred = get_Block_cfgpred(n, 0);
+ if (is_Proj(pred)) {
+ /* the predecessor MUST be a regular Proj */
+ ir_node *frag_op = get_Proj_pred(pred);
+ ASSERT_AND_RET(
+ is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
+ "partBlock with non-regular predecessor", 0);
+ } else {
+ /* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
+ of fragile nodes during construction. It does not violate our assumption of dominance
+ so let it. */
+ ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
+ "partBlock with non-regular predecessor", 0);
+ }
+ } else {
+ /* relax in backend: Bound nodes are probably lowered into conditional jumps */
+ }
+ }
+
+ /* every control-flow predecessor of a Block must be mode_X (or Bad) */
+ for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(n, i);
+ ASSERT_AND_RET(
+ is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ "Block node must have a mode_X predecessor", 0);
+ }
+
+ if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
+ /* End block may only have Return, Raise or fragile ops as preds. */
+ for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
+ ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
+ if (is_Proj(pred) || is_Tuple(pred))
+ break; /* We can not test properly. How many tuples are there? */
+ /* NOTE(review): 'break' abandons the remaining predecessors once one
+ untestable pred is found — presumably intentional per the comment
+ above, but 'continue' would keep checking the rest; confirm. */
+ ASSERT_AND_RET(
+ (
+ is_Return(pred) ||
+ is_Bad(pred) ||
+ is_Raise(pred) ||
+ is_fragile_op(pred)
+ ),
+ "End Block node", 0);
+ }
+ /* irg attr must == graph we are in. */
+ if (! get_interprocedural_view()) {
+ ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
+ }
+ return 1;
+}
+
+/**
+ * verify a Start node: it must produce a tuple (mode_T).
+ *
+ * @param n    the Start node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Start(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Start: BB --> X x M x ref x data1 x ... x datan x ref */
+ mymode == mode_T, "Start node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Jmp node: it must produce control flow (mode_X).
+ *
+ * @param n    the Jmp node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Jmp(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Jmp: BB --> X */
+ mymode == mode_X, "Jmp node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify an IJmp (indirect jump) node: it must produce control flow
+ * (mode_X) and its target operand must be a reference.
+ *
+ * @param n    the IJmp node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_IJmp(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_IJmp_target(n));
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* IJmp: BB x ref --> X */
+ mymode == mode_X && mode_is_reference(op1mode), "IJmp node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Break node: it must produce control flow (mode_X) and, when
+ * compiled with interprocedural support, may only appear while the
+ * interprocedural view is constructed.
+ *
+ * @param n    the Break node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Break(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+#ifdef INTERPROCEDURAL_VIEW
+ ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
+ "Break may only appear if ip view is constructed.", 0);
+#endif
+ ASSERT_AND_RET(
+ /* Break: BB --> X */
+ mymode == mode_X, "Break node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Cond node: the selector is either boolean (two-way branch) or
+ * an integer (switch-like multiway branch); the Cond itself yields a tuple.
+ *
+ * @param n    the Cond node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Cond(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Cond_selector(n));
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Cond: BB x b --> X x X */
+ (op1mode == mode_b ||
+ /* Cond: BB x int --> X^n */
+ mode_is_int(op1mode) ), "Cond node", 0
+ );
+ ASSERT_AND_RET(mymode == mode_T, "Cond mode is not a tuple", 0);
+
+ return 1;
+}
+
+/**
+ * verify a Return node: checks the memory operand, the modes of all result
+ * operands, the node's own mode, and that the results match the result
+ * types of the enclosing method type (count and, outside the backend
+ * phase, mode).
+ *
+ * @param n    the Return node to verify
+ * @param irg  the graph (used to fetch the method entity/type)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Return(ir_node *n, ir_graph *irg)
+{
+ int i;
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(get_Return_mem(n));
+ ir_type *mt;
+
+ /* Return: BB x M x data1 x ... x datan --> X */
+
+ ASSERT_AND_RET( mem_mode == mode_M, "Return node", 0 ); /* operand M */
+
+ for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Return_res(n, i))), "Return node", 0 ); /* operand datai */
+ }
+ ASSERT_AND_RET( mymode == mode_X, "Result X", 0 ); /* result X */
+ /* Compare returned results with result types of method type */
+ mt = get_entity_type(get_irg_entity(irg));
+ ASSERT_AND_RET_DBG( get_Return_n_ress(n) == get_method_n_ress(mt),
+ "Number of results for Return doesn't match number of results in type.", 0,
+ show_return_nres(irg, n, mt););
+ for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
+ ir_type *res_type = get_method_res_type(mt, i);
+
+ /* mode checks are skipped in the backend phase (types may be lowered) */
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(res_type)) {
+ /* atomic result: node mode must equal the type's mode */
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ } else {
+ /* compound result: returned by reference */
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Return_res(n, i))),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ }
+ }
+ }
+ return 1;
+}
+
+/**
+ * verify a Raise node: the memory operand must be mode_M, the exception
+ * object pointer must be a reference, and the node itself yields a tuple.
+ *
+ * @param n    the Raise node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Raise(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Raise_mem(n));
+ ir_mode *op2mode = get_irn_mode(get_Raise_exo_ptr(n));
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Raise: BB x M x ref --> X x M */
+ op1mode == mode_M && mode_is_reference(op2mode) &&
+ mymode == mode_T, "Raise node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Const node: its mode must be a data mode (or mode_b, which is
+ * allowed so that Cmp results can be statically evaluated), and the mode
+ * of the node must match the mode of its tarval.
+ *
+ * @param n    the Const node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Const(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Const: BB --> data */
+ (mode_is_data(mymode) ||
+ mymode == mode_b) /* we want boolean constants for static evaluation */
+ ,"Const node", 0 /* of Cmp. */
+ );
+ ASSERT_AND_RET(
+ /* the modes of the constant and the tarval must match */
+ mymode == get_tarval_mode(get_Const_tarval(n)),
+ "Const node, tarval and node mode mismatch", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a SymConst node: its mode must be an integer mode (e.g. for type
+ * sizes/offsets) or a reference mode (e.g. for addresses).
+ *
+ * @param n    the SymConst node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_SymConst(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* SymConst: BB --> int*/
+ (mode_is_int(mymode) ||
+ /* SymConst: BB --> ref */
+ mode_is_reference(mymode))
+ ,"SymConst node", 0);
+ return 1;
+}
+
+/**
+ * verify a Sel node: the memory operand must be mode_M, the pointer
+ * operand and the result must share the same reference mode, all index
+ * operands must be integers, and the entity attribute must be set.
+ *
+ * @param n    the Sel node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_Sel(ir_node *n, ir_graph *irg)
+{
+ int i;
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Sel_mem(n));
+ ir_mode *op2mode = get_irn_mode(get_Sel_ptr(n));
+ ir_entity *ent;
+ (void) irg;
+
+ ASSERT_AND_RET_DBG(
+ /* Sel: BB x M x ref x int^n --> ref */
+ (op1mode == mode_M && op2mode == mymode && mode_is_reference(mymode)),
+ "Sel node", 0, show_node_failure(n)
+ );
+
+ /* every array index of the Sel must have an integer mode */
+ for (i = get_Sel_n_indexs(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET_DBG(mode_is_int(get_irn_mode(get_Sel_index(n, i))), "Sel node", 0, show_node_failure(n));
+ }
+ ent = get_Sel_entity(n);
+ ASSERT_AND_RET_DBG(ent, "Sel node with empty entity", 0, show_node_failure(n));
+ return 1;
+}
+
+/**
+ * verify an InstOf node: it must yield a tuple and its object operand
+ * must have a data mode.
+ *
+ * @param n    the InstOf node to verify
+ * @param irg  the containing graph (unused)
+ * @return non-zero if the node is well-formed
+ */
+static int verify_node_InstOf(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_InstOf_obj(n));
+ (void) irg;
+
+ ASSERT_AND_RET(mode_T == mymode, "mode of Instof is not a tuple", 0);
+ ASSERT_AND_RET(mode_is_data(op1mode), "Instof not on data", 0);
+ return 1;
+}
+
+/**
+ * Check if the pinned state is right: a pinned node is always fine; an
+ * unpinned node must have NoMem or Pin as its memory predecessor.
+ *
+ * Note: this helper is specific to Call nodes — it reads the memory input
+ * via get_Call_mem().
+ *
+ * @param n  the (Call) node to check
+ * @return 1 if the pinned state is consistent, 0 otherwise
+ */
+static int verify_right_pinned(ir_node *n)
+{
+ ir_node *mem;
+
+ if (get_irn_pinned(n) == op_pin_state_pinned)
+ return 1;
+ mem = get_Call_mem(n);
+
+ /* if it's not pinned, its memory predecessor must be NoMem or Pin */
+ if (is_NoMem(mem) || is_Pin(mem))
+ return 1;
+ return 0;
+}
+
+/**
+ * verify a Call node
+ */
+static int verify_node_Call(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Call_mem(n));
+ ir_mode *op2mode = get_irn_mode(get_Call_ptr(n));
+ ir_type *mt;
+ int i;
+ (void) irg;
+
+ /* Call: BB x M x ref x data1 x ... x datan
+ --> M x datan+1 x ... x data n+m */
+ ASSERT_AND_RET( op1mode == mode_M && mode_is_reference(op2mode), "Call node", 0 ); /* operand M x ref */
+
+ /* NoMem nodes are only allowed as memory input if the Call is NOT pinned */
+ ASSERT_AND_RET(verify_right_pinned(n),"Call node with wrong memory input", 0 );
+
+ mt = get_Call_type(n);
+ if (get_unknown_type() == mt) {
+ return 1;
+ }
+
+ for (i = get_Call_n_params(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Call_param(n, i))), "Call node", 0 ); /* operand datai */
+ }
+
+ ASSERT_AND_RET( mymode == mode_T, "Call result not a tuple", 0 ); /* result T */
+ /* Compare arguments of node with those of type */
+
+ if (get_method_variadicity(mt) == variadicity_variadic) {
+ ASSERT_AND_RET_DBG(
+ get_Call_n_params(n) >= get_method_n_params(mt),
+ "Number of args for Call doesn't match number of args in variadic type.",
+ 0,
+ ir_fprintf(stderr, "Call %+F has %d params, type %d\n",
+ n, get_Call_n_params(n), get_method_n_params(mt));
+ );
+ } else {
+ ASSERT_AND_RET_DBG(
+ get_Call_n_params(n) == get_method_n_params(mt),
+ "Number of args for Call doesn't match number of args in non variadic type.",
+ 0,
+ ir_fprintf(stderr, "Call %+F has %d params, type %d\n",
+ n, get_Call_n_params(n), get_method_n_params(mt));
+ );
+ }
+
+ for (i = 0; i < get_method_n_params(mt); i++) {
+ ir_type *t = get_method_param_type(mt, i);
+
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(t)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ } else {
+ /* call with a compound type, mode must be reference */
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Call_param(n, i))),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ }
+ }
+ }
+
+#if 0
+ if (Call_has_callees(n)) {
+ for (i = 0; i < get_Call_n_callees(n); i++) {
+ ASSERT_AND_RET(is_entity(get_Call_callee(n, i)), "callee array must contain entities.", 0);
+ }
+ }