+ if (is_Bad(pred_i))
+ continue;
+ for (j = i - 1; j >= 0; --j) {
+ ir_node *pred_j = get_Phi_pred(n, j);
+
+ if (is_Bad(pred_j))
+ continue;
+ ASSERT_AND_RET_DBG(
+ (pred_i == pred_j) || (get_irn_n(pred_i, -1) != get_irn_n(pred_j, -1)),
+ "At least two different PhiM predecessors are in the same block",
+ 0,
+ ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
+ );
+ }
+ }
+ }
+ return 1;
+}
+
+/**
+ * verify a Filter node
+ *
+ * Only checks that the interprocedural (ip) view is constructed, since
+ * Filter nodes exist solely in that view.
+ * NOTE(review): mode/predecessor checks as done for Proj and Phi are
+ * still missing here (see comment below).
+ *
+ * @return non-zero on success (or assertion failure inside the macro)
+ */
+static int verify_node_Filter(ir_node *n, ir_graph *irg) {
+ /* a Filter outside an ip view is always an error */
+ ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
+ "Filter may only appear if ip view is constructed.", 0);
+ /* We should further do tests as for Proj and Phi. */
+ return 1;
+}
+
+/**
+ * verify a Load node
+ *
+ * Checks the signature  Load: BB x M x ref --> M x X x data.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Load(ir_node *n, ir_graph *irg) {
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(get_Load_mem(n));
+ ir_mode *ptr_mode = get_irn_mode(get_Load_ptr(n));
+
+ /* Load: BB x M x ref --> M x X x data */
+ ASSERT_AND_RET(
+ mem_mode == mode_M && mode_is_reference(ptr_mode),
+ "Load node", 0
+ );
+ /* the Load itself is a tuple (memory, control flow, loaded value) */
+ ASSERT_AND_RET( mode == mode_T, "Load node", 0 );
+
+ /*
+ * Historical note:
+ * jack's gen_add_firm_code:simpleSel seems to build Load (Load
+ * (Proj (Proj))) sometimes ...
+
+ * interprete.c:ai_eval seems to assume that this happens, too
+
+ * obset.c:get_abstval_any can't deal with this if the load has
+ * mode_T
+ *
+ {
+ entity *ent = hunt_for_entity (get_Load_ptr (n), n);
+ assert ((NULL != ent) || (mymode != mode_T));
+ }
+ */
+
+ return 1;
+}
+
+/**
+ * verify a Store node
+ *
+ * Checks the signature  Store: BB x M x ref x data --> M x X,
+ * and, in high level (phase_high) graphs with entity checking enabled,
+ * that a store through a known entity uses the entity's mode.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Store(ir_node *n, ir_graph *irg) {
+ entity *ent;
+
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(get_Store_mem(n));
+ ir_mode *ptr_mode = get_irn_mode(get_Store_ptr(n));
+ ir_mode *val_mode = get_irn_mode(get_Store_value(n));
+
+ /* Store: BB x M x ref x data --> M x X */
+ ASSERT_AND_RET(
+ mem_mode == mode_M && mode_is_reference(ptr_mode) && mode_is_data(val_mode),
+ "Store node", 0
+ );
+ /* the Store itself is a tuple (memory, control flow) */
+ ASSERT_AND_RET(mode == mode_T, "Store node", 0);
+
+ ent = get_ptr_entity(get_Store_ptr(n));
+ if (vrfy_entities && ent && get_irg_phase_state(current_ir_graph) == phase_high) {
+ /*
+ * If lowered code, any Sels that add 0 may be removed, causing
+ * an direct access to entities of array or compound type.
+ * Prevent this by checking the phase.
+ */
+ ASSERT_AND_RET( val_mode == get_type_mode(get_entity_type(ent)),
+ "Store node", 0);
+ }
+
+ return 1;
+}
+
+/**
+ * verify an Alloc node
+ *
+ * Checks the signature  Alloc: BB x M x int_u --> M x X x ref.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Alloc(ir_node *n, ir_graph *irg) {
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(get_Alloc_mem(n));
+ ir_mode *size_mode = get_irn_mode(get_Alloc_size(n));
+
+ ASSERT_AND_RET_DBG(
+ /* Alloc: BB x M x int_u --> M x X x ref */
+ mode == mode_T &&
+ mem_mode == mode_M &&
+ mode_is_int(size_mode) &&
+ !mode_is_signed(size_mode),
+ "Alloc node", 0,
+ show_binop_failure(n, "/* Alloc: BB x M x int_u --> M x X x ref */");
+ );
+ return 1;
+}
+
+/**
+ * verify a Free node
+ *
+ * Checks the signature  Free: BB x M x ref x int_u --> M.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Free(ir_node *n, ir_graph *irg) {
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(get_Free_mem(n));
+ ir_mode *ptr_mode = get_irn_mode(get_Free_ptr(n));
+ ir_mode *size_mode = get_irn_mode(get_Free_size(n));
+
+ ASSERT_AND_RET_DBG(
+ /* Free: BB x M x ref x int_u --> M */
+ mode == mode_M &&
+ mem_mode == mode_M &&
+ mode_is_reference(ptr_mode) &&
+ mode_is_int(size_mode) &&
+ !mode_is_signed(size_mode),
+ "Free node", 0,
+ show_triop_failure(n, "/* Free: BB x M x ref x int_u --> M */");
+ );
+ return 1;
+}
+
+/**
+ * verify a Sync node
+ *
+ * Checks the signature  Sync: BB x M^n --> M:
+ * every predecessor, and the Sync itself, must carry mode_M.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Sync(ir_node *n, ir_graph *irg) {
+ int i;
+ ir_mode *mymode = get_irn_mode(n);
+
+ /* Sync: BB x M^n --> M */
+ for (i = get_Sync_n_preds(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET( get_irn_mode(get_Sync_pred(n, i)) == mode_M, "Sync node", 0 );
+ } /* was "};": stray empty statement removed */
+ ASSERT_AND_RET( mymode == mode_M, "Sync node", 0 );
+ return 1;
+}
+
+/**
+ * verify a Confirm node
+ *
+ * Checks the signature  Confirm: BB x T x T --> T:
+ * value, bound and the Confirm itself share one mode.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Confirm(ir_node *n, ir_graph *irg) {
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *val_mode = get_irn_mode(get_Confirm_value(n));
+ ir_mode *bound_mode = get_irn_mode(get_Confirm_bound(n));
+
+ ASSERT_AND_RET_DBG(
+ /* Confirm: BB x T x T --> T */
+ val_mode == mode && bound_mode == mode,
+ "Confirm node", 0,
+ show_binop_failure(n, "/* Confirm: BB x T x T --> T */");
+ );
+ return 1;
+}
+
+/**
+ * verify a Mux node
+ *
+ * Checks the signature  Mux: BB x b x numP x numP --> numP:
+ * a boolean selector chooses between two values of the Mux's own
+ * (numeric or pointer) mode.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Mux(ir_node *n, ir_graph *irg) {
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *sel_mode = get_irn_mode(get_Mux_sel(n));
+ ir_mode *true_mode = get_irn_mode(get_Mux_true(n));
+ ir_mode *false_mode = get_irn_mode(get_Mux_false(n));
+
+ ASSERT_AND_RET(
+ /* Mux: BB x b x numP x numP --> numP */
+ sel_mode == mode_b &&
+ mode_is_numP(mode) &&
+ true_mode == mode &&
+ false_mode == mode,
+ "Mux node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a CopyB node
+ *
+ * Checks the signature  CopyB: BB x M x ref x ref --> M x X,
+ * that the copied type is a compound type, and that a NoMem memory
+ * input only occurs on an unpinned CopyB.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_CopyB(ir_node *n, ir_graph *irg) {
+ ir_node *mem = get_CopyB_mem(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(mem);
+ ir_mode *dst_mode = get_irn_mode(get_CopyB_dst(n));
+ ir_mode *src_mode = get_irn_mode(get_CopyB_src(n));
+ ir_type *tp = get_CopyB_type(n);
+
+ /* CopyB: BB x M x ref x ref --> M x X */
+ ASSERT_AND_RET(
+ mode == mode_T &&
+ mem_mode == mode_M &&
+ mode_is_reference(dst_mode) &&
+ mode_is_reference(src_mode),
+ "CopyB node", 0 ); /* operand M x ref x ref */
+
+ ASSERT_AND_RET(
+ is_compound_type(tp),
+ "CopyB node should copy compound types only", 0 );
+
+ /* NoMem nodes are only allowed as memory input if the CopyB is NOT pinned.
+ This should happen RARELY, as CopyB COPIES MEMORY */
+ ASSERT_AND_RET(
+ get_irn_op(mem) == op_NoMem || get_irn_pinned(n) == op_pin_state_pinned,
+ "CopyB node with wrong memory input", 0 );
+ return 1;
+}
+
+/**
+ * verify a Bound node
+ *
+ * Checks the signature  Bound: BB x M x int x int x int --> M x X:
+ * index, lower and upper bound share one integer mode, and a NoMem
+ * memory input only occurs on an unpinned Bound.
+ *
+ * @return non-zero on success
+ */
+static int verify_node_Bound(ir_node *n, ir_graph *irg) {
+ ir_node *mem = get_Bound_mem(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(mem);
+ ir_mode *idx_mode = get_irn_mode(get_Bound_index(n));
+ ir_mode *low_mode = get_irn_mode(get_Bound_lower(n));
+ ir_mode *up_mode = get_irn_mode(get_Bound_upper(n));
+
+ /* Bound: BB x M x ref x ref --> M x X */
+ ASSERT_AND_RET(
+ mode == mode_T &&
+ mem_mode == mode_M &&
+ mode_is_int(low_mode) &&
+ idx_mode == low_mode &&
+ up_mode == low_mode,
+ "Bound node", 0 ); /* operand M x int x int x int */
+
+ /* NoMem nodes are only allowed as memory input if the Bound is NOT pinned.
+ This should happen RARELY, as Bound COPIES MEMORY */
+ ASSERT_AND_RET(
+ get_irn_op(mem) == op_NoMem || get_irn_pinned(n) == op_pin_state_pinned,
+ "Bound node with wrong memory input", 0 );
+ return 1;
+}
+
+/**
+ * Check dominance.
+ * For each usage of a node, it is checked, if the block of the
+ * node dominates the block of the usage (for phis: the predecessor
+ * block of the phi for the corresponding edge).
+ *
+ * @return non-zero on success, 0 on dominance error
+ */
+static int check_dominance_for_node(ir_node *use)
+{
+ /* This won't work for blocks and the end node */
+ if (!is_Block(use) && use != get_irg_end(current_ir_graph)) {
+ int i;
+ ir_node *bl = get_nodes_block(use);
+
+ for (i = get_irn_arity(use) - 1; i >= 0; --i) {
+ ir_node *def = get_irn_n(use, i);
+ ir_node *def_bl = get_nodes_block(def);
+ ir_node *use_bl = bl;
+
+ /* ignore dead definition blocks, will be removed */
+ if (is_Block_dead(def_bl) || get_Block_dom_depth(def_bl) == -1)
+ continue;
+
+ if (is_Phi(use))
+ use_bl = get_Block_cfgpred_block(bl, i);
+
+ /* ignore dead use blocks, will be removed */
+ if (is_Block_dead(use_bl) || get_Block_dom_depth(use_bl) == -1)
+ continue;