- fprintf(stderr, "\nFIRM: irn_vrfy_irg() Phi node %ld has mode %s different from predeccessor node %ld mode %s\n",
- get_irn_node_nr(phi), get_mode_name_ex(get_irn_mode(phi)),
- get_irn_node_nr(pred), get_mode_name_ex(get_irn_mode(pred)));
-}
-
-/**
- * verify the Proj number
- */
-static int
-vrfy_Proj_proj(ir_node *p, ir_graph *irg) {
- ir_node *pred;
- ir_mode *mode;
- int proj;
-
- pred = skip_Id(get_Proj_pred(p));
- ASSERT_AND_RET(get_irn_mode(pred) == mode_T, "mode of a 'projed' node is not Tuple", 0);
- mode = get_irn_mode(p);
- proj = get_Proj_proj(p);
-
- switch (get_irn_opcode(pred)) {
- case iro_Start:
- ASSERT_AND_RET_DBG(
- (
- (proj == pn_Start_X_initial_exec && mode == mode_X) ||
- (proj == pn_Start_M && mode == mode_M) ||
- (proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
- (proj == pn_Start_P_globals && mode_is_reference(mode)) ||
- (proj == pn_Start_T_args && mode == mode_T) ||
- (proj == pn_Start_P_value_arg_base && mode_is_reference(mode)) ||
- (proj == pn_Start_P_value_arg_base && mode == mode_T) /* FIXME: only one of those */
- ),
- "wrong Proj from Start", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Cond:
- ASSERT_AND_RET_DBG(
- (
- (proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) || /* compare */
- (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) /* switch */
- ),
- "wrong Proj from Cond", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Raise:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Raise_X && mode == mode_X) || (proj == pn_Raise_M && mode == mode_M)),
- "wrong Proj from Raise", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_InstOf:
- ASSERT_AND_RET_DBG(
- (proj >= 0 && mode == mode_X),
- "wrong Proj from InstOf", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Call:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Call_M_regular && mode == mode_M) ||
- (proj == pn_Call_X_except && mode == mode_X) ||
- (proj == pn_Call_T_result && mode == mode_T) ||
- (proj == pn_Call_M_except && mode == mode_M) ||
- (proj == pn_Call_P_value_res_base && mode == mode_P)),
- "wrong Proj from Call", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_FuncCall:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Call_M_regular && mode == mode_M) ||
- (proj == pn_Call_X_except && mode == mode_X) ||
- (proj == pn_Call_T_result && mode == mode_T) ||
- (proj == pn_Call_M_except && mode == mode_M) ||
- (proj == pn_Call_P_value_res_base && mode == mode_P)),
- "wrong Proj from FuncCall", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Quot:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Quot_M && mode == mode_M) ||
- (proj == pn_Quot_X_except && mode == mode_X) ||
- (proj == pn_Quot_res && mode_is_float(mode))),
- "wrong Proj from Quot", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_DivMod:
- ASSERT_AND_RET_DBG(
- ((proj == pn_DivMod_M && mode == mode_M) ||
- (proj == pn_DivMod_X_except && mode == mode_X) ||
- (proj == pn_DivMod_res_div && mode_is_int(mode)) ||
- (proj == pn_DivMod_res_mod && mode_is_int(mode))),
- "wrong Proj from DivMod", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Div:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Div_M && mode == mode_M) ||
- (proj == pn_Div_X_except && mode == mode_X) ||
- (proj == pn_Div_res && mode_is_int(mode))),
- "wrong Proj from Div or Mod", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Mod:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Mod_M && mode == mode_M) ||
- (proj == pn_Mod_X_except && mode == mode_X) ||
- (proj == pn_Mod_res && mode_is_int(mode))),
- "wrong Proj from Div or Mod", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Cmp:
- ASSERT_AND_RET_DBG(
- (proj >= 0 && proj <= 15 && mode == mode_b),
- "wrong Proj from Cmp", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Load:
- if (proj == pn_Load_res) {
- ir_node *ptr = get_Load_ptr(pred);
- entity *ent = NULL;
- if (get_irn_op(ptr) == op_Sel) {
- ent = get_Sel_entity(ptr);
- } /*
- We may not test this, after lowering and optimization the Const can
- have an unexpected type.
- else if ((get_irn_op(ptr) == op_Const) &&
- tarval_is_entity(get_Const_tarval(ptr))) {
- ent = get_tarval_entity(get_Const_tarval(ptr));
- } */
- if (ent) {
- ASSERT_AND_RET_DBG(
- (mode == get_type_mode(get_entity_type(ent))),
- "wrong data Proj from Load, entity type_mode failed", 0,
- show_proj_failure_ent(p, ent);
- );
- }
- else {
- ASSERT_AND_RET_DBG(
- mode_is_data(mode),
- "wrong data Proj from Load", 0,
- show_proj_failure(p);
- );
- }
- } else {
- ASSERT_AND_RET_DBG(
- ((proj == pn_Load_M && mode == mode_M) ||
- (proj == pn_Load_X_except && mode == mode_X)),
- "wrong Proj from Load", 0,
- show_proj_failure(p);
- );
- }
- break;
-
- case iro_Store:
- ASSERT_AND_RET_DBG(
- ((proj == pn_Store_M && mode == mode_M) ||
- (proj == pn_Store_X_except && mode == mode_X)),
- "wrong Proj from Store", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Alloc:
- ASSERT_AND_RET_DBG(
- (
- (proj == pn_Alloc_M && mode == mode_M) ||
- (proj == pn_Alloc_X_except /* && mode == mode_X*/) ||
- (proj == pn_Alloc_res && mode_is_reference(mode))
- ),
- "wrong Proj from Alloc", 0,
- show_proj_failure(p);
- );
- break;
-
- case iro_Proj:
- {
- type *mt; /* A method type */
- long nr = get_Proj_proj(pred);
-
- pred = skip_Id(get_Proj_pred(pred));
- ASSERT_AND_RET((get_irn_mode(pred) == mode_T), "Proj from something not a tuple", 0);
- switch (get_irn_opcode(pred))
- {
- case iro_Start:
- mt = get_entity_type(get_irg_entity(irg));
-
- if (nr == pn_Start_T_args) {
- ASSERT_AND_RET(
- (proj >= 0 && mode_is_data(mode)),
- "wrong Proj from Proj from Start", 0);
- ASSERT_AND_RET(
- (proj < get_method_n_params(mt)),
- "More Projs for args than args in type", 0
- );
- if ((mode_is_reference(mode)) && is_compound_type(get_method_param_type(mt, proj)))
- /* value argument */ break;
-
- ASSERT_AND_RET(
- (mode == get_type_mode(get_method_param_type(mt, proj))),
- "Mode of Proj from Start doesn't match mode of param type.", 0);
- }
- else if (nr == pn_Start_P_value_arg_base) {
- ASSERT_AND_RET(
- (proj >= 0 && mode_is_reference(mode)),
- "wrong Proj from Proj from Start", 0
- );
- ASSERT_AND_RET(
- (proj < get_method_n_params(mt)),
- "More Projs for args than args in type", 0
- );
- }
- break;
-
- case iro_Call:
- {
- ASSERT_AND_RET(
- (proj >= 0 && mode_is_data(mode)),
- "wrong Proj from Proj from Call", 0);
- mt = get_Call_type(pred);
- ASSERT_AND_RET(
- (proj < get_method_n_ress(mt)),
- "More Projs for results than results in type.", 0);
- if ((mode_is_reference(mode)) && is_compound_type(get_method_res_type(mt, proj)))
- /* value result */ break;
-
- ASSERT_AND_RET(
- (mode == get_type_mode(get_method_res_type(mt, proj))),
- "Mode of Proj from Call doesn't match mode of result type.", 0);
- }
- break;
-
- case iro_FuncCall:
- {
- ASSERT_AND_RET(
- (proj >= 0 && mode_is_data(mode)),
- "wrong Proj from Proj from FuncCall", 0);
- mt = get_FuncCall_type(pred);
- ASSERT_AND_RET(
- (proj < get_method_n_ress(mt)),
- "More Projs for results than results in type.", 0);
- if ((mode_is_reference(mode)) && is_compound_type(get_method_res_type(mt, proj)))
- /* value result */ break;
-
- ASSERT_AND_RET(
- (mode == get_type_mode(get_method_res_type(mt, proj))),
- "Mode of Proj from FuncCall doesn't match mode of result type.", 0);
- }
- break;
-
- case iro_Tuple:
- /* We don't test */
- break;
-
- default:
- ASSERT_AND_RET(0, "Unknown opcode", 0);
- }
- break;
-
- }
- case iro_Tuple:
- /* We don't test */
- break;
-
- case iro_CallBegin:
- break;
-
- case iro_EndReg:
- break;
-
- case iro_EndExcept:
- break;
-
- default:
- ASSERT_AND_RET(0, "Unknown opcode", 0);
- }
-
- /* all went ok */
- return 1;
+ (void) pos;
+ show_entity_failure(phi);
+ fprintf(stderr, " Phi node %ld has mode %s different from predeccessor node %ld mode %s\n",
+ get_irn_node_nr(phi), get_mode_name_ex(get_irn_mode(phi)),
+ get_irn_node_nr(pred), get_mode_name_ex(get_irn_mode(pred)));
+}
+
+/**
+ * Show Phi inputs
+ */
+static void show_phi_inputs(ir_node *phi, ir_node *block)
+{
+ show_entity_failure(phi);
+ fprintf(stderr, " Phi node %ld has %d inputs, its Block %ld has %d\n",
+ get_irn_node_nr(phi), get_irn_arity(phi),
+ get_irn_node_nr(block), get_irn_arity(block));
+}
+
+#endif /* #ifndef NDEBUG */
+
+/**
+ * If the address is Sel or SymConst, return the entity.
+ *
+ * @param ptr the node representing the address
+ */
+static ir_entity *get_ptr_entity(ir_node *ptr)
+{
+ if (is_Sel(ptr)) {
+ return get_Sel_entity(ptr);
+ } else if (is_SymConst_addr_ent(ptr)) {
+ return get_SymConst_entity(ptr);
+ }
+ return NULL;
+}
+
+/**
+ * verify a Proj(Start) node
+ */
+static int verify_node_Proj_Start(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+ (void) n;
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Start_X_initial_exec && mode == mode_X) ||
+ (proj == pn_Start_M && mode == mode_M) ||
+ (proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
+ (proj == pn_Start_P_tls && mode_is_reference(mode)) ||
+ (proj == pn_Start_T_args && mode == mode_T)
+ ),
+ "wrong Proj from Start", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj(Cond) node
+ */
+static int verify_node_Proj_Cond(ir_node *pred, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) || /* compare */
+ (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) || /* switch */
+ is_Bad(get_Cond_selector(pred)) /* rare */
+ ),
+ "wrong Proj from Cond", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj(Raise) node
+ */
+static int verify_node_Proj_Raise(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+ (void) n;
+
+ ASSERT_AND_RET_DBG(
+ ((proj == pn_Raise_X && mode == mode_X) || (proj == pn_Raise_M && mode == mode_M)),
+ "wrong Proj from Raise", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj(InstOf) node
+ */
+static int verify_node_Proj_InstOf(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+ (void) n;
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_InstOf_M && mode == mode_M) ||
+ (proj == pn_InstOf_X_regular && mode == mode_X) ||
+ (proj == pn_InstOf_X_except && mode == mode_X) ||
+ (proj == pn_InstOf_res && mode_is_reference(mode))
+ ),
+ "wrong Proj from InstOf", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj(Call) node
+ */
+static int verify_node_Proj_Call(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Call_M && mode == mode_M) ||
+ (proj == pn_Call_X_regular && mode == mode_X) ||
+ (proj == pn_Call_X_except && mode == mode_X) ||
+ (proj == pn_Call_T_result && mode == mode_T) ||
+ (proj == pn_Call_P_value_res_base && mode_is_reference(mode))
+ ),
+ "wrong Proj from Call", 0,
+ show_proj_failure(p);
+ );
+ /* if we have exception flow, we must have a real Memory input */
+ if (proj == pn_Call_X_regular)
+ ASSERT_AND_RET(
+ !is_NoMem(get_Call_mem(n)),
+ "Regular Proj from FunctionCall", 0);
+ else if (proj == pn_Call_X_except)
+ ASSERT_AND_RET(
+ !is_NoMem(get_Call_mem(n)),
+ "Exception Proj from FunctionCall", 0);
+ else if (proj == pn_Call_M)
+ ASSERT_AND_RET(
+ (!is_NoMem(get_Call_mem(n)) || 1),
+ "Memory Proj from FunctionCall", 0);
+ return 1;
+}
+
+/**
+ * verify a Proj(Quot) node
+ */
+static int verify_node_Proj_Quot(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Quot_M && mode == mode_M) ||
+ (proj == pn_Quot_X_regular && mode == mode_X) ||
+ (proj == pn_Quot_X_except && mode == mode_X) ||
+ (proj == pn_Quot_res && mode_is_float(mode) && mode == get_Quot_resmode(n))
+ ),
+ "wrong Proj from Quot", 0,
+ show_proj_failure(p);
+ );
+ if (proj == pn_Quot_X_regular)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Regular Proj from unpinned Quot", 0);
+ else if (proj == pn_Quot_X_except)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Exception Proj from unpinned Quot", 0);
+ else if (proj == pn_Quot_M)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Memory Proj from unpinned Quot", 0);
+ return 1;
+}
+
+/**
+ * verify a Proj(DivMod) node
+ */
+static int verify_node_Proj_DivMod(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_DivMod_M && mode == mode_M) ||
+ (proj == pn_DivMod_X_regular && mode == mode_X) ||
+ (proj == pn_DivMod_X_except && mode == mode_X) ||
+ (proj == pn_DivMod_res_div && mode_is_int(mode) && mode == get_DivMod_resmode(n)) ||
+ (proj == pn_DivMod_res_mod && mode_is_int(mode) && mode == get_DivMod_resmode(n))
+ ),
+ "wrong Proj from DivMod", 0,
+ show_proj_failure(p);
+ );
+ if (proj == pn_DivMod_X_regular)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Regular Proj from unpinned DivMod", 0);
+ else if (proj == pn_DivMod_X_except)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Exception Proj from unpinned DivMod", 0);
+ else if (proj == pn_DivMod_M)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Memory Proj from unpinned DivMod", 0);
+ return 1;
+}
+
+/**
+ * verify a Proj(Div) node
+ */
+static int verify_node_Proj_Div(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Div_M && mode == mode_M) ||
+ (proj == pn_Div_X_regular && mode == mode_X) ||
+ (proj == pn_Div_X_except && mode == mode_X) ||
+ (proj == pn_Div_res && mode_is_int(mode) && mode == get_Div_resmode(n))
+ ),
+ "wrong Proj from Div", 0,
+ show_proj_failure(p);
+ );
+ if (proj == pn_Div_X_regular)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Regular Proj from unpinned Div", 0);
+ else if (proj == pn_Div_X_except)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Exception Proj from unpinned Div", 0);
+ else if (proj == pn_Div_M)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Memory Proj from unpinned Div", 0);
+ return 1;
+}
+
+/**
+ * verify a Proj(Mod) node.
+ *
+ * Checks the allowed Proj-number/mode pairs for a Mod node and that
+ * control-flow and memory Projs only leave a pinned Mod.
+ */
+static int verify_node_Proj_Mod(ir_node *n, ir_node *p)
+{
+	ir_mode *mode = get_irn_mode(p);
+	long proj = get_Proj_proj(p);
+
+	ASSERT_AND_RET_DBG(
+		(
+			(proj == pn_Mod_M && mode == mode_M) ||
+			(proj == pn_Mod_X_regular && mode == mode_X) ||
+			(proj == pn_Mod_X_except && mode == mode_X) ||
+			(proj == pn_Mod_res && mode_is_int(mode) && mode == get_Mod_resmode(n))
+		),
+		"wrong Proj from Mod", 0,
+		show_proj_failure(p);
+	);
+	if (proj == pn_Mod_X_regular)
+		ASSERT_AND_RET(
+			get_irn_pinned(n) == op_pin_state_pinned,
+			"Regular Proj from unpinned Mod", 0);
+	else if (proj == pn_Mod_X_except)
+		ASSERT_AND_RET(
+			get_irn_pinned(n) == op_pin_state_pinned,
+			"Exception Proj from unpinned Mod", 0);
+	else if (proj == pn_Mod_M)
+		ASSERT_AND_RET(
+			get_irn_pinned(n) == op_pin_state_pinned,
+			/* fixed copy-paste error: message previously said "Div" */
+			"Memory Proj from unpinned Mod", 0);
+	return 1;
+}
+
+/**
+ * verify a Proj(Cmp) node.
+ *
+ * The Proj number encodes a pn_Cmp relation (0..15) and must carry mode_b;
+ * the unordered bit (pn_Cmp_Uo) is only permitted when the compared
+ * operands are floating point.
+ */
+static int verify_node_Proj_Cmp(ir_node *n, ir_node *p)
+{
+	ir_mode *mode = get_irn_mode(p);
+	long proj = get_Proj_proj(p);
+
+	/* note: n IS used below (get_Cmp_left), so no (void)n suppression */
+	ASSERT_AND_RET_DBG(
+		(proj >= 0 && proj <= 15 && mode == mode_b),
+		"wrong Proj from Cmp", 0,
+		show_proj_failure(p);
+	);
+	ASSERT_AND_RET_DBG(
+		(mode_is_float(get_irn_mode(get_Cmp_left(n))) || !(proj & pn_Cmp_Uo)),
+		"unordered Proj for non-float Cmp", 0,
+		show_proj_failure(p);
+	);
+	return 1;
+}
+
+/**
+ * verify a Proj(Load) node
+ */
+static int verify_node_Proj_Load(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ if (proj == pn_Load_res) {
+ ir_node *ptr = get_Load_ptr(n);
+ ir_entity *ent = get_ptr_entity(ptr);
+
+ if (vrfy_entities && ent && get_irg_phase_state(current_ir_graph) == phase_high) {
+ /* do NOT check this for lowered phases, see comment on Store */
+ ASSERT_AND_RET_DBG(
+ (mode == get_type_mode(get_entity_type(ent))),
+ "wrong data Proj from Load, entity type_mode failed", 0,
+ show_proj_failure_ent(p, ent);
+ );
+ }
+ else {
+ ASSERT_AND_RET_DBG(
+ mode_is_data(mode) && mode == get_Load_mode(n),
+ "wrong data Proj from Load", 0,
+ show_proj_failure(p);
+ );
+ }
+ }
+ else {
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Load_M && mode == mode_M) ||
+ (proj == pn_Load_X_regular && mode == mode_X) ||
+ (proj == pn_Load_X_except && mode == mode_X)
+ ),
+ "wrong Proj from Load", 0,
+ show_proj_failure(p);
+ );
+ }
+ if (proj == pn_Load_X_regular) {
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Regular Proj from unpinned Load", 0);
+ } else if (proj == pn_Load_X_except) {
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Exception Proj from unpinned Load", 0);
+ }
+ return 1;
+}
+
+/**
+ * verify a Proj(Store) node
+ */
+static int verify_node_Proj_Store(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Store_M && mode == mode_M) ||
+ (proj == pn_Store_X_regular && mode == mode_X) ||
+ (proj == pn_Store_X_except && mode == mode_X)
+ ),
+ "wrong Proj from Store", 0,
+ show_proj_failure(p);
+ );
+ if (proj == pn_Store_X_regular) {
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Regular Proj from unpinned Store", 0);
+ } else if (proj == pn_Store_X_except) {
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Exception Proj from unpinned Store", 0);
+ }
+ return 1;
+}
+
+/**
+ * verify a Proj(Alloc) node
+ */
+static int verify_node_Proj_Alloc(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+ (void) n;
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Alloc_M && mode == mode_M) ||
+ (proj == pn_Alloc_X_regular && mode == mode_X) ||
+ (proj == pn_Alloc_X_except && mode == mode_X) ||
+ (proj == pn_Alloc_res && mode_is_reference(mode))
+ ),
+ "wrong Proj from Alloc", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj(Proj) node
+ */
+static int verify_node_Proj_Proj(ir_node *pred, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+ long nr = get_Proj_proj(pred);
+ ir_type *mt; /* A method type */
+
+ pred = skip_Id(get_Proj_pred(pred));
+ ASSERT_AND_RET((get_irn_mode(pred) == mode_T), "Proj from something not a tuple", 0);
+
+ switch (get_irn_opcode(pred)) {
+ case iro_Start:
+ mt = get_entity_type(get_irg_entity(get_irn_irg(pred)));
+
+ if (nr == pn_Start_T_args) {
+ ASSERT_AND_RET(
+ (proj >= 0 && mode_is_datab(mode)),
+ "wrong Proj from Proj from Start", 0);
+ ASSERT_AND_RET(
+ (proj < get_method_n_params(mt)),
+ "More Projs for args than args in type", 0
+ );
+ if ((mode_is_reference(mode)) && is_compound_type(get_method_param_type(mt, proj)))
+ /* value argument */ break;
+
+ if (get_irg_phase_state(get_irn_irg(pred)) != phase_backend) {
+ ASSERT_AND_RET_DBG(
+ (mode == get_type_mode(get_method_param_type(mt, proj))),
+ "Mode of Proj from Start doesn't match mode of param type.", 0,
+ show_proj_mode_failure(p, get_method_param_type(mt, proj));
+ );
+ }
+ }
+ break;
+
+ case iro_Call:
+ {
+ ASSERT_AND_RET(
+ (proj >= 0 && mode_is_datab(mode)),
+ "wrong Proj from Proj from Call", 0);
+ mt = get_Call_type(pred);
+ ASSERT_AND_RET(
+ (proj < get_method_n_ress(mt)),
+ "More Projs for results than results in type.", 0);
+ if ((mode_is_reference(mode)) && is_compound_type(get_method_res_type(mt, proj)))
+ /* value result */ break;
+
+ ASSERT_AND_RET(
+ (mode == get_type_mode(get_method_res_type(mt, proj))),
+ "Mode of Proj from Call doesn't match mode of result type.", 0);
+ }
+ break;
+
+ case iro_Tuple:
+ /* We don't test */
+ break;
+
+ case iro_Bad:
+ /* hmm, optimization did not remove it */
+ break;
+
+ default:
+ /* ASSERT_AND_RET(0, "Unknown opcode", 0); */
+ break;
+ }
+ return 1;
+}
+
+/**
+ * verify a Proj(Tuple) node
+ */
+static int verify_node_Proj_Tuple(ir_node *n, ir_node *p)
+{
+ (void) n;
+ (void) p;
+ /* We don't test */
+ return 1;
+}
+
+/**
+ * verify a Proj(CallBegin) node
+ */
+static int verify_node_Proj_CallBegin(ir_node *n, ir_node *p)
+{
+ (void) n;
+ (void) p;
+ return 1;
+}
+
+/**
+ * verify a Proj(EndReg) node
+ */
+static int verify_node_Proj_EndReg(ir_node *n, ir_node *p)
+{
+ (void) n;
+ (void) p;
+#ifdef INTERPROCEDURAL_VIEW
+ ASSERT_AND_RET(
+ (get_irp_ip_view_state() != ip_view_no),
+ "EndReg may only appear if ip view is constructed.", 0);
+#endif
+ return 1;
+}
+
+/**
+ * verify a Proj(EndExcept) node
+ */
+static int verify_node_Proj_EndExcept(ir_node *n, ir_node *p)
+{
+ (void) n;
+ (void) p;
+#ifdef INTERPROCEDURAL_VIEW
+ ASSERT_AND_RET(
+ (get_irp_ip_view_state() != ip_view_no),
+ "EndExcept may only appear if ip view is constructed.", 0);
+#endif
+ return 1;
+}
+
+/**
+ * verify a Proj(CopyB) node
+ */
+static int verify_node_Proj_CopyB(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_CopyB_M && mode == mode_M) ||
+ (proj == pn_CopyB_X_regular && mode == mode_X) ||
+ (proj == pn_CopyB_X_except && mode == mode_X)
+ ),
+ "wrong Proj from CopyB", 0,
+ show_proj_failure(p);
+ );
+ if (proj == pn_CopyB_X_regular)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Regular Proj from unpinned CopyB", 0);
+ else if (proj == pn_CopyB_X_except)
+ ASSERT_AND_RET(
+ get_irn_pinned(n) == op_pin_state_pinned,
+ "Exception Proj from unpinned CopyB", 0);
+ return 1;
+}
+
+/**
+ * verify a Proj(Bound) node
+ */
+static int verify_node_Proj_Bound(ir_node *n, ir_node *p)
+{
+ ir_mode *mode = get_irn_mode(p);
+ long proj = get_Proj_proj(p);
+
+ /* ignore Bound checks of Bad */
+ if (is_Bad(get_Bound_index(n)))
+ return 1;
+ ASSERT_AND_RET_DBG(
+ (
+ (proj == pn_Bound_M && mode == mode_M) ||
+ (proj == pn_Bound_X_regular && mode == mode_X) ||
+ (proj == pn_Bound_X_except && mode == mode_X) ||
+ (proj == pn_Bound_res && mode == get_irn_mode(get_Bound_index(n)))
+ ),
+ "wrong Proj from Bound", 0,
+ show_proj_failure(p);
+ );
+ return 1;
+}
+
+/**
+ * verify a Proj node
+ */
+static int verify_node_Proj(ir_node *p, ir_graph *irg)
+{
+ ir_node *pred;
+ ir_op *op;
+
+ pred = skip_Id(get_Proj_pred(p));
+ ASSERT_AND_RET(get_irn_mode(pred) == mode_T, "mode of a 'projed' node is not Tuple", 0);
+ ASSERT_AND_RET(get_irg_pinned(irg) == op_pin_state_floats || get_nodes_block(pred) == get_nodes_block(p), "Proj must be in same block as its predecessor", 0);
+
+ op = get_irn_op(pred);
+
+ if (op->ops.verify_proj_node)
+ return op->ops.verify_proj_node(pred, p);
+
+ /* all went ok */
+ return 1;
+}
+
+/**
+ * verify a Block node
+ */
+static int verify_node_Block(ir_node *n, ir_graph *irg)
+{
+ int i;
+ ir_node *mb = get_Block_MacroBlock(n);
+
+ ASSERT_AND_RET(is_Block(mb) || is_Bad(mb), "Block node with wrong MacroBlock", 0);
+
+ if (is_Block(mb) && mb != n) {
+ ir_node *pred;
+
+ /* Blocks with more than one predecessor must be header blocks */
+ ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
+ if (get_irg_phase_state(irg) != phase_backend) {
+ pred = get_Block_cfgpred(n, 0);
+ if (is_Proj(pred)) {
+ /* the predecessor MUST be a regular Proj */
+ ir_node *frag_op = get_Proj_pred(pred);
+ ASSERT_AND_RET(
+ is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
+ "partBlock with non-regular predecessor", 0);
+ } else {
+ /* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
+ of fragile nodes during construction. It does not violate our assumption of dominance
+ so let it. */
+ ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
+ "partBlock with non-regular predecessor", 0);
+ }
+ } else {
+ /* relax in backend: Bound nodes are probably lowered into conditional jumps */
+ }
+ }
+
+ for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(n, i);
+ ASSERT_AND_RET(
+ is_Bad(pred) || (get_irn_mode(pred) == mode_X),
+ "Block node must have a mode_X predecessor", 0);
+ }
+
+ if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
+ /* End block may only have Return, Raise or fragile ops as preds. */
+ for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
+ ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
+ if (is_Proj(pred) || is_Tuple(pred))
+ break; /* We can not test properly. How many tuples are there? */
+ ASSERT_AND_RET(
+ (
+ is_Return(pred) ||
+ is_Bad(pred) ||
+ is_Raise(pred) ||
+ is_fragile_op(pred)
+ ),
+ "End Block node", 0);
+ }
+ /* irg attr must == graph we are in. */
+ if (! get_interprocedural_view()) {
+ ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
+ }
+ return 1;
+}
+
+/**
+ * verify a Start node
+ */
+static int verify_node_Start(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Start: BB --> X x M x ref x data1 x ... x datan x ref */
+ mymode == mode_T, "Start node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Jmp node
+ */
+static int verify_node_Jmp(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Jmp: BB --> X */
+ mymode == mode_X, "Jmp node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify an IJmp node
+ */
+static int verify_node_IJmp(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_IJmp_target(n));
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* IJmp: BB x ref --> X */
+ mymode == mode_X && mode_is_reference(op1mode), "IJmp node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Break node
+ */
+static int verify_node_Break(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+#ifdef INTERPROCEDURAL_VIEW
+ ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
+ "Break may only appear if ip view is constructed.", 0);
+#endif
+ ASSERT_AND_RET(
+ /* Break: BB --> X */
+ mymode == mode_X, "Break node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Cond node
+ */
+static int verify_node_Cond(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Cond_selector(n));
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Cond: BB x b --> X x X */
+ (op1mode == mode_b ||
+ /* Cond: BB x int --> X^n */
+ mode_is_int(op1mode) ), "Cond node", 0
+ );
+ ASSERT_AND_RET(mymode == mode_T, "Cond mode is not a tuple", 0);
+
+ return 1;
+}
+
+/**
+ * verify a Return node
+ */
+static int verify_node_Return(ir_node *n, ir_graph *irg)
+{
+ int i;
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *mem_mode = get_irn_mode(get_Return_mem(n));
+ ir_type *mt;
+
+ /* Return: BB x M x data1 x ... x datan --> X */
+
+ ASSERT_AND_RET( mem_mode == mode_M, "Return node", 0 ); /* operand M */
+
+ for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Return_res(n, i))), "Return node", 0 ); /* operand datai */
+ }
+ ASSERT_AND_RET( mymode == mode_X, "Result X", 0 ); /* result X */
+ /* Compare returned results with result types of method type */
+ mt = get_entity_type(get_irg_entity(irg));
+ ASSERT_AND_RET_DBG( get_Return_n_ress(n) == get_method_n_ress(mt),
+ "Number of results for Return doesn't match number of results in type.", 0,
+ show_return_nres(irg, n, mt););
+ for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
+ ir_type *res_type = get_method_res_type(mt, i);
+
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(res_type)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ } else {
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Return_res(n, i))),
+ "Mode of result for Return doesn't match mode of result type.", 0,
+ show_return_modes(irg, n, mt, i);
+ );
+ }
+ }
+ }
+ return 1;
+}
+
+/**
+ * verify a Raise node
+ */
+static int verify_node_Raise(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Raise_mem(n));
+ ir_mode *op2mode = get_irn_mode(get_Raise_exo_ptr(n));
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Raise: BB x M x ref --> X x M */
+ op1mode == mode_M && mode_is_reference(op2mode) &&
+ mymode == mode_T, "Raise node", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a Const node
+ */
+static int verify_node_Const(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* Const: BB --> data */
+ (mode_is_data(mymode) ||
+ mymode == mode_b) /* we want boolean constants for static evaluation */
+ ,"Const node", 0 /* of Cmp. */
+ );
+ ASSERT_AND_RET(
+ /* the modes of the constant and the tarval must match */
+ mymode == get_tarval_mode(get_Const_tarval(n)),
+ "Const node, tarval and node mode mismatch", 0
+ );
+ return 1;
+}
+
+/**
+ * verify a SymConst node
+ */
+static int verify_node_SymConst(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ (void) irg;
+
+ ASSERT_AND_RET(
+ /* SymConst: BB --> int*/
+ (mode_is_int(mymode) ||
+ /* SymConst: BB --> ref */
+ mode_is_reference(mymode))
+ ,"SymConst node", 0);
+ return 1;
+}
+
+/**
+ * verify a Sel node
+ */
+static int verify_node_Sel(ir_node *n, ir_graph *irg)
+{
+ int i;
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Sel_mem(n));
+ ir_mode *op2mode = get_irn_mode(get_Sel_ptr(n));
+ ir_entity *ent;
+ (void) irg;
+
+ ASSERT_AND_RET_DBG(
+ /* Sel: BB x M x ref x int^n --> ref */
+ (op1mode == mode_M && op2mode == mymode && mode_is_reference(mymode)),
+ "Sel node", 0, show_node_failure(n)
+ );
+
+ for (i = get_Sel_n_indexs(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET_DBG(mode_is_int(get_irn_mode(get_Sel_index(n, i))), "Sel node", 0, show_node_failure(n));
+ }
+ ent = get_Sel_entity(n);
+ ASSERT_AND_RET_DBG(ent, "Sel node with empty entity", 0, show_node_failure(n));
+ return 1;
+}
+
+/**
+ * verify an InstOf node
+ */
+static int verify_node_InstOf(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_InstOf_obj(n));
+ (void) irg;
+
+ ASSERT_AND_RET(mode_T == mymode, "mode of Instof is not a tuple", 0);
+ ASSERT_AND_RET(mode_is_data(op1mode), "Instof not on data", 0);
+ return 1;
+}
+
+/**
+ * Check if the pinned state is right.
+ */
+static int verify_right_pinned(ir_node *n)
+{
+ ir_node *mem;
+
+ if (get_irn_pinned(n) == op_pin_state_pinned)
+ return 1;
+ mem = get_Call_mem(n);
+
+ /* if it's not pinned, its memory predecessor must be NoMem or Pin */
+ if (is_NoMem(mem) || is_Pin(mem))
+ return 1;
+ return 0;
+}
+
+/**
+ * verify a Call node
+ */
+static int verify_node_Call(ir_node *n, ir_graph *irg)
+{
+ ir_mode *mymode = get_irn_mode(n);
+ ir_mode *op1mode = get_irn_mode(get_Call_mem(n));
+ ir_mode *op2mode = get_irn_mode(get_Call_ptr(n));
+ ir_type *mt;
+ int i;
+ (void) irg;
+
+ /* Call: BB x M x ref x data1 x ... x datan
+ --> M x datan+1 x ... x data n+m */
+ ASSERT_AND_RET( op1mode == mode_M && mode_is_reference(op2mode), "Call node", 0 ); /* operand M x ref */
+
+ /* NoMem nodes are only allowed as memory input if the Call is NOT pinned */
+ ASSERT_AND_RET(verify_right_pinned(n),"Call node with wrong memory input", 0 );
+
+ mt = get_Call_type(n);
+ if (get_unknown_type() == mt) {
+ return 1;
+ }
+
+ for (i = get_Call_n_params(n) - 1; i >= 0; --i) {
+ ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Call_param(n, i))), "Call node", 0 ); /* operand datai */
+ }
+
+ ASSERT_AND_RET( mymode == mode_T, "Call result not a tuple", 0 ); /* result T */
+ /* Compare arguments of node with those of type */
+
+ if (get_method_variadicity(mt) == variadicity_variadic) {
+ ASSERT_AND_RET_DBG(
+ get_Call_n_params(n) >= get_method_n_params(mt),
+ "Number of args for Call doesn't match number of args in variadic type.",
+ 0,
+ ir_fprintf(stderr, "Call %+F has %d params, type %d\n",
+ n, get_Call_n_params(n), get_method_n_params(mt));
+ );
+ } else {
+ ASSERT_AND_RET_DBG(
+ get_Call_n_params(n) == get_method_n_params(mt),
+ "Number of args for Call doesn't match number of args in non variadic type.",
+ 0,
+ ir_fprintf(stderr, "Call %+F has %d params, type %d\n",
+ n, get_Call_n_params(n), get_method_n_params(mt));
+ );
+ }
+
+ for (i = 0; i < get_method_n_params(mt); i++) {
+ ir_type *t = get_method_param_type(mt, i);
+
+ if (get_irg_phase_state(irg) != phase_backend) {
+ if (is_atomic_type(t)) {
+ ASSERT_AND_RET_DBG(
+ get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ } else {
+ /* call with a compound type, mode must be reference */
+ ASSERT_AND_RET_DBG(
+ mode_is_reference(get_irn_mode(get_Call_param(n, i))),
+ "Mode of arg for Call doesn't match mode of arg type.", 0,
+ show_call_param(n, mt);
+ );
+ }
+ }
+ }
+
+#if 0
+ if (Call_has_callees(n)) {
+ for (i = 0; i < get_Call_n_callees(n); i++) {
+ ASSERT_AND_RET(is_entity(get_Call_callee(n, i)), "callee array must contain entities.", 0);
+ }
+ }
+#endif
+ return 1;