2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Check irnodes for correctness.
23 * @author Christian Schaefer, Goetz Lindenmaier, Till Riedel, Michael Beck
30 #include "irgraph_t.h"
41 /** if this flag is set, verify entity types in Load & Store nodes */
42 static int vrfy_entities = 0;
/* Holds the most recent verification failure message; presumably written by
 * the ASSERT_AND_RET* machinery elsewhere in this file -- TODO confirm
 * (the macro definitions are not visible in this excerpt). */
44 const char *firm_vrfy_failure_msg;
46 /* enable verification of Load/Store entities */
/* Setter for the file-local vrfy_entities flag above; non-zero enables the
 * extra entity-type checks in the Load/Store Proj verifiers. */
47 void vrfy_enable_entity_tests(int enable)
49 vrfy_entities = enable;
55 * little helper for NULL modes
/* Returns the printable name of a mode. Per the comment above it exists to
 * tolerate a NULL mode; the NULL branch itself is not visible in this
 * excerpt -- presumably it returns a placeholder string. */
57 static const char *get_mode_name_ex(ir_mode *mode)
61 return get_mode_name(mode);
64 /** the last IRG, on which a verification error was found */
65 static ir_graph *last_irg_error = NULL;
68 * print the name of the entity of a verification failure
70 * @param node the node caused the failure
/* Prints a one-line banner identifying the graph/entity in which a
 * verification failure occurred. The last_irg_error check below suppresses
 * repeated banners for the same graph (the early-out statement itself is not
 * visible in this excerpt). */
72 static void show_entity_failure(ir_node *node)
74 ir_graph *irg = get_irn_irg(node);
76 if (last_irg_error == irg)
81 if (irg == get_const_code_irg()) {
82 fprintf(stderr, "\nFIRM: irn_vrfy_irg() <of CONST_CODE_IRG> failed\n");
84 ir_entity *ent = get_irg_entity(irg);
/* Entity present: print "OwnerType::entity_name"; otherwise fall back to
 * the entity-less / graph-pointer forms below. */
87 ir_type *ent_type = get_entity_owner(ent);
90 ir_fprintf(stderr, "\nFIRM: irn_vrfy_irg() %+F::%s failed\n",
91 ent_type, get_entity_name(ent));
93 fprintf(stderr, "\nFIRM: irn_vrfy_irg() <NULL>::%s failed\n", get_entity_name(ent));
96 fprintf(stderr, "\nFIRM: irn_vrfy_irg() <IRG %p> failed\n", (void *)irg);
102 * Prints a failure for a Node
/* Emits the banner plus "node <nr> <op><mode>" for a generic failure. */
104 static void show_node_failure(ir_node *n)
106 show_entity_failure(n);
107 fprintf(stderr, "  node %ld %s%s\n" ,
109 get_irn_opname(n), get_irn_modename(n)
114 * Prints a failure message for a binop
/* Describes a binary-operation mode mismatch: the node, both operands and
 * the explanatory text supplied by the caller. */
116 static void show_binop_failure(ir_node *n, const char *text)
118 ir_node *left = get_binop_left(n);
119 ir_node *right = get_binop_right(n);
121 show_entity_failure(n);
122 fprintf(stderr, "  node %ld %s%s(%s%s, %s%s) did not match (%s)\n",
124 get_irn_opname(n), get_irn_modename(n),
125 get_irn_opname(left), get_irn_modename(left),
126 get_irn_opname(right), get_irn_modename(right),
131 * Prints a failure message for an unop
/* Same as show_binop_failure but for a single-operand node. */
133 static void show_unop_failure(ir_node *n, const char *text)
135 ir_node *op = get_unop_op(n);
137 show_entity_failure(n);
138 fprintf(stderr, "  node %ld %s%s(%s%s) did not match (%s)\n",
140 get_irn_opname(n), get_irn_modename(n),
141 get_irn_opname(op), get_irn_modename(op),
146 * Prints a failure message for an op with 3 operands
/* Describes a mode mismatch for a ternary node (e.g. Div/Mod-style ops);
 * operands are fetched positionally via get_irn_n. */
148 static void show_triop_failure(ir_node *n, const char *text)
150 ir_node *op0 = get_irn_n(n, 0);
151 ir_node *op1 = get_irn_n(n, 1);
152 ir_node *op2 = get_irn_n(n, 2);
154 show_entity_failure(n);
155 fprintf(stderr, "  of node %ld %s%s(%s%s, %s%s, %s%s) did not match (%s)\n",
157 get_irn_opname(n), get_irn_modename(n),
158 get_irn_opname(op0), get_irn_modename(op0),
159 get_irn_opname(op1), get_irn_modename(op1),
160 get_irn_opname(op2), get_irn_modename(op2),
165 * Prints a failure message for a proj
/* Describes a bad Proj: its number, mode, and its predecessor node. */
167 static void show_proj_failure(ir_node *n)
169 ir_node *op = get_Proj_pred(n);
170 int proj = get_Proj_proj(n);
172 show_entity_failure(n);
173 fprintf(stderr, "  node %ld %s%s %d(%s%s) failed\n" ,
175 get_irn_opname(n), get_irn_modename(n), proj,
176 get_irn_opname(op), get_irn_modename(op));
180 * Prints a failure message for a proj from Start
/* Reports a Proj whose mode disagrees with the mode of the given type
 * (used for Proj-of-Proj(Start) argument checking). type_name is presumably
 * a file-scope buffer -- its declaration is not visible in this excerpt. */
182 static void show_proj_mode_failure(ir_node *n, ir_type *ty)
184 long proj = get_Proj_proj(n);
185 ir_mode *m = get_type_mode(ty);
187 ir_print_type(type_name, sizeof(type_name), ty);
189 show_entity_failure(n);
190 fprintf(stderr, "  Proj %ld mode %s proj %ld (type %s mode %s) failed\n" ,
195 get_mode_name_ex(m));
199 * Prints a failure message for a proj
/* Like show_proj_failure, but additionally names the entity whose type/mode
 * the Proj was checked against (used by the Load entity-type check). */
201 static void show_proj_failure_ent(ir_node *n, ir_entity *ent)
203 ir_node *op = get_Proj_pred(n);
204 int proj = get_Proj_proj(n);
205 ir_mode *m = get_type_mode(get_entity_type(ent));
207 ir_print_type(type_name, sizeof(type_name), get_entity_type(ent));
209 show_entity_failure(n);
210 fprintf(stderr, "  node %ld %s%s %d(%s%s) entity %s(type %s mode %s)failed\n" ,
212 get_irn_opname(n), get_irn_modename(n), proj,
213 get_irn_opname(op), get_irn_modename(op),
214 get_entity_name(ent), type_name,
215 get_mode_name_ex(m));
219 * Show a node and a graph
/* One-line dump of a failing node together with its graph. */
221 static void show_node_on_graph(ir_graph *irg, ir_node *n)
223 ir_fprintf(stderr, "\nFIRM: irn_vrfy_irg() of %+F, node %+F\n", irg, n);
227 * Show call parameters
/* Prints the method type's parameter modes next to the Call node's actual
 * argument modes so a mismatch can be spotted by eye. */
229 static void show_call_param(ir_node *n, ir_type *mt)
233 ir_print_type(type_name, sizeof(type_name), mt);
235 show_entity_failure(n);
236 fprintf(stderr, "  Call type-check failed: %s(", type_name);
237 for (i = 0; i < get_method_n_params(mt); ++i) {
238 fprintf(stderr, "%s ", get_mode_name_ex(get_type_mode(get_method_param_type(mt, i))));
240 fprintf(stderr, ") != CALL(");
242 for (i = 0; i < get_Call_n_params(n); ++i) {
243 fprintf(stderr, "%s ", get_mode_name_ex(get_irn_mode(get_Call_param(n, i))));
245 fprintf(stderr, ")\n");
/* Reports a Return whose i-th result mode differs from the i-th result mode
 * of the enclosing method type mt. */
251 static void show_return_modes(ir_graph *irg, ir_node *n, ir_type *mt, int i)
253 ir_entity *ent = get_irg_entity(irg);
255 show_entity_failure(n);
256 fprintf(stderr, "  Return node %ld in entity \"%s\" mode %s different from type mode %s\n",
257 get_irn_node_nr(n), get_entity_name(ent),
258 get_mode_name_ex(get_irn_mode(get_Return_res(n, i))),
259 get_mode_name_ex(get_type_mode(get_method_res_type(mt, i)))
264 * Show return number of results
/* Reports a Return whose result count differs from the method type's. */
266 static void show_return_nres(ir_graph *irg, ir_node *n, ir_type *mt)
268 ir_entity *ent = get_irg_entity(irg);
270 show_entity_failure(n);
271 fprintf(stderr, "  Return node %ld in entity \"%s\" has %d results different from type %d\n",
272 get_irn_node_nr(n), get_entity_name(ent),
273 get_Return_n_ress(n), get_method_n_ress(mt));
/* Reports a Phi whose mode differs from the mode of one predecessor.
 * NOTE(review): the pos parameter is unused in the visible lines. */
279 static void show_phi_failure(ir_node *phi, ir_node *pred, int pos)
282 show_entity_failure(phi);
283 fprintf(stderr, "  Phi node %ld has mode %s different from predeccessor node %ld mode %s\n",
284 get_irn_node_nr(phi), get_mode_name_ex(get_irn_mode(phi)),
285 get_irn_node_nr(pred), get_mode_name_ex(get_irn_mode(pred)));
/* Reports a Phi whose arity differs from the arity of its Block. */
291 static void show_phi_inputs(ir_node *phi, ir_node *block)
293 show_entity_failure(phi);
294 fprintf(stderr, "  Phi node %ld has %d inputs, its Block %ld has %d\n",
295 get_irn_node_nr(phi), get_irn_arity(phi),
296 get_irn_node_nr(block), get_irn_arity(block));
302 * If the address is Sel or SymConst, return the entity.
304 * @param ptr the node representing the address
/* Best-effort entity extraction from an address node; for any other node
 * kind the function presumably returns NULL (that branch is not visible in
 * this excerpt). */
306 static ir_entity *get_ptr_entity(ir_node *ptr)
309 return get_Sel_entity(ptr);
310 } else if (is_SymConst_addr_ent(ptr)) {
311 return get_SymConst_entity(ptr);
317 * verify a Proj(Start) node
/* Checks the allowed proj-number / mode pairs for a Proj of Start.
 * NOTE(review): the ASSERT_AND_RET_DBG framing around the condition lines
 * below is not visible in this line-sampled excerpt. */
319 static int verify_node_Proj_Start(ir_node *n, ir_node *p)
321 ir_mode *mode = get_irn_mode(p);
322 long proj = get_Proj_proj(p);
327 (proj == pn_Start_X_initial_exec && mode == mode_X) ||
328 (proj == pn_Start_M && mode == mode_M) ||
329 (proj == pn_Start_P_frame_base && mode_is_reference(mode)) ||
330 (proj == pn_Start_P_tls && mode_is_reference(mode)) ||
331 (proj == pn_Start_T_args && mode == mode_T)
333 "wrong Proj from Start", 0,
334 show_proj_failure(p);
340 * verify a Proj(Cond) node
/* A Proj of Cond must be mode_X; selector is either mode_b (compare) or an
 * int mode (switch). A Bad selector is tolerated. */
342 static int verify_node_Proj_Cond(ir_node *pred, ir_node *p)
344 ir_mode *mode = get_irn_mode(p);
345 long proj = get_Proj_proj(p);
349 (proj >= 0 && mode == mode_X && get_irn_mode(get_Cond_selector(pred)) == mode_b) ||   /* compare */
350 (mode == mode_X && mode_is_int(get_irn_mode(get_Cond_selector(pred)))) ||            /* switch */
351 is_Bad(get_Cond_selector(pred))                                                      /* rare */
353 "wrong Proj from Cond", 0,
354 show_proj_failure(p);
360 * verify a Proj(Raise) node
/* Proj of Raise: only X (control) or M (memory) projections are legal. */
362 static int verify_node_Proj_Raise(ir_node *n, ir_node *p)
364 ir_mode *mode = get_irn_mode(p);
365 long proj = get_Proj_proj(p);
369 ((proj == pn_Raise_X && mode == mode_X) || (proj == pn_Raise_M && mode == mode_M)),
370 "wrong Proj from Raise", 0,
371 show_proj_failure(p);
377 * verify a Proj(InstOf) node
/* Allowed proj-number / mode pairs for a Proj of InstOf. */
379 static int verify_node_Proj_InstOf(ir_node *n, ir_node *p)
381 ir_mode *mode = get_irn_mode(p);
382 long proj = get_Proj_proj(p);
387 (proj == pn_InstOf_M         && mode == mode_M) ||
388 (proj == pn_InstOf_X_regular && mode == mode_X) ||
389 (proj == pn_InstOf_X_except  && mode == mode_X) ||
390 (proj == pn_InstOf_res       && mode_is_reference(mode))
392 "wrong Proj from InstOf", 0,
393 show_proj_failure(p);
399 * verify a Proj(Call) node
/* Allowed proj/mode pairs for a Proj of Call; additionally, exception or
 * regular control-flow Projs require a real (non-NoMem) memory input. */
401 static int verify_node_Proj_Call(ir_node *n, ir_node *p)
403 ir_mode *mode = get_irn_mode(p);
404 long proj = get_Proj_proj(p);
408 (proj == pn_Call_M                && mode == mode_M) ||
409 (proj == pn_Call_X_regular        && mode == mode_X) ||
410 (proj == pn_Call_X_except         && mode == mode_X) ||
411 (proj == pn_Call_T_result         && mode == mode_T) ||
412 (proj == pn_Call_P_value_res_base && mode_is_reference(mode))
414 "wrong Proj from Call", 0,
415 show_proj_failure(p);
417 /* if we have exception flow, we must have a real Memory input */
418 if (proj == pn_Call_X_regular)
420 !is_NoMem(get_Call_mem(n)),
421 "Regular Proj from FunctionCall", 0);
422 else if (proj == pn_Call_X_except)
424 !is_NoMem(get_Call_mem(n)),
425 "Exception Proj from FunctionCall", 0);
430 * verify a Proj(Quot) node
/* Allowed proj/mode pairs for a Proj of Quot; M/X projections additionally
 * require the Quot itself to be pinned. */
432 static int verify_node_Proj_Quot(ir_node *n, ir_node *p)
434 ir_mode *mode = get_irn_mode(p);
435 long proj = get_Proj_proj(p);
439 (proj == pn_Quot_M         && mode == mode_M) ||
440 (proj == pn_Quot_X_regular && mode == mode_X) ||
441 (proj == pn_Quot_X_except  && mode == mode_X) ||
442 (proj == pn_Quot_res       && mode_is_float(mode) && mode == get_Quot_resmode(n))
444 "wrong Proj from Quot", 0,
445 show_proj_failure(p);
447 if (proj == pn_Quot_X_regular)
449 get_irn_pinned(n) == op_pin_state_pinned,
450 "Regular Proj from unpinned Quot", 0);
451 else if (proj == pn_Quot_X_except)
453 get_irn_pinned(n) == op_pin_state_pinned,
454 "Exception Proj from unpinned Quot", 0);
455 else if (proj == pn_Quot_M)
457 get_irn_pinned(n) == op_pin_state_pinned,
458 "Memory Proj from unpinned Quot", 0);
463 * verify a Proj(DivMod) node
/* Allowed proj/mode pairs for a Proj of DivMod (two integer results);
 * M/X projections additionally require the DivMod to be pinned. */
465 static int verify_node_Proj_DivMod(ir_node *n, ir_node *p)
467 ir_mode *mode = get_irn_mode(p);
468 long proj = get_Proj_proj(p);
472 (proj == pn_DivMod_M         && mode == mode_M) ||
473 (proj == pn_DivMod_X_regular && mode == mode_X) ||
474 (proj == pn_DivMod_X_except  && mode == mode_X) ||
475 (proj == pn_DivMod_res_div   && mode_is_int(mode) && mode == get_DivMod_resmode(n)) ||
476 (proj == pn_DivMod_res_mod   && mode_is_int(mode) && mode == get_DivMod_resmode(n))
478 "wrong Proj from DivMod", 0,
479 show_proj_failure(p);
481 if (proj == pn_DivMod_X_regular)
483 get_irn_pinned(n) == op_pin_state_pinned,
484 "Regular Proj from unpinned DivMod", 0);
485 else if (proj == pn_DivMod_X_except)
487 get_irn_pinned(n) == op_pin_state_pinned,
488 "Exception Proj from unpinned DivMod", 0);
489 else if (proj == pn_DivMod_M)
491 get_irn_pinned(n) == op_pin_state_pinned,
492 "Memory Proj from unpinned DivMod", 0);
497 * verify a Proj(Div) node
/* Allowed proj/mode pairs for a Proj of Div; M/X projections additionally
 * require the Div to be pinned. */
499 static int verify_node_Proj_Div(ir_node *n, ir_node *p)
501 ir_mode *mode = get_irn_mode(p);
502 long proj = get_Proj_proj(p);
506 (proj == pn_Div_M         && mode == mode_M) ||
507 (proj == pn_Div_X_regular && mode == mode_X) ||
508 (proj == pn_Div_X_except  && mode == mode_X) ||
509 (proj == pn_Div_res       && mode_is_int(mode) && mode == get_Div_resmode(n))
511 "wrong Proj from Div", 0,
512 show_proj_failure(p);
514 if (proj == pn_Div_X_regular)
516 get_irn_pinned(n) == op_pin_state_pinned,
517 "Regular Proj from unpinned Div", 0);
518 else if (proj == pn_Div_X_except)
520 get_irn_pinned(n) == op_pin_state_pinned,
521 "Exception Proj from unpinned Div", 0);
522 else if (proj == pn_Div_M)
524 get_irn_pinned(n) == op_pin_state_pinned,
525 "Memory Proj from unpinned Div", 0);
530 * verify a Proj(Mod) node
/* Allowed proj/mode pairs for a Proj of Mod; M/X projections additionally
 * require the Mod to be pinned.
 * FIX: the pn_Mod_M diagnostic below said "unpinned Div" -- a copy-paste
 * error from verify_node_Proj_Div (the Div/DivMod/Quot siblings each name
 * their own op). Corrected to "unpinned Mod". */
532 static int verify_node_Proj_Mod(ir_node *n, ir_node *p)
534 ir_mode *mode = get_irn_mode(p);
535 long proj = get_Proj_proj(p);
539 (proj == pn_Mod_M         && mode == mode_M) ||
540 (proj == pn_Mod_X_regular && mode == mode_X) ||
541 (proj == pn_Mod_X_except  && mode == mode_X) ||
542 (proj == pn_Mod_res       && mode_is_int(mode) && mode == get_Mod_resmode(n))
544 "wrong Proj from Mod", 0,
545 show_proj_failure(p);
547 if (proj == pn_Mod_X_regular)
549 get_irn_pinned(n) == op_pin_state_pinned,
550 "Regular Proj from unpinned Mod", 0);
551 else if (proj == pn_Mod_X_except)
553 get_irn_pinned(n) == op_pin_state_pinned,
554 "Exception Proj from unpinned Mod", 0);
555 else if (proj == pn_Mod_M)
557 get_irn_pinned(n) == op_pin_state_pinned,
558 "Memory Proj from unpinned Mod", 0);
563 * verify a Proj(Cmp) node
/* A Proj of Cmp must be mode_b with proj number in [0,15] (a pn_Cmp
 * relation mask); the unordered bit is only legal for float compares. */
565 static int verify_node_Proj_Cmp(ir_node *n, ir_node *p)
567 ir_mode *mode = get_irn_mode(p);
568 long proj = get_Proj_proj(p);
572 (proj >= 0 && proj <= 15 && mode == mode_b),
573 "wrong Proj from Cmp", 0,
574 show_proj_failure(p);
577 (mode_is_float(get_irn_mode(get_Cmp_left(n))) || !(proj & pn_Cmp_Uo)),
578 "unordered Proj for non-float Cmp", 0,
579 show_proj_failure(p);
585 * verify a Proj(Load) node
/* For the data result: mode must be a data mode equal to the Load's mode;
 * when vrfy_entities is set (and the graph is still high-level) the mode is
 * also checked against the addressed entity's type mode. M/X projections
 * require the usual mode and, for control flow, a pinned Load. */
587 static int verify_node_Proj_Load(ir_node *n, ir_node *p)
589 ir_mode *mode = get_irn_mode(p);
590 long proj = get_Proj_proj(p);
592 if (proj == pn_Load_res) {
593 ir_node *ptr = get_Load_ptr(n);
594 ir_entity *ent = get_ptr_entity(ptr);
596 if (vrfy_entities && ent && get_irg_phase_state(current_ir_graph) == phase_high) {
597 /* do NOT check this for lowered phases, see comment on Store */
599 (mode == get_type_mode(get_entity_type(ent))),
600 "wrong data Proj from Load, entity type_mode failed", 0,
601 show_proj_failure_ent(p, ent);
606 mode_is_data(mode) && mode == get_Load_mode(n),
607 "wrong data Proj from Load", 0,
608 show_proj_failure(p);
615 (proj == pn_Load_M         && mode == mode_M) ||
616 (proj == pn_Load_X_regular && mode == mode_X) ||
617 (proj == pn_Load_X_except  && mode == mode_X)
619 "wrong Proj from Load", 0,
620 show_proj_failure(p);
623 if (proj == pn_Load_X_regular) {
625 get_irn_pinned(n) == op_pin_state_pinned,
626 "Regular Proj from unpinned Load", 0);
627 } else if (proj == pn_Load_X_except) {
629 get_irn_pinned(n) == op_pin_state_pinned,
630 "Exception Proj from unpinned Load", 0);
636 * verify a Proj(Store) node
/* Allowed proj/mode pairs for a Proj of Store; control-flow projections
 * require the Store to be pinned. */
638 static int verify_node_Proj_Store(ir_node *n, ir_node *p)
640 ir_mode *mode = get_irn_mode(p);
641 long proj = get_Proj_proj(p);
645 (proj == pn_Store_M         && mode == mode_M) ||
646 (proj == pn_Store_X_regular && mode == mode_X) ||
647 (proj == pn_Store_X_except  && mode == mode_X)
649 "wrong Proj from Store", 0,
650 show_proj_failure(p);
652 if (proj == pn_Store_X_regular) {
654 get_irn_pinned(n) == op_pin_state_pinned,
655 "Regular Proj from unpinned Store", 0);
656 } else if (proj == pn_Store_X_except) {
658 get_irn_pinned(n) == op_pin_state_pinned,
659 "Exception Proj from unpinned Store", 0);
665 * verify a Proj(Alloc) node
/* Allowed proj/mode pairs for a Proj of Alloc. */
667 static int verify_node_Proj_Alloc(ir_node *n, ir_node *p)
669 ir_mode *mode = get_irn_mode(p);
670 long proj = get_Proj_proj(p);
675 (proj == pn_Alloc_M         && mode == mode_M) ||
676 (proj == pn_Alloc_X_regular && mode == mode_X) ||
677 (proj == pn_Alloc_X_except  && mode == mode_X) ||
678 (proj == pn_Alloc_res       && mode_is_reference(mode))
680 "wrong Proj from Alloc", 0,
681 show_proj_failure(p);
687 * verify a Proj(Proj) node
/* Verifies a Proj whose predecessor is itself a Proj (argument projections
 * of Start and result projections of Call). Checks the proj index against
 * the method type's parameter/result count and, outside the backend phase,
 * the mode against the corresponding parameter/result type mode. Compound
 * parameters/results passed by reference are exempted. */
689 static int verify_node_Proj_Proj(ir_node *pred, ir_node *p)
691 ir_mode *mode = get_irn_mode(p);
692 long proj = get_Proj_proj(p);
693 long nr = get_Proj_proj(pred);
694 ir_type *mt; /* A method type */
696 pred = skip_Id(get_Proj_pred(pred));
697 ASSERT_AND_RET((get_irn_mode(pred) == mode_T), "Proj from something not a tuple", 0);
699 switch (get_irn_opcode(pred)) {
701 mt = get_entity_type(get_irg_entity(get_irn_irg(pred)));
703 if (nr == pn_Start_T_args) {
705 (proj >= 0 && mode_is_datab(mode)),
706 "wrong Proj from Proj from Start", 0);
708 (proj < get_method_n_params(mt)),
709 "More Projs for args than args in type", 0
711 if ((mode_is_reference(mode)) && is_compound_type(get_method_param_type(mt, proj)))
712 /* value argument */ break;
714 if (get_irg_phase_state(get_irn_irg(pred)) != phase_backend) {
716 (mode == get_type_mode(get_method_param_type(mt, proj))),
717 "Mode of Proj from Start doesn't match mode of param type.", 0,
718 show_proj_mode_failure(p, get_method_param_type(mt, proj));
727 (proj >= 0 && mode_is_datab(mode)),
728 "wrong Proj from Proj from Call", 0);
729 mt = get_Call_type(pred);
731 (proj < get_method_n_ress(mt)),
732 "More Projs for results than results in type.", 0);
733 if ((mode_is_reference(mode)) && is_compound_type(get_method_res_type(mt, proj)))
734 /* value result */ break;
737 (mode == get_type_mode(get_method_res_type(mt, proj))),
738 "Mode of Proj from Call doesn't match mode of result type.", 0);
747 /* hmm, optimization did not remove it */
751 /* ASSERT_AND_RET(0, "Unknown opcode", 0); */
758 * verify a Proj(Tuple) node
/* Body not visible in this excerpt; presumably accepts anything. */
760 static int verify_node_Proj_Tuple(ir_node *n, ir_node *p)
769 * verify a Proj(CallBegin) node
/* Body not visible in this excerpt. */
771 static int verify_node_Proj_CallBegin(ir_node *n, ir_node *p)
779 * verify a Proj(EndReg) node
/* EndReg is only legal while the interprocedural view is constructed. */
781 static int verify_node_Proj_EndReg(ir_node *n, ir_node *p)
785 #ifdef INTERPROCEDURAL_VIEW
787 (get_irp_ip_view_state() != ip_view_no),
788 "EndReg may only appear if ip view is constructed.", 0);
794 * verify a Proj(EndExcept) node
/* EndExcept is only legal while the interprocedural view is constructed. */
796 static int verify_node_Proj_EndExcept(ir_node *n, ir_node *p)
800 #ifdef INTERPROCEDURAL_VIEW
802 (get_irp_ip_view_state() != ip_view_no),
803 "EndExcept may only appear if ip view is constructed.", 0);
809 * verify a Proj(CopyB) node
/* Allowed proj/mode pairs for a Proj of CopyB; control-flow projections
 * require the CopyB to be pinned. */
811 static int verify_node_Proj_CopyB(ir_node *n, ir_node *p)
813 ir_mode *mode = get_irn_mode(p);
814 long proj = get_Proj_proj(p);
818 (proj == pn_CopyB_M         && mode == mode_M) ||
819 (proj == pn_CopyB_X_regular && mode == mode_X) ||
820 (proj == pn_CopyB_X_except  && mode == mode_X)
822 "wrong Proj from CopyB", 0,
823 show_proj_failure(p);
825 if (proj == pn_CopyB_X_regular)
827 get_irn_pinned(n) == op_pin_state_pinned,
828 "Regular Proj from unpinned CopyB", 0);
829 else if (proj == pn_CopyB_X_except)
831 get_irn_pinned(n) == op_pin_state_pinned,
832 "Exception Proj from unpinned CopyB", 0);
837 * verify a Proj(Bound) node
/* Allowed proj/mode pairs for a Proj of Bound; the result mode must match
 * the mode of the checked index. Bound checks on Bad are ignored. */
839 static int verify_node_Proj_Bound(ir_node *n, ir_node *p)
841 ir_mode *mode = get_irn_mode(p);
842 long proj = get_Proj_proj(p);
844 /* ignore Bound checks of Bad */
845 if (is_Bad(get_Bound_index(n)))
849 (proj == pn_Bound_M         && mode == mode_M) ||
850 (proj == pn_Bound_X_regular && mode == mode_X) ||
851 (proj == pn_Bound_X_except  && mode == mode_X) ||
852 (proj == pn_Bound_res       && mode == get_irn_mode(get_Bound_index(n)))
854 "wrong Proj from Bound", 0,
855 show_proj_failure(p);
/* Generic Proj verifier: the predecessor must be a mode_T tuple, the Proj
 * must live in the same block as its predecessor (unless the graph floats),
 * then dispatch to the op-specific verify_proj_node callback if one is
 * registered. */
863 static int verify_node_Proj(ir_node *p, ir_graph *irg)
868 pred = skip_Id(get_Proj_pred(p));
869 ASSERT_AND_RET(get_irn_mode(pred) == mode_T, "mode of a 'projed' node is not Tuple", 0);
870 ASSERT_AND_RET(get_irg_pinned(irg) == op_pin_state_floats || get_nodes_block(pred) == get_nodes_block(p), "Proj must be in same block as its predecessor", 0);
872 op = get_irn_op(pred);
874 if (op->ops.verify_proj_node)
875 return op->ops.verify_proj_node(pred, p);
882 * verify a Block node
/* Block checks: valid MacroBlock header; partBlocks (mb != n) must have a
 * single predecessor which outside the backend phase is the regular Proj of
 * a fragile op (or a Jmp/Bad from construction-time optimization); every
 * cfg predecessor must be mode_X (or Bad); the end block gets an extra
 * predecessor-kind check; finally the irg attribute must match. */
884 static int verify_node_Block(ir_node *n, ir_graph *irg)
887 ir_node *mb = get_Block_MacroBlock(n);
889 ASSERT_AND_RET(is_Block(mb) || is_Bad(mb), "Block node with wrong MacroBlock", 0);
891 if (is_Block(mb) && mb != n) {
894 /* Blocks with more than one predecessor must be header blocks */
895 ASSERT_AND_RET(get_Block_n_cfgpreds(n) == 1, "partBlock with more than one predecessor", 0);
896 if (get_irg_phase_state(irg) != phase_backend) {
897 pred = get_Block_cfgpred(n, 0);
899 /* the predecessor MUST be a regular Proj */
900 ir_node *frag_op = get_Proj_pred(pred);
902 is_fragile_op(frag_op) && get_Proj_proj(pred) == pn_Generic_X_regular,
903 "partBlock with non-regular predecessor", 0);
905 /* We allow Jmps to be predecessors of partBlocks. This can happen due to optimization
906 of fragile nodes during construction. It does not violate our assumption of dominance
908 ASSERT_AND_RET(is_Jmp(pred) || is_Bad(pred),
909 "partBlock with non-regular predecessor", 0);
912 /* relax in backend: Bound nodes are probably lowered into conditional jumps */
916 for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
917 ir_node *pred = get_Block_cfgpred(n, i);
919 is_Bad(pred) || (get_irn_mode(pred) == mode_X),
920 "Block node must have a mode_X predecessor", 0);
923 if (n == get_irg_end_block(irg) && get_irg_phase_state(irg) != phase_backend)
924 /* End block may only have Return, Raise or fragile ops as preds. */
925 for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
926 ir_node *pred = skip_Proj(get_Block_cfgpred(n, i));
927 if (is_Proj(pred) || is_Tuple(pred))
928 break;   /* We can not test properly.  How many tuples are there? */
936 "End Block node", 0);
938 /* irg attr must == graph we are in. */
939 ASSERT_AND_RET(((get_irn_irg(n) && get_irn_irg(n) == irg)), "Block node has wrong irg attribute", 0);
944 * verify a Start node
/* Start must produce a tuple (mode_T). */
946 static int verify_node_Start(ir_node *n, ir_graph *irg)
948 ir_mode *mymode = get_irn_mode(n);
952 /* Start: BB --> X x M x ref x data1 x ... x datan x ref */
953 mymode == mode_T, "Start node", 0
/* Jmp must have mode_X (control flow). */
961 static int verify_node_Jmp(ir_node *n, ir_graph *irg)
963 ir_mode *mymode = get_irn_mode(n);
968 mymode == mode_X, "Jmp node", 0
974 * verify an IJmp node
/* IJmp: reference-mode target, produces mode_X. */
976 static int verify_node_IJmp(ir_node *n, ir_graph *irg)
978 ir_mode *mymode = get_irn_mode(n);
979 ir_mode *op1mode = get_irn_mode(get_IJmp_target(n));
983 /* IJmp: BB x ref --> X */
984 mymode == mode_X && mode_is_reference(op1mode), "IJmp node", 0
990 * verify a Break node
/* Break: mode_X; only legal with the interprocedural view constructed. */
992 static int verify_node_Break(ir_node *n, ir_graph *irg)
994 ir_mode *mymode = get_irn_mode(n);
997 #ifdef INTERPROCEDURAL_VIEW
998 ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
999 "Break may only appear if ip view is constructed.", 0);
1003 mymode == mode_X, "Break node", 0
1009 * verify a Cond node
/* Cond selector is mode_b (two-way) or an int mode (switch); the Cond
 * itself produces a tuple. */
1011 static int verify_node_Cond(ir_node *n, ir_graph *irg)
1013 ir_mode *mymode = get_irn_mode(n);
1014 ir_mode *op1mode = get_irn_mode(get_Cond_selector(n));
1018 /* Cond: BB x b --> X x X */
1019 (op1mode == mode_b ||
1020 /* Cond: BB x int --> X^n */
1021 mode_is_int(op1mode) ),  "Cond node", 0
1023 ASSERT_AND_RET(mymode == mode_T, "Cond mode is not a tuple", 0);
1029 * verify a Return node
/* Return: memory operand is mode_M, every result is a datab mode, the node
 * itself is mode_X; the result count and (outside the backend phase) each
 * result mode must match the enclosing method type. Compound results are
 * expected to be returned by reference. */
1031 static int verify_node_Return(ir_node *n, ir_graph *irg)
1034 ir_mode *mymode   = get_irn_mode(n);
1035 ir_mode *mem_mode = get_irn_mode(get_Return_mem(n));
1038 /* Return: BB x M x data1 x ... x datan --> X */
1040 ASSERT_AND_RET( mem_mode == mode_M, "Return node", 0 );  /* operand M */
1042 for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
1043 ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Return_res(n, i))), "Return node", 0 );  /* operand datai */
1045 ASSERT_AND_RET( mymode == mode_X, "Result X", 0 );   /* result X */
1046 /* Compare returned results with result types of method type */
1047 mt = get_entity_type(get_irg_entity(irg));
1048 ASSERT_AND_RET_DBG( get_Return_n_ress(n) == get_method_n_ress(mt),
1049 "Number of results for Return doesn't match number of results in type.", 0,
1050 show_return_nres(irg, n, mt););
1051 for (i = get_Return_n_ress(n) - 1; i >= 0; --i) {
1052 ir_type *res_type = get_method_res_type(mt, i);
1054 if (get_irg_phase_state(irg) != phase_backend) {
1055 if (is_atomic_type(res_type)) {
1057 get_irn_mode(get_Return_res(n, i)) == get_type_mode(res_type),
1058 "Mode of result for Return doesn't match mode of result type.", 0,
1059 show_return_modes(irg, n, mt, i);
1063 mode_is_reference(get_irn_mode(get_Return_res(n, i))),
1064 "Mode of result for Return doesn't match mode of result type.", 0,
1065 show_return_modes(irg, n, mt, i);
1074 * verify a Raise node
/* Raise: mode_M memory operand, reference exception pointer, produces a
 * tuple. (The "Sel:" text in the comment below appears to be a stale
 * copy-paste from another verifier.) */
1076 static int verify_node_Raise(ir_node *n, ir_graph *irg)
1078 ir_mode *mymode  = get_irn_mode(n);
1079 ir_mode *op1mode = get_irn_mode(get_Raise_mem(n));
1080 ir_mode *op2mode = get_irn_mode(get_Raise_exo_ptr(n));
1084 /* Sel: BB x M x ref --> X x M */
1085 op1mode == mode_M && mode_is_reference(op2mode) &&
1086 mymode == mode_T, "Raise node", 0
1092 * verify a Const node
/* Const: data mode or mode_b (boolean constants are allowed for static
 * evaluation of Cmp); the node mode must equal its tarval's mode. */
1094 static int verify_node_Const(ir_node *n, ir_graph *irg)
1096 ir_mode *mymode = get_irn_mode(n);
1100 /* Const: BB --> data */
1101 (mode_is_data(mymode) ||
1102 mymode == mode_b)      /* we want boolean constants for static evaluation */
1103 ,"Const node", 0       /* of Cmp. */
1106 /* the modes of the constant and the tarval must match */
1107 mymode == get_tarval_mode(get_Const_tarval(n)),
1108 "Const node, tarval and node mode mismatch", 0
1114 * verify a SymConst node
/* SymConst: int mode (e.g. type size/alignment) or reference mode
 * (address). */
1116 static int verify_node_SymConst(ir_node *n, ir_graph *irg)
1118 ir_mode *mymode = get_irn_mode(n);
1122 /* SymConst: BB --> int*/
1123 (mode_is_int(mymode) ||
1124 /* SymConst: BB --> ref */
1125 mode_is_reference(mymode))
1126 ,"SymConst node", 0);
/* Sel: memory operand mode_M, pointer operand of the same reference mode as
 * the result; all index operands must be int; the entity must be set. */
1133 static int verify_node_Sel(ir_node *n, ir_graph *irg)
1136 ir_mode *mymode  = get_irn_mode(n);
1137 ir_mode *op1mode = get_irn_mode(get_Sel_mem(n));
1138 ir_mode *op2mode = get_irn_mode(get_Sel_ptr(n));
1143 /* Sel: BB x M x ref x int^n --> ref */
1144 (op1mode == mode_M && op2mode == mymode && mode_is_reference(mymode)),
1145 "Sel node", 0, show_node_failure(n)
1148 for (i = get_Sel_n_indexs(n) - 1; i >= 0; --i) {
1149 ASSERT_AND_RET_DBG(mode_is_int(get_irn_mode(get_Sel_index(n, i))), "Sel node", 0, show_node_failure(n));
1151 ent = get_Sel_entity(n);
1152 ASSERT_AND_RET_DBG(ent, "Sel node with empty entity", 0, show_node_failure(n));
1157 * verify an InstOf node
/* InstOf: produces a tuple; the checked object operand must be data. */
1159 static int verify_node_InstOf(ir_node *n, ir_graph *irg)
1161 ir_mode *mymode  = get_irn_mode(n);
1162 ir_mode *op1mode = get_irn_mode(get_InstOf_obj(n));
1165 ASSERT_AND_RET(mode_T == mymode, "mode of Instof is not a tuple", 0);
1166 ASSERT_AND_RET(mode_is_data(op1mode), "Instof not on data", 0);
1171 * Check if the pinned state is right.
/* A pinned Call may have any memory input; an unpinned one must have NoMem
 * or Pin as memory predecessor (the failure return is not visible in this
 * excerpt). */
1173 static int verify_right_pinned(ir_node *n)
1177 if (get_irn_pinned(n) == op_pin_state_pinned)
1179 mem = get_Call_mem(n);
1181 /* if it's not pinned, its memory predecessor must be NoMem or Pin */
1182 if (is_NoMem(mem) || is_Pin(mem))
1188 * verify a Call node
/* Call: mode_M memory and reference callee pointer; NoMem only allowed when
 * unpinned; every argument a datab mode; result is a tuple. Argument count
 * is checked against the call type (>= for variadic, == otherwise) and,
 * outside the backend phase, each argument mode against the parameter type
 * (compound parameters must be passed by reference). A callee array, if
 * present, may contain only entities. Calls with unknown_type skip the
 * type checks. */
1190 static int verify_node_Call(ir_node *n, ir_graph *irg)
1192 ir_mode *mymode  = get_irn_mode(n);
1193 ir_mode *op1mode = get_irn_mode(get_Call_mem(n));
1194 ir_mode *op2mode = get_irn_mode(get_Call_ptr(n));
1199 /* Call: BB x M x ref x data1 x ... x datan
1200 --> M x datan+1 x ... x data n+m */
1201 ASSERT_AND_RET( op1mode == mode_M && mode_is_reference(op2mode), "Call node", 0 );  /* operand M x ref */
1203 /* NoMem nodes are only allowed as memory input if the Call is NOT pinned */
1204 ASSERT_AND_RET(verify_right_pinned(n),"Call node with wrong memory input", 0 );
1206 mt = get_Call_type(n);
1207 if (get_unknown_type() == mt) {
1211 for (i = get_Call_n_params(n) - 1; i >= 0; --i) {
1212 ASSERT_AND_RET( mode_is_datab(get_irn_mode(get_Call_param(n, i))), "Call node", 0 );  /* operand datai */
1215 ASSERT_AND_RET( mymode == mode_T, "Call result not a tuple", 0 );   /* result T */
1216 /* Compare arguments of node with those of type */
1218 if (get_method_variadicity(mt) == variadicity_variadic) {
1220 get_Call_n_params(n) >= get_method_n_params(mt),
1221 "Number of args for Call doesn't match number of args in variadic type.",
1223 ir_fprintf(stderr, "Call %+F has %d params, type %d\n",
1224 n, get_Call_n_params(n), get_method_n_params(mt));
1228 get_Call_n_params(n) == get_method_n_params(mt),
1229 "Number of args for Call doesn't match number of args in non variadic type.",
1231 ir_fprintf(stderr, "Call %+F has %d params, type %d\n",
1232 n, get_Call_n_params(n), get_method_n_params(mt));
1236 for (i = 0; i < get_method_n_params(mt); i++) {
1237 ir_type *t = get_method_param_type(mt, i);
1239 if (get_irg_phase_state(irg) != phase_backend) {
1240 if (is_atomic_type(t)) {
1242 get_irn_mode(get_Call_param(n, i)) == get_type_mode(t),
1243 "Mode of arg for Call doesn't match mode of arg type.", 0,
1244 show_call_param(n, mt);
1247 /* call with a compound type, mode must be reference */
1249 mode_is_reference(get_irn_mode(get_Call_param(n, i))),
1250 "Mode of arg for Call doesn't match mode of arg type.", 0,
1251 show_call_param(n, mt);
1258 if (Call_has_callees(n)) {
1259 for (i = 0; i < get_Call_n_callees(n); i++) {
1260 ASSERT_AND_RET(is_entity(get_Call_callee(n, i)), "callee array must contain entities.", 0);
1268 * verify an Add node
/* Add: either a plain data-mode addition (all three modes equal) or a
 * pointer addition with an int offset on either side. */
1270 static int verify_node_Add(ir_node *n, ir_graph *irg)
1272 ir_mode *mymode  = get_irn_mode(n);
1273 ir_mode *op1mode = get_irn_mode(get_Add_left(n));
1274 ir_mode *op2mode = get_irn_mode(get_Add_right(n));
1279 /* common Add: BB x numP x numP --> numP */
1280 (op1mode == mymode && op2mode == op1mode && mode_is_data(mymode)) ||
1281 /* Pointer Add: BB x ref x int --> ref */
1282 (mode_is_reference(op1mode) && mode_is_int(op2mode) && op1mode == mymode) ||
1283 /* Pointer Add: BB x int x ref --> ref */
1284 (mode_is_int(op1mode) && op2mode == mymode && mode_is_reference(mymode))
1287 show_binop_failure(n, "/* common Add: BB x numP x numP --> numP */ |\n"
1288 "/* Pointer Add: BB x ref x int --> ref */   |\n"
1289 "/* Pointer Add: BB x int x ref --> ref */");
/* Sub: plain data-mode subtraction, pointer minus int (reference result),
 * or pointer minus pointer (int result). */
1297 static int verify_node_Sub(ir_node *n, ir_graph *irg)
1299 ir_mode *mymode  = get_irn_mode(n);
1300 ir_mode *op1mode = get_irn_mode(get_Sub_left(n));
1301 ir_mode *op2mode = get_irn_mode(get_Sub_right(n));
1306 /* common Sub: BB x numP x numP --> numP */
1307 (mymode ==op1mode && mymode == op2mode && mode_is_data(op1mode)) ||
1308 /* Pointer Sub: BB x ref x int --> ref */
1309 (op1mode == mymode && mode_is_int(op2mode) && mode_is_reference(mymode)) ||
1310 /* Pointer Sub: BB x ref x ref --> int */
1311 (op1mode == op2mode && mode_is_reference(op2mode) && mode_is_int(mymode))
1314 show_binop_failure(n, "/* common Sub: BB x numP x numP --> numP */ |\n"
1315 "/* Pointer Sub: BB x ref x int --> ref */   |\n"
1316 "/* Pointer Sub: BB x ref x ref --> int */" );
1322 * verify a Minus node
/* Minus: unary negation on a numeric mode; operand and result modes equal. */
1324 static int verify_node_Minus(ir_node *n, ir_graph *irg)
1326 ir_mode *mymode  = get_irn_mode(n);
1327 ir_mode *op1mode = get_irn_mode(get_Minus_op(n));
1331 /* Minus: BB x num --> num */
1332 op1mode == mymode && mode_is_num(op1mode), "Minus node", 0,
1333 show_unop_failure(n , "/* Minus: BB x num --> num */");
/* Mul: int multiplication (result either same width or exactly double width
 * for widening multiply) or float multiplication with all modes equal. */
1341 static int verify_node_Mul(ir_node *n, ir_graph *irg)
1343 ir_mode *mymode  = get_irn_mode(n);
1344 ir_mode *op1mode = get_irn_mode(get_Mul_left(n));
1345 ir_mode *op2mode = get_irn_mode(get_Mul_right(n));
1350 /* Mul: BB x int_n x int_n --> int_n|int_2n */
1351 (mode_is_int(op1mode) && op2mode == op1mode && mode_is_int(mymode) &&
1352 (op1mode == mymode || get_mode_size_bits(op1mode) * 2 == get_mode_size_bits(mymode))) ||
1353 /* Mul: BB x float x float --> float */
1354 (mode_is_float(op1mode) && op2mode == op1mode && mymode == op1mode)
1357 show_binop_failure(n, "/* Mul: BB x int_n x int_n --> int_n|int_2n */ |\n"
1358 "/* Mul: BB x float x float --> float */");
1364 * verify a Mulh node
/* Mulh (high word of multiply): all three modes must be the same int mode. */
1366 static int verify_node_Mulh(ir_node *n, ir_graph *irg)
1368 ir_mode *mymode  = get_irn_mode(n);
1369 ir_mode *op1mode = get_irn_mode(get_Mulh_left(n));
1370 ir_mode *op2mode = get_irn_mode(get_Mulh_right(n));
1375 /* Mulh: BB x int x int --> int */
1376 (mode_is_int(op1mode) && op2mode == op1mode && op1mode == mymode)
1379 show_binop_failure(n, "/* Mulh: BB x int x int --> int */");
1385 * verify a Quot node
/* Quot: memory operand mode_M, both value operands the same float mode. */
1387 static int verify_node_Quot(ir_node *n, ir_graph *irg)
1389 ir_mode *mymode  = get_irn_mode(n);
1390 ir_mode *op1mode = get_irn_mode(get_Quot_mem(n));
1391 ir_mode *op2mode = get_irn_mode(get_Quot_left(n));
1392 ir_mode *op3mode = get_irn_mode(get_Quot_right(n));
1396 /* Quot: BB x M x float x float --> M x X x float */
1397 op1mode == mode_M && op2mode == op3mode &&
1398 get_mode_sort(op2mode) == irms_float_number &&
1401 show_binop_failure(n, "/* Quot: BB x M x float x float --> M x X x float */");
1407 * verify a DivMod node
/**
 * Verify a DivMod node: memory input must be mode_M and both value
 * operands must share one integer mode.
 * (DivMod: BB x M x int x int --> M x X x int x int)
 * NOTE(review): the tail of the condition/assertion is elided in this
 * listing (embedded numbering jumps after 1421).
 */
1409 static int verify_node_DivMod(ir_node *n, ir_graph *irg)
1411 ir_mode *mymode = get_irn_mode(n);
1412 ir_mode *op1mode = get_irn_mode(get_DivMod_mem(n));
1413 ir_mode *op2mode = get_irn_mode(get_DivMod_left(n));
1414 ir_mode *op3mode = get_irn_mode(get_DivMod_right(n));
1418 /* DivMod: BB x M x int x int --> M x X x int x int */
1419 op1mode == mode_M &&
1420 mode_is_int(op2mode) &&
1421 op3mode == op2mode &&
/**
 * Verify a Div node: memory input must be mode_M and both value
 * operands must share one integer mode.
 * (Div: BB x M x int x int --> M x X x int)
 */
1431 static int verify_node_Div(ir_node *n, ir_graph *irg)
1433 ir_mode *mymode = get_irn_mode(n);
1434 ir_mode *op1mode = get_irn_mode(get_Div_mem(n));
1435 ir_mode *op2mode = get_irn_mode(get_Div_left(n));
1436 ir_mode *op3mode = get_irn_mode(get_Div_right(n));
1440 /* Div: BB x M x int x int --> M x X x int */
1441 op1mode == mode_M &&
1442 op2mode == op3mode &&
1443 mode_is_int(op2mode) &&
/**
 * Verify a Mod node: memory input must be mode_M and both value
 * operands must share one integer mode.
 * (Mod: BB x M x int x int --> M x X x int)
 */
1453 static int verify_node_Mod(ir_node *n, ir_graph *irg)
1455 ir_mode *mymode = get_irn_mode(n);
1456 ir_mode *op1mode = get_irn_mode(get_Mod_mem(n));
1457 ir_mode *op2mode = get_irn_mode(get_Mod_left(n));
1458 ir_mode *op3mode = get_irn_mode(get_Mod_right(n));
1462 /* Mod: BB x M x int x int --> M x X x int */
1463 op1mode == mode_M &&
1464 op2mode == op3mode &&
1465 mode_is_int(op2mode) &&
1473 * verify an Abs node
/**
 * Verify an Abs node: operand mode must equal the result mode and be
 * numeric (Abs: BB x num --> num).
 */
1475 static int verify_node_Abs(ir_node *n, ir_graph *irg)
1477 ir_mode *mymode = get_irn_mode(n);
1478 ir_mode *op1mode = get_irn_mode(get_Abs_op(n));
1482 /* Abs: BB x num --> num */
1483 op1mode == mymode &&
1484 mode_is_num (op1mode),
1486 show_unop_failure(n, "/* Abs: BB x num --> num */");
1492 * verify a logical And, Or, Eor node
/**
 * Verify a bitwise And/Or/Eor node: result must be an int mode or
 * mode_b, and both operands must share one mode.
 * (And or Or or Eor: BB x int x int --> int)
 * The #defines below alias the three opcodes to this one checker.
 */
1494 static int verify_node_Logic(ir_node *n, ir_graph *irg)
1496 ir_mode *mymode = get_irn_mode(n);
1497 ir_mode *op1mode = get_irn_mode(get_binop_left(n));
1498 ir_mode *op2mode = get_irn_mode(get_binop_right(n));
1502 /* And or Or or Eor: BB x int x int --> int */
1503 (mode_is_int(mymode) || mymode == mode_b) &&
1504 op2mode == op1mode &&
1506 "And, Or or Eor node", 0,
1507 show_binop_failure(n, "/* And or Or or Eor: BB x int x int --> int */");
1512 #define verify_node_And verify_node_Logic
1513 #define verify_node_Or verify_node_Logic
1514 #define verify_node_Eor verify_node_Logic
/**
 * Verify a Not node: result must be an int mode or mode_b
 * (Not: BB x int --> int).
 */
1519 static int verify_node_Not(ir_node *n, ir_graph *irg)
1521 ir_mode *mymode = get_irn_mode(n);
1522 ir_mode *op1mode = get_irn_mode(get_Not_op(n));
1526 /* Not: BB x int --> int */
1527 (mode_is_int(mymode) || mymode == mode_b) &&
1530 show_unop_failure(n, "/* Not: BB x int --> int */");
/**
 * Verify a Cmp node: both operands must share one datab mode
 * (Cmp: BB x datab x datab --> b16).
 */
1538 static int verify_node_Cmp(ir_node *n, ir_graph *irg)
1540 ir_mode *mymode = get_irn_mode(n);
1541 ir_mode *op1mode = get_irn_mode(get_Cmp_left(n));
1542 ir_mode *op2mode = get_irn_mode(get_Cmp_right(n));
1546 /* Cmp: BB x datab x datab --> b16 */
1547 mode_is_datab(op1mode) &&
1548 op2mode == op1mode &&
1551 show_binop_failure(n, "/* Cmp: BB x datab x datab --> b16 */");
1557 * verify a Shift node
/**
 * Verify a Shl/Shr/Shrs node: value operand must be an int mode and the
 * shift amount an unsigned int mode.
 * (Shl, Shr or Shrs: BB x int x int_u --> int)
 * The #defines below alias the three opcodes to this one checker.
 */
1559 static int verify_node_Shift(ir_node *n, ir_graph *irg)
1561 ir_mode *mymode = get_irn_mode(n);
1562 ir_mode *op1mode = get_irn_mode(get_binop_left(n));
1563 ir_mode *op2mode = get_irn_mode(get_binop_right(n));
1567 /* Shl, Shr or Shrs: BB x int x int_u --> int */
1568 mode_is_int(op1mode) &&
1569 mode_is_int(op2mode) &&
1570 !mode_is_signed(op2mode) &&
1572 "Shl, Shr or Shrs node", 0,
1573 show_binop_failure(n, "/* Shl, Shr or Shrs: BB x int x int_u --> int */");
1578 #define verify_node_Shl verify_node_Shift
1579 #define verify_node_Shr verify_node_Shift
1580 #define verify_node_Shrs verify_node_Shift
1583 * verify a Rotl node
/**
 * Verify a Rotl node: both operands must be int modes; note that unlike
 * the shift checker above, the rotate amount may be signed.
 * (Rotl: BB x int x int --> int)
 */
1585 static int verify_node_Rotl(ir_node *n, ir_graph *irg)
1587 ir_mode *mymode = get_irn_mode(n);
1588 ir_mode *op1mode = get_irn_mode(get_Rotl_left(n));
1589 ir_mode *op2mode = get_irn_mode(get_Rotl_right(n));
1593 /* Rotl: BB x int x int --> int */
1594 mode_is_int(op1mode) &&
1595 mode_is_int(op2mode) &&
1598 show_binop_failure(n, "/* Rotl: BB x int x int --> int */");
1604 * verify a Conv node
/**
 * Verify a Conv node: datab operand converted to a data result
 * (Conv: BB x datab --> data).  In the backend phase any modes are
 * accepted, since lowering may introduce machine-specific conversions.
 */
1606 static int verify_node_Conv(ir_node *n, ir_graph *irg)
1608 ir_mode *mymode = get_irn_mode(n);
1609 ir_mode *op1mode = get_irn_mode(get_Conv_op(n));
1613 get_irg_phase_state(irg) == phase_backend ||
1614 (mode_is_datab(op1mode) && mode_is_data(mymode)),
1616 show_unop_failure(n, "/* Conv: BB x datab --> data */");
1622 * verify a Cast node
/**
 * Verify a Cast node: a Cast changes only the type, never the mode, so
 * the operand mode must equal the result mode and be a data mode.
 */
1624 static int verify_node_Cast(ir_node *n, ir_graph *irg)
1626 ir_mode *mymode = get_irn_mode(n);
1627 ir_mode *op1mode = get_irn_mode(get_Cast_op(n));
1631 /* Cast: BB x datab1 --> datab2 */
1632 mode_is_data(op1mode) && op1mode == mymode,
1634 show_unop_failure(n, "/* Conv: BB x datab1 --> datab2 */");
/**
 * Verify a Phi node:
 *  - its arity must match its block's arity (except 0-input phis during
 *    SSA (re)construction, and phis in Bad blocks);
 *  - every non-Bad predecessor must have the Phi's own mode;
 *  - the mode must be dataM or mode_b;
 *  - for memory phis (mode_M), no two different predecessors may live
 *    in the same block (check currently disabled, see below).
 */
1642 static int verify_node_Phi(ir_node *n, ir_graph *irg)
1644 ir_mode *mymode = get_irn_mode(n);
1645 ir_node *block = get_nodes_block(n);
1649 /* a Phi node MUST have the same number of inputs as its block
1650 * Exception is a phi with 0 inputs which is used when (re)constructing the
1652 if (! is_Bad(block) && get_irg_phase_state(get_irn_irg(n)) != phase_building && get_irn_arity(n) > 0) {
1654 get_irn_arity(n) == get_irn_arity(block),
1655 "wrong number of inputs in Phi node", 0,
1656 show_phi_inputs(n, block);
1660 /* Phi: BB x dataM^n --> dataM */
1661 for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
1662 ir_node *pred = get_Phi_pred(n, i);
1663 if (!is_Bad(pred)) {
1665 get_irn_mode(pred) == mymode,
1667 show_phi_failure(n, pred, i);
1671 ASSERT_AND_RET(mode_is_dataM(mymode) || mymode == mode_b, "Phi node", 0 );
1673 if (mymode == mode_M) {
1674 for (i = get_Phi_n_preds(n) - 1; i >= 0; --i) {
1676 ir_node *pred_i = get_Phi_pred(n, i);
1680 for (j = i - 1; j >= 0; --j) {
1681 ir_node *pred_j = get_Phi_pred(n, j);
1686 /* currently this check fails for blocks with exception
1687 outputs (and these are NOT basic blocks). So it is disabled yet. */
1689 (pred_i == pred_j) || (get_irn_n(pred_i, -1) != get_irn_n(pred_j, -1)),
1690 "At least two different PhiM predecessors are in the same block",
1692 ir_printf("%+F and %+F of %+F are in %+F\n", pred_i, pred_j, n, get_irn_n(pred_i, -1))
1702 * verify a Filter node
/**
 * Verify a Filter node: Filters are only legal while the
 * interprocedural view is constructed; otherwise no checks are done.
 */
1704 static int verify_node_Filter(ir_node *n, ir_graph *irg)
1708 #ifdef INTERPROCEDURAL_VIEW
1709 ASSERT_AND_RET((get_irp_ip_view_state() != ip_view_no),
1710 "Filter may only appear if ip view is constructed.", 0);
1712 /* We should further do tests as for Proj and Phi. */
1717 * verify a Load node
/**
 * Verify a Load node: memory input must be mode_M, the pointer a
 * reference mode (outside the backend phase), and the node itself a
 * tuple (mode_T).  The commented historic notes below record producers
 * (jack, interprete.c, obset.c) that rely on the entity lookup.
 */
1719 static int verify_node_Load(ir_node *n, ir_graph *irg)
1721 ir_mode *mymode = get_irn_mode(n);
1722 ir_mode *op1mode = get_irn_mode(get_Load_mem(n));
1723 ir_mode *op2mode = get_irn_mode(get_Load_ptr(n));
1725 ASSERT_AND_RET(op1mode == mode_M, "Load node", 0);
1726 if (get_irg_phase_state(irg) != phase_backend) {
1727 ASSERT_AND_RET(mode_is_reference(op2mode), "Load node", 0 );
1729 ASSERT_AND_RET( mymode == mode_T, "Load node", 0 );
1732 * jack's gen_add_firm_code:simpleSel seems to build Load (Load
1733 * (Proj (Proj))) sometimes ...
1735 * interprete.c:ai_eval seems to assume that this happens, too
1737 * obset.c:get_abstval_any can't deal with this if the load has
1741 ir_entity *ent = hunt_for_entity (get_Load_ptr (n), n);
1742 assert ((NULL != ent) || (mymode != mode_T));
1750 * verify a Store node
/**
 * Verify a Store node: memory input must be mode_M, the stored value a
 * datab mode, the pointer a reference mode (outside backend), and the
 * node a tuple (mode_T).  When entity tests are enabled (see
 * vrfy_enable_entity_tests) and the graph is still high-level, the
 * stored value's mode must match the target entity's type mode.
 */
1752 static int verify_node_Store(ir_node *n, ir_graph *irg)
1756 ir_mode *mymode = get_irn_mode(n);
1757 ir_mode *op1mode = get_irn_mode(get_Store_mem(n));
1758 ir_mode *op2mode = get_irn_mode(get_Store_ptr(n));
1759 ir_mode *op3mode = get_irn_mode(get_Store_value(n));
1761 ASSERT_AND_RET(op1mode == mode_M && mode_is_datab(op3mode), "Store node", 0 );
1762 if (get_irg_phase_state(irg) != phase_backend) {
1763 ASSERT_AND_RET(mode_is_reference(op2mode), "Store node", 0 );
1765 ASSERT_AND_RET(mymode == mode_T, "Store node", 0);
1767 target = get_ptr_entity(get_Store_ptr(n));
1768 if (vrfy_entities && target && get_irg_phase_state(current_ir_graph) == phase_high) {
1770 * If lowered code, any Sels that add 0 may be removed, causing
1771 * a direct access to entities of array or compound type.
1772 * Prevent this by checking the phase.
1774 ASSERT_AND_RET( op3mode == get_type_mode(get_entity_type(target)),
1782 * verify an Alloc node
/**
 * Verify an Alloc node: memory input must be mode_M and the count an
 * unsigned int mode (Alloc: BB x M x int_u --> M x X x ref).
 */
1784 static int verify_node_Alloc(ir_node *n, ir_graph *irg)
1786 ir_mode *mymode = get_irn_mode(n);
1787 ir_mode *op1mode = get_irn_mode(get_Alloc_mem(n));
1788 ir_mode *op2mode = get_irn_mode(get_Alloc_count(n));
1792 /* Alloc: BB x M x int_u --> M x X x ref */
1793 op1mode == mode_M &&
1794 mode_is_int(op2mode) &&
1795 !mode_is_signed(op2mode) &&
1798 show_node_failure(n);
1804 * verify a Free node
/**
 * Verify a Free node: memory input must be mode_M, the pointer a
 * reference mode, and the size an unsigned int mode.
 * (Free: BB x M x ref x int_u --> M)
 */
1806 static int verify_node_Free(ir_node *n, ir_graph *irg)
1808 ir_mode *mymode = get_irn_mode(n);
1809 ir_mode *op1mode = get_irn_mode(get_Free_mem(n));
1810 ir_mode *op2mode = get_irn_mode(get_Free_ptr(n));
1811 ir_mode *op3mode = get_irn_mode(get_Free_size(n));
1815 /* Free: BB x M x ref x int_u --> M */
1816 op1mode == mode_M && mode_is_reference(op2mode) &&
1817 mode_is_int(op3mode) &&
1818 !mode_is_signed(op3mode) &&
1821 show_triop_failure(n, "/* Free: BB x M x ref x int_u --> M */");
1827 * verify a Sync node
/**
 * Verify a Sync node: every predecessor and the node itself must have
 * memory mode (Sync: BB x M^n --> M).
 */
1829 static int verify_node_Sync(ir_node *n, ir_graph *irg)
1832 ir_mode *mymode = get_irn_mode(n);
1835 /* Sync: BB x M^n --> M */
1836 for (i = get_Sync_n_preds(n) - 1; i >= 0; --i) {
1837 ASSERT_AND_RET( get_irn_mode(get_Sync_pred(n, i)) == mode_M, "Sync node", 0 );
1839 ASSERT_AND_RET( mymode == mode_M, "Sync node", 0 );
1844 * verify a Confirm node
/**
 * Verify a Confirm node: the confirmed value must carry the node's own
 * mode (Confirm: BB x T x T --> T).
 * NOTE(review): the bound-mode check (op2mode) appears elided from this
 * listing -- confirm against the full file.
 */
1846 static int verify_node_Confirm(ir_node *n, ir_graph *irg)
1848 ir_mode *mymode = get_irn_mode(n);
1849 ir_mode *op1mode = get_irn_mode(get_Confirm_value(n));
1850 ir_mode *op2mode = get_irn_mode(get_Confirm_bound(n));
1854 /* Confirm: BB x T x T --> T */
1855 op1mode == mymode &&
1858 show_binop_failure(n, "/* Confirm: BB x T x T --> T */");
/**
 * Verify a Mux node: selector must be mode_b and both value operands
 * must carry the node's own datab mode.
 * (Mux: BB x b x datab x datab --> datab)
 */
1866 static int verify_node_Mux(ir_node *n, ir_graph *irg)
1868 ir_mode *mymode = get_irn_mode(n);
1869 ir_mode *op1mode = get_irn_mode(get_Mux_sel(n));
1870 ir_mode *op2mode = get_irn_mode(get_Mux_true(n));
1871 ir_mode *op3mode = get_irn_mode(get_Mux_false(n));
1875 /* Mux: BB x b x datab x datab --> datab */
1876 op1mode == mode_b &&
1877 op2mode == mymode &&
1878 op3mode == mymode &&
1879 mode_is_datab(mymode),
1886 * verify a CopyB node
/**
 * Verify a CopyB node: tuple result with mode_M memory input, reference
 * dst/src pointers (outside backend), a compound copy type, and a
 * correctly pinned memory input (NoMem only if not pinned).
 * (CopyB: BB x M x ref x ref --> M x X)
 */
1888 static int verify_node_CopyB(ir_node *n, ir_graph *irg)
1890 ir_mode *mymode = get_irn_mode(n);
1891 ir_mode *op1mode = get_irn_mode(get_CopyB_mem(n));
1892 ir_mode *op2mode = get_irn_mode(get_CopyB_dst(n));
1893 ir_mode *op3mode = get_irn_mode(get_CopyB_src(n));
1894 ir_type *t = get_CopyB_type(n);
1896 /* CopyB: BB x M x ref x ref --> M x X */
1897 ASSERT_AND_RET(mymode == mode_T && op1mode == mode_M, "CopyB node", 0);
1898 if (get_irg_phase_state(irg) != phase_backend) {
1899 ASSERT_AND_RET(mode_is_reference(op2mode) && mode_is_reference(op3mode),
1904 is_compound_type(t),
1905 "CopyB node should copy compound types only", 0 );
1907 /* NoMem nodes are only allowed as memory input if the CopyB is NOT pinned.
1908 This should happen RARELY, as CopyB COPIES MEMORY */
1909 ASSERT_AND_RET(verify_right_pinned(n), "CopyB node with wrong memory input", 0 );
1914 * verify a Bound node
/**
 * Verify a Bound node: memory input must be mode_M and the index, lower
 * and upper bounds must all share one integer mode.
 * (Bound: BB x M x int x int x int --> M x X)
 */
1916 static int verify_node_Bound(ir_node *n, ir_graph *irg)
1918 ir_mode *mymode = get_irn_mode(n);
1919 ir_mode *op1mode = get_irn_mode(get_Bound_mem(n));
1920 ir_mode *op2mode = get_irn_mode(get_Bound_index(n));
1921 ir_mode *op3mode = get_irn_mode(get_Bound_lower(n));
1922 ir_mode *op4mode = get_irn_mode(get_Bound_upper(n));
1925 /* Bound: BB x M x int x int x int --> M x X */
1928 op1mode == mode_M &&
1929 op2mode == op3mode &&
1930 op3mode == op4mode &&
1931 mode_is_int(op3mode),
1938 * For each usage of a node, it is checked, if the block of the
1939 * node dominates the block of the usage (for phis: the predecessor
1940 * block of the phi for the corresponding edge).
1942 * @return non-zero on success, 0 on dominance error
/**
 * Check the SSA dominance property for one node: every definition must
 * dominate its use (for Phi uses, the corresponding predecessor block
 * of the Phi's block).  For partBlocks the MacroBlock header must
 * dominate the block.  Dead blocks (and blocks without dominance info,
 * dom_depth == -1) are skipped since they will be removed.
 *
 * @param use  the node whose inputs are checked
 * @return non-zero on success, 0 on a dominance violation
 */
1944 static int check_dominance_for_node(ir_node *use)
1946 if (is_Block(use)) {
1947 ir_node *mbh = get_Block_MacroBlock(use);
1950 /* must be a partBlock */
1951 if (is_Block(mbh)) {
1952 ASSERT_AND_RET(block_dominates(mbh, use), "MacroBlock header must dominate a partBlock", 0);
1956 /* This won't work for blocks and the end node */
1957 else if (use != get_irg_end(current_ir_graph) && use != current_ir_graph->anchor) {
1959 ir_node *bl = get_nodes_block(use);
1961 for (i = get_irn_arity(use) - 1; i >= 0; --i) {
1962 ir_node *def = get_irn_n(use, i);
1963 ir_node *def_bl = get_nodes_block(def);
1964 ir_node *use_bl = bl;
1966 /* ignore dead definition blocks, will be removed */
1967 if (is_Block_dead(def_bl) || get_Block_dom_depth(def_bl) == -1)
1971 use_bl = get_Block_cfgpred_block(bl, i);
1973 /* ignore dead use blocks, will be removed */
1974 if (is_Block_dead(use_bl) || get_Block_dom_depth(use_bl) == -1)
1978 block_dominates(def_bl, use_bl),
1979 "the definition of a value used violates the dominance property", 0,
1981 "graph %+F: %+F of %+F must dominate %+F of user %+F input %d\n",
1982 current_ir_graph, def_bl, def, use_bl, use, i
1990 /* Tests the modes of n and its predecessors. */
1991 int irn_vrfy_irg(ir_node *n, ir_graph *irg)
1996 if (!get_node_verification_mode())
2000 * do NOT check placement in interprocedural view, as we don't always
2001 * know the "right" graph ...
2005 /* this is an expensive check for large graphs (it has a quadratic
2006 * runtime but with a small constant); so do NOT run it in release mode
2009 node_is_in_irgs_storage(irg, n),
2010 "Node is not stored on proper IR graph!", 0,
2011 show_node_on_graph(irg, n);
2014 assert(get_irn_irg(n) == irg);
2016 unsigned idx = get_irn_idx(n);
2017 ir_node *node_from_map = get_idx_irn(irg, idx);
2018 ASSERT_AND_RET_DBG(node_from_map == n, "Node index and index map entry differ", 0,
2019 ir_printf("node %+F node in map %+F(%p)\n", n, node_from_map, node_from_map));
2024 /* We don't want to test nodes whose predecessors are Bad,
2025 as we would have to special case that for each operation. */
2026 if (op != op_Phi && op != op_Block) {
2027 for (i = get_irn_arity(n) - 1; i >= 0; --i) {
2028 if (is_Bad(get_irn_n(n, i)))
2033 if (_get_op_pinned(op) >= op_pin_state_exc_pinned) {
2034 op_pin_state state = get_irn_pinned(n);
2036 state == op_pin_state_floats ||
2037 state == op_pin_state_pinned,
2038 "invalid pin state", 0,
2039 ir_printf("node %+F", n));
2042 if (op->ops.verify_node)
2043 return op->ops.verify_node(n, irg);
/**
 * Convenience wrapper: verify a node against current_ir_graph.
 * In non-debug builds this presumably degenerates to a no-op -- the
 * #else/#endif branch is elided from this listing; confirm in the file.
 */
2049 int irn_vrfy(ir_node *n)
2051 #ifdef DEBUG_libfirm
2052 return irn_vrfy_irg(n, current_ir_graph);
2059 /*-----------------------------------------------------------------*/
2060 /* Verify the whole graph. */
2061 /*-----------------------------------------------------------------*/
2063 #ifdef DEBUG_libfirm
2065 * Walker to check every node
/**
 * Graph-walker callback: verify one node and store the result in the
 * int pointed to by env.
 */
2067 static void vrfy_wrap(ir_node *node, void *env)
2070 *res = irn_vrfy_irg(node, current_ir_graph);
2074 * Walker to check every node including SSA property.
2075 * Only called if dominance info is available.
/**
 * Graph-walker callback used when dominance info is available: verify
 * the node and additionally check the SSA dominance property.
 * NOTE(review): the combining of the two results (likely &=) is elided
 * from this listing -- confirm against the full file.
 */
2077 static void vrfy_wrap_ssa(ir_node *node, void *env)
2081 *res = irn_vrfy_irg(node, current_ir_graph);
2083 *res = check_dominance_for_node(node);
2087 #endif /* DEBUG_libfirm */
2090 * Calls irn_vrfy for each node in irg.
2091 * Graph must be in state "op_pin_state_pinned".
2092 * If dominance info is available, check the SSA property.
/**
 * Verify every node of irg (debug builds only).  The graph must be
 * pinned; if consistent dominance info exists the SSA walker
 * (vrfy_wrap_ssa) is used, otherwise the plain one.  current_ir_graph
 * is saved/restored around the walk, and last_irg_error is reset so
 * the per-graph failure banner prints again.
 *
 * @param flags  VRFY_* flags, e.g. VRFY_ENFORCE_SSA
 * @return non-zero if the graph verified cleanly
 */
2094 int irg_verify(ir_graph *irg, unsigned flags)
2097 #ifdef DEBUG_libfirm
2100 rem = current_ir_graph;
2101 current_ir_graph = irg;
2104 last_irg_error = NULL;
2107 assert(get_irg_pinned(irg) == op_pin_state_pinned && "Verification need pinned graph");
2109 if (flags & VRFY_ENFORCE_SSA)
2114 get_irg_dom_state(irg) == dom_consistent &&
2115 get_irg_pinned(irg) == op_pin_state_pinned ? vrfy_wrap_ssa : vrfy_wrap,
2119 if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT && ! res) {
2120 ir_entity *ent = get_irg_entity(irg);
2123 fprintf(stderr, "irg_verify: Verifying graph %s failed\n", get_entity_name(ent));
2125 fprintf(stderr, "irg_verify: Verifying graph %p failed\n", (void *)irg);
2128 current_ir_graph = rem;
2132 #endif /* DEBUG_libfirm */
2138 ir_graph_pass_t pass;
2143 * Wrapper to irg_verify to be run as an ir_graph pass.
/**
 * Pass-manager adapter: run irg_verify() with the flags stored in the
 * pass context.  Always reports "no change" so the pass is not rerun.
 */
2145 static int irg_verify_wrapper(ir_graph *irg, void *context)
2147 struct pass_t *pass = context;
2148 irg_verify(irg, pass->flags);
2149 /* do NOT rerun the pass if verify is ok :-) */
2153 /* Creates an ir_graph pass for irg_verify(). */
2153 /* Creates an ir_graph pass for irg_verify(). */
/**
 * Build an ir_graph pass wrapping irg_verify().  Dump and verify hooks
 * are stubbed out (a verifier should neither dump nor re-verify).
 *
 * @param name   pass name, or NULL for the default "irg_verify"
 * @param flags  flags forwarded to irg_verify()
 */
2154 ir_graph_pass_t *irg_verify_pass(const char *name, unsigned flags)
2156 struct pass_t *pass = XMALLOCZ(struct pass_t);
2158 def_graph_pass_constructor(
2159 &pass->pass, name ? name : "irg_verify", irg_verify_wrapper);
2161 /* neither dump nor verify for the verifier pass itself */
2162 pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
2163 pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;
2165 pass->flags = flags;
2169 /* create a verify pass */
/**
 * Verify a node (plus its dominance property if dominance info is
 * consistent and the graph pinned) while temporarily switching to
 * ERROR_ONLY verification mode, and hand the failure message back to
 * the caller via *bad_string (NULL if verification succeeded).
 */
2170 int irn_vrfy_irg_dump(ir_node *n, ir_graph *irg, const char **bad_string)
2173 firm_verification_t old = get_node_verification_mode();
2175 firm_vrfy_failure_msg = NULL;
2176 do_node_verification(FIRM_VERIFICATION_ERROR_ONLY);
2177 res = irn_vrfy_irg(n, irg);
2178 if (res && get_irg_dom_state(irg) == dom_consistent &&
2179 get_irg_pinned(irg) == op_pin_state_pinned)
2180 res = check_dominance_for_node(n);
2181 do_node_verification(old);
2182 *bad_string = firm_vrfy_failure_msg;
2188 typedef struct _vrfy_bad_env_t {
2194 * Pre-Walker: check Bad predecessors of node.
/**
 * Pre-walker: detect illegal Bad/Tuple occurrences at node.  Each kind
 * of problem is recorded at most once in venv->res (flags in
 * venv->flags suppress the corresponding check); depending on the
 * verification mode a message is printed or the scheduled block graph
 * is dumped and an assertion fires.
 * Checks: Bad control-flow predecessors of blocks, nodes placed in a
 * Bad block, leftover Tuple nodes, Phi inputs whose control-flow edge
 * is Bad, and Bad data inputs of ordinary nodes.
 */
2196 static void check_bads(ir_node *node, void *env)
2198 vrfy_bad_env_t *venv = env;
2199 int i, arity = get_irn_arity(node);
2201 if (is_Block(node)) {
2202 if ((venv->flags & BAD_CF) == 0) {
2204 /* check for Bad Block predecessor */
2205 for (i = 0; i < arity; ++i) {
2206 ir_node *pred = get_irn_n(node, i);
2209 venv->res |= BAD_CF;
2211 if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT) {
2212 fprintf(stderr, "irg_vrfy_bads: Block %ld has Bad predecessor\n", get_irn_node_nr(node));
2214 if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
2215 dump_ir_block_graph_sched(current_ir_graph, "-assert");
2216 assert(0 && "Bad CF detected");
2222 if ((venv->flags & BAD_BLOCK) == 0) {
2224 /* check for Bad Block */
2225 if (is_Bad(get_nodes_block(node))) {
2226 venv->res |= BAD_BLOCK;
2228 if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT) {
2229 fprintf(stderr, "irg_vrfy_bads: node %ld has Bad Block\n", get_irn_node_nr(node));
2231 if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
2232 dump_ir_block_graph_sched(current_ir_graph, "-assert");
2233 assert(0 && "Bad CF detected");
2238 if ((venv->flags & TUPLE) == 0) {
2239 if (is_Tuple(node)) {
2242 if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT) {
2243 fprintf(stderr, "irg_vrfy_bads: node %ld is a Tuple\n", get_irn_node_nr(node));
2245 if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
2246 dump_ir_block_graph_sched(current_ir_graph, "-assert");
2247 assert(0 && "Tuple detected");
2252 for (i = 0; i < arity; ++i) {
2253 ir_node *pred = get_irn_n(node, i);
2256 /* check for Phi with Bad inputs */
2257 if (is_Phi(node) && !is_Bad(get_nodes_block(node)) && is_Bad(get_irn_n(get_nodes_block(node), i))) {
2258 if (venv->flags & BAD_CF)
2261 venv->res |= BAD_CF;
2263 if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT) {
2264 fprintf(stderr, "irg_vrfy_bads: Phi %ld has Bad Input\n", get_irn_node_nr(node));
2266 if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
2267 dump_ir_block_graph_sched(current_ir_graph, "-assert");
2268 assert(0 && "Bad CF detected");
2273 /* Bad node input */
2274 if ((venv->flags & BAD_DF) == 0) {
2275 venv->res |= BAD_DF;
2277 if (get_node_verification_mode() == FIRM_VERIFICATION_REPORT) {
2278 fprintf(stderr, "irg_vrfy_bads: node %ld has Bad Input\n", get_irn_node_nr(node));
2280 if (get_node_verification_mode() == FIRM_VERIFICATION_ON) {
2281 dump_ir_block_graph_sched(current_ir_graph, "-assert");
2282 assert(0 && "Bad NON-CF detected");
2291 * verify occurrence of bad nodes
/**
 * Walk irg with check_bads() to look for illegal Bad/Tuple nodes.
 * @param flags  BAD_CF/BAD_DF/BAD_BLOCK/TUPLE bits suppressing checks
 * NOTE(review): env setup and the return of env.res are elided from
 * this listing -- confirm against the full file.
 */
2293 int irg_vrfy_bads(ir_graph *irg, int flags)
2300 irg_walk_graph(irg, check_bads, NULL, &env);
2306 * set the default verify operation
2308 void firm_set_default_verifyer(ir_opcode code, ir_op_ops *ops)
2312 ops->verify_node = verify_node_##a; \
2369 ops->verify_proj_node = verify_node_Proj_##a; \