2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various verify routines that check a scheduled graph for correctness.
23 * @author Matthias Braun
46 #include "besched_t.h"
49 #include "beintlive_t.h"
51 static int my_values_interfere(const ir_node *a, const ir_node *b);
/** Walker environment used by the register pressure verifier. */
53 typedef struct be_verify_register_pressure_env_t_ {
54 ir_graph *irg; /**< the irg to verify */
55 be_lv_t *lv; /**< Liveness information. */
56 const arch_register_class_t *cls; /**< the register class to check for */
57 int registers_available; /**< number of available registers */
58 int problem_found; /**< flag indicating if a problem was found */
59 } be_verify_register_pressure_env_t;
62 * Print all nodes of a pset into a file.
/**
 * Debug helper: write every node of @p live_nodes to @p F,
 * each formatted with "%+F" and separated by a space.
 */
64 static void print_living_values(FILE *F, const ir_nodeset_t *live_nodes) {
65 ir_nodeset_iterator_t iter;
69 foreach_ir_nodeset(live_nodes, node, iter) {
70 ir_fprintf(F, "%+F ", node);
76 * Check if number of live nodes never exceeds the number of available registers.
/**
 * Block walker: checks that the number of simultaneously live values of
 * env->cls never exceeds env->registers_available. Starts from the
 * live-out set of the block and walks the schedule backwards, applying
 * the liveness transfer function per instruction.
 */
78 static void verify_liveness_walker(ir_node *block, void *data) {
79 be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
80 ir_nodeset_t live_nodes;
84 /* collect register pressure info, start with end of a block */
85 // ir_fprintf(stderr, "liveness check %+F\n", block);
86 ir_nodeset_init(&live_nodes);
87 be_liveness_end_of_block(env->lv, env->cls, block,
90 // print_living_values(stderr, &live_nodes);
91 pressure = ir_nodeset_size(&live_nodes);
/* pressure at the end of the block */
92 if(pressure > env->registers_available) {
93 ir_fprintf(stderr, "Verify Warning: Register pressure too high at end of block %+F(%s) (%d/%d):\n",
94 block, get_irg_dump_name(env->irg), pressure, env->registers_available);
95 print_living_values(stderr, &live_nodes);
96 env->problem_found = 1;
/* walk the schedule backwards, updating the live set per node */
99 sched_foreach_reverse(block, irn) {
103 // print_living_values(stderr, &live_nodes);
104 be_liveness_transfer(env->cls, irn, &live_nodes);
106 pressure = ir_nodeset_size(&live_nodes);
108 if(pressure > env->registers_available) {
109 ir_fprintf(stderr, "Verify Warning: Register pressure too high before node %+F in %+F(%s) (%d/%d):\n",
110 irn, block, get_irg_dump_name(env->irg), pressure, env->registers_available);
111 print_living_values(stderr, &live_nodes);
112 env->problem_found = 1;
116 ir_nodeset_destroy(&live_nodes);
120 * Start a walk over the irg and check the register pressure.
/**
 * Entry point: walk all blocks of the graph and verify that register
 * pressure for @p cls never exceeds the allocatable registers
 * (class size minus ignore registers).
 * @return 1 if no problem was found, 0 otherwise.
 */
122 int be_verify_register_pressure(const be_irg_t *birg,
123 const arch_register_class_t *cls,
125 be_verify_register_pressure_env_t env;
127 env.lv = be_liveness(birg);
130 env.registers_available = env.cls->n_regs - be_put_ignore_regs(birg, env.cls, NULL);
131 env.problem_found = 0;
133 be_liveness_assure_sets(env.lv);
134 irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
135 be_liveness_free(env.lv);
137 return ! env.problem_found;
142 /*--------------------------------------------------------------------------- */
/** Walker environment used by the schedule verifier. */
146 typedef struct be_verify_schedule_env_t_ {
147 int problem_found; /**< flags indicating if there was a problem */
148 bitset_t *scheduled; /**< bitset of scheduled nodes */
149 ir_graph *irg; /**< the irg to check */
150 const arch_env_t *arch_env; /**< the arch_env */
151 } be_verify_schedule_env_t;
154 * Simple schedule checker.
/**
 * Block walker checking schedule sanity: no double scheduling, nodes in
 * the right block, strictly increasing timesteps, Phis first, at most one
 * control-flow op (plus delay slots), defs before uses, no dead nodes,
 * and Keeps directly after their operands.
 */
156 static void verify_schedule_walker(ir_node *block, void *data) {
157 be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
159 ir_node *non_phi_found = NULL;
160 int cfchange_found = 0;
161 /* TODO ask arch about delay branches */
162 int delay_branches = 0;
163 int last_timestep = INT_MIN;
166 * Tests for the following things:
167 * 1. Make sure that all phi nodes are scheduled at the beginning of the block
168 * 2. There is 1 or no control flow changing node scheduled and exactly delay_branches operations after it.
169 * 3. No value is defined after it has been used
170 * 4. mode_T nodes have all projs scheduled behind them followed by Keeps
171 * (except mode_X projs)
173 sched_foreach(block, node) {
177 /* this node is scheduled */
178 if(bitset_is_set(env->scheduled, get_irn_idx(node))) {
/* FIX: format expects a node argument for %+F; also "schedule" -> "scheduled" */
179 ir_fprintf(stderr, "Verify warning: %+F appears to be scheduled twice\n", node);
180 env->problem_found = 1;
182 bitset_set(env->scheduled, get_irn_idx(node));
184 /* Check that scheduled nodes are in the correct block */
185 if(get_nodes_block(node) != block) {
186 ir_fprintf(stderr, "Verify warning: %+F is in block %+F but scheduled in %+F\n", node, get_nodes_block(node), block);
187 env->problem_found = 1;
190 /* Check that timesteps are increasing */
191 timestep = sched_get_time_step(node);
192 if(timestep <= last_timestep) {
193 ir_fprintf(stderr, "Verify warning: Schedule timestep did not increase at node %+F\n",
195 env->problem_found = 1;
197 last_timestep = timestep;
199 /* Check that phis come before any other node */
201 if (non_phi_found != NULL) {
202 ir_fprintf(stderr, "Verify Warning: Phi node %+F scheduled after non-Phi nodes (for example %+F) in block %+F (%s)\n",
203 node, non_phi_found, block, get_irg_dump_name(env->irg));
204 env->problem_found = 1;
207 non_phi_found = node;
210 /* Check for control flow changing nodes */
211 if (is_cfop(node) && get_irn_opcode(node) != iro_Start) {
212 /* check, that only one CF operation is scheduled */
213 if (cfchange_found == 1) {
214 ir_fprintf(stderr, "Verify Warning: More than 1 control flow changing node (%+F) scheduled in block %+F (%s)\n",
215 node, block, get_irg_dump_name(env->irg));
216 env->problem_found = 1;
219 } else if (cfchange_found) {
220 /* proj and keepany aren't real instructions... */
221 if(!is_Proj(node) && !be_is_Keep(node)) {
222 /* check for delay branches */
223 if (delay_branches == 0) {
224 ir_fprintf(stderr, "Verify Warning: Node %+F scheduled after control flow changing node (+delay branches) in block %+F (%s)\n",
225 node, block, get_irg_dump_name(env->irg));
226 env->problem_found = 1;
233 /* Check that all uses come before their definitions */
235 int nodetime = sched_get_time_step(node);
236 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
237 ir_node *arg = get_irn_n(node, i);
/* only compare against args scheduled in the same block */
238 if(get_nodes_block(arg) != block
239 || !sched_is_scheduled(arg))
242 if(sched_get_time_step(arg) >= nodetime) {
243 ir_fprintf(stderr, "Verify Warning: Value %+F used by %+F before it was defined in block %+F (%s)\n",
244 arg, node, block, get_irg_dump_name(env->irg));
245 env->problem_found = 1;
250 /* Check that no dead nodes are scheduled */
251 if(get_irn_n_edges(node) == 0) {
252 ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
253 node, block, get_irg_dump_name(env->irg));
254 env->problem_found = 1;
257 if(be_is_Keep(node)) {
258 /* at least 1 of the keep arguments has to be it schedule
260 int arity = get_irn_arity(node);
/* skip over a run of preceding Keeps to find the real predecessor */
262 ir_node *prev = sched_prev(node);
263 while(be_is_Keep(prev))
264 prev = sched_prev(prev);
266 for(i = 0; i < arity; ++i) {
267 ir_node *in = get_irn_n(node, i);
273 ir_fprintf(stderr, "%+F not scheduled after its pred node in block %+F (%s)\n",
274 node, block, get_irg_dump_name(env->irg));
275 env->problem_found = 1;
280 /* check that all delay branches are filled (at least with NOPs) */
281 if (cfchange_found && delay_branches != 0) {
/* FIX: format had "(%d/%d)" with no matching int arguments (undefined
 * behavior in varargs); drop the unmatched specifiers. */
282 ir_fprintf(stderr, "Verify warning: Not all delay slots filled after jump in block %+F (%s)\n",
283 block, get_irg_dump_name(env->irg));
284 env->problem_found = 1;
/**
 * Decide whether @p node is expected to appear in a schedule.
 * Filters out memory Phis/Syncs/Pins, Keeps attached to Bad blocks,
 * opcode-specific exceptions (switch below) and ignore-flagged nodes.
 * Returns nonzero if the node should be scheduled.
 */
288 static int should_be_scheduled(ir_node *node)
296 if(get_irn_mode(node) == mode_M) {
297 if(is_Phi(node) || is_Sync(node) || is_Pin(node))
300 if(be_is_Keep(node) && get_irn_opcode(get_nodes_block(node)) == iro_Bad)
303 switch(get_irn_opcode(node)) {
313 if (arch_irn_get_flags(node) & arch_irn_flags_ignore)
/**
 * Graph walker: compare should_be_scheduled() against the bitset of
 * actually scheduled nodes and report any mismatch in either direction.
 */
319 static void check_schedule(ir_node *node, void *data) {
320 be_verify_schedule_env_t *env = data;
324 should_be = should_be_scheduled(node);
/* normalize both flags to 0/1 before comparing */
328 scheduled = bitset_is_set(env->scheduled, get_irn_idx(node)) ? 1 : 0;
329 should_be = should_be ? 1 : 0;
330 if(should_be != scheduled) {
331 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should%s be scheduled\n",
332 node, get_nodes_block(node), get_irg_dump_name(env->irg), should_be ? "" : " not");
333 env->problem_found = 1;
338 * Start a walk over the irg and check schedule.
/**
 * Entry point of the schedule verifier: checks every block's schedule
 * and then verifies that exactly the right set of nodes is scheduled.
 * @return 1 if no problem was found, 0 otherwise.
 */
340 int be_verify_schedule(const be_irg_t *birg)
342 be_verify_schedule_env_t env;
344 env.problem_found = 0;
345 env.irg = be_get_birg_irg(birg);
346 env.scheduled = bitset_alloca(get_irg_last_idx(env.irg));
347 env.arch_env = birg->main_env->arch_env;
349 irg_block_walk_graph(env.irg, verify_schedule_walker, NULL, &env);
350 /* check if all nodes are scheduled */
351 irg_walk_graph(env.irg, check_schedule, NULL, &env);
353 return ! env.problem_found;
358 /*--------------------------------------------------------------------------- */
/* Spill record (keyed by spill node) and the walker environment of the
 * spill-slot verifier. */
362 typedef struct _spill_t {
368 const arch_env_t *arch_env;
373 } be_verify_spillslots_env_t;
375 static int cmp_spill(const void* d1, const void* d2, size_t size) {
376 const spill_t* s1 = d1;
377 const spill_t* s2 = d2;
380 return s1->spill != s2->spill;
/** Look up the spill record for @p node; returns NULL if none exists. */
383 static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node) {
387 return set_find(env->spills, &spill, sizeof(spill), HASH_PTR(node));
/**
 * Get (or lazily create) the spill record for @p node.
 * On a miss the record is inserted into env->spills.
 */
390 static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
392 int hash = HASH_PTR(node);
395 res = set_find(env->spills, &spill, sizeof(spill), hash);
399 res = set_insert(env->spills, &spill, sizeof(spill), hash);
/**
 * Return the (single) mode_M operand of @p node, or NULL if it has none.
 * Asserts that at most one memory input exists.
 */
405 static ir_node *get_memory_edge(const ir_node *node) {
407 ir_node *result = NULL;
409 arity = get_irn_arity(node);
410 for(i = arity - 1; i >= 0; --i) {
411 ir_node *arg = get_irn_n(node, i);
412 if(get_irn_mode(arg) == mode_M) {
413 assert(result == NULL);
422 void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
/** Warn if @p node has no frame entity assigned (ent == NULL expected check). */
425 void be_check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
427 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have an entity assigned\n",
428 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/**
 * Record a Spill node reached from @p reload and verify that the spill's
 * frame entity matches the entity the reload expects.
 */
433 void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
434 ir_entity *spillent = arch_get_frame_entity(node);
435 be_check_entity(env, node, spillent);
436 get_spill(env, node, ent);
438 if(spillent != ent) {
439 ir_fprintf(stderr, "Verify warning: Spill %+F has different entity than reload %+F in block %+F(%s)\n",
440 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
441 env->problem_found = 1;
/**
 * Handle a Proj of a MemPerm reached from @p reload: check that the
 * MemPerm's out-entity matches @p ent, record the node, and recurse
 * into all MemPerm inputs with their respective in-entities.
 */
445 static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
448 int hash = HASH_PTR(node);
453 assert(is_Proj(node));
455 memperm = get_Proj_pred(node);
456 out = get_Proj_proj(node);
458 spillent = be_get_MemPerm_out_entity(memperm, out);
459 be_check_entity(env, memperm, spillent);
460 if(spillent != ent) {
461 ir_fprintf(stderr, "Verify warning: MemPerm %+F has different entity than reload %+F in block %+F(%s)\n",
462 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
463 env->problem_found = 1;
467 res = set_find(env->spills, &spill, sizeof(spill), hash);
472 spill.ent = spillent;
473 res = set_insert(env->spills, &spill, sizeof(spill), hash);
/* recurse into the MemPerm arguments (input 0 is skipped: i + 1) */
475 for(i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
476 ir_node* arg = get_irn_n(memperm, i + 1);
477 ir_entity* argent = be_get_MemPerm_in_entity(memperm, i);
479 collect(env, arg, memperm, argent);
/**
 * Handle a memory Phi reached from @p reload: record it (guarding
 * against revisiting via set lookup) and recurse into all arguments.
 */
483 static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent) {
486 int hash = HASH_PTR(node);
488 assert(is_Phi(node));
491 res = set_find(env->spills, &spill, sizeof(spill), hash);
497 res = set_insert(env->spills, &spill, sizeof(spill), hash);
499 /* is 1 of the arguments a spill? */
500 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
501 ir_node* arg = get_irn_n(node, i);
502 collect(env, arg, reload, ent);
/**
 * Dispatch on the kind of memory producer reached from @p reload:
 * Spill, Proj-of-MemPerm or memory Phi each get their own collector.
 */
506 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
507 if(be_is_Spill(node)) {
508 collect_spill(env, node, reload, ent);
509 } else if(is_Proj(node)) {
510 collect_memperm(env, node, reload, ent);
511 } else if(is_Phi(node) && get_irn_mode(node) == mode_M) {
512 collect_memphi(env, node, reload, ent);
514 /* Disabled for now, spills might get transformed by the backend */
516 ir_fprintf(stderr, "Verify warning: No spill, memperm or memphi attached to node %+F found from node %+F in block %+F(%s)\n",
517 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
518 env->problem_found = 1;
524 * This walker function searches for reloads and collects all the spills
525 * and memphis attached to them.
527 static void collect_spills_walker(ir_node *node, void *data) {
528 be_verify_spillslots_env_t *env = data;
530 /* @@@ ia32_classify returns classification of Proj_pred :-/ */
534 if (arch_irn_class_is(node, reload)) {
/* follow the reload's memory input back to its spill */
535 ir_node *spill = get_memory_edge(node);
539 ir_fprintf(stderr, "Verify warning: No spill attached to reload %+F in block %+F(%s)\n",
540 node, get_nodes_block(node), get_irg_dump_name(env->irg));
541 env->problem_found = 1;
544 ent = arch_get_frame_entity(node);
545 be_check_entity(env, node, ent);
547 collect(env, spill, node, ent);
548 ARR_APP1(ir_node*, env->reloads, node);
/**
 * Pairwise check: any two spills that share a frame entity (i.e. the same
 * spill slot) must not interfere, otherwise one would overwrite the other.
 */
552 static void check_spillslot_interference(be_verify_spillslots_env_t *env) {
553 int spillcount = set_count(env->spills);
554 spill_t **spills = alloca(spillcount * sizeof(spills[0]));
/* flatten the set into an array for O(n^2) pairwise comparison */
558 for(spill = set_first(env->spills), i = 0; spill != NULL; spill = set_next(env->spills), ++i) {
562 for(i = 0; i < spillcount; ++i) {
563 spill_t *sp1 = spills[i];
566 for(i2 = i+1; i2 < spillcount; ++i2) {
567 spill_t *sp2 = spills[i2];
/* different entities -> different slots -> no conflict possible */
569 if(sp1->ent != sp2->ent)
572 if(my_values_interfere(sp1->spill, sp2->spill)) {
573 ir_fprintf(stderr, "Verify warning: Spillslots for %+F in block %+F(%s) and %+F in block %+F(%s) interfere\n",
574 sp1->spill, get_nodes_block(sp1->spill), get_irg_dump_name(env->irg),
575 sp2->spill, get_nodes_block(sp2->spill), get_irg_dump_name(env->irg));
576 env->problem_found = 1;
/* FIX: removed a duplicated my_values_interfere() call whose result was
 * discarded; the function is a pure predicate, so the call was dead code. */
/**
 * Graph walker: find Spill nodes (and MemPerm projs) that were never
 * reached from any reload — such spills are useless and indicate a
 * problem in spill placement.
 */
583 static void check_lonely_spills(ir_node *node, void *data) {
584 be_verify_spillslots_env_t *env = data;
586 if(be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
587 spill_t *spill = find_spill(env, node);
588 if(be_is_Spill(node)) {
589 ir_entity *ent = arch_get_frame_entity(node);
590 be_check_entity(env, node, ent);
/* FIX: corrected diagnostic-message typo "reaload" -> "reload" */
594 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) not connected to a reload\n",
595 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/**
 * Entry point of the spill-slot verifier: collect all reload/spill
 * chains, flag spills not connected to any reload, and check that
 * spills sharing a slot do not interfere.
 * @return 1 if no problem was found, 0 otherwise.
 */
600 int be_verify_spillslots(const arch_env_t *arch_env, ir_graph *irg)
602 be_verify_spillslots_env_t env;
604 env.arch_env = arch_env;
606 env.spills = new_set(cmp_spill, 10);
607 env.reloads = NEW_ARR_F(ir_node*, 0);
608 env.problem_found = 0;
610 irg_walk_graph(irg, collect_spills_walker, NULL, &env);
611 irg_walk_graph(irg, check_lonely_spills, NULL, &env);
613 check_spillslot_interference(&env);
615 DEL_ARR_F(env.reloads);
618 return ! env.problem_found;
623 /*--------------------------------------------------------------------------- */
628 * Check, if two values interfere.
629 * @param a The first value.
630 * @param b The second value.
631 * @return 1, if a and b interfere, 0 if not.
633 static int my_values_interfere(const ir_node *a, const ir_node *b) {
634 const ir_edge_t *edge;
636 int a2b = value_dominates(a, b);
637 int b2a = value_dominates(b, a);
639 /* If there is no dominance relation, they do not interfere. */
644 * Adjust a and b so that a dominates b (swap them if it is
645 * the other way around).
648 const ir_node *t = a;
653 bb = get_nodes_block(b);
656 * Look at all usages of a.
657 * If there's one usage of a in the block of b, then
658 * we check, if this use is dominated by b, if that's true
659 * a and b interfere. Note that b must strictly dominate the user,
660 * since if b is the last user in the block, b and a do not
662 * Uses of a not in b's block can be disobeyed, because the
663 * check for a being live at the end of b's block is already
666 foreach_out_edge(a, edge) {
667 const ir_node *user = get_edge_src_irn(edge);
/* End node keep-alives do not count as real uses */
671 if(get_irn_opcode(user) == iro_End)
674 /* in case of phi arguments we compare with the block the value comes from */
676 ir_node *phiblock = get_nodes_block(user);
679 user = get_irn_n(phiblock, get_edge_src_pos(edge));
691 /*--------------------------------------------------------------------------- */
/* File-scope state shared by the register-allocation verifier walkers. */
693 static const arch_env_t *arch_env; /**< architecture environment of the checked irg */
694 static ir_graph *irg; /**< the irg being verified */
696 static int problem_found; /**< global problem flag for this verifier */
697 static const arch_register_class_t *regclass; /**< register class currently checked */
698 static ir_node **registers; /**< per-register current value (index = register index) */
/**
 * Verify register constraints of a single node: the output register must
 * be assigned and allocatable, every input must carry an assigned,
 * constraint-satisfying register, and Phi inputs must use the same
 * register as the Phi's result (Phis are NOPs after allocation).
 */
700 static void check_register_constraints(ir_node *node)
702 const arch_register_t *reg;
705 /* verify output register */
706 if (arch_get_irn_reg_class(node, -1) != NULL) {
707 reg = arch_get_irn_register(node);
709 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
710 node, get_nodes_block(node), get_irg_dump_name(irg));
712 } else if (!arch_register_type_is(reg, joker) && !arch_reg_is_allocatable(node, -1, reg)) {
713 ir_fprintf(stderr, "Verify warning: Register %s assigned as output of %+F not allowed (register constraint) in block %+F(%s)\n",
714 reg->name, node, get_nodes_block(node), get_irg_dump_name(irg));
719 /* verify input register */
720 arity = get_irn_arity(node);
721 for (i = 0; i < arity; ++i) {
722 ir_node *pred = get_irn_n(node, i);
/* Unknown inputs carry no meaningful register */
724 if (is_Unknown(pred))
728 ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) has Bad as input %d\n",
729 node, get_nodes_block(node), get_irg_dump_name(irg), i);
/* inputs without a register requirement are skipped */
734 if (arch_get_irn_reg_class(node, i) == NULL)
737 reg = arch_get_irn_register(pred);
739 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned (%+F input constraint)\n",
740 pred, get_nodes_block(pred), get_irg_dump_name(irg), node);
743 } else if (!arch_register_type_is(reg, joker) && ! arch_reg_is_allocatable(node, i, reg)) {
744 ir_fprintf(stderr, "Verify warning: Register %s as input %d of %+F not allowed (register constraint) in block %+F(%s)\n",
745 reg->name, i, node, get_nodes_block(node), get_irg_dump_name(irg));
750 /* phis should be NOPs at this point, which means all input regs
751 * must be the same as the output reg */
755 reg = arch_get_irn_register(node);
757 arity = get_irn_arity(node);
758 for (i = 0; i < arity; ++i) {
759 ir_node *pred = get_Phi_pred(node, i);
760 const arch_register_t *pred_reg = arch_get_irn_register(pred);
762 if (reg != pred_reg && !arch_register_type_is(pred_reg, joker)) {
763 ir_fprintf(stderr, "Verify warning: Input %d of %+F in block %+F(%s) uses register %s instead of %s\n",
764 i, node, get_nodes_block(node), get_irg_dump_name(irg), pred_reg->name, reg->name);
/**
 * Mark @p node's register as holding @p node (seen at a use while walking
 * backwards). Reports a double assignment if the register already holds a
 * different node. Virtual registers are exempt.
 */
771 static void value_used(ir_node *node) {
772 const arch_register_t *reg;
775 if (arch_get_irn_reg_class(node, -1) != regclass)
778 reg = arch_get_irn_register(node);
779 if (reg->type & arch_register_type_virtual)
782 reg_node = registers[reg->index];
783 if (reg_node != NULL && reg_node != node) {
784 ir_fprintf(stderr, "Verify warning: Register %s assigned more than once in block %+F(%s) (nodes %+F %+F)\n",
785 reg->name, get_nodes_block(node), get_irg_dump_name(irg),
790 registers[reg->index] = node;
/**
 * Handle the definition point of @p node (walking backwards): the
 * register must currently hold exactly this node; afterwards the
 * register is cleared. Virtual registers are exempt.
 */
793 static void value_def(ir_node *node)
795 const arch_register_t *reg;
798 if (arch_get_irn_reg_class(node, -1) != regclass)
801 reg = arch_get_irn_register(node);
802 if (reg->type & arch_register_type_virtual)
805 reg_node = registers[reg->index];
807 if (reg_node != node) {
808 ir_fprintf(stderr, "Verify warning: Node %+F not registered as value for Register %s (but %+F) in block %+F(%s)\n",
809 node, reg->name, reg_node, get_nodes_block(node), get_irg_dump_name(irg));
812 registers[reg->index] = NULL;
/**
 * Block walker of the register-allocation verifier. For every register
 * class: seed the register table with the block's live-out values, walk
 * the schedule backwards applying value_def/value_used and per-node
 * constraint checks, clear live-ins, and finally verify that no value
 * remains without a definition or live-in entry.
 */
815 static void verify_block_register_allocation(ir_node *block, void *data) {
819 nregclasses = arch_env_get_n_reg_class(arch_env);
820 for (i = 0; i < nregclasses; ++i) {
824 regclass = arch_env_get_reg_class(arch_env, i);
826 assert(lv->nodes && "live sets must be computed");
828 n_regs = arch_register_class_n_regs(regclass);
/* one slot per register of the class, initially empty */
829 registers = alloca(n_regs * sizeof(registers[0]));
830 memset(registers, 0, n_regs * sizeof(registers[0]));
/* seed with values live at the end of the block */
832 be_lv_foreach(lv, block, be_lv_state_end, idx) {
833 ir_node *node = be_lv_get_irn(lv, block, idx);
837 sched_foreach_reverse(block, node) {
/* mode_T nodes define values through their Projs */
840 if (get_irn_mode(node) == mode_T) {
841 const ir_edge_t *edge;
842 foreach_out_edge(node, edge) {
843 ir_node *def = get_edge_src_irn(edge);
850 check_register_constraints(node);
854 arity = get_irn_arity(node);
855 for (i2 = 0; i2 < arity; ++i2) {
856 ir_node *use = get_irn_n(node, i2);
861 be_lv_foreach(lv, block, be_lv_state_in, idx) {
862 ir_node *node = be_lv_get_irn(lv, block, idx);
866 /* set must be empty now */
867 for (i2 = 0; i2 < n_regs; ++i2) {
868 if (registers[i2] == NULL)
871 ir_fprintf(stderr, "Verify warning: Node %+F not live-in and no def found in block %+F(%s)\n",
872 registers[i2], block, get_irg_dump_name(irg));
/**
 * Entry point of the register-allocation verifier.
 * Sets up the file-scope state, walks all blocks, and returns 1 if no
 * problem was found, 0 otherwise.
 * NOTE(review): the global problem_found is presumably reset before the
 * walk — confirm against the full source.
 */
878 int be_verify_register_allocation(const be_irg_t *birg) {
879 arch_env = be_get_birg_arch_env(birg);
880 irg = be_get_birg_irg(birg);
881 lv = be_liveness(birg);
884 be_liveness_assure_sets(lv);
885 irg_block_walk_graph(irg, verify_block_register_allocation, NULL, NULL);
887 be_liveness_free(lv);
889 return !problem_found;
894 /*--------------------------------------------------------------------------- */
/** Environment for the out-edge / dead-node verifier. */
898 typedef struct _verify_out_dead_nodes_env {
902 } verify_out_dead_nodes_env;
/**
 * Recursively follow out edges from @p node and warn about successors
 * that are not in the reachability bitset, i.e. nodes only reachable
 * through out edges (dead nodes kept alive by stale edges). Uses the
 * irg visited flag to terminate on cycles.
 */
904 static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env) {
905 ir_graph *irg = env->irg;
906 const ir_edge_t* edge;
908 if (irn_visited_else_mark(node))
911 /* we find too many (uncritical) dead nodes in block out edges */
915 foreach_out_edge(node, edge) {
916 ir_node* src = get_edge_src_irn(edge);
918 if(!bitset_is_set(env->reachable, get_irn_idx(src)) && !is_Block(src)) {
919 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) only reachable through out edges from %+F\n",
920 src, get_nodes_block(src), get_irg_dump_name(irg), node);
921 env->problem_found = 1;
925 check_out_edges(src, env);
929 static void set_reachable(ir_node *node, void* data)
931 bitset_t* reachable = data;
932 bitset_set(reachable, get_irn_idx(node));
/**
 * Verify the out-edge information of @p irg: first run the generic edge
 * verifier, then mark every node reachable via in/dependency edges and
 * anchors, and finally walk the out edges from Start looking for nodes
 * that are only reachable through out edges.
 * @return 1 if no problem was found, 0 otherwise.
 */
935 int be_verify_out_edges(ir_graph *irg) {
936 verify_out_dead_nodes_env env;
939 env.reachable = bitset_alloca(get_irg_last_idx(irg));
/* NOTE(review): edges_verify()'s result is taken as the initial problem
 * flag — presumably nonzero means "problems found"; confirm. */
940 env.problem_found = edges_verify(irg);
942 irg_walk_in_or_dep_graph(irg, set_reachable, NULL, env.reachable);
943 irg_walk_anchors(irg, set_reachable, NULL, env.reachable);
944 inc_irg_visited(irg);
945 check_out_edges(get_irg_start(irg), &env);
947 return ! env.problem_found;