2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various verify routines that check a scheduled graph for correctness.
23 * @author Matthias Braun
46 #include "besched_t.h"
49 #include "beintlive_t.h"
51 static int my_values_interfere(const ir_node *a, const ir_node *b);
/* Walker environment threaded through verify_liveness_walker() by
 * be_verify_register_pressure(). */
53 typedef struct be_verify_register_pressure_env_t_ {
54 ir_graph *irg; /**< the irg to verify */
55 be_lv_t *lv; /**< Liveness information. */
56 const arch_env_t *arch_env; /**< an architecture environment */
57 const arch_register_class_t *cls; /**< the register class to check for */
58 int registers_available; /**< number of available registers */
59 int problem_found; /**< flag indicating if a problem was found */
60 } be_verify_register_pressure_env_t;
63 * Print all nodes of a pset into a file.
65 static void print_living_values(FILE *F, pset *live_nodes) {
/* Dump each live node to F, space-separated, using libFirm's %+F formatter. */
69 foreach_pset(live_nodes, node) {
70 ir_fprintf(F, "%+F ", node);
76 * Check if number of live nodes never exceeds the number of available registers.
78 static void verify_liveness_walker(ir_node *block, void *data) {
79 be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
80 pset *live_nodes = pset_new_ptr_default();
84 /* collect register pressure info, start with end of a block */
85 be_liveness_end_of_block(env->lv, env->arch_env, env->cls, block, live_nodes);
87 pressure = pset_count(live_nodes);
88 if(pressure > env->registers_available) {
89 ir_fprintf(stderr, "Verify Warning: Register pressure too high at end of block %+F(%s) (%d/%d):\n",
90 block, get_irg_dump_name(env->irg), pressure, env->registers_available);
91 print_living_values(stderr, live_nodes);
92 env->problem_found = 1;
/* Walk the schedule backwards, updating the live set per instruction and
 * re-checking the pressure before each node. */
95 sched_foreach_reverse(block, irn) {
99 be_liveness_transfer(env->arch_env, env->cls, irn, live_nodes);
101 pressure = pset_count(live_nodes);
103 if(pressure > env->registers_available) {
104 ir_fprintf(stderr, "Verify Warning: Register pressure too high before node %+F in %+F(%s) (%d/%d):\n",
105 irn, block, get_irg_dump_name(env->irg), pressure, env->registers_available);
106 print_living_values(stderr, live_nodes);
107 env->problem_found = 1;
110 del_pset(live_nodes);
114 * Start a walk over the irg and check the register pressure.
116 int be_verify_register_pressure(const be_irg_t *birg,
117 const arch_register_class_t *cls,
119 be_verify_register_pressure_env_t env;
121 env.lv = be_liveness(irg);
123 env.arch_env = birg->main_env->arch_env;
/* Available registers = class size minus the registers reserved as "ignore"
 * (e.g. stack pointer) reported by be_put_ignore_regs(). */
125 env.registers_available = env.cls->n_regs - be_put_ignore_regs(birg, env.cls, NULL);
126 env.problem_found = 0;
128 irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
129 be_liveness_free(env.lv);
/* Returns 1 on success (no problem found), 0 otherwise. */
131 return ! env.problem_found;
136 //---------------------------------------------------------------------------
/* Walker environment for be_verify_schedule(); 'scheduled' marks node indices
 * seen during the block walk so check_schedule() can find unscheduled nodes. */
140 typedef struct be_verify_schedule_env_t_ {
141 int problem_found; /**< flags indicating if there was a problem */
142 bitset_t *scheduled; /**< bitset of scheduled nodes */
143 ir_graph *irg; /**< the irg to check */
144 const arch_env_t *arch_env; /**< the arch_env */
145 } be_verify_schedule_env_t;
148 * Simple schedule checker.
150 static void verify_schedule_walker(ir_node *block, void *data) {
151 be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
153 int non_phi_found = 0;
154 int cfchange_found = 0;
155 // TODO ask arch about delay branches
156 int delay_branches = 0;
157 int last_timestep = INT_MIN;
158 ir_node *proj_keep_node = NULL;
159 int proj_keep_mode = 0;
162 * Tests for the following things:
163 * 1. Make sure that all phi nodes are scheduled at the beginning of the block
164 * 2. There is 1 or no control flow changing node scheduled and exactly delay_branches operations after it.
165 * 3. No value is defined after it has been used
166 * 4. mode_T nodes have all projs scheduled behind them followed by Keeps
167 * (except mode_X projs)
169 sched_foreach(block, node) {
173 // this node is scheduled
// NOTE(review): format string below contains %+F but no node argument is
// passed (undefined behavior in the varargs call), and "schedule twice"
// should read "scheduled twice".
174 if(bitset_is_set(env->scheduled, get_irn_idx(node))) {
175 ir_fprintf(stderr, "Verify warning: %+F appears to be schedule twice\n");
176 env->problem_found = 1;
178 bitset_set(env->scheduled, get_irn_idx(node));
180 // Check that scheduled nodes are in the correct block
181 if(get_nodes_block(node) != block) {
182 ir_fprintf(stderr, "Verify warning: %+F is in block %+F but scheduled in %+F\n", node, get_nodes_block(node), block);
183 env->problem_found = 1;
186 // Check that timesteps are increasing
187 timestep = sched_get_time_step(node);
188 if(timestep <= last_timestep) {
189 ir_fprintf(stderr, "Verify warning: Schedule timestep did not increase at node %+F\n",
191 env->problem_found = 1;
193 last_timestep = timestep;
195 // Check that phis come before any other node
198 ir_fprintf(stderr, "Verify Warning: Phi node %+F scheduled after non-Phi nodes in block %+F (%s)\n",
199 node, block, get_irg_dump_name(env->irg));
200 env->problem_found = 1;
206 // Check for control flow changing nodes
207 if (is_cfop(node) && get_irn_opcode(node) != iro_Start) {
208 /* check, that only one CF operation is scheduled */
209 if (cfchange_found == 1) {
210 ir_fprintf(stderr, "Verify Warning: More than 1 control flow changing node (%+F) scheduled in block %+F (%s)\n",
211 node, block, get_irg_dump_name(env->irg));
212 env->problem_found = 1;
215 } else if (cfchange_found) {
216 // proj and keepany aren't real instructions...
217 if(!is_Proj(node) && !be_is_Keep(node)) {
218 /* check for delay branches */
219 if (delay_branches == 0) {
220 ir_fprintf(stderr, "Verify Warning: Node %+F scheduled after control flow changing node (+delay branches) in block %+F (%s)\n",
221 node, block, get_irg_dump_name(env->irg));
222 env->problem_found = 1;
229 // Check that all uses come before their definitions
231 int nodetime = sched_get_time_step(node);
232 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
233 ir_node *arg = get_irn_n(node, i);
/* Only arguments scheduled in this very block are comparable by timestep;
 * values from other blocks are handled by the liveness checks. */
234 if(get_nodes_block(arg) != block
235 || !sched_is_scheduled(arg))
238 if(sched_get_time_step(arg) >= nodetime) {
239 ir_fprintf(stderr, "Verify Warning: Value %+F used by %+F before it was defined in block %+F (%s)\n",
240 arg, node, block, get_irg_dump_name(env->irg));
241 env->problem_found = 1;
246 // Check that no dead nodes are scheduled
247 if(get_irn_n_edges(node) == 0) {
248 ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
249 node, block, get_irg_dump_name(env->irg));
250 env->problem_found = 1;
253 // check that all projs/keeps are behind their nodes
/* proj_keep_mode is a small state machine: 0 = outside a Proj/Keep run,
 * 1 = inside the Proj run of a mode_T node, 2 = inside the trailing Keeps. */
254 if(proj_keep_mode == 0) {
257 proj_keep_node = get_Proj_pred(node);
// NOTE(review): message typo — "scheduled after by its pred" should be
// "scheduled after its pred" (matches the sibling messages below).
258 if(get_Proj_pred(node) != sched_prev(node)) {
259 ir_fprintf(stderr, "Proj %+F not scheduled after by its pred node in block %+F (%s)\n",
260 node, block, get_irg_dump_name(env->irg));
261 env->problem_found = 1;
263 } else if(be_is_Keep(node)) {
265 proj_keep_node = get_irn_n(node, 0);
266 if(proj_keep_node != sched_prev(node)) {
267 ir_fprintf(stderr, "Proj %+F not scheduled after its pred node in block %+F (%s)\n",
268 node, block, get_irg_dump_name(env->irg));
269 env->problem_found = 1;
273 } else if(proj_keep_mode == 1) {
275 if(get_Proj_pred(node) != proj_keep_node) {
276 ir_fprintf(stderr, "Proj %+F not scheduled after its pred node in block %+F (%s)\n",
277 node, block, get_irg_dump_name(env->irg));
278 env->problem_found = 1;
280 } else if(be_is_Keep(node)) {
281 ir_node *pred = get_irn_n(node, 0);
282 if(skip_Proj_const(pred) != proj_keep_node) {
283 ir_fprintf(stderr, "Proj %+F not scheduled after its pred node in block %+F (%s)\n",
284 node, block, get_irg_dump_name(env->irg));
285 env->problem_found = 1;
291 proj_keep_node = NULL;
293 } else if(proj_keep_mode == 2) {
294 if(be_is_Keep(skip_Proj_const(node))) {
295 if(get_irn_n(node, 0) != proj_keep_node) {
296 ir_fprintf(stderr, "Proj %+F not scheduled after its pred node in block %+F (%s)\n",
297 node, block, get_irg_dump_name(env->irg));
298 env->problem_found = 1;
302 proj_keep_node = NULL;
307 /* check that all delay branches are filled (at least with NOPs) */
// NOTE(review): format expects "(%d/%d)" plus a node and a name, but only
// block and the dump name are passed — format/argument mismatch (the filled
// and total delay-slot counts are missing from the argument list).
308 if (cfchange_found && delay_branches != 0) {
309 ir_fprintf(stderr, "Verify warning: Not all delay slots filled after jump (%d/%d) in block %+F (%s)\n",
310 block, get_irg_dump_name(env->irg));
311 env->problem_found = 1;
/* Decide whether a node is expected to appear in the schedule: memory-mode
 * Phi/Sync/Pin nodes, mode_X values, Keeps in Bad blocks and nodes flagged
 * arch_irn_flags_ignore are exempted; Projs inherit their predecessor's
 * verdict. (Some branches of this decision are not visible in this excerpt.) */
315 static int should_be_scheduled(be_verify_schedule_env_t *env, ir_node *node) {
319 if(get_irn_mode(node) == mode_M) {
322 if(is_Phi(node) || is_Sync(node) || is_Pin(node))
326 if(get_irn_mode(node) == mode_X)
/* Projs are scheduled exactly when the node they project from is. */
328 return should_be_scheduled(env, get_Proj_pred(node));
330 if(be_is_Keep(node) && get_irn_opcode(get_nodes_block(node)) == iro_Bad)
333 switch(get_irn_opcode(node)) {
343 if(arch_irn_get_flags(env->arch_env, node) & arch_irn_flags_ignore)
/* Per-node walker: compare should_be_scheduled() against the 'scheduled'
 * bitset filled in by verify_schedule_walker() and report any mismatch. */
349 static void check_schedule(ir_node *node, void *data) {
350 be_verify_schedule_env_t *env = data;
354 should_be = should_be_scheduled(env, node);
/* Normalize both values to 0/1 so they can be compared directly. */
358 scheduled = bitset_is_set(env->scheduled, get_irn_idx(node)) ? 1 : 0;
359 should_be = should_be ? 1 : 0;
360 if(should_be != scheduled) {
361 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should%s be scheduled\n",
362 node, get_nodes_block(node), get_irg_dump_name(env->irg), should_be ? "" : " not");
363 env->problem_found = 1;
368 * Start a walk over the irg and check schedule.
370 int be_verify_schedule(const be_irg_t *birg)
372 be_verify_schedule_env_t env;
374 env.problem_found = 0;
375 env.irg = be_get_birg_irg(birg);
/* One bit per node index; set by verify_schedule_walker as nodes are seen. */
376 env.scheduled = bitset_alloca(get_irg_last_idx(env.irg));
377 env.arch_env = birg->main_env->arch_env;
379 irg_block_walk_graph(env.irg, verify_schedule_walker, NULL, &env);
380 // check if all nodes are scheduled
381 irg_walk_graph(env.irg, check_schedule, NULL, &env);
/* Returns 1 on success (no problem found), 0 otherwise. */
383 return ! env.problem_found;
388 //---------------------------------------------------------------------------
/* spill_t: one entry per spill node with its assigned frame entity;
 * be_verify_spillslots_env_t: walker environment for be_verify_spillslots().
 * (Several field declarations are not visible in this excerpt.) */
392 typedef struct _spill_t {
398 const arch_env_t *arch_env;
403 } be_verify_spillslots_env_t;
/* set comparison callback: two spill_t entries are equal iff they refer to
 * the same spill node (returns 0 on equality, per libFirm set convention). */
405 static int cmp_spill(const void* d1, const void* d2, size_t size) {
406 const spill_t* s1 = d1;
407 const spill_t* s2 = d2;
408 return s1->spill != s2->spill;
/* Look up the spill_t entry for a node in env->spills; NULL if not present. */
411 static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node) {
415 return set_find(env->spills, &spill, sizeof(spill), HASH_PTR(node));
/* Find the spill_t entry for node, inserting a fresh one (with entity ent)
 * if it does not exist yet. The node pointer itself is the hash key. */
418 static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
420 int hash = HASH_PTR(node);
423 res = set_find(env->spills, &spill, sizeof(spill), hash);
427 res = set_insert(env->spills, &spill, sizeof(spill), hash);
/* Return the (single) mode_M operand of a node, or NULL if it has none.
 * The assert enforces that at most one memory input exists. */
433 static ir_node *get_memory_edge(const ir_node *node) {
435 ir_node *result = NULL;
437 arity = get_irn_arity(node);
438 for(i = arity - 1; i >= 0; --i) {
439 ir_node *arg = get_irn_n(node, i);
440 if(get_irn_mode(arg) == mode_M) {
441 assert(result == NULL);
450 void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
/* Report a node that should carry a frame entity but (presumably when the
 * ent argument is NULL — the guarding condition is not visible here) does not. */
453 void be_check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent) {
455 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have an entity assigned\n",
456 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/* Record a Spill node reached from a reload and verify that the spill's
 * frame entity matches the entity the reload expects. */
461 void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
462 ir_entity *spillent = arch_get_frame_entity(env->arch_env, node);
463 be_check_entity(env, node, spillent);
464 get_spill(env, node, ent);
466 if(spillent != ent) {
467 ir_fprintf(stderr, "Verify warning: Spill %+F has different entity than reload %+F in block %+F(%s)\n",
468 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
469 env->problem_found = 1;
/* Handle a Proj of a MemPerm reached from a reload: check the MemPerm's
 * out-entity against the reload's entity, register the Proj in env->spills,
 * then recurse into all MemPerm inputs with their in-entities. */
473 static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
476 int hash = HASH_PTR(node);
481 assert(is_Proj(node));
483 memperm = get_Proj_pred(node);
484 out = get_Proj_proj(node);
486 spillent = be_get_MemPerm_out_entity(memperm, out);
487 be_check_entity(env, memperm, spillent);
488 if(spillent != ent) {
489 ir_fprintf(stderr, "Verify warning: MemPerm %+F has different entity than reload %+F in block %+F(%s)\n",
490 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
491 env->problem_found = 1;
495 res = set_find(env->spills, &spill, sizeof(spill), hash);
500 spill.ent = spillent;
501 res = set_insert(env->spills, &spill, sizeof(spill), hash);
/* Input 0 of a MemPerm is skipped (i + 1): entity inputs start at index 1. */
503 for(i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
504 ir_node* arg = get_irn_n(memperm, i + 1);
505 ir_entity* argent = be_get_MemPerm_in_entity(memperm, i);
507 collect(env, arg, memperm, argent);
/* Handle a memory Phi reached from a reload: register it in env->spills
 * (guarding against revisiting) and recurse into all Phi arguments, each of
 * which must eventually lead to a spill with the same entity. */
511 static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent) {
514 int hash = HASH_PTR(node);
516 assert(is_Phi(node));
519 res = set_find(env->spills, &spill, sizeof(spill), hash);
525 res = set_insert(env->spills, &spill, sizeof(spill), hash);
527 // is 1 of the arguments a spill?
528 for(i = 0, arity = get_irn_arity(node); i < arity; ++i) {
529 ir_node* arg = get_irn_n(node, i);
530 collect(env, arg, reload, ent);
/* Dispatch on the kind of memory producer reached from a reload:
 * Spill, Proj-of-MemPerm or memory Phi. Anything else would be reported,
 * but that branch is disabled (see comment below). */
534 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent) {
535 if(be_is_Spill(node)) {
536 collect_spill(env, node, reload, ent);
537 } else if(is_Proj(node)) {
538 collect_memperm(env, node, reload, ent);
539 } else if(is_Phi(node) && get_irn_mode(node) == mode_M) {
540 collect_memphi(env, node, reload, ent);
542 // Disabled for now, spills might get transformed by the backend
544 ir_fprintf(stderr, "Verify warning: No spill, memperm or memphi attached to node %+F found from node %+F in block %+F(%s)\n",
545 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
546 env->problem_found = 1;
552 * This walker function searches for reloads and collects all the spills
553 * and memphis attached to them.
555 static void collect_spills_walker(ir_node *node, void *data) {
556 be_verify_spillslots_env_t *env = data;
557 const arch_env_t *arch_env = env->arch_env;
559 // @@@ ia32_classify returns classification of Proj_pred :-/
563 if(arch_irn_class_is(arch_env, node, reload)) {
/* A reload must have exactly one memory operand — the spill chain root. */
564 ir_node *spill = get_memory_edge(node);
568 ir_fprintf(stderr, "Verify warning: No spill attached to reload %+F in block %+F(%s)\n",
569 node, get_nodes_block(node), get_irg_dump_name(env->irg));
570 env->problem_found = 1;
573 ent = arch_get_frame_entity(env->arch_env, node);
574 be_check_entity(env, node, ent);
576 collect(env, spill, node, ent);
577 ARR_APP1(ir_node*, env->reloads, node);
/* Pairwise-check all collected spills: two spills sharing the same frame
 * entity must not have interfering live ranges (O(n^2) over spill count). */
581 static void check_spillslot_interference(be_verify_spillslots_env_t *env) {
582 int spillcount = set_count(env->spills);
583 spill_t **spills = alloca(spillcount * sizeof(spills[0]));
/* Flatten the set into an array for pairwise iteration. */
587 for(spill = set_first(env->spills), i = 0; spill != NULL; spill = set_next(env->spills), ++i) {
591 for(i = 0; i < spillcount; ++i) {
592 spill_t *sp1 = spills[i];
595 for(i2 = i+1; i2 < spillcount; ++i2) {
596 spill_t *sp2 = spills[i2];
/* Only spills that were assigned the same slot/entity can conflict. */
598 if(sp1->ent != sp2->ent)
601 if(my_values_interfere(sp1->spill, sp2->spill)) {
602 ir_fprintf(stderr, "Verify warning: Spillslots for %+F in block %+F(%s) and %+F in block %+F(%s) interfere\n",
603 sp1->spill, get_nodes_block(sp1->spill), get_irg_dump_name(env->irg),
604 sp2->spill, get_nodes_block(sp2->spill), get_irg_dump_name(env->irg));
605 env->problem_found = 1;
// NOTE(review): duplicate call with the result discarded — looks like a
// leftover debugging aid (re-run under a debugger); safe to remove.
606 my_values_interfere(sp1->spill, sp2->spill);
/* Walker: warn about Spill nodes (or MemPerm Projs) that were never reached
 * from any reload during collect_spills_walker(). */
612 static void check_lonely_spills(ir_node *node, void *data) {
613 be_verify_spillslots_env_t *env = data;
615 if(be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
616 spill_t *spill = find_spill(env, node);
617 if(be_is_Spill(node)) {
618 ir_entity *ent = arch_get_frame_entity(env->arch_env, node);
619 be_check_entity(env, node, ent);
// NOTE(review): message typo — "reaload" should read "reload".
623 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) not connected to a reaload\n",
624 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/* Entry point: verify that spill slots are consistent — every reload leads
 * to spills with matching entities, no spill is orphaned, and spills sharing
 * an entity do not interfere. Returns 1 on success, 0 otherwise. */
629 int be_verify_spillslots(const arch_env_t *arch_env, ir_graph *irg)
631 be_verify_spillslots_env_t env;
633 env.arch_env = arch_env;
635 env.spills = new_set(cmp_spill, 10);
636 env.reloads = NEW_ARR_F(ir_node*, 0);
637 env.problem_found = 0;
639 irg_walk_graph(irg, collect_spills_walker, NULL, &env);
640 irg_walk_graph(irg, check_lonely_spills, NULL, &env);
642 check_spillslot_interference(&env);
644 DEL_ARR_F(env.reloads);
647 return ! env.problem_found;
652 //---------------------------------------------------------------------------
657 * Check, if two values interfere.
658 * @param a The first value.
659 * @param b The second value.
660 * @return 1, if a and b interfere, 0 if not.
662 static int my_values_interfere(const ir_node *a, const ir_node *b) {
663 const ir_edge_t *edge;
665 int a2b = value_dominates(a, b);
666 int b2a = value_dominates(b, a);
668 /* If there is no dominance relation, they do not interfere. */
673 * Adjust a and b so, that a dominates b if
674 * a dominates b or vice versa.
/* Swap so that afterwards a is the dominating value. */
677 const ir_node *t = a;
682 bb = get_nodes_block(b);
685 * Look at all usages of a.
686 * If there's one usage of a in the block of b, then
687 * we check, if this use is dominated by b, if that's true
688 * a and b interfere. Note that b must strictly dominate the user,
689 * since if b is the last user of in the block, b and a do not
691 * Uses of a not in b's block can be disobeyed, because the
692 * check for a being live at the end of b's block is already
695 foreach_out_edge(a, edge) {
696 const ir_node *user = get_edge_src_irn(edge);
/* End-node keep-alive edges are not real uses. */
700 if(get_irn_opcode(user) == iro_End)
703 // in case of phi arguments we compare with the block the value comes from
705 ir_node *phiblock = get_nodes_block(user);
708 user = get_irn_n(phiblock, get_edge_src_pos(edge));
711 if(value_dominates(b, user))
720 //---------------------------------------------------------------------------
/* Walker environment for be_verify_register_allocation().
 * (Some field declarations are not visible in this excerpt.) */
724 typedef struct _be_verify_register_allocation_env_t {
725 const arch_env_t *arch_env;
729 } be_verify_register_allocation_env_t;
/* Check a single node's register assignment: the output register (if the
 * node produces a register value) and every input register must exist and
 * satisfy the architecture's per-operand constraints. */
731 static void check_register_constraints(ir_node *node, be_verify_register_allocation_env_t *env) {
732 const arch_env_t *arch_env = env->arch_env;
733 const arch_register_t *reg;
736 /* verify output register */
/* Position -1 queries the node's own result operand. */
737 if (arch_get_irn_reg_class(arch_env, node, -1) != NULL) {
738 reg = arch_get_irn_register(arch_env, node);
740 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
741 node, get_nodes_block(node), get_irg_dump_name(env->irg));
742 env->problem_found = 1;
/* "joker" registers satisfy any constraint and are skipped. */
744 else if (! arch_register_type_is(reg, joker) && !arch_reg_is_allocatable(arch_env, node, -1, reg)) {
745 ir_fprintf(stderr, "Verify warning: Register %s assigned as output of %+F not allowed (register constraint) in block %+F(%s)\n",
746 reg->name, node, get_nodes_block(node), get_irg_dump_name(env->irg));
747 env->problem_found = 1;
751 /* verify input register */
752 arity = get_irn_arity(node);
753 for (i = 0; i < arity; ++i) {
754 ir_node *pred = get_irn_n(node, i);
756 if (is_Unknown(pred))
760 ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) has Bad as input %d\n",
761 node, get_nodes_block(node), get_irg_dump_name(env->irg), i);
762 env->problem_found = 1;
/* Inputs without a register class (e.g. memory) need no register. */
766 if (arch_get_irn_reg_class(arch_env, node, i) == NULL)
769 reg = arch_get_irn_register(arch_env, pred);
771 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
772 pred, get_nodes_block(pred), get_irg_dump_name(env->irg));
773 env->problem_found = 1;
776 else if (! arch_register_type_is(reg, joker) && ! arch_reg_is_allocatable(arch_env, node, i, reg)) {
777 ir_fprintf(stderr, "Verify warning: Register %s as input %d of %+F not allowed (register constraint) in block %+F(%s)\n",
778 reg->name, i, node, get_nodes_block(node), get_irg_dump_name(env->irg));
779 env->problem_found = 1;
/* Verify that within one set of simultaneously-live nodes no register of the
 * given class is assigned to more than one value; on conflict, list every
 * node holding the offending register. */
784 static void check_register_allocation(be_verify_register_allocation_env_t *env,
785 const arch_register_class_t *regclass, pset *nodes) {
786 const arch_env_t *arch_env = env->arch_env;
787 const arch_register_t *reg = NULL;
/* One bit per register of the class; set once a register is claimed. */
789 bitset_t *registers = bitset_alloca(arch_register_class_n_regs(regclass));
792 foreach_pset(nodes, node) {
793 if (arch_get_irn_reg_class(arch_env, node, -1) != regclass)
796 reg = arch_get_irn_register(arch_env, node);
798 /* this problem is already reported in 'check_register_constraints' */
802 if (bitset_is_set(registers, reg->index)) {
807 bitset_set(registers, reg->index);
811 ir_fprintf(stderr, "Verify warning: Register %s assigned more than once in block %+F(%s)\n",
812 reg->name, get_nodes_block(node), get_irg_dump_name(env->irg));
813 env->problem_found = 1;
/* Second pass: print every live node currently holding the clashing register. */
815 foreach_pset(nodes, node) {
816 if (arch_get_irn_register(arch_env, node) == reg) {
817 ir_fprintf(stderr, " at node %+F\n", node);
/* Block walker: for every register class, rebuild the live set at the block
 * end and walk the schedule backwards, checking register uniqueness and
 * per-node constraints at each program point. */
823 static void verify_block_register_allocation(ir_node *block, void *data) {
824 be_verify_register_allocation_env_t *env = data;
825 const arch_env_t *arch_env = env->arch_env;
826 const arch_isa_t *isa = arch_env->isa;
829 nregclasses = arch_isa_get_n_reg_class(isa);
830 for (i = 0; i < nregclasses; ++i) {
831 const arch_register_class_t *regclass = arch_isa_get_reg_class(isa, i);
833 pset *live_nodes = pset_new_ptr_default();
835 be_liveness_end_of_block(env->lv, env->arch_env, regclass, block, live_nodes);
836 check_register_allocation(env, regclass, live_nodes);
838 sched_foreach_reverse(block, node) {
/* Update liveness across the node, then re-check the resulting live set. */
842 be_liveness_transfer(env->arch_env, regclass, node, live_nodes);
843 check_register_allocation(env, regclass, live_nodes);
844 check_register_constraints(node, env);
847 del_pset(live_nodes);
/* Entry point: verify the register allocation of a whole graph.
 * Returns 1 on success (no problem found), 0 otherwise. */
851 int be_verify_register_allocation(const arch_env_t *arch_env, ir_graph *irg) {
852 be_verify_register_allocation_env_t env;
854 env.arch_env = arch_env;
856 env.lv = be_liveness(irg);
857 env.problem_found = 0;
859 irg_block_walk_graph(irg, verify_block_register_allocation, NULL, &env);
861 be_liveness_free(env.lv);
863 return !env.problem_found;
868 //---------------------------------------------------------------------------
/* Environment for be_verify_out_edges(): 'reachable' marks node indices found
 * by a regular graph walk. (Field declarations are not visible in this excerpt.) */
872 typedef struct _verify_out_dead_nodes_env {
876 } verify_out_dead_nodes_env;
/* Recursively follow out-edges from node and report any edge target that the
 * in-edge walk did not mark reachable — i.e. a dead node still referenced by
 * out edges. Uses the irg visited flag to avoid revisiting nodes. */
878 static void check_out_edges(ir_node *node, verify_out_dead_nodes_env *env) {
879 ir_graph *irg = env->irg;
880 const ir_edge_t* edge;
882 if(irn_visited(node))
884 mark_irn_visited(node);
886 foreach_out_edge(node, edge) {
887 ir_node* src = get_edge_src_irn(edge);
889 if(!bitset_is_set(env->reachable, get_irn_idx(src)) && !is_Block(node)) {
890 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) only reachable through out edges from %+F\n",
891 src, get_nodes_block(src), get_irg_dump_name(irg), node);
892 env->problem_found = 1;
896 check_out_edges(src, env);
/* Walker callback: mark a node's index as reachable in the bitset. */
900 static void set_reachable(ir_node *node, void* data)
902 bitset_t* reachable = data;
903 bitset_set(reachable, get_irn_idx(node));
/* Entry point: verify the out-edge information of a graph. First runs
 * edges_verify(), then marks everything reachable via in/dep edges and
 * anchors, and finally checks that out edges reach no additional (dead)
 * nodes. Returns 1 on success, 0 otherwise. */
906 int be_verify_out_edges(ir_graph *irg) {
907 verify_out_dead_nodes_env env;
910 env.reachable = bitset_alloca(get_irg_last_idx(irg));
911 env.problem_found = edges_verify(irg);
913 irg_walk_in_or_dep_graph(irg, set_reachable, NULL, env.reachable);
914 irg_walk_anchors(irg, set_reachable, NULL, env.reachable);
915 inc_irg_visited(irg);
916 check_out_edges(get_irg_start(irg), &env);
918 return ! env.problem_found;