2 * Copyright (C) 1995-2010 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Various verify routines that check a scheduled graph for correctness.
23 * @author Matthias Braun
47 #include "beintlive_t.h"
48 #include "belistsched.h"
/* Forward declaration: liveness-based interference test, defined near the
 * end of this file and used by the spill-slot verifier. */
50 static int my_values_interfere(const ir_node *a, const ir_node *b);
/* Environment threaded through the register-pressure verification walker. */
52 typedef struct be_verify_register_pressure_env_t_ {
53 ir_graph *irg; /**< the irg to verify */
54 be_lv_t *lv; /**< Liveness information. */
55 const arch_register_class_t *cls; /**< the register class to check for */
56 int registers_available; /**< number of available registers */
57 int problem_found; /**< flag indicating if a problem was found */
58 } be_verify_register_pressure_env_t;
61 * Print all nodes of a pset into a file.
/* Debug helper: dump every node in 'live_nodes' to stream F, separated by
 * spaces.  %+F is firm's formatter for printing a node with its number. */
63 static void print_living_values(FILE *F, const ir_nodeset_t *live_nodes)
65 ir_nodeset_iterator_t iter;
69 foreach_ir_nodeset(live_nodes, node, iter) {
70 ir_fprintf(F, "%+F ", node);
76 * Check if number of live nodes never exceeds the number of available registers.
/* Block walker: computes register pressure for env->cls by starting from the
 * live-out set of the block and applying the liveness transfer function while
 * iterating the schedule backwards.  Any point where the live set exceeds the
 * number of allocatable registers is reported on stderr and recorded in
 * env->problem_found (the walk continues so all problems get listed). */
78 static void verify_liveness_walker(ir_node *block, void *data)
80 be_verify_register_pressure_env_t *env = (be_verify_register_pressure_env_t *)data;
81 ir_nodeset_t live_nodes;
85 /* collect register pressure info, start with end of a block */
86 // ir_fprintf(stderr, "liveness check %+F\n", block);
87 ir_nodeset_init(&live_nodes);
88 be_liveness_end_of_block(env->lv, env->cls, block,
91 // print_living_values(stderr, &live_nodes);
/* Pressure at the block end = size of the live-out set for this class. */
92 pressure = ir_nodeset_size(&live_nodes);
93 if (pressure > env->registers_available) {
94 ir_fprintf(stderr, "Verify Warning: Register pressure too high at end of block %+F(%s) (%d/%d):\n",
95 block, get_irg_dump_name(env->irg), pressure, env->registers_available);
96 print_living_values(stderr, &live_nodes);
97 env->problem_found = 1;
/* Walk the schedule bottom-up, updating the live set per instruction. */
100 sched_foreach_reverse(block, irn) {
104 // print_living_values(stderr, &live_nodes);
105 be_liveness_transfer(env->cls, irn, &live_nodes);
107 pressure = ir_nodeset_size(&live_nodes);
109 if (pressure > env->registers_available) {
110 ir_fprintf(stderr, "Verify Warning: Register pressure too high before node %+F in %+F(%s) (%d/%d):\n",
111 irn, block, get_irg_dump_name(env->irg), pressure, env->registers_available);
112 print_living_values(stderr, &live_nodes);
113 env->problem_found = 1;
117 ir_nodeset_destroy(&live_nodes);
121 * Start a walk over the irg and check the register pressure.
/* Public entry point.  Builds liveness info, walks all blocks with
 * verify_liveness_walker, frees the liveness again.
 * Returns 1 if no problem was found, 0 otherwise. */
123 int be_verify_register_pressure(ir_graph *irg, const arch_register_class_t *cls)
125 be_verify_register_pressure_env_t env;
127 env.lv = be_liveness(irg);
130 env.registers_available = be_get_n_allocatable_regs(irg, cls);
131 env.problem_found = 0;
133 be_liveness_assure_sets(env.lv);
134 irg_block_walk_graph(irg, verify_liveness_walker, NULL, &env);
135 be_liveness_free(env.lv);
137 return ! env.problem_found;
142 /*--------------------------------------------------------------------------- */
/* Environment threaded through the schedule verification walkers. */
146 typedef struct be_verify_schedule_env_t_ {
147 int problem_found; /**< flags indicating a problem */
148 bitset_t *scheduled; /**< bitset of scheduled nodes */
149 ir_graph *irg; /**< the irg to check */
150 } be_verify_schedule_env_t;
153 * Simple schedule checker.
/* Block walker: validates the per-block schedule list.  Marks each scheduled
 * node in env->scheduled (later cross-checked by check_schedule) and reports
 * violations of the invariants listed in the comment below. */
155 static void verify_schedule_walker(ir_node *block, void *data)
157 be_verify_schedule_env_t *env = (be_verify_schedule_env_t*) data;
159 ir_node *non_phi_found = NULL;
160 ir_node *cfchange_found = NULL;
161 int last_timestep = INT_MIN;
164 * Tests for the following things:
165 * 1. Make sure that all phi nodes are scheduled at the beginning of the
167 * 2. No value is defined after it has been used
168 * 3. mode_T nodes have all projs scheduled behind them followed by Keeps
169 * (except mode_X projs)
171 sched_foreach(block, node) {
174 /* this node is scheduled */
175 if (bitset_is_set(env->scheduled, get_irn_idx(node))) {
/* NOTE(review): format string has a %+F conversion but no matching 'node'
 * argument is visible on this line — undefined behavior in the warning
 * printout; also "schedule twice" should read "scheduled twice". */
176 ir_fprintf(stderr, "Verify warning: %+F appears to be schedule twice\n");
177 env->problem_found = 1;
179 bitset_set(env->scheduled, get_irn_idx(node));
181 /* Check that scheduled nodes are in the correct block */
182 if (get_nodes_block(node) != block) {
183 ir_fprintf(stderr, "Verify warning: %+F is in block %+F but scheduled in %+F\n", node, get_nodes_block(node), block);
184 env->problem_found = 1;
187 /* Check that timesteps are increasing */
188 timestep = sched_get_time_step(node);
189 if (timestep <= last_timestep) {
190 ir_fprintf(stderr, "Verify warning: Schedule timestep did not increase at node %+F\n",
192 env->problem_found = 1;
194 last_timestep = timestep;
196 /* Check that phis come before any other node */
198 if (non_phi_found != NULL) {
199 ir_fprintf(stderr, "Verify Warning: Phi node %+F scheduled after non-Phi nodes (for example %+F) in block %+F (%s)\n",
200 node, non_phi_found, block, get_irg_dump_name(env->irg));
201 env->problem_found = 1;
/* Remember the first non-Phi so later Phis can be reported against it. */
204 non_phi_found = node;
207 /* Check for control flow changing nodes */
209 /* check, that only one CF operation is scheduled */
210 if (cfchange_found != NULL) {
211 ir_fprintf(stderr, "Verify Warning: Additional control flow changing node %+F scheduled after %+F in block %+F (%s)\n",
/* NOTE(review): argument order looks swapped w.r.t. the format text —
 * "after %+F in block %+F" receives (block, cfchange_found); expected
 * (node, cfchange_found, block, ...).  Confirm against format string. */
212 node, block, cfchange_found, get_irg_dump_name(env->irg));
213 env->problem_found = 1;
215 cfchange_found = node;
217 } else if (cfchange_found != NULL) {
218 /* proj and keepany aren't real instructions... */
219 if (!is_Proj(node) && !be_is_Keep(node)) {
220 ir_fprintf(stderr, "Verify Warning: Node %+F scheduled after control flow changing node in block %+F (%s)\n",
221 node, block, get_irg_dump_name(env->irg));
222 env->problem_found = 1;
226 /* Check that all uses come before their definitions */
230 sched_timestep_t nodetime = sched_get_time_step(node);
231 for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
232 ir_node *arg = get_irn_n(node, i);
/* Only compare against operands defined and scheduled in this block. */
233 if (get_nodes_block(arg) != block
234 || !sched_is_scheduled(arg))
237 if (sched_get_time_step(arg) >= nodetime) {
238 ir_fprintf(stderr, "Verify Warning: Value %+F used by %+F before it was defined in block %+F (%s)\n",
239 arg, node, block, get_irg_dump_name(env->irg));
240 env->problem_found = 1;
245 /* Check that no dead nodes are scheduled */
246 if (get_irn_n_edges(node) == 0) {
247 ir_fprintf(stderr, "Verify warning: Node %+F is dead but scheduled in block %+F (%s)\n",
248 node, block, get_irg_dump_name(env->irg));
249 env->problem_found = 1;
252 if (be_is_Keep(node) || be_is_CopyKeep(node)) {
253 /* at least 1 of the keep arguments has to be its schedule
255 int arity = get_irn_arity(node);
/* Skip over any adjacent Keep/CopyKeep group preceding this node. */
257 ir_node *prev = sched_prev(node);
258 while (be_is_Keep(prev) || be_is_CopyKeep(prev))
259 prev = sched_prev(prev);
263 for (i = 0; i < arity; ++i) {
264 ir_node *in = get_irn_n(node, i);
271 prev = sched_prev(prev);
276 ir_fprintf(stderr, "%+F not scheduled after its pred node in block %+F (%s)\n",
277 node, block, get_irg_dump_name(env->irg));
278 env->problem_found = 1;
/* Graph walker: verifies that exactly the nodes which should be scheduled
 * (everything except Projs and nodes flagged arch_irn_flags_not_scheduled)
 * were marked as scheduled by verify_schedule_walker. */
284 static void check_schedule(ir_node *node, void *data)
286 be_verify_schedule_env_t *env = (be_verify_schedule_env_t*)data;
287 bool should_be = !is_Proj(node) && !(arch_irn_get_flags(node) & arch_irn_flags_not_scheduled);
288 bool scheduled = bitset_is_set(env->scheduled, get_irn_idx(node));
290 if (should_be != scheduled) {
291 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should%s be scheduled\n",
292 node, get_nodes_block(node), get_irg_dump_name(env->irg), should_be ? "" : " not");
293 env->problem_found = 1;
298 * Start a walk over the irg and check schedule.
/* Public entry point for schedule verification.
 * Returns 1 if no problem was found, 0 otherwise. */
300 int be_verify_schedule(ir_graph *irg)
302 be_verify_schedule_env_t env;
304 env.problem_found = 0;
/* Bitset sized by the highest node index; filled during the block walk,
 * then cross-checked for every node in the second walk. */
306 env.scheduled = bitset_alloca(get_irg_last_idx(env.irg));
308 irg_block_walk_graph(irg, verify_schedule_walker, NULL, &env);
309 /* check if all nodes are scheduled */
310 irg_walk_graph(irg, check_schedule, NULL, &env);
312 return ! env.problem_found;
317 /*--------------------------------------------------------------------------- */
/* spill_t associates a spill-producing node with its frame entity (fields
 * 'spill' and 'ent' are used by the code below); the env struct carries the
 * spill-slot verifier state (spill set, reload array, problem flag). */
321 typedef struct spill_t {
331 } be_verify_spillslots_env_t;
/* set compare callback for the spill set: two entries are equal iff they
 * refer to the same spill node (0 = equal, per libFirm set convention). */
333 static int cmp_spill(const void* d1, const void* d2, size_t size)
335 const spill_t* s1 = (const spill_t*)d1;
336 const spill_t* s2 = (const spill_t*)d2;
339 return s1->spill != s2->spill;
/* Look up the spill_t entry for 'node' in the spill set; NULL if absent. */
342 static spill_t *find_spill(be_verify_spillslots_env_t *env, ir_node *node)
347 return (spill_t*)set_find(env->spills, &spill, sizeof(spill), HASH_PTR(node));
/* Find-or-insert: returns the spill set entry for 'node', creating one with
 * entity 'ent' when none exists yet. */
350 static spill_t *get_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent)
353 int hash = HASH_PTR(node);
356 res = (spill_t*)set_find(env->spills, &spill, sizeof(spill), hash);
360 res = (spill_t*)set_insert(env->spills, &spill, sizeof(spill), hash);
/* Return the (unique) mode_M operand of 'node', or NULL if it has none.
 * The assert enforces that at most one memory input exists. */
366 static ir_node *get_memory_edge(const ir_node *node)
369 ir_node *result = NULL;
371 arity = get_irn_arity(node);
372 for (i = arity - 1; i >= 0; --i) {
373 ir_node *arg = get_irn_n(node, i);
374 if (get_irn_mode(arg) == mode_M) {
375 assert(result == NULL);
/* Forward declaration: mutually recursive with the collect_* helpers below. */
383 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent);
/* Warn (and record a problem) when 'node' has no frame entity assigned. */
385 static void be_check_entity(be_verify_spillslots_env_t *env, ir_node *node, ir_entity *ent)
388 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have an entity assigned\n",
389 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/* Record a Spill node reached from 'reload' and check that the spill writes
 * to the same frame entity ('ent') that the reload reads from. */
393 static void collect_spill(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
395 ir_entity *spillent = arch_get_frame_entity(node);
396 be_check_entity(env, node, spillent);
397 get_spill(env, node, ent);
399 if (spillent != ent) {
400 ir_fprintf(stderr, "Verify warning: Spill %+F has different entity than reload %+F in block %+F(%s)\n",
401 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
402 env->problem_found = 1;
/* Handle a Proj of a MemPerm reached from 'reload': check that the MemPerm
 * output entity matches 'ent', register the Proj in the spill set, then
 * recurse into all MemPerm inputs with their respective in-entities. */
406 static void collect_memperm(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
410 int hash = HASH_PTR(node);
415 assert(is_Proj(node));
417 memperm = get_Proj_pred(node);
418 out = get_Proj_proj(node);
420 spillent = be_get_MemPerm_out_entity(memperm, out);
421 be_check_entity(env, memperm, spillent);
422 if (spillent != ent) {
423 ir_fprintf(stderr, "Verify warning: MemPerm %+F has different entity than reload %+F in block %+F(%s)\n",
424 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
425 env->problem_found = 1;
/* Already visited?  The set lookup prevents infinite recursion. */
429 res = (spill_t*)set_find(env->spills, &spill, sizeof(spill), hash);
434 spill.ent = spillent;
435 res = (spill_t*)set_insert(env->spills, &spill, sizeof(spill), hash);
/* Inputs start at index 1 — presumably index 0 is the memory predecessor;
 * TODO confirm against be_MemPerm construction. */
437 for (i = 0, arity = be_get_MemPerm_entity_arity(memperm); i < arity; ++i) {
438 ir_node* arg = get_irn_n(memperm, i + 1);
439 ir_entity* argent = be_get_MemPerm_in_entity(memperm, i);
441 collect(env, arg, memperm, argent);
/* Handle a memory Phi reached from 'reload': register it in the spill set
 * (guarding against revisiting) and recurse into all Phi arguments, which
 * must each eventually lead to a spill of the same entity. */
445 static void collect_memphi(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity *ent)
449 int hash = HASH_PTR(node);
451 assert(is_Phi(node));
454 res = (spill_t*)set_find(env->spills, &spill, sizeof(spill), hash);
460 res = (spill_t*)set_insert(env->spills, &spill, sizeof(spill), hash);
462 /* is 1 of the arguments a spill? */
463 for (i = 0, arity = get_irn_arity(node); i < arity; ++i) {
464 ir_node* arg = get_irn_n(node, i);
465 collect(env, arg, reload, ent);
/* Dispatch on the kind of memory producer reached from a reload:
 * Spill, Proj-of-MemPerm, or memory Phi.  Anything else would be a
 * verification problem, but that check is disabled (see comment below). */
469 static void collect(be_verify_spillslots_env_t *env, ir_node *node, ir_node *reload, ir_entity* ent)
471 if (be_is_Spill(node)) {
472 collect_spill(env, node, reload, ent);
473 } else if (is_Proj(node)) {
474 collect_memperm(env, node, reload, ent);
475 } else if (is_Phi(node) && get_irn_mode(node) == mode_M) {
476 collect_memphi(env, node, reload, ent);
478 /* Disabled for now, spills might get transformed by the backend */
480 ir_fprintf(stderr, "Verify warning: No spill, memperm or memphi attached to node %+F found from node %+F in block %+F(%s)\n",
481 node, reload, get_nodes_block(node), get_irg_dump_name(env->irg));
482 env->problem_found = 1;
488 * This walker function searches for reloads and collects all the spills
489 * and memphis attached to them.
/* Graph walker: for every reload node, follow its memory edge to the spill
 * chain, verify entities along the way, and remember the reload in
 * env->reloads for the later lonely-spill check. */
491 static void collect_spills_walker(ir_node *node, void *data)
493 be_verify_spillslots_env_t *env = (be_verify_spillslots_env_t*)data;
495 if (arch_irn_classify(node) & arch_irn_class_reload) {
496 ir_node *spill = get_memory_edge(node);
500 ir_fprintf(stderr, "Verify warning: No spill attached to reload %+F in block %+F(%s)\n",
501 node, get_nodes_block(node), get_irg_dump_name(env->irg));
502 env->problem_found = 1;
505 ent = arch_get_frame_entity(node);
506 be_check_entity(env, node, ent);
508 collect(env, spill, node, ent);
509 ARR_APP1(ir_node*, env->reloads, node);
/* Pairwise-check all collected spills: two spills that share the same frame
 * entity must not have interfering live ranges, otherwise one would
 * overwrite the other's slot.  O(n^2) over the spill set. */
513 static void check_spillslot_interference(be_verify_spillslots_env_t *env)
515 int spillcount = set_count(env->spills);
516 spill_t **spills = ALLOCAN(spill_t*, spillcount);
/* Flatten the set into an array for pairwise iteration. */
521 foreach_set(env->spills, spill_t*, spill) {
525 for (i = 0; i < spillcount; ++i) {
526 spill_t *sp1 = spills[i];
529 for (i2 = i+1; i2 < spillcount; ++i2) {
530 spill_t *sp2 = spills[i2];
/* Different entities means different slots — cannot clash. */
532 if (sp1->ent != sp2->ent)
535 if (my_values_interfere(sp1->spill, sp2->spill)) {
536 ir_fprintf(stderr, "Verify warning: Spillslots for %+F in block %+F(%s) and %+F in block %+F(%s) interfere\n",
537 sp1->spill, get_nodes_block(sp1->spill), get_irg_dump_name(env->irg),
538 sp2->spill, get_nodes_block(sp2->spill), get_irg_dump_name(env->irg));
539 env->problem_found = 1;
/* NOTE(review): this repeated call has no effect on the result — it
 * looks like a leftover so one can set a breakpoint and re-run the
 * interference check in a debugger; confirm before removing. */
540 my_values_interfere(sp1->spill, sp2->spill);
/* Graph walker: find spills (or MemPerm Projs) that were never reached from
 * any reload during collect_spills_walker — i.e. spilled values that are
 * never reloaded. */
546 static void check_lonely_spills(ir_node *node, void *data)
548 be_verify_spillslots_env_t *env = (be_verify_spillslots_env_t*)data;
550 if (be_is_Spill(node) || (is_Proj(node) && be_is_MemPerm(get_Proj_pred(node)))) {
551 spill_t *spill = find_spill(env, node);
552 if (be_is_Spill(node)) {
553 ir_entity *ent = arch_get_frame_entity(node);
554 be_check_entity(env, node, ent);
/* NOTE(review): "reaload" is a typo for "reload" in this user-visible
 * warning message (runtime string, left unchanged here). */
558 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) not connected to a reaload\n",
559 node, get_nodes_block(node), get_irg_dump_name(env->irg));
/* Public entry point for spill-slot verification: collect reload/spill
 * chains, report lonely spills, then check slot interference.
 * Returns 1 if no problem was found, 0 otherwise. */
564 int be_verify_spillslots(ir_graph *irg)
566 be_verify_spillslots_env_t env;
569 env.spills = new_set(cmp_spill, 10);
570 env.reloads = NEW_ARR_F(ir_node*, 0);
571 env.problem_found = 0;
573 irg_walk_graph(irg, collect_spills_walker, NULL, &env);
574 irg_walk_graph(irg, check_lonely_spills, NULL, &env);
576 check_spillslot_interference(&env);
578 DEL_ARR_F(env.reloads);
581 return ! env.problem_found;
586 /*--------------------------------------------------------------------------- */
591 * Check, if two values interfere.
592 * @param a The first value.
593 * @param b The second value.
594 * @return 1, if a and b interfere, 0 if not.
596 static int my_values_interfere(const ir_node *a, const ir_node *b)
598 const ir_edge_t *edge;
600 int a2b = value_dominates(a, b);
601 int b2a = value_dominates(b, a);
603 /* If there is no dominance relation, they do not interfere. */
608 * Adjust a and b so, that a dominates b if
609 * a dominates b or vice versa.
/* Swap so that afterwards 'a' is the dominating value. */
612 const ir_node *t = a;
617 bb = get_nodes_block(b);
620 * Look at all usages of a.
621 * If there's one usage of a in the block of b, then
622 * we check, if this use is dominated by b, if that's true
623 * a and b interfere. Note that b must strictly dominate the user,
624 * since if b is the last user of in the block, b and a do not
626 * Uses of a not in b's block can be disobeyed, because the
627 * check for a being live at the end of b's block is already
630 foreach_out_edge(a, edge) {
631 const ir_node *user = get_edge_src_irn(edge);
/* End node keep-alive edges are not real uses. */
635 if (get_irn_opcode(user) == iro_End)
638 /* in case of phi arguments we compare with the block the value comes from */
640 ir_node *phiblock = get_nodes_block(user);
643 user = get_irn_n(phiblock, get_edge_src_pos(edge));
646 if (value_dominates(b, user))
655 /*--------------------------------------------------------------------------- */
/* File-scope state for the register-allocation verifier below.  These are
 * (re)initialized per verification run; the walker callbacks communicate
 * through them instead of an env struct. */
657 static const arch_env_t *arch_env;
658 static ir_graph *irg;
660 static int problem_found;
661 static const arch_register_class_t *regclass;
662 static ir_node **registers;
/* Check that 'node' (producing a value in the current regclass) has a
 * register assigned and that the register satisfies the node's output
 * constraint.  Joker-typed registers are exempt from the constraint check. */
664 static void check_output_constraints(ir_node *node)
666 /* verify output register */
667 if (arch_get_irn_reg_class_out(node) == regclass) {
668 const arch_register_t *reg = arch_get_irn_register(node);
670 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned\n",
671 node, get_nodes_block(node), get_irg_dump_name(irg));
673 } else if (!(reg->type & arch_register_type_joker) && !arch_reg_out_is_allocatable(node, reg)) {
674 ir_fprintf(stderr, "Verify warning: Register %s assigned as output of %+F not allowed (register constraint) in block %+F(%s)\n",
675 reg->name, node, get_nodes_block(node), get_irg_dump_name(irg));
/* Check every input of 'node': no Bad predecessors, sufficient register
 * width, alignment requirements, a register assigned on each predecessor,
 * and that the assigned register matches the input constraint.  For Phis,
 * additionally require all inputs to carry the same register as the output
 * (Phis must be no-ops after register allocation). */
681 static void check_input_constraints(ir_node *node)
683 const arch_register_t *reg;
686 /* verify input register */
687 arity = get_irn_arity(node);
688 for (i = 0; i < arity; ++i) {
689 const arch_register_req_t *req = arch_get_in_register_req(node, i);
690 ir_node *pred = get_irn_n(node, i);
691 const arch_register_req_t *pred_req = arch_get_register_req_out(pred);
694 ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) has Bad as input %d\n",
695 node, get_nodes_block(node), get_irg_dump_name(irg), i);
/* Inputs without a register class requirement need no further checks. */
699 if (req->cls == NULL)
702 if (req->width > pred_req->width) {
703 ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) register width of value at input %d too small\n",
704 node, get_nodes_block(node), get_irg_dump_name(irg), i);
708 reg = arch_get_irn_register(pred);
709 if (req->type & arch_register_req_type_aligned) {
/* Aligned multi-register values must start at an index that is a
 * multiple of their width. */
710 if (reg->index % req->width != 0) {
711 ir_fprintf(stderr, "Verify warning: %+F in block %+F(%s) register allignment not fulfilled\n",
712 node, get_nodes_block(node), get_irg_dump_name(irg), i);
718 ir_fprintf(stderr, "Verify warning: Node %+F in block %+F(%s) should have a register assigned (%+F input constraint)\n",
719 pred, get_nodes_block(pred), get_irg_dump_name(irg), node);
722 } else if (!(reg->type & arch_register_type_joker) && ! arch_reg_is_allocatable(node, i, reg)) {
723 ir_fprintf(stderr, "Verify warning: Register %s as input %d of %+F not allowed (register constraint) in block %+F(%s)\n",
724 reg->name, i, node, get_nodes_block(node), get_irg_dump_name(irg));
729 /* phis should be NOPs at this point, which means all input regs
730 * must be the same as the output reg */
732 reg = arch_get_irn_register(node);
734 arity = get_irn_arity(node);
735 for (i = 0; i < arity; ++i) {
736 ir_node *pred = get_Phi_pred(node, i);
737 const arch_register_t *pred_reg = arch_get_irn_register(pred);
739 if (reg != pred_reg && !(pred_reg->type & arch_register_type_joker)) {
740 const char *pred_name = pred_reg != NULL ? pred_reg->name : "(null)";
741 const char *reg_name = reg != NULL ? reg->name : "(null)";
742 ir_fprintf(stderr, "Verify warning: Input %d of %+F in block %+F(%s) uses register %s instead of %s\n",
743 i, node, get_nodes_block(node),
744 get_irg_dump_name(irg), pred_name, reg_name);
/* Mark 'node' as occupying its assigned register during the backwards scan.
 * If another node already occupies that register, the register is assigned
 * twice at the same program point — report it. */
751 static void value_used(ir_node *block, ir_node *node)
753 const arch_register_t *reg;
/* Only values of the class currently being verified are tracked. */
756 if (arch_get_irn_reg_class_out(node) != regclass)
759 reg = arch_get_irn_register(node);
/* Virtual registers (and missing assignments) are not tracked here. */
760 if (reg == NULL || reg->type & arch_register_type_virtual)
763 reg_node = registers[reg->index];
764 if (reg_node != NULL && reg_node != node) {
765 ir_fprintf(stderr, "Verify warning: Register %s assigned more than once in block %+F(%s) (nodes %+F %+F)\n",
766 reg->name, block, get_irg_dump_name(irg),
771 registers[reg->index] = node;
/* Process the definition of 'node' during the backwards scan: the register
 * it writes must currently be occupied by 'node' itself (i.e. the use seen
 * earlier matches this def); afterwards the register becomes free. */
774 static void value_def(ir_node *node)
776 const arch_register_t *reg;
779 if (arch_get_irn_reg_class_out(node) != regclass)
782 reg = arch_get_irn_register(node);
783 if (reg == NULL || reg->type & arch_register_type_virtual)
786 reg_node = registers[reg->index];
788 /* a little cheat, since its so hard to remove all outedges to dead code
789 * in the backend. This particular case should never be a problem. */
790 if (reg_node == NULL && get_irn_n_edges(node) == 0)
793 if (reg_node != node) {
794 ir_fprintf(stderr, "Verify warning: Node %+F not registered as value for Register %s (but %+F) in block %+F(%s)\n",
795 node, reg->name, reg_node, get_nodes_block(node), get_irg_dump_name(irg));
/* Clear the slot: above this def the register holds no tracked value. */
798 registers[reg->index] = NULL;
/* Block walker: for each register class, scan the block's schedule backwards
 * maintaining a register -> node occupancy map ('registers').  Seeds the map
 * with the live-out values, checks output/input constraints per node, and
 * finally requires every still-occupied register to correspond to a live-in
 * value. */
801 static void verify_block_register_allocation(ir_node *block, void *data)
806 nregclasses = arch_env->n_register_classes;
807 for (i = 0; i < nregclasses; ++i) {
811 regclass = &arch_env->register_classes[i];
813 assert(lv->nodes && "live sets must be computed");
815 n_regs = arch_register_class_n_regs(regclass);
/* Zero-initialized occupancy array, one slot per register in the class. */
816 registers = ALLOCANZ(ir_node*, n_regs);
/* Values live at the block end occupy their registers from the start of
 * the backwards scan. */
818 be_lv_foreach(lv, block, be_lv_state_end, idx) {
819 ir_node *lv_node = be_lv_get_irn(lv, block, idx);
820 value_used(block, lv_node);
823 sched_foreach_reverse(block, node) {
/* mode_T nodes define their values through their Proj users. */
826 if (get_irn_mode(node) == mode_T) {
827 const ir_edge_t *edge;
828 foreach_out_edge(node, edge) {
829 ir_node *def = get_edge_src_irn(edge);
831 check_output_constraints(def);
835 check_output_constraints(node);
838 check_input_constraints(node);
840 /* process uses. (Phi inputs are no real uses) */
842 arity = get_irn_arity(node);
843 for (i2 = 0; i2 < arity; ++i2) {
844 ir_node *use = get_irn_n(node, i2);
845 value_used(block, use);
850 be_lv_foreach(lv, block, be_lv_state_in, idx) {
851 ir_node *lv_node = be_lv_get_irn(lv, block, idx);
855 /* set must be empty now */
856 for (i2 = 0; i2 < n_regs; ++i2) {
857 if (registers[i2] == NULL)
860 ir_fprintf(stderr, "Verify warning: Node %+F not live-in and no def found in block %+F(%s)\n",
861 registers[i2], block, get_irg_dump_name(irg));
/* Public entry point for register-allocation verification.  Communicates
 * with the walker through the file-scope statics above (env passed to the
 * walk is therefore NULL).  Returns 1 if no problem was found.
 * NOTE(review): the file-scope 'irg' is read here but no assignment from the
 * 'new_irg' parameter is visible at this point — presumably 'irg = new_irg;'
 * precedes these lines; confirm. */
867 int be_verify_register_allocation(ir_graph *new_irg)
870 arch_env = be_get_irg_arch_env(irg);
871 lv = be_liveness(irg);
874 be_liveness_assure_sets(lv);
875 irg_block_walk_graph(irg, verify_block_register_allocation, NULL, NULL);
877 be_liveness_free(lv);
879 return !problem_found;