/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   Interblock liveness analysis.
 * @author  Sebastian Hack
 */
#include "iredges_t.h"
#include "irprintf_t.h"
#include "irnodeset.h"

#include "besched_t.h"
45 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
47 /* see comment in compute_liveness() */
48 #define LV_COMPUTE_SORTED
49 #define LV_STD_SIZE 64
50 #define LV_USE_BINARY_SEARCH
51 #undef LV_INTESIVE_CHECKS
53 static INLINE int is_liveness_node(const ir_node *irn)
55 switch(get_irn_opcode(irn)) {
66 int (be_lv_next_irn)(const struct _be_lv_t *lv, const ir_node *bl, unsigned flags, int i)
68 return _be_lv_next_irn(lv, bl, flags, i);
71 const ir_node * (be_lv_get_irn)(const struct _be_lv_t *lv, const ir_node *bl, int i)
73 return _be_lv_get_irn(lv, bl, i);
76 int (be_is_live_in)(const be_lv_t *lv, const ir_node *block, const ir_node *irn)
78 return _be_is_live_xxx(lv, block, irn, be_lv_state_in);
81 int (be_is_live_out)(const be_lv_t *lv, const ir_node *block, const ir_node *irn)
83 return _be_is_live_xxx(lv, block, irn, be_lv_state_out);
86 int (be_is_live_end)(const be_lv_t *lv, const ir_node *block, const ir_node *irn)
88 return _be_is_live_xxx(lv, block, irn, be_lv_state_end);
92 #ifdef LV_USE_BINARY_SEARCH
93 static INLINE unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx)
95 struct _be_lv_info_t *payload = arr + 1;
97 unsigned n = arr[0].u.head.n_members;
106 if(idx < payload[0].u.node.idx)
109 if(idx > payload[n - 1].u.node.idx)
113 /* start a binary search for the requested node. */
115 int md = lo + ((hi - lo) >> 1);
116 unsigned md_idx = payload[md].u.node.idx;
120 else if(idx < md_idx)
124 assert(payload[res].u.node.idx == idx);
131 #ifdef LV_INTESIVE_CHECKS
134 for(i = res; i < n; ++i)
135 assert(payload[i].u.node.idx >= idx);
137 for(i = 0; i < res; ++i)
138 assert(payload[i].u.node.idx < idx);
148 * This function searches linearly for the node in the array.
150 static INLINE unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx) {
151 unsigned n = arr[0].u.head.n_members;
154 for(i = 0; i < n; ++i) {
155 if(arr[i + 1].u.node.idx == idx)
163 struct _be_lv_info_node_t *be_lv_get(const struct _be_lv_t *li, const ir_node *bl, const ir_node *irn)
165 struct _be_lv_info_t *irn_live = phase_get_irn_data(&li->ph, bl);
168 unsigned idx = get_irn_idx(irn);
170 /* Get the position of the index in the array. */
171 int pos = _be_liveness_bsearch(irn_live, idx);
173 /* Get the record in question. 1 must be added, since the first record contains information about the array and must be skipped. */
174 struct _be_lv_info_node_t *res = &irn_live[pos + 1].u.node;
176 /* Check, if the irn is in deed in the array. */
184 static struct _be_lv_info_node_t *be_lv_get_or_set(struct _be_lv_t *li, ir_node *bl, ir_node *irn)
186 struct _be_lv_info_t *irn_live = phase_get_or_set_irn_data(&li->ph, bl);
188 unsigned idx = get_irn_idx(irn);
190 /* Get the position of the index in the array. */
191 unsigned pos = _be_liveness_bsearch(irn_live, idx);
193 /* Get the record in question. 1 must be added, since the first record contains information about the array and must be skipped. */
194 struct _be_lv_info_node_t *res = &irn_live[pos + 1].u.node;
196 /* Check, if the irn is in deed in the array. */
197 if(res->idx != idx) {
198 struct _be_lv_info_t *payload;
199 unsigned n_members = irn_live[0].u.head.n_members;
200 unsigned n_size = irn_live[0].u.head.n_size;
203 if(n_members + 1 >= n_size) {
204 /* double the array size. Remember that the first entry is
205 * metadata about the array and not a real array element */
206 unsigned old_size_bytes = (n_size + 1) * sizeof(irn_live[0]);
207 unsigned new_size = (2 * n_size) + 1;
208 size_t new_size_bytes = new_size * sizeof(irn_live[0]);
209 struct _be_lv_info_t *nw = phase_alloc(&li->ph, new_size_bytes);
210 memcpy(nw, irn_live, old_size_bytes);
211 memset(((char*) nw) + old_size_bytes, 0,
212 new_size_bytes - old_size_bytes);
213 nw[0].u.head.n_size = new_size - 1;
215 phase_set_irn_data(&li->ph, bl, nw);
218 payload = &irn_live[1];
219 for(i = n_members; i > pos; --i) {
220 payload[i] = payload[i - 1];
223 ++irn_live[0].u.head.n_members;
225 res = &payload[pos].u.node;
230 #ifdef LV_INTESIVE_CHECKS
233 unsigned n = irn_live[0].u.head.n_members;
235 struct _be_lv_info_t *payload = &irn_live[1];
237 for(i = 0; i < n; ++i) {
238 assert(payload[i].u.node.idx >= last);
239 last = payload[i].u.node.idx;
248 * Removes a node from the list of live variables of a block.
249 * @return 1 if the node was live at that block, 0 if not.
251 static int be_lv_remove(struct _be_lv_t *li, ir_node *bl, ir_node *irn)
253 struct _be_lv_info_t *irn_live = phase_get_irn_data(&li->ph, bl);
256 unsigned n = irn_live[0].u.head.n_members;
257 unsigned idx = get_irn_idx(irn);
258 unsigned pos = _be_liveness_bsearch(irn_live, idx);
259 struct _be_lv_info_t *payload = irn_live + 1;
260 struct _be_lv_info_node_t *res = &payload[pos].u.node;
262 /* The node is in deed in the block's array. Let's remove it. */
263 if(res->idx == idx) {
266 for(i = pos + 1; i < n; ++i)
267 payload[i - 1] = payload[i];
269 payload[n - 1].u.node.idx = 0;
270 payload[n - 1].u.node.flags = 0;
272 --irn_live[0].u.head.n_members;
273 DBG((dbg, LEVEL_3, "\tdeleting %+F from %+F at pos %d\n", irn, bl, pos));
281 static void register_node(be_lv_t *lv, const ir_node *irn)
283 unsigned idx = get_irn_idx(irn);
284 if(idx >= bitset_size(lv->nodes)) {
285 bitset_t *nw = bitset_malloc(2 * idx);
286 bitset_copy(nw, lv->nodes);
287 bitset_free(lv->nodes);
291 bitset_set(lv->nodes, idx);
295 * Mark a node as live-in in a block.
297 static INLINE void mark_live_in(be_lv_t *lv, ir_node *block, ir_node *irn)
299 struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
300 DBG((dbg, LEVEL_2, "marking %+F live in at %+F\n", irn, block));
301 n->flags |= be_lv_state_in;
302 register_node(lv, irn);
306 * Mark a node as live-out in a block.
308 static INLINE void mark_live_out(be_lv_t *lv, ir_node *block, ir_node *irn)
310 struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
311 DBG((dbg, LEVEL_2, "marking %+F live out at %+F\n", irn, block));
312 n->flags |= be_lv_state_out | be_lv_state_end;
313 register_node(lv, irn);
317 * Mark a node as live-end in a block.
319 static INLINE void mark_live_end(be_lv_t *lv, ir_node *block, ir_node *irn)
321 struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
322 DBG((dbg, LEVEL_2, "marking %+F live end at %+F\n", irn, block));
323 n->flags |= be_lv_state_end;
324 register_node(lv, irn);
328 * Mark a node (value) live out at a certain block. Do this also
329 * transitively, i.e. if the block is not the block of the value's
330 * definition, all predecessors are also marked live.
331 * @param def The node (value).
332 * @param block The block to mark the value live out of.
333 * @param visited A set were all visited blocks are recorded.
334 * @param is_true_out Is the node real out there or only live at the end
337 static void live_end_at_block(be_lv_t *lv, ir_node *def, ir_node *block, bitset_t *visited, int is_true_out)
339 mark_live_end(lv, block, def);
341 mark_live_out(lv, block, def);
343 if(!bitset_contains_irn(visited, block)) {
344 bitset_add_irn(visited, block);
347 * If this block is not the definition block, we have to go up
350 if(get_nodes_block(def) != block) {
353 mark_live_in(lv, block, def);
355 for(i = 0, n = get_Block_n_cfgpreds(block); i < n; ++i)
356 live_end_at_block(lv, def, get_Block_cfgpred_block(block, i), visited, 1);
362 struct _lv_walker_t {
368 * Liveness analysis for a value.
369 * This functions is meant to be called by a firm walker, to compute the
370 * set of all blocks a value is live in.
371 * @param irn The node (value).
372 * @param env Ignored.
374 static void liveness_for_node(ir_node *irn, void *data)
376 struct _lv_walker_t *walker = data;
377 be_lv_t *lv = walker->lv;
378 bitset_t *visited = walker->data;
379 const ir_edge_t *edge;
382 /* Don't compute liveness information for non-data nodes. */
383 if(!is_liveness_node(irn))
386 bitset_clear_all(visited);
387 def_block = get_nodes_block(irn);
389 /* Go over all uses of the value */
390 foreach_out_edge(irn, edge) {
391 ir_node *use = edge->src;
395 * If the usage is no data node, skip this use, since it does not
396 * affect the liveness of the node.
398 if(!is_liveness_node(use))
401 /* Get the block where the usage is in. */
402 use_block = get_nodes_block(use);
405 * If the use is a phi function, determine the corresponding block
406 * through which the value reaches the phi function and mark the
407 * value as live out of that block.
410 ir_node *pred_block = get_Block_cfgpred_block(use_block, edge->pos);
411 live_end_at_block(lv, irn, pred_block, visited, 0);
415 * Else, the value is live in at this block. Mark it and call live
416 * out on the predecessors.
418 else if(def_block != use_block) {
421 mark_live_in(lv, use_block, irn);
423 for(i = 0, n = get_Block_n_cfgpreds(use_block); i < n; ++i) {
424 ir_node *pred_block = get_Block_cfgpred_block(use_block, i);
425 live_end_at_block(lv, irn, pred_block, visited, 1);
431 static void lv_remove_irn_walker(ir_node *bl, void *data)
433 struct _lv_walker_t *w = data;
435 ir_node *irn = w->data;
436 be_lv_remove(lv, bl, irn);
/**
 * Render the 3 liveness flag bits as a short "ieo" string
 * (in / end / out; '-' where the bit is clear).
 * NOTE(review): the table contents were truncated in the source; the
 * reconstruction assumes in=1, end=2, out=4 — confirm against belive_t.h.
 */
static const char *lv_flags_to_str(unsigned flags)
{
	static const char *states[] = {
		"---",
		"i--",
		"-e-",
		"ie-",
		"--o",
		"i-o",
		"-eo",
		"ieo"
	};

	return states[flags & 7];
}
455 static void lv_dump_block(void *context, FILE *f, const ir_node *bl)
458 be_lv_t *lv = context;
459 struct _be_lv_info_t *info = phase_get_irn_data(&lv->ph, bl);
461 fprintf(f, "liveness:\n");
463 unsigned n = info[0].u.head.n_members;
466 for(i = 0; i < n; ++i) {
467 struct _be_lv_info_node_t *n = &info[i+1].u.node;
468 ir_fprintf(f, "%s %+F\n", lv_flags_to_str(n->flags), get_idx_irn(lv->irg, n->idx));
474 static void *lv_phase_data_init(ir_phase *phase, ir_node *irn, void *old)
476 struct _be_lv_info_t *info = phase_alloc(phase, LV_STD_SIZE * sizeof(info[0]));
480 memset(info, 0, LV_STD_SIZE * sizeof(info[0]));
481 info[0].u.head.n_size = LV_STD_SIZE - 1;
485 static void collect_nodes(ir_node *irn, void *data)
487 struct obstack *obst = data;
488 if (is_liveness_node(irn))
489 obstack_ptr_grow(obst, irn);
492 static int node_idx_cmp(const void *a, const void *b)
494 int ia = get_irn_idx(a);
495 int ib = get_irn_idx(b);
499 static void compute_liveness(be_lv_t *lv)
502 struct _lv_walker_t w;
507 irg_walk_graph(lv->irg, collect_nodes, NULL, &obst);
508 n = obstack_object_size(&obst) / sizeof(nodes[0]);
509 nodes = obstack_finish(&obst);
512 * inserting the variables sorted by their ID is probably
513 * more efficient since the binary sorted set insertion
514 * will not need to move arounf the data.
515 * However, if sorting the variables a priori pays off
516 * needs to be checked, hence the define.
518 #ifdef LV_COMPUTE_SORTED
519 qsort(nodes, n, sizeof(nodes[0]), node_idx_cmp);
523 w.data = bitset_obstack_alloc(&obst, get_irg_last_idx(lv->irg));
525 for (i = 0; i < n; ++i)
526 liveness_for_node(nodes[i], &w);
528 obstack_free(&obst, NULL);
529 register_hook(hook_node_info, &lv->hook_info);
532 void be_liveness_assure_sets(be_lv_t *lv)
535 lv->nodes = bitset_malloc(2 * get_irg_last_idx(lv->irg));
536 phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
537 compute_liveness(lv);
541 void be_liveness_assure_chk(be_lv_t *lv)
544 be_liveness_assure_sets(be_lv_t *lv);
550 void be_liveness_invalidate(be_lv_t *lv)
552 if (lv && lv->nodes) {
553 unregister_hook(hook_node_info, &lv->hook_info);
555 bitset_free(lv->nodes);
560 /* Compute the inter block liveness for a graph. */
561 be_lv_t *be_liveness(ir_graph *irg)
563 be_lv_t *lv = xmalloc(sizeof(lv[0]));
565 memset(lv, 0, sizeof(lv[0]));
567 lv->lvc = lv_chk_new(irg);
568 lv->hook_info.context = lv;
569 lv->hook_info.hook._hook_node_info = lv_dump_block;
574 void be_liveness_recompute(be_lv_t *lv)
576 unsigned last_idx = get_irg_last_idx(lv->irg);
577 if(last_idx >= bitset_size(lv->nodes)) {
578 bitset_free(lv->nodes);
579 lv->nodes = bitset_malloc(last_idx * 2);
583 bitset_clear_all(lv->nodes);
586 phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
587 compute_liveness(lv);
591 void be_liveness_free(be_lv_t *lv)
593 be_liveness_invalidate(lv);
597 void be_liveness_remove(be_lv_t *lv, ir_node *irn)
600 unsigned idx = get_irn_idx(irn);
601 struct _lv_walker_t w;
604 * Removes a single irn from the liveness information.
605 * Since an irn can only be live at blocks dominated by the block of its
606 * definition, we only have to process that dominance subtree.
610 dom_tree_walk(get_nodes_block(irn), lv_remove_irn_walker, NULL, &w);
611 if(idx < bitset_size(lv->nodes))
612 bitset_clear(lv->nodes, idx);
616 void be_liveness_introduce(be_lv_t *lv, ir_node *irn)
619 struct _lv_walker_t w;
621 w.data = bitset_malloc(get_irg_last_idx(lv->irg));
622 liveness_for_node(irn, &w);
627 void be_liveness_update(be_lv_t *lv, ir_node *irn)
629 be_liveness_remove(lv, irn);
630 be_liveness_introduce(lv, irn);
633 static void lv_check_walker(ir_node *bl, void *data)
635 struct _lv_walker_t *w = data;
637 be_lv_t *fresh = w->data;
639 struct _be_lv_info_t *curr = phase_get_irn_data(&lv->ph, bl);
640 struct _be_lv_info_t *fr = phase_get_irn_data(&fresh->ph, bl);
642 if(!fr && curr && curr[0].u.head.n_members > 0) {
645 ir_fprintf(stderr, "%+F liveness should be empty but current liveness contains:\n", bl);
646 for(i = 0; i < curr[0].u.head.n_members; ++i) {
647 ir_fprintf(stderr, "\t%+F\n", get_idx_irn(lv->irg, curr[1 + i].u.node.idx));
652 unsigned n_curr = curr[0].u.head.n_members;
653 unsigned n_fresh = fr[0].u.head.n_members;
657 if(n_curr != n_fresh) {
658 ir_fprintf(stderr, "%+F: liveness set sizes differ. curr %d, correct %d\n", bl, n_curr, n_fresh);
660 ir_fprintf(stderr, "current:\n");
661 for(i = 0; i < n_curr; ++i) {
662 struct _be_lv_info_node_t *n = &curr[1 + i].u.node;
663 ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
666 ir_fprintf(stderr, "correct:\n");
667 for(i = 0; i < n_fresh; ++i) {
668 struct _be_lv_info_node_t *n = &fr[1 + i].u.node;
669 ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
675 void be_liveness_check(be_lv_t *lv)
677 struct _lv_walker_t w;
678 be_lv_t *fresh = be_liveness(lv->irg);
682 irg_block_walk_graph(lv->irg, lv_check_walker, NULL, &w);
683 be_liveness_free(fresh);
687 static void lv_dump_block_walker(ir_node *irn, void *data)
689 struct _lv_walker_t *w = data;
691 lv_dump_block(w->lv, w->data, irn);
695 /* Dump the liveness information for a graph. */
696 void be_liveness_dump(const be_lv_t *lv, FILE *f)
698 struct _lv_walker_t w;
700 w.lv = (be_lv_t *) lv;
702 irg_block_walk_graph(lv->irg, lv_dump_block_walker, NULL, &w);
705 /* Dump the liveness information for a graph. */
706 void be_liveness_dumpto(const be_lv_t *lv, const char *cls_name)
710 ir_snprintf(buf, sizeof(buf), "%F_%s-live.txt", lv->irg, cls_name);
711 if((f = fopen(buf, "wt")) != NULL) {
712 be_liveness_dump(lv, f);
718 * Walker: checks the every predecessors of a node dominate
721 static void dom_check(ir_node *irn, void *data)
723 int *problem_found = data;
725 if(!is_Block(irn) && irn != get_irg_end(get_irn_irg(irn))) {
727 ir_node *bl = get_nodes_block(irn);
729 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
730 ir_node *op = get_irn_n(irn, i);
731 ir_node *def_bl = get_nodes_block(op);
732 ir_node *use_bl = bl;
735 use_bl = get_Block_cfgpred_block(bl, i);
737 if(get_irn_opcode(use_bl) != iro_Bad
738 && get_irn_opcode(def_bl) != iro_Bad
739 && !block_dominates(def_bl, use_bl)) {
740 ir_fprintf(stderr, "Verify warning: %+F in %+F must dominate %+F for user %+F (%s)\n", op, def_bl, use_bl, irn, get_irg_dump_name(get_irn_irg(op)));
747 /* Check, if the SSA dominance property is fulfilled. */
748 int be_check_dominance(ir_graph *irg)
750 int problem_found = 0;
753 irg_walk_graph(irg, dom_check, NULL, &problem_found);
755 return !problem_found;
758 pset *be_liveness_transfer(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *irn, pset *live)
762 /* You should better break out of your loop when hitting the first phi function. */
763 assert(!is_Phi(irn) && "liveness_transfer produces invalid results for phi nodes");
765 #ifndef SCHEDULE_PROJS
766 /* kill all Proj's if a node is killed */
767 if (get_irn_mode(irn) == mode_T) {
768 const ir_edge_t *edge;
770 foreach_out_edge(irn, edge) {
771 ir_node *proj = get_edge_src_irn(edge);
773 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
774 ir_node *del = pset_remove_ptr(live, proj);
782 if (arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
783 ir_node *del = pset_remove_ptr(live, irn);
788 for (i = 0, n = get_irn_arity(irn); i < n; ++i) {
789 ir_node *op = get_irn_n(irn, i);
791 if (arch_irn_consider_in_reg_alloc(arch_env, cls, op))
792 pset_insert_ptr(live, op);
798 void be_liveness_transfer_ir_nodeset(const arch_env_t *arch_env,
799 const arch_register_class_t *cls,
800 ir_node *node, ir_nodeset_t *nodeset)
804 /* You should better break out of your loop when hitting the first phi
806 assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
808 #ifndef SCHEDULE_PROJS
809 /* kill all Proj's if a node is killed */
810 if (get_irn_mode(node) == mode_T) {
811 const ir_edge_t *edge;
813 foreach_out_edge(node, edge) {
814 ir_node *proj = get_edge_src_irn(edge);
816 if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
817 ir_nodeset_remove(nodeset, proj);
823 if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
824 ir_nodeset_remove(nodeset, node);
827 arity = get_irn_arity(node);
828 for (i = 0; i < arity; ++i) {
829 ir_node *op = get_irn_n(node, i);
831 if (arch_irn_consider_in_reg_alloc(arch_env, cls, op))
832 ir_nodeset_insert(nodeset, op);
838 pset *be_liveness_end_of_block(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *bl, pset *live)
841 assert(lv->nodes && "live sets must be computed");
842 be_lv_foreach(lv, bl, be_lv_state_end, i) {
843 ir_node *irn = be_lv_get_irn(lv, bl, i);
844 if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
845 pset_insert_ptr(live, irn);
851 void be_liveness_end_of_block_ir_nodeset(const be_lv_t *lv,
852 const arch_env_t *arch_env,
853 const arch_register_class_t *cls,
854 const ir_node *block,
859 assert(lv->nodes && "live sets must be computed");
860 be_lv_foreach(lv, block, be_lv_state_end, i) {
861 ir_node *node = be_lv_get_irn(lv, block, i);
862 if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
865 ir_nodeset_insert(live, node);
871 pset *be_liveness_nodes_live_at(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
873 const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
876 be_liveness_end_of_block(lv, arch_env, cls, bl, live);
877 sched_foreach_reverse(bl, irn) {
879 * If we encounter the node we want to insert the Perm after,
880 * exit immediately, so that this node is still live
885 be_liveness_transfer(arch_env, cls, irn, live);
891 pset *be_liveness_nodes_live_at_input(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
893 const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
896 assert(lv->nodes && "live sets must be computed");
897 be_liveness_end_of_block(lv, arch_env, cls, bl, live);
898 sched_foreach_reverse(bl, irn) {
899 be_liveness_transfer(arch_env, cls, irn, live);
907 static void collect_node(ir_node *irn, void *data)
909 struct obstack *obst = data;
910 obstack_ptr_grow(obst, irn);
913 void be_live_chk_compare(be_lv_t *lv, lv_chk_t *lvc)
915 ir_graph *irg = lv->irg;
924 irg_block_walk_graph(irg, collect_node, NULL, &obst);
925 obstack_ptr_grow(&obst, NULL);
926 blocks = obstack_finish(&obst);
928 irg_walk_graph(irg, collect_node, NULL, &obst);
929 obstack_ptr_grow(&obst, NULL);
930 nodes = obstack_finish(&obst);
932 for (i = 0; blocks[i]; ++i) {
933 ir_node *bl = blocks[i];
935 for (j = 0; nodes[j]; ++j) {
936 ir_node *irn = nodes[j];
937 if (!is_Block(irn)) {
938 int lvr_in = be_is_live_in (lv, bl, irn);
939 int lvr_out = be_is_live_out(lv, bl, irn);
940 int lvr_end = be_is_live_end(lv, bl, irn);
942 int lvc_in = lv_chk_bl_in (lvc, bl, irn);
943 int lvc_out = lv_chk_bl_out(lvc, bl, irn);
944 int lvc_end = lv_chk_bl_end(lvc, bl, irn);
946 if (lvr_in - lvc_in != 0)
947 ir_fprintf(stderr, "live in info for %+F at %+F differs: nml: %d, chk: %d\n", irn, bl, lvr_in, lvc_in);
949 if (lvr_end - lvc_end != 0)
950 ir_fprintf(stderr, "live end info for %+F at %+F differs: nml: %d, chk: %d\n", irn, bl, lvr_end, lvc_end);
952 if (lvr_out - lvc_out != 0)
953 ir_fprintf(stderr, "live out info for %+F at %+F differs: nml: %d, chk: %d\n", irn, bl, lvr_out, lvc_out);
959 obstack_free(&obst, NULL);
962 void be_init_live(void)
964 FIRM_DBG_REGISTER(dbg, "firm.be.liveness");
967 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_live);