2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Interblock liveness analysis.
23 * @author Sebastian Hack
29 /* statev is expensive here, only enable when needed */
30 #define DISABLE_STATEV
33 #include "iredges_t.h"
35 #include "irprintf_t.h"
38 #include "irnodeset.h"
47 #include "besched_t.h"
50 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
52 #define LV_STD_SIZE 64
54 /* if defined, use binary search for already live nodes, else linear */
55 #define LV_USE_BINARY_SEARCH
56 #undef LV_INTESIVE_CHECKS
58 void be_live_chk_compare(be_lv_t *lv, lv_chk_t *lvc);
61 * Filter out some nodes for which we never need liveness.
63 * @param irn the node to check
64 * @return 0 if no liveness info is needed, 1 else
/* Returns non-zero iff liveness should be tracked for this node.
 * Dispatches on the node's opcode; the case list is not visible in this
 * listing, so which opcodes are excluded must be confirmed in the full file. */
66 static inline int is_liveness_node(const ir_node *irn)
68 	switch (get_irn_opcode(irn)) {
/* Out-of-line wrapper over the _be_lv_next_irn() inline/macro:
 * advances iteration over the nodes live in block bl that match flags. */
80 int (be_lv_next_irn)(const struct _be_lv_t *lv, const ir_node *bl, unsigned flags, int i)
82 	return _be_lv_next_irn(lv, bl, flags, i);
/* Out-of-line wrapper: fetch the i-th live node recorded for block bl. */
85 const ir_node * (be_lv_get_irn)(const struct _be_lv_t *lv, const ir_node *bl, int i)
87 	return _be_lv_get_irn(lv, bl, i);
/* Out-of-line wrapper: is irn live-in at block? (queries be_lv_state_in). */
90 int (be_is_live_in)(const be_lv_t *lv, const ir_node *block, const ir_node *irn)
92 	return _be_is_live_xxx(lv, block, irn, be_lv_state_in);
/* Out-of-line wrapper: is irn live-out at block? (queries be_lv_state_out). */
95 int (be_is_live_out)(const be_lv_t *lv, const ir_node *block, const ir_node *irn)
97 	return _be_is_live_xxx(lv, block, irn, be_lv_state_out);
/* Out-of-line wrapper: is irn live at the end of block? (be_lv_state_end). */
100 int (be_is_live_end)(const be_lv_t *lv, const ir_node *block, const ir_node *irn)
102 	return _be_is_live_xxx(lv, block, irn, be_lv_state_end);
106 #ifdef LV_USE_BINARY_SEARCH
/* Binary search for node index idx inside a block's sorted live-set array.
 * arr[0] is a metadata header (n_members/n_size); real entries start at
 * arr[1], hence the payload pointer below. Returns the payload position. */
107 static inline unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx)
109 	struct _be_lv_info_t *payload = arr + 1;
111 	unsigned n = arr[0].u.head.n_members;
/* Standard lo/hi bisection; midpoint computed overflow-safely. */
120 		int md          = lo + ((hi - lo) >> 1);
121 		unsigned md_idx = payload[md].u.node.idx;
125 		else if(idx < md_idx)
129 	assert(payload[res].u.node.idx == idx);
/* Expensive sortedness verification, normally compiled out.
 * NOTE(review): the macro name is misspelled ("INTESIVE") but consistently
 * so throughout the file — do not fix one occurrence in isolation. */
136 #ifdef LV_INTESIVE_CHECKS
139 		for (i = res; i < n; ++i)
140 			assert(payload[i].u.node.idx >= idx);
142 		for(i = 0; i < res; ++i)
143 			assert(payload[i].u.node.idx < idx);
153 * This function searches linearly for the node in the array.
/* Fallback variant (compiled when LV_USE_BINARY_SEARCH is not defined):
 * same contract as the binary version, O(n) scan over the payload entries. */
155 static inline unsigned _be_liveness_bsearch(struct _be_lv_info_t *arr, unsigned idx) {
156 	unsigned n = arr[0].u.head.n_members;
159 	for(i = 0; i < n; ++i) {
160 		if(arr[i + 1].u.node.idx == idx)
/* Look up the liveness record of irn in block bl, or NULL when irn is not
 * recorded there. Read-only counterpart of be_lv_get_or_set(). */
168 struct _be_lv_info_node_t *be_lv_get(const struct _be_lv_t *li, const ir_node *bl, const ir_node *irn)
170 	struct _be_lv_info_t *irn_live;
171 	struct _be_lv_info_node_t *res = NULL;
/* Per-block live-set array is attached to the block via the phase module. */
174 	irn_live = phase_get_irn_data(&li->ph, bl);
176 		unsigned idx = get_irn_idx(irn);
178 		/* Get the position of the index in the array. */
179 		int pos = _be_liveness_bsearch(irn_live, idx);
181 		/* Get the record in question. 1 must be added, since the first record contains information about the array and must be skipped. */
182 		struct _be_lv_info_node_t *rec = &irn_live[pos + 1].u.node;
184 		/* Check, if the irn is in deed in the array. */
188 	stat_ev_tim_pop("be_lv_get");
/* Look up the liveness record of irn in block bl, inserting a fresh record
 * (keeping the array sorted by node index) when none exists yet.
 * Grows the per-block array on demand; returns the record, never NULL. */
193 static struct _be_lv_info_node_t *be_lv_get_or_set(struct _be_lv_t *li, ir_node *bl, ir_node *irn)
195 	struct _be_lv_info_t *irn_live = phase_get_or_set_irn_data(&li->ph, bl);
197 	unsigned idx = get_irn_idx(irn);
199 	/* Get the position of the index in the array. */
200 	unsigned pos = _be_liveness_bsearch(irn_live, idx);
202 	/* Get the record in question. 1 must be added, since the first record contains information about the array and must be skipped. */
203 	struct _be_lv_info_node_t *res = &irn_live[pos + 1].u.node;
205 	/* Check, if the irn is in deed in the array. */
206 	if(res->idx != idx) {
207 		struct _be_lv_info_t *payload;
208 		unsigned n_members = irn_live[0].u.head.n_members;
209 		unsigned n_size    = irn_live[0].u.head.n_size;
212 		if(n_members + 1 >= n_size) {
213 			/* double the array size. Remember that the first entry is
214 			 * metadata about the array and not a real array element */
215 			unsigned old_size_bytes  = (n_size + 1) * sizeof(irn_live[0]);
216 			unsigned new_size        = (2 * n_size) + 1;
217 			size_t   new_size_bytes  = new_size * sizeof(irn_live[0]);
218 			struct _be_lv_info_t *nw = phase_alloc(&li->ph, new_size_bytes);
/* NOTE(review): the old allocation is phase-owned, so it is not freed here;
 * it is reclaimed when the phase obstack is torn down. */
219 			memcpy(nw, irn_live, old_size_bytes);
220 			memset(((char*) nw) + old_size_bytes, 0,
221 			       new_size_bytes - old_size_bytes);
222 			nw[0].u.head.n_size = new_size - 1;
224 			phase_set_irn_data(&li->ph, bl, nw);
/* Shift the tail up by one slot to keep the array sorted by node index. */
227 		payload = &irn_live[1];
228 		for(i = n_members; i > pos; --i) {
229 			payload[i] = payload[i - 1];
232 		++irn_live[0].u.head.n_members;
234 		res = &payload[pos].u.node;
/* Expensive sortedness verification, normally compiled out. */
239 #ifdef LV_INTESIVE_CHECKS
242 		unsigned n = irn_live[0].u.head.n_members;
244 		struct _be_lv_info_t *payload = &irn_live[1];
246 		for(i = 0; i < n; ++i) {
247 			assert(payload[i].u.node.idx >= last);
248 			last = payload[i].u.node.idx;
257 * Removes a node from the list of live variables of a block.
258 * @return 1 if the node was live at that block, 0 if not.
259 * (Compacts the sorted array and zeroes the vacated tail slot.)
260 static int be_lv_remove(struct _be_lv_t *li, const ir_node *bl,
263 	struct _be_lv_info_t *irn_live = phase_get_irn_data(&li->ph, bl);
266 		unsigned n   = irn_live[0].u.head.n_members;
267 		unsigned idx = get_irn_idx(irn);
268 		unsigned pos = _be_liveness_bsearch(irn_live, idx);
269 		struct _be_lv_info_t *payload  = irn_live + 1;
270 		struct _be_lv_info_node_t *res = &payload[pos].u.node;
272 		/* The node is in deed in the block's array. Let's remove it. */
273 		if(res->idx == idx) {
/* Shift everything after pos down by one to close the gap. */
276 			for(i = pos + 1; i < n; ++i)
277 				payload[i - 1] = payload[i];
/* Clear the now-unused last slot so stale data cannot be mistaken for a record. */
279 			payload[n - 1].u.node.idx   = 0;
280 			payload[n - 1].u.node.flags = 0;
282 			--irn_live[0].u.head.n_members;
283 			DBG((dbg, LEVEL_3, "\tdeleting %+F from %+F at pos %d\n", irn, bl, pos));
/* Record irn in the global lv->nodes bitset, growing the bitset (to 2*idx)
 * when the node's index does not fit the current size. */
291 static void register_node(be_lv_t *lv, const ir_node *irn)
293 	unsigned idx = get_irn_idx(irn);
294 	if(idx >= bitset_size(lv->nodes)) {
295 		bitset_t *nw = bitset_malloc(2 * idx);
296 		bitset_copy(nw, lv->nodes);
297 		bitset_free(lv->nodes);
301 	bitset_set(lv->nodes, idx);
305 * Mark a node as live-in in a block.
306 * (Creates the record if missing and ORs in the live-in flag.)
307 static inline void mark_live_in(be_lv_t *lv, ir_node *block, ir_node *irn)
309 	struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
310 	DBG((dbg, LEVEL_2, "marking %+F live in at %+F\n", irn, block));
311 	n->flags |= be_lv_state_in;
312 	register_node(lv, irn);
316 * Mark a node as live-out in a block.
317 * (live-out implies live-end, hence both flags are set together.)
318 static inline void mark_live_out(be_lv_t *lv, ir_node *block, ir_node *irn)
320 	struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
321 	DBG((dbg, LEVEL_2, "marking %+F live out at %+F\n", irn, block));
322 	n->flags |= be_lv_state_out | be_lv_state_end;
323 	register_node(lv, irn);
327 * Mark a node as live-end in a block.
328 * (live-end only: the value reaches the block end but is not live-out.)
329 static inline void mark_live_end(be_lv_t *lv, ir_node *block, ir_node *irn)
331 	struct _be_lv_info_node_t *n = be_lv_get_or_set(lv, block, irn);
332 	DBG((dbg, LEVEL_2, "marking %+F live end at %+F\n", irn, block));
333 	n->flags |= be_lv_state_end;
334 	register_node(lv, irn);
/* NOTE(review): these are the members of the per-computation environment
 * struct used as the file-scope `re` variable below; its opening line is
 * not visible in this listing — confirm the struct tag in the full file. */
338 	be_lv_t *lv;         /**< The liveness object. */
339 	ir_node *def;        /**< The node (value). */
340 	ir_node *def_block;  /**< The block of def. */
341 	bitset_t *visited;   /**< A set were all visited blocks are recorded. */
345 * Mark a node (value) live out at a certain block. Do this also
346 * transitively, i.e. if the block is not the block of the value's
347 * definition, all predecessors are also marked live.
348 * @param block The block to mark the value live out of.
349 * @param is_true_out Is the node real out there or only live at the end
350 * (reads the current value/def-block from the file-scope `re` environment).
352 static void live_end_at_block(ir_node *block, int is_true_out)
355 	ir_node *def = re.def;
/* Always live at the block end; additionally live-out when truly used beyond. */
358 	mark_live_end(lv, block, def);
360 		mark_live_out(lv, block, def);
/* The visited set cuts off the upward recursion on already-handled blocks. */
362 	visited = re.visited;
363 	if (!bitset_contains_irn(visited, block)) {
364 		bitset_add_irn(visited, block);
367 		 * If this block is not the definition block, we have to go up
370 		if (re.def_block != block) {
373 			mark_live_in(lv, block, def);
/* Recurse into every control-flow predecessor; is_true_out=1 because the
 * value flows through this block into the predecessor's successor. */
375 			for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i)
376 				live_end_at_block(get_Block_cfgpred_block(block, i), 1);
/* Walker payload structs; member lists are truncated in this listing. */
381 typedef struct _lv_walker_t {
386 typedef struct lv_remove_walker_t {
389 } lv_remove_walker_t;
393 * Liveness analysis for a value.
394 * Compute the set of all blocks a value is live in.
395 * @param irn The node (value).
396 * (Walks all out-edges/uses of irn and propagates liveness backwards.)
397 static void liveness_for_node(ir_node *irn)
399 	const ir_edge_t *edge;
/* Reset the shared environment for this value. */
402 	bitset_clear_all(re.visited);
403 	def_block = get_nodes_block(irn);
406 	re.def_block = def_block;
408 	/* Go over all uses of the value */
409 	foreach_out_edge(irn, edge) {
410 		ir_node *use = edge->src;
413 		DBG((dbg, LEVEL_4, "%+F: use at %+F, pos %d in %+F\n", irn, use, edge->pos, get_block(use)));
414 		assert(get_irn_n(use, edge->pos) == irn);
417 		 * If the usage is no data node, skip this use, since it does not
418 		 * affect the liveness of the node.
420 		if (!is_liveness_node(use))
423 		/* Get the block where the usage is in. */
424 		use_block = get_nodes_block(use);
427 		 * If the use is a phi function, determine the corresponding block
428 		 * through which the value reaches the phi function and mark the
429 		 * value as live out of that block.
/* is_true_out=0: a phi use only needs the value at the predecessor's end. */
432 			ir_node *pred_block = get_Block_cfgpred_block(use_block, edge->pos);
433 			live_end_at_block(pred_block, 0);
437 		 * Else, the value is live in at this block. Mark it and call live
438 		 * out on the predecessors.
440 		else if (def_block != use_block) {
443 			mark_live_in(re.lv, use_block, irn);
445 			for (i = get_Block_n_cfgpreds(use_block) - 1; i >= 0; --i) {
446 				ir_node *pred_block = get_Block_cfgpred_block(use_block, i);
447 				live_end_at_block(pred_block, 1);
/* Block walker callback: drop the walker's node from this block's live set. */
453 static void lv_remove_irn_walker(ir_node *bl, void *data)
455 	lv_remove_walker_t *w = data;
456 	be_lv_remove(w->lv, bl, w->irn);
/* Map the three liveness state bits (in/end/out) to a short static string
 * for dumping; only the low 3 flag bits are significant. */
459 static const char *lv_flags_to_str(unsigned flags)
461 	static const char *states[] = {
472 	return states[flags & 7];
/* hook_node_info callback: print this block's live set (flags + node) to f. */
475 static void lv_dump_block(void *context, FILE *f, const ir_node *bl)
478 	be_lv_t *lv = context;
479 	struct _be_lv_info_t *info = phase_get_irn_data(&lv->ph, bl);
481 	fprintf(f, "liveness:\n");
483 	unsigned n = info[0].u.head.n_members;
/* NOTE(review): the inner record pointer `n` shadows the outer count `n`
 * (present in the original); works because the count is read before. */
486 	for(i = 0; i < n; ++i) {
487 		struct _be_lv_info_node_t *n = &info[i+1].u.node;
488 		ir_fprintf(f, "%s %+F\n", lv_flags_to_str(n->flags), get_idx_irn(lv->irg, n->idx));
/* Phase callback: allocate and zero a fresh per-block live-set array of
 * LV_STD_SIZE entries; slot 0 is the header, so capacity is LV_STD_SIZE-1. */
494 static void *lv_phase_data_init(ir_phase *phase, const ir_node *irn, void *old)
496 	struct _be_lv_info_t *info = phase_alloc(phase, LV_STD_SIZE * sizeof(info[0]));
500 	memset(info, 0, LV_STD_SIZE * sizeof(info[0]));
501 	info[0].u.head.n_size = LV_STD_SIZE - 1;
506 * Walker, collect all nodes for which we want calculate liveness info
507 * (stores each relevant node at its index in the caller-provided array).
509 static void collect_liveness_nodes(ir_node *irn, void *data)
511 	ir_node **nodes = data;
512 	if (is_liveness_node(irn))
513 		nodes[get_irn_idx(irn)] = irn;
/* Full liveness computation for lv->irg: collect all relevant nodes indexed
 * by node index, then run liveness_for_node() on each one in index order. */
516 static void compute_liveness(be_lv_t *lv)
522 	n = get_irg_last_idx(lv->irg);
523 	nodes = NEW_ARR_F(ir_node *, n);
524 	memset(nodes, 0, sizeof(nodes[0]) * n);
527 	 * inserting the variables sorted by their ID is probably
528 	 * more efficient since the binary sorted set insertion
529 	 * will not need to move around the data.
531 	irg_walk_graph(lv->irg, NULL, collect_liveness_nodes, nodes);
/* Shared visited bitset reused across all liveness_for_node() calls. */
534 	re.visited = bitset_malloc(n);
536 	for (i = 0; i < n; ++i) {
537 		if (nodes[i] != NULL)
538 			liveness_for_node(nodes[i]);
/* Register the dump hook so graph dumps include liveness info. */
543 	register_hook(hook_node_info, &lv->hook_info);
544 	stat_ev_tim_pop("be_lv_sets_cons");
/* Ensure the precise per-block liveness sets exist: allocates the node
 * bitset, (re)initializes the phase, and runs the full computation. */
547 void be_liveness_assure_sets(be_lv_t *lv)
550 		BE_TIMER_PUSH(t_live);
552 		lv->nodes = bitset_malloc(2 * get_irg_last_idx(lv->irg));
553 		phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
554 		compute_liveness(lv);
555 		/* be_live_chk_compare(lv, lv->lvc); */
557 		BE_TIMER_POP(t_live);
/* Ensure liveness info is available for checking; currently falls through
 * to the full set construction (wrapped in the verify timer). */
561 void be_liveness_assure_chk(be_lv_t *lv)
564 	BE_TIMER_PUSH(t_verify);
565 	be_liveness_assure_sets(lv);
566 	BE_TIMER_POP(t_verify);
/* Drop the computed liveness sets (hook + node bitset); safe to call when
 * lv is NULL or nothing was computed. */
572 void be_liveness_invalidate(be_lv_t *lv)
574 	if (lv && lv->nodes) {
575 		unregister_hook(hook_node_info, &lv->hook_info);
577 		bitset_free(lv->nodes);
582 /* Compute the inter block liveness for a graph. */
/* Allocates a zeroed be_lv_t, sets up the DFS/check helpers and the dump
 * hook. NOTE(review): actual set construction appears to happen lazily via
 * be_liveness_assure_sets() — confirm in the full file. */
583 be_lv_t *be_liveness(ir_graph *irg)
585 	be_lv_t *lv = XMALLOCZ(be_lv_t);
589 	lv->dfs  = dfs_new(&absgraph_irg_cfg_succ, irg);
590 	lv->lvc  = lv_chk_new(lv->irg, lv->dfs);
592 	lv->hook_info.context = lv;
593 	lv->hook_info.hook._hook_node_info = lv_dump_block;
/* Recompute liveness from scratch: grow/clear the node bitset as needed,
 * reinitialize the phase data, and rerun the full computation. */
598 void be_liveness_recompute(be_lv_t *lv)
602 	BE_TIMER_PUSH(t_live);
603 	last_idx = get_irg_last_idx(lv->irg);
604 	if(last_idx >= bitset_size(lv->nodes)) {
605 		bitset_free(lv->nodes);
606 		lv->nodes = bitset_malloc(last_idx * 2);
608 		bitset_clear_all(lv->nodes);
611 	phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
612 	compute_liveness(lv);
614 	BE_TIMER_POP(t_live);
/* Destroy a liveness object: invalidate the sets, then free the checker. */
618 void be_liveness_free(be_lv_t *lv)
620 	be_liveness_invalidate(lv);
622 	lv_chk_free(lv->lvc);
628 void be_liveness_remove(be_lv_t *lv, const ir_node *irn)
631 		unsigned idx = get_irn_idx(irn);
632 		lv_remove_walker_t w;
635 		 * Removes a single irn from the liveness information.
636 		 * Since an irn can only be live at blocks dominated by the block of its
637 		 * definition, we only have to process that dominance subtree.
641 		dom_tree_walk(get_nodes_block(irn), lv_remove_irn_walker, NULL, &w);
/* Also clear the global registration bit, guarding against stale indices. */
642 		if(idx < bitset_size(lv->nodes))
643 			bitset_clear(lv->nodes, idx);
/* Add liveness information for a (new) node by running the per-node
 * analysis with a temporary visited bitset. No-op when sets are not
 * computed or the node is filtered out by is_liveness_node(). */
647 void be_liveness_introduce(be_lv_t *lv, ir_node *irn)
649 	/* Don't compute liveness information for non-data nodes. */
650 	if (lv->nodes && is_liveness_node(irn)) {
652 		re.visited  = bitset_malloc(get_irg_last_idx(lv->irg));
653 		liveness_for_node(irn);
654 		bitset_free(re.visited);
/* Refresh a node's liveness info: remove the stale entries, re-analyze. */
658 void be_liveness_update(be_lv_t *lv, ir_node *irn)
660 	be_liveness_remove(lv, irn);
661 	be_liveness_introduce(lv, irn);
/* Block walker for be_liveness_check(): compares the block's current live
 * set against a freshly computed one and reports every mismatch to stderr. */
664 static void lv_check_walker(ir_node *bl, void *data)
666 	lv_walker_t *w = data;
668 	be_lv_t *fresh = w->data;
670 	struct _be_lv_info_t *curr = phase_get_irn_data(&lv->ph, bl);
671 	struct _be_lv_info_t *fr   = phase_get_irn_data(&fresh->ph, bl);
/* Case 1: fresh info says the block's set should be empty, but it isn't. */
673 	if(!fr && curr && curr[0].u.head.n_members > 0) {
676 		ir_fprintf(stderr, "%+F liveness should be empty but current liveness contains:\n", bl);
677 		for(i = 0; i < curr[0].u.head.n_members; ++i) {
678 			ir_fprintf(stderr, "\t%+F\n", get_idx_irn(lv->irg, curr[1 + i].u.node.idx));
/* Case 2: both sets exist — compare sizes and dump both on mismatch. */
683 		unsigned n_curr  = curr[0].u.head.n_members;
684 		unsigned n_fresh = fr[0].u.head.n_members;
688 		if(n_curr != n_fresh) {
689 			ir_fprintf(stderr, "%+F: liveness set sizes differ. curr %d, correct %d\n", bl, n_curr, n_fresh);
691 			ir_fprintf(stderr, "current:\n");
692 			for(i = 0; i < n_curr; ++i) {
693 				struct _be_lv_info_node_t *n = &curr[1 + i].u.node;
694 				ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
697 			ir_fprintf(stderr, "correct:\n");
698 			for(i = 0; i < n_fresh; ++i) {
699 				struct _be_lv_info_node_t *n = &fr[1 + i].u.node;
700 				ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
/* Self-check: recompute liveness into a fresh object, walk all blocks
 * comparing against the current sets, then free the fresh copy. */
706 void be_liveness_check(be_lv_t *lv)
709 		be_lv_t *fresh = be_liveness(lv->irg);
713 		irg_block_walk_graph(lv->irg, lv_check_walker, NULL, &w);
714 		be_liveness_free(fresh);
/* Block walker: forward to lv_dump_block (w->data carries the FILE*). */
718 static void lv_dump_block_walker(ir_node *irn, void *data)
720 	lv_walker_t *w = data;
722 	lv_dump_block(w->lv, w->data, irn);
726 /* Dump the liveness information for a graph. */
/* (const is cast away only to fit the walker struct; blocks are not modified.) */
727 void be_liveness_dump(const be_lv_t *lv, FILE *f)
731 	w.lv   = (be_lv_t *) lv;
733 	irg_block_walk_graph(lv->irg, lv_dump_block_walker, NULL, &w);
736 /* Dump the liveness information for a graph. */
/* Convenience wrapper: dumps to "<irg>_<cls_name>-live.txt" in text mode. */
737 void be_liveness_dumpto(const be_lv_t *lv, const char *cls_name)
741 	ir_snprintf(buf, sizeof(buf), "%F_%s-live.txt", lv->irg, cls_name);
742 	if((f = fopen(buf, "wt")) != NULL) {
743 		be_liveness_dump(lv, f);
749 * Walker: checks the every predecessors of a node dominate
750 * the node's own block (SSA dominance property); phi operands are checked
751 * against the corresponding control-flow predecessor block instead.
752 static void dom_check(ir_node *irn, void *data)
754 	int *problem_found = data;
/* Skip blocks and the End node; they carry no dominance obligation here. */
756 	if(!is_Block(irn) && irn != get_irg_end(get_irn_irg(irn))) {
758 		ir_node *bl = get_nodes_block(irn);
760 		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
761 			ir_node *op     = get_irn_n(irn, i);
762 			ir_node *def_bl = get_nodes_block(op);
763 			ir_node *use_bl = bl;
/* For phi operands the effective use site is the i-th predecessor block. */
766 				use_bl = get_Block_cfgpred_block(bl, i);
768 			if(get_irn_opcode(use_bl) != iro_Bad
769 			     && get_irn_opcode(def_bl) != iro_Bad
770 			     && !block_dominates(def_bl, use_bl)) {
771 				ir_fprintf(stderr, "Verify warning: %+F in %+F must dominate %+F for user %+F (%s)\n", op, def_bl, use_bl, irn, get_irg_dump_name(get_irn_irg(op)));
778 /* Check, if the SSA dominance property is fulfilled. */
/* @return non-zero when no violation was found (inverted problem flag). */
779 int be_check_dominance(ir_graph *irg)
781 	int problem_found = 0;
784 	irg_walk_graph(irg, dom_check, NULL, &problem_found);
786 	return !problem_found;
/* Backward liveness transfer for one scheduled node: remove the values it
 * defines from the live set, then add the values it uses. Must not be
 * called on Phi nodes (see assertion). */
789 void be_liveness_transfer(const arch_register_class_t *cls,
790 						  ir_node *node, ir_nodeset_t *nodeset)
794 	/* You should better break out of your loop when hitting the first phi
796 	assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
/* A mode_T node defines its values via Proj nodes — kill each relevant Proj. */
798 	if (get_irn_mode(node) == mode_T) {
799 		const ir_edge_t *edge;
801 		foreach_out_edge(node, edge) {
802 			ir_node *proj = get_edge_src_irn(edge);
804 			if (arch_irn_consider_in_reg_alloc(cls, proj)) {
805 				ir_nodeset_remove(nodeset, proj);
808 	} else if (arch_irn_consider_in_reg_alloc(cls, node)) {
809 		ir_nodeset_remove(nodeset, node);
/* Gen: every register-allocatable operand becomes live before this node. */
812 	arity = get_irn_arity(node);
813 	for (i = 0; i < arity; ++i) {
814 		ir_node *op = get_irn_n(node, i);
816 		if (arch_irn_consider_in_reg_alloc(cls, op))
817 			ir_nodeset_insert(nodeset, op);
/* Collect into `live` every node of register class cls that is live at the
 * end of `block`. Requires the liveness sets to have been computed. */
823 void be_liveness_end_of_block(const be_lv_t *lv,
824                               const arch_register_class_t *cls,
825                               const ir_node *block, ir_nodeset_t *live)
829 	assert(lv->nodes && "live sets must be computed");
830 	be_lv_foreach(lv, block, be_lv_state_end, i) {
831 		ir_node *node = be_lv_get_irn(lv, block, i);
832 		if (!arch_irn_consider_in_reg_alloc(cls, node))
835 		ir_nodeset_insert(live, node);
/* Compute the set of cls-values live at program point `pos`: start from the
 * block-end live set and transfer backwards through the schedule until pos
 * is reached. */
841 void be_liveness_nodes_live_at(const be_lv_t *lv,
842                                const arch_register_class_t *cls,
843                                const ir_node *pos, ir_nodeset_t *live)
845 	const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
848 	be_liveness_end_of_block(lv, cls, bl, live);
849 	sched_foreach_reverse(bl, irn) {
851 		 * If we encounter the node we want to insert the Perm after,
852 		 * exit immediately, so that this node is still live
857 		be_liveness_transfer(cls, irn, live);
/* Walker: append the visited node to the obstack-based pointer list. */
861 static void collect_node(ir_node *irn, void *data)
863 	struct obstack *obst = data;
864 	obstack_ptr_grow(obst, irn);
/* Debug helper: cross-check the precise liveness sets (lv) against the
 * fast liveness checker (lvc) for every (node, block) pair and report each
 * disagreement on stderr. Both NULL-terminated lists live on one obstack. */
867 void be_live_chk_compare(be_lv_t *lv, lv_chk_t *lvc)
869 	ir_graph *irg = lv->irg;
878 	irg_block_walk_graph(irg, collect_node, NULL, &obst);
879 	obstack_ptr_grow(&obst, NULL);
880 	blocks = obstack_finish(&obst);
882 	irg_walk_graph(irg, collect_node, NULL, &obst);
883 	obstack_ptr_grow(&obst, NULL);
884 	nodes = obstack_finish(&obst);
886 	stat_ev_ctx_push("be_lv_chk_compare");
887 	for (j = 0; nodes[j]; ++j) {
888 		ir_node *irn = nodes[j];
889 		for (i = 0; blocks[i]; ++i) {
890 			ir_node *bl = blocks[i];
/* Blocks collected by the full graph walk are skipped as "values". */
892 			if (!is_Block(irn)) {
893 				int lvr_in  = be_is_live_in (lv, bl, irn);
894 				int lvr_out = be_is_live_out(lv, bl, irn);
895 				int lvr_end = be_is_live_end(lv, bl, irn);
897 				int lvc_in  = lv_chk_bl_in (lvc, bl, irn);
898 				int lvc_out = lv_chk_bl_out(lvc, bl, irn);
899 				int lvc_end = lv_chk_bl_end(lvc, bl, irn);
901 				if (lvr_in - lvc_in != 0)
902 					ir_fprintf(stderr, "live in info for %+F at %+F differs: nml: %d, chk: %d\n", irn, bl, lvr_in, lvc_in);
904 				if (lvr_end - lvc_end != 0)
905 					ir_fprintf(stderr, "live end info for %+F at %+F differs: nml: %d, chk: %d\n", irn, bl, lvr_end, lvc_end);
907 				if (lvr_out - lvc_out != 0)
908 					ir_fprintf(stderr, "live out info for %+F at %+F differs: nml: %d, chk: %d\n", irn, bl, lvr_out, lvc_out);
912 	stat_ev_ctx_pop("be_lv_chk_compare");
/* Frees both lists (blocks and nodes) in one shot. */
914 	obstack_free(&obst, NULL);
/* Module constructor: register the debug channel for this file. */
917 void be_init_live(void)
919 	FIRM_DBG_REGISTER(dbg, "firm.be.liveness");
922 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_live);