#include "irflag_t.h"
#include "irhooks.h"
#include "irtools.h"
+#include "iropt_dbg.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
#endif
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
- assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
+
+ edges_deactivate(irg);
/* inform statistics that we started a dead-node elimination run */
hook_dead_node_elim(irg, 1);
* inlined procedure. The new entities must be in the link field of
* the entities.
*/
-static INLINE void
-copy_node_inline(ir_node *n, void *env) {
+static void copy_node_inline(ir_node *n, void *env) {
ir_node *nn;
ir_type *frame_tp = (ir_type *)env;
}
}
+/**
+ * Copies new predecessors of old node and move constants to
+ * the Start Block.
+ */
+static void copy_preds_inline(ir_node *n, void *env) {
+ ir_node *nn;
+
+ copy_preds(n, env);
+ nn = skip_Id(get_new_node(n));
+ if (is_irn_constlike(nn)) {
+ /* move Constants into the start block */
+ set_nodes_block(nn, get_irg_start_block(current_ir_graph));
+
+ n = identify_remember(current_ir_graph->value_table, nn);
+ if (nn != n) {
+ DBG_OPT_CSE(nn, n);
+ exchange(nn, n);
+ }
+ }
+}
+
/**
* Walker: checks if P_value_arg_base is used.
*/
ir_node *pre_call;
ir_node *post_call, *post_bl;
ir_node *in[pn_Start_max];
- ir_node *end, *end_bl;
+ ir_node *end, *end_bl, *block;
ir_node **res_pred;
ir_node **cf_pred;
+ ir_node **args_in;
ir_node *ret, *phi;
- int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity;
+ int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
enum exc_mode exc_handling;
- ir_type *called_frame, *curr_frame;
+ ir_type *called_frame, *curr_frame, *mtp, *ctp;
ir_entity *ent;
ir_graph *rem, *irg;
irg_inline_property prop = get_irg_inline_property(called_graph);
+ unsigned long visited;
if (prop == irg_inline_forbidden)
return 0;
ent = get_irg_entity(called_graph);
- /* Do not inline variadic functions. */
- if (get_method_variadicity(get_entity_type(ent)) == variadicity_variadic)
+ mtp = get_entity_type(ent);
+ ctp = get_Call_type(call);
+ if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
+    /* this is a bad feature of C: without a prototype, we can call a function with fewer
+       parameters than needed. Currently we don't support this, although it would be
+       easy to use Unknown then. */
return 0;
+ }
- assert(get_method_n_params(get_entity_type(ent)) ==
- get_method_n_params(get_Call_type(call)));
+ /* Argh, compiling C has some bad consequences:
+ the call type AND the method type might be different.
+       It is implementation dependent what happens in that case.
+ We support inlining, if the bitsize of the types matches AND
+ the same arithmetic is used. */
+ n_params = get_method_n_params(mtp);
+ for (i = n_params - 1; i >= 0; --i) {
+ ir_type *param_tp = get_method_param_type(mtp, i);
+ ir_type *arg_tp = get_method_param_type(ctp, i);
+
+ if (param_tp != arg_tp) {
+ ir_mode *pmode = get_type_mode(param_tp);
+ ir_mode *amode = get_type_mode(arg_tp);
+
+ if (pmode == NULL || amode == NULL)
+ return 0;
+ if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
+ return 0;
+ if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
+ return 0;
+ /* otherwise we can simply "reinterpret" the bits */
+ }
+ }
irg = get_irn_irg(call);
rem = current_ir_graph;
current_ir_graph = irg;
- DB((dbg, SET_LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
+ DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
/* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
rem_opt = get_opt_optimize();
else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
}
+ /* create the argument tuple */
+ NEW_ARR_A(ir_type *, args_in, n_params);
+
+ block = get_nodes_block(call);
+ for (i = n_params - 1; i >= 0; --i) {
+ ir_node *arg = get_Call_param(call, i);
+ ir_type *param_tp = get_method_param_type(mtp, i);
+ ir_mode *mode = get_type_mode(param_tp);
+
+ if (mode != get_irn_mode(arg)) {
+ arg = new_r_Conv(irg, block, arg, mode);
+ }
+ args_in[i] = arg;
+ }
+
/* --
the procedure and later replaces the Start node of the called graph.
Post_call is the old Call node and collects the results of the called
in[pn_Start_X_initial_exec] = new_Jmp();
in[pn_Start_M] = get_Call_mem(call);
in[pn_Start_P_frame_base] = get_irg_frame(irg);
- in[pn_Start_P_globals] = get_irg_globals(irg);
in[pn_Start_P_tls] = get_irg_tls(irg);
- in[pn_Start_T_args] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
+ in[pn_Start_T_args] = new_Tuple(n_params, args_in);
/* in[pn_Start_P_value_arg_base] = ??? */
assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
pre_call = new_Tuple(pn_Start_max - 1, in);
/* Visited flags in calling irg must be >= flag in called irg.
Else walker and arity computation will not work. */
if (get_irg_visited(irg) <= get_irg_visited(called_graph))
- set_irg_visited(irg, get_irg_visited(called_graph)+1);
+ set_irg_visited(irg, get_irg_visited(called_graph) + 1);
if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
set_irg_block_visited(irg, get_irg_block_visited(called_graph));
+ visited = get_irg_visited(irg);
+
/* Set pre_call as new Start node in link field of the start node of
calling graph and pre_calls block as new block for the start block
of calling graph.
Further mark these nodes so that they are not visited by the
copying. */
set_irn_link(get_irg_start(called_graph), pre_call);
- set_irn_visited(get_irg_start(called_graph), get_irg_visited(irg));
+ set_irn_visited(get_irg_start(called_graph), visited);
set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
- set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(irg));
- set_irn_link(get_irg_bad(called_graph), get_irg_bad(irg));
- set_irn_visited(get_irg_bad(called_graph), get_irg_visited(irg));
+ set_irn_visited(get_irg_start_block(called_graph), visited);
+
+ set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
+ set_irn_visited(get_irg_bad(called_graph), visited);
+
+ set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
+ set_irn_visited(get_irg_no_mem(called_graph), visited);
/* Initialize for compaction of in arrays */
inc_irg_block_visited(irg);
/* -- Performing dead node elimination inlines the graph -- */
/* Copies the nodes to the obstack of current_ir_graph. Updates links to new
entities. */
- irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
+ irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
get_irg_frame_type(called_graph));
/* Repair called_graph */
/* -- Precompute some values -- */
end_bl = get_new_node(get_irg_end_block(called_graph));
end = get_new_node(get_irg_end(called_graph));
- arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
res_pred = xmalloc(n_res * sizeof(*res_pred));
n_ret = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
n_ret++;
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_mem(ret);
n_ret++;
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret, *irn;
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
+ ret = skip_Proj(get_Block_cfgpred(end_bl, i));
if (is_Call(ret)) {
cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
n_exc++;
/* assert(exc_handling == 1 || no exceptions. ) */
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
ir_node *irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
/** Represents a possible inlinable call in a graph. */
typedef struct _call_entry call_entry;
struct _call_entry {
- ir_node *call; /**< the Call */
- ir_graph *callee; /**< the callee called here */
- call_entry *next; /**< for linking the next one */
- unsigned weight; /**< the weight of the call */
+ ir_node *call; /**< the Call node */
+ ir_graph *callee; /**< the callee IR-graph called here */
+ call_entry *next; /**< for linking the next one */
+ int loop_depth; /**< the loop depth of this call */
};
/**
/* The Call node calls a locally defined method. Remember to inline. */
inline_env_t *ienv = env;
call_entry *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
- entry->call = call;
- entry->callee = called_irg;
- entry->next = NULL;
- entry->weight = 0;
+ entry->call = call;
+ entry->callee = called_irg;
+ entry->next = NULL;
+ entry->loop_depth = 0;
if (ienv->tail == NULL)
ienv->head = entry;
int n_nodes; /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
int n_blocks; /**< Number of Blocks in graph without Start and End block. */
int n_nodes_orig; /**< for statistics */
- call_entry *call_head; /**< The head of the list of all call nodes in this graph. */
- call_entry *call_tail; /**< The tail of the list of all call nodes in this graph .*/
int n_call_nodes; /**< Number of Call nodes in the graph. */
int n_call_nodes_orig; /**< for statistics */
int n_callers; /**< Number of known graphs that call this graphs. */
int n_callers_orig; /**< for statistics */
unsigned got_inline:1; /**< Set, if at least one call inside this graph was inlined. */
unsigned local_vars:1; /**< Set, if a inlined function gets the address of an inlined variable. */
+ unsigned recursive:1; /**< Set, if this function is self recursive. */
+ call_entry *call_head; /**< The head of the list of all call nodes in this graph. */
+	call_entry *call_tail;   /**< The tail of the list of all call nodes in this graph. */
unsigned *local_weights; /**< Once allocated, the beneficial weight for transmitting local addresses. */
} inline_irg_env;
env->n_callers_orig = 0;
env->got_inline = 0;
env->local_vars = 0;
+ env->recursive = 0;
env->local_weights = NULL;
return env;
}
typedef struct walker_env {
- inline_irg_env *x; /**< the inline environment */
- char ignore_runtime; /**< the ignore runtime flag */
- char ignore_callers; /**< if set, do change callers data */
+ inline_irg_env *x; /**< the inline environment */
+ call_entry *last_call; /**< points to the last inserted call */
+ char ignore_runtime; /**< the ignore runtime flag */
+ char ignore_callers; /**< if set, do change callers data */
} wenv_t;
/**
++callee_env->n_callers;
++callee_env->n_callers_orig;
}
+ if (callee == current_ir_graph)
+ x->recursive = 1;
/* link it in the list of possible inlinable entries */
entry = obstack_alloc(&temp_obst, sizeof(*entry));
- entry->call = call;
- entry->callee = callee;
- entry->next = NULL;
- if (x->call_tail == NULL)
+ entry->call = call;
+ entry->callee = callee;
+ entry->next = NULL;
+ entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
+
+ /* note: we use call_tail here as a pointer to the last inserted */
+ if (x->call_head == NULL) {
x->call_head = entry;
- else
- x->call_tail->next = entry;
- x->call_tail = entry;
+ } else {
+ if (entry->loop_depth == env->last_call->loop_depth) {
+ /* same depth as the last one, enqueue after it */
+ entry->next = env->last_call->next;
+ env->last_call->next = entry;
+ } else if (entry->loop_depth > x->call_head->loop_depth) {
+ /* put first */
+ entry->next = x->call_head;
+ x->call_head = entry;
+ } else {
+ /* search the insertion point */
+ call_entry *p;
+
+ for (p = x->call_head; p->next != NULL; p = p->next)
+ if (entry->loop_depth > p->next->loop_depth)
+ break;
+ entry->next = p->next;
+ p->next = entry;
+ }
+ }
+ env->last_call = entry;
+ if (entry->next == NULL) {
+ /* keep tail up to date */
+ x->call_tail = entry;
+ }
}
}
in the link field. */
for (entry = src; entry != NULL; entry = entry->next) {
nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
- nentry->call = get_irn_link(entry->call);
- nentry->callee = entry->callee;
- nentry->next = NULL;
+ nentry->call = get_irn_link(entry->call);
+ nentry->callee = entry->callee;
+ nentry->next = NULL;
+ nentry->loop_depth = entry->loop_depth;
dst->call_tail->next = nentry;
dst->call_tail = nentry;
}
}
+/**
+ * Add the nodes of the list src in front of the nodes of the list dst.
+ */
+static call_entry *replace_entry_by_call_list(call_entry *dst, call_entry *src) {
+ call_entry *entry, *nentry, *head, *tail;
+
+ /* Note that the src list points to Call nodes in the inlined graph, but
+ we need Call nodes in our graph. Luckily the inliner leaves this information
+ in the link field. */
+ head = tail = NULL;
+ for (entry = src; entry != NULL; entry = entry->next) {
+ nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
+ nentry->call = get_irn_link(entry->call);
+ nentry->callee = entry->callee;
+ nentry->next = NULL;
+ nentry->loop_depth = entry->loop_depth + dst->loop_depth;
+ if (head == NULL)
+ head = nentry;
+ else
+ tail->next = nentry;
+ tail = nentry;
+ }
+ /* skip the head of dst */
+ if (head != NULL) {
+ tail->next = dst->next;
+ } else {
+ head = dst->next;
+ }
+ return head;
+}
+
/*
* Inlines small leave methods at call sites where the called address comes
* from a Const node that references the entity representing the called
assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
+ assure_cf_loop(irg);
wenv.x = get_irg_link(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
+ assure_cf_loop(copy);
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
optimize_cf(irg);
}
if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
- DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
+ DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
env->n_callers_orig, env->n_callers,
get_entity_name(get_irg_entity(irg))));
}
}
}
+ break;
default:
/* any other node: unsupported yet or bad. */
return 0;
}
/**
- * calculate a benefice value for inlining the given call.
+ * Calculate a benefice value for inlining the given call.
+ *
+ * @param call the call node we have to inspect
+ * @param callee the called graph
+ * @param local_adr set after return if an address of a local variable is
+ * transmitted as a parameter
*/
static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local_adr) {
ir_entity *ent = get_irg_entity(callee);
ir_node *frame_ptr;
ir_type *mtp;
int weight = 0;
- int i, n_params;
+ int i, n_params, all_const;
unsigned cc, v;
inline_irg_env *curr_env, *callee_env;
/* constant parameters improve the benefice */
frame_ptr = get_irg_frame(current_ir_graph);
+ all_const = 1;
for (i = 0; i < n_params; ++i) {
ir_node *param = get_Call_param(call, i);
- if (is_Const(param) || is_SymConst(param))
+ if (is_Const(param)) {
weight += get_method_param_weight(ent, i);
- else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
- /*
- * An address of a local variable is transmitted. After inlining,
- * scalar_replacement might be able to remove the local variable,
- * so honor this.
- */
- v = get_method_local_adress_weight(callee, i);
- weight += v;
- if (v > 0)
- *local_adr = 1;
+ } else {
+ all_const = 0;
+ if (is_SymConst(param))
+ weight += get_method_param_weight(ent, i);
+ else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
+ /*
+ * An address of a local variable is transmitted. After inlining,
+ * scalar_replacement might be able to remove the local variable,
+ * so honor this.
+ */
+ v = get_method_local_adress_weight(callee, i);
+ weight += v;
+ if (v > 0)
+ *local_adr = 1;
+ }
}
}
/* reduce the benefice if the current function is already big */
curr_env = get_irg_link(current_ir_graph);
- weight -= curr_env->n_nodes / 100;
+ weight -= curr_env->n_nodes / 50;
/* give a bonus for functions with one block */
if (callee_env->n_blocks == 1)
weight = weight * 3 / 2;
+	/* and one for small non-recursive functions: we want them to be inlined in almost every case */
+ else if (callee_env->n_nodes < 20 && !callee_env->recursive)
+ weight += 5000;
+
+ /* and finally for leaves: they do not increase the register pressure
+	   because of callee-save registers */
+ else if (callee_env->n_call_nodes == 0)
+ weight += 25;
+
+ /*
+ * Reduce the weight for recursive function IFF not all arguments are const.
+ * inlining recursive functions is rarely good.
+ */
+ if (callee_env->recursive && !all_const)
+ weight -= 500;
+
+ /*
+ * All arguments constant is probably a good sign, give an extra bonus
+ */
+ if (all_const)
+ weight += 100;
+
return weight;
}
/**
- * Heuristic inliner. Calculates a benifice value for every call and inlines
+ * Heuristic inliner. Calculates a benefice value for every call and inlines
* those calls with a value higher than the threshold.
*/
-void inline_functions(int inline_threshold) {
+void inline_functions(int maxsize, int inline_threshold) {
inline_irg_env *env;
int i, n_irgs;
ir_graph *rem;
int did_inline;
wenv_t wenv;
- call_entry *entry, *tail;
+ call_entry *curr_call, **last_call;
const call_entry *centry;
pmap *copied_graphs;
pmap_entry *pm_entry;
assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
- wenv.x = get_irg_link(irg);
+ wenv.x = get_irg_link(irg);
+ wenv.last_call = NULL;
+ assure_cf_loop(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}
env = get_irg_link(irg);
/* note that the list of possible calls is updated during the process */
- tail = NULL;
- for (entry = env->call_head; entry != NULL; entry = entry->next) {
+ last_call = &env->call_head;
+ for (curr_call = env->call_head; curr_call != NULL;) {
ir_graph *callee;
pmap_entry *e;
int benefice;
unsigned local_adr;
- call = entry->call;
- callee = entry->callee;
+ if (env->n_nodes > maxsize) break;
- /* calculate the benifice on the original call to prevent excessive inlining */
- local_adr = 0;
- benefice = calc_inline_benefice(call, callee, &local_adr);
- DB((dbg, SET_LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
+ call = curr_call->call;
+ callee = curr_call->callee;
e = pmap_find(copied_graphs, callee);
if (e != NULL) {
/*
- * Remap callee if we have a copy.
- * FIXME: Should we do this only for recursive Calls ?
- */
+ * Remap callee if we have a copy.
+ * FIXME: Should we do this only for recursive Calls ?
+ */
callee = e->value;
}
+ /* calculate the benefice on the original call to prevent excessive inlining */
+ local_adr = 0;
+ benefice = calc_inline_benefice(call, callee, &local_adr);
+ DB((dbg, LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
+
if (benefice > -inline_threshold ||
(get_irg_inline_property(callee) >= irg_inline_forced)) {
if (current_ir_graph == callee) {
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
+ assure_cf_loop(copy);
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
/* was inlined, must be recomputed */
phiproj_computed = 0;
+ /* after we have inlined callee, all called methods inside callee
+ are now called once more */
+ for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
+ inline_irg_env *penv = get_irg_link(centry->callee);
+ ++penv->n_callers;
+ }
+
/* callee was inline. Append it's call list. */
env->got_inline = 1;
if (local_adr)
env->local_vars = 1;
--env->n_call_nodes;
- append_call_list(env, callee_env->call_head);
+ curr_call = replace_entry_by_call_list(curr_call, callee_env->call_head);
env->n_call_nodes += callee_env->n_call_nodes;
env->n_nodes += callee_env->n_nodes;
--callee_env->n_callers;
- /* after we have inlined callee, all called methods inside callee
- are now called once more */
- for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
- inline_irg_env *penv = get_irg_link(centry->callee);
- ++penv->n_callers;
- }
-
- /* remove this call from the list */
- if (tail != NULL)
- tail->next = entry->next;
- else
- env->call_head = entry->next;
+ /* remove the current call entry from the list */
+ *last_call = curr_call;
continue;
}
}
- tail = entry;
+ last_call = &curr_call->next;
+ curr_call = curr_call->next;
}
- env->call_tail = tail;
+ }
+
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+
+ env = get_irg_link(irg);
if (env->got_inline) {
/* this irg got calls inlined: optimize it */
optimize_graph_df(irg);
}
}
-
optimize_cf(irg);
}
if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
- DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
+ DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
env->n_callers_orig, env->n_callers,
get_entity_name(get_irg_entity(irg))));