/*
 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
 *
 * This file is part of libFirm.
 *
 * @brief Representation and computation of the callgraph.
 * @author Goetz Lindenmaier
 * @date 21.7.2004
 */
#include "config.h"
#include "irnode_t.h"
#include "cgana.h"
-#include "execution_frequency.h"
#include "array.h"
#include "pmap.h"
}
/* Returns the number of procedures that call the given irg. */
-int get_irg_n_callers(const ir_graph *irg)
+size_t get_irg_n_callers(const ir_graph *irg)
{
- if (irg->callers) return ARR_LEN(irg->callers);
- return -1;
+ assert(irg->callers);
+ return irg->callers ? ARR_LEN(irg->callers) : 0;
}
/* Returns the caller at position pos. */
-ir_graph *get_irg_caller(const ir_graph *irg, int pos)
+ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
{
- assert(pos >= 0 && pos < get_irg_n_callers(irg));
- if (irg->callers) return irg->callers[pos];
- return NULL;
+ assert(pos < get_irg_n_callers(irg));
+ return irg->callers ? irg->callers[pos] : NULL;
}
/* Returns non-zero if the caller at position pos is "a backedge", i.e. a recursion. */
-int is_irg_caller_backedge(const ir_graph *irg, int pos)
+int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
{
- assert(pos >= 0 && pos < get_irg_n_callers(irg));
+ assert(pos < get_irg_n_callers(irg));
return irg->caller_isbe != NULL ? rbitset_is_set(irg->caller_isbe, pos) : 0;
}
-/** Search the caller in the list of all callers and set it's backedge property. */
-static void set_irg_caller_backedge(ir_graph *irg, ir_graph *caller)
+/** Search the caller in the list of all callers and set its backedge property. */
+static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
{
- int i, n_callers = get_irg_n_callers(irg);
+ size_t i, n_callers = get_irg_n_callers(irg);
/* allocate a new array on demand */
if (irg->caller_isbe == NULL)
/* Returns non-zero if the irg has a backedge caller. */
int has_irg_caller_backedge(const ir_graph *irg)
{
- int i, n_callers = get_irg_n_callers(irg);
+ size_t i, n_callers = get_irg_n_callers(irg);
if (irg->caller_isbe != NULL) {
for (i = 0; i < n_callers; ++i)
* Given the position pos_caller of an caller of irg, return
* irg's callee position on that caller.
*/
-static int reverse_pos(const ir_graph *callee, int pos_caller)
+static size_t reverse_pos(const ir_graph *callee, size_t pos_caller)
{
ir_graph *caller = get_irg_caller(callee, pos_caller);
/* search the other relation for the corresponding edge. */
- int pos_callee = -1;
- int i, n_callees = get_irg_n_callees(caller);
+ size_t i, n_callees = get_irg_n_callees(caller);
for (i = 0; i < n_callees; ++i) {
if (get_irg_callee(caller, i) == callee) {
- pos_callee = i;
- break;
+ return i;
}
}
- assert(pos_callee >= 0);
+ assert(!"reverse_pos() did not find position");
- return pos_callee;
+ return 0;
}
/* Returns the maximal loop depth of call nodes that call along this edge. */
-int get_irg_caller_loop_depth(const ir_graph *irg, int pos)
+size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
{
ir_graph *caller = get_irg_caller(irg, pos);
- int pos_callee = reverse_pos(irg, pos);
+ size_t pos_callee = reverse_pos(irg, pos);
return get_irg_callee_loop_depth(caller, pos_callee);
}
/* Returns the number of procedures that are called by the given irg. */
-int get_irg_n_callees(const ir_graph *irg)
+size_t get_irg_n_callees(const ir_graph *irg)
{
- if (irg->callees) return ARR_LEN(irg->callees);
- return -1;
+ assert(irg->callees);
+ return irg->callees ? ARR_LEN(irg->callees) : 0;
}
/* Returns the callee at position pos. */
-ir_graph *get_irg_callee(const ir_graph *irg, int pos)
+ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
{
- assert(pos >= 0 && pos < get_irg_n_callees(irg));
- if (irg->callees) return irg->callees[pos]->irg;
- return NULL;
+ assert(pos < get_irg_n_callees(irg));
+ return irg->callees ? irg->callees[pos]->irg : NULL;
}
/* Returns non-zero if the callee at position pos is "a backedge", i.e. a recursion. */
-int is_irg_callee_backedge(const ir_graph *irg, int pos)
+int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
{
- assert(pos >= 0 && pos < get_irg_n_callees(irg));
+ assert(pos < get_irg_n_callees(irg));
return irg->callee_isbe != NULL ? rbitset_is_set(irg->callee_isbe, pos) : 0;
}
/* Returns non-zero if the irg has a backedge callee. */
int has_irg_callee_backedge(const ir_graph *irg)
{
- int i, n_callees = get_irg_n_callees(irg);
+ size_t i, n_callees = get_irg_n_callees(irg);
if (irg->callee_isbe != NULL) {
for (i = 0; i < n_callees; ++i)
/**
* Mark the callee at position pos as a backedge.
*/
-static void set_irg_callee_backedge(ir_graph *irg, int pos)
+static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
{
- int n = get_irg_n_callees(irg);
+ size_t n = get_irg_n_callees(irg);
/* allocate a new array on demand */
if (irg->callee_isbe == NULL)
irg->callee_isbe = rbitset_malloc(n);
- assert(pos >= 0 && pos < n);
+ assert(pos < n);
rbitset_set(irg->callee_isbe, pos);
}
/* Returns the maximal loop depth of call nodes that call along this edge. */
-int get_irg_callee_loop_depth(const ir_graph *irg, int pos)
-{
- assert(pos >= 0 && pos < get_irg_n_callees(irg));
- if (irg->callees) return irg->callees[pos]->max_depth;
- return -1;
-}
-
-static double get_irg_callee_execution_frequency(const ir_graph *irg, int pos)
+size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
{
- ir_node **arr = irg->callees[pos]->call_list;
- int i, n_Calls = ARR_LEN(arr);
- double freq = 0.0;
-
- for (i = 0; i < n_Calls; ++i) {
- freq += get_irn_exec_freq(arr[i]);
- }
- return freq;
-}
-
-static double get_irg_callee_method_execution_frequency(const ir_graph *irg,
- int pos)
-{
- double call_freq = get_irg_callee_execution_frequency(irg, pos);
- double meth_freq = get_irg_method_execution_frequency(irg);
- return call_freq * meth_freq;
-}
-
-static double get_irg_caller_method_execution_frequency(const ir_graph *irg,
- int pos)
-{
- ir_graph *caller = get_irg_caller(irg, pos);
- int pos_callee = reverse_pos(irg, pos);
-
- return get_irg_callee_method_execution_frequency(caller, pos_callee);
+ assert(pos < get_irg_n_callees(irg));
+ return irg->callees ? irg->callees[pos]->max_depth : 0;
}
/* --------------------- Compute the callgraph ------------------------ */
/**
- * Walker called by compute_callgraph(), analyses all Call nodes.
+ * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
*/
static void ana_Call(ir_node *n, void *env)
{
- int i, n_callees;
+ size_t i, n_callees;
ir_graph *irg;
(void) env;
if (callee) {
cg_callee_entry buf;
cg_callee_entry *found;
- int depth;
+ unsigned depth;
buf.irg = callee;
pset_insert((pset *)callee->callers, irg, HASH_PTR(irg));
- found = pset_find((pset *)irg->callees, &buf, HASH_PTR(callee));
+ found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, HASH_PTR(callee));
if (found) { /* add Call node to list, compute new nesting. */
ir_node **arr = found->call_list;
ARR_APP1(ir_node *, arr, n);
/** compare two ir graphs in a cg_callee_entry */
static int cg_callee_entry_cmp(const void *elt, const void *key)
{
- const cg_callee_entry *e1 = elt;
- const cg_callee_entry *e2 = key;
+ const cg_callee_entry *e1 = (const cg_callee_entry*) elt;
+ const cg_callee_entry *e2 = (const cg_callee_entry*) key;
return e1->irg != e2->irg;
}
/** compare two ir graphs for pointer identity */
static int graph_cmp(const void *elt, const void *key)
{
- const ir_graph *e1 = elt;
- const ir_graph *e2 = key;
+ const ir_graph *e1 = (const ir_graph*) elt;
+ const ir_graph *e2 = (const ir_graph*) key;
return e1 != e2;
}
/* Construct and destruct the callgraph. */
void compute_callgraph(void)
{
- int i, n_irgs;
+ size_t i, n_irgs;
/* initialize */
free_callgraph();
/* Change the sets to arrays. */
for (i = 0; i < n_irgs; ++i) {
- int j, count;
+ size_t j, count;
cg_callee_entry *callee;
ir_graph *c, *irg = get_irp_irg(i);
pset *callee_set, *caller_set;
count = pset_count(callee_set);
irg->callees = NEW_ARR_F(cg_callee_entry *, count);
irg->callee_isbe = NULL;
- callee = pset_first(callee_set);
+ callee = (cg_callee_entry*) pset_first(callee_set);
for (j = 0; j < count; ++j) {
irg->callees[j] = callee;
- callee = pset_next(callee_set);
+ callee = (cg_callee_entry*) pset_next(callee_set);
}
del_pset(callee_set);
assert(callee == NULL);
count = pset_count(caller_set);
irg->callers = NEW_ARR_F(ir_graph *, count);
irg->caller_isbe = NULL;
- c = pset_first(caller_set);
+ c = (ir_graph*) pset_first(caller_set);
for (j = 0; j < count; ++j) {
irg->callers[j] = c;
- c = pset_next(caller_set);
+ c = (ir_graph*) pset_next(caller_set);
}
del_pset(caller_set);
assert(c == NULL);
/* Destruct the callgraph. */
void free_callgraph(void)
{
- int i, n_irgs = get_irp_n_irgs();
+ size_t i, n_irgs = get_irp_n_irgs();
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
if (irg->callees) DEL_ARR_F(irg->callees);
static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
{
- int i, n_callees;
+ size_t i, n_callees;
if (cg_irg_visited(irg))
return;
void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
{
- int i, n_irgs = get_irp_n_irgs();
+ size_t i, n_irgs = get_irp_n_irgs();
++master_cg_visited;
/* roots are methods which have no callers in the current program */
for */
static ir_loop *current_loop; /**< Current cfloop construction is working
on. */
-static int loop_node_cnt = 0; /**< Counts the number of allocated cfloop nodes.
+static size_t loop_node_cnt = 0; /**< Counts the number of allocated cfloop nodes.
Each cfloop node gets a unique number.
What for? ev. remove. @@@ */
-static int current_dfn = 1; /**< Counter to generate depth first numbering
+static size_t current_dfn = 1; /**< Counter to generate depth first numbering
of visited nodes. */
/*-----------------*/
/*-----------------*/
typedef struct scc_info {
+ size_t dfn; /**< Depth first search number. */
+ size_t uplink; /**< dfn number of ancestor. */
+ ir_visited_t visited; /**< visited counter */
int in_stack; /**< Marks whether node is on the stack. */
- int dfn; /**< Depth first search number. */
- int uplink; /**< dfn number of ancestor. */
- int visited;
} scc_info;
/**
/**
* Returns the visited flag of a graph.
*/
-static inline ir_visited_t get_cg_irg_visited(ir_graph *irg)
+static inline ir_visited_t get_cg_irg_visited(const ir_graph *irg)
{
return irg->self_visited;
}
/** Flags irg as currently being on the SCC stack. */
static inline void mark_irg_in_stack(ir_graph *irg)
{
	scc_info *info = (scc_info*) get_irg_link(irg);
	assert(info && "missing call to init_scc()");
	info->in_stack = 1;
}
/** Clears the on-stack flag of irg. */
static inline void mark_irg_not_in_stack(ir_graph *irg)
{
	scc_info *info = (scc_info*) get_irg_link(irg);
	assert(info && "missing call to init_scc()");
	info->in_stack = 0;
}
-static inline int irg_is_in_stack(ir_graph *irg)
+static inline int irg_is_in_stack(const ir_graph *irg)
{
- scc_info *info = get_irg_link(irg);
+ scc_info *info = (scc_info*) get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->in_stack;
}
-static inline void set_irg_uplink(ir_graph *irg, int uplink)
+static inline void set_irg_uplink(ir_graph *irg, size_t uplink)
{
- scc_info *info = get_irg_link(irg);
+ scc_info *info = (scc_info*) get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->uplink = uplink;
}
-static inline int get_irg_uplink(ir_graph *irg)
+static inline size_t get_irg_uplink(const ir_graph *irg)
{
- scc_info *info = get_irg_link(irg);
+ const scc_info *info = (scc_info*) get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->uplink;
}
-static inline void set_irg_dfn(ir_graph *irg, int dfn)
+static inline void set_irg_dfn(ir_graph *irg, size_t dfn)
{
- scc_info *info = get_irg_link(irg);
+ scc_info *info = (scc_info*) get_irg_link(irg);
assert(info && "missing call to init_scc()");
info->dfn = dfn;
}
-static inline int get_irg_dfn(ir_graph *irg)
+static inline size_t get_irg_dfn(const ir_graph *irg)
{
- scc_info *info = get_irg_link(irg);
+ const scc_info *info = (scc_info*) get_irg_link(irg);
assert(info && "missing call to init_scc()");
return info->dfn;
}
/**********************************************************************/
static ir_graph **stack = NULL;
-static int tos = 0; /**< top of stack */
+static size_t tos = 0; /**< top of stack */
/**
* Initialize the irg stack.
static inline void push(ir_graph *irg)
{
if (tos == ARR_LEN(stack)) {
- int nlen = ARR_LEN(stack) * 2;
- ARR_RESIZE(ir_node *, stack, nlen);
+ size_t nlen = ARR_LEN(stack) * 2;
+ ARR_RESIZE(ir_graph*, stack, nlen);
}
stack [tos++] = irg;
mark_irg_in_stack(irg);
*/
static inline ir_graph *pop(void)
{
	ir_graph *irg;

	/* Popping an empty stack would underflow the unsigned index. */
	assert(tos > 0);
	irg = stack[--tos];
	mark_irg_not_in_stack(irg);
	return irg;
}
do {
m = pop();
- loop_node_cnt++;
+ ++loop_node_cnt;
set_irg_dfn(m, loop_node_cnt);
add_loop_irg(current_loop, m);
m->l = current_loop;
can't they have two loops as sons? Does it never get that far? ) */
static void close_loop(ir_loop *l)
{
- int last = get_loop_n_elements(l) - 1;
+ size_t last = get_loop_n_elements(l) - 1;
loop_element lelement = get_loop_element(l, last);
ir_loop *last_son = lelement.son;
static void init_scc(struct obstack *obst)
{
- int i;
- int n_irgs;
+ size_t i, n_irgs;
current_dfn = 1;
loop_node_cnt = 0;
*
* @param root: only needed for assertion.
*/
-static int is_head(ir_graph *n, ir_graph *root)
+static int is_head(const ir_graph *n, const ir_graph *root)
{
- int i, arity;
+ size_t i, n_callees;
int some_outof_loop = 0, some_in_loop = 0;
- arity = get_irg_n_callees(n);
- for (i = 0; i < arity; i++) {
- ir_graph *pred = get_irg_callee(n, i);
+ n_callees = get_irg_n_callees(n);
+ for (i = 0; i < n_callees; ++i) {
+ const ir_graph *pred = get_irg_callee(n, i);
if (is_irg_callee_backedge(n, i)) continue;
if (!irg_is_in_stack(pred)) {
some_outof_loop = 1;
/**
* Returns non-zero if n is possible loop head of an endless loop.
- * I.e., it is a Block, Phi or Filter node and has only predecessors
+ * I.e., it is a Block or Phi node and has only predecessors
* within the loop.
* @arg root: only needed for assertion.
*/
-static int is_endless_head(ir_graph *n, ir_graph *root)
+static int is_endless_head(const ir_graph *n, const ir_graph *root)
{
- int i, arity;
+ size_t i, n_calless;
int some_outof_loop = 0, some_in_loop = 0;
- arity = get_irg_n_callees(n);
- for (i = 0; i < arity; i++) {
- ir_graph *pred = get_irg_callee(n, i);
+ n_calless = get_irg_n_callees(n);
+ for (i = 0; i < n_calless; ++i) {
+ const ir_graph *pred = get_irg_callee(n, i);
assert(pred);
if (is_irg_callee_backedge(n, i))
continue;
}
/**
- * Returns index of the predecessor with the smallest dfn number
+ * Finds index of the predecessor with the smallest dfn number
* greater-equal than limit.
*/
-static int smallest_dfn_pred(ir_graph *n, int limit)
+static bool smallest_dfn_pred(const ir_graph *n, size_t limit, size_t *result)
{
- int i, index = -2, min = -1;
+ size_t index = 0, min = 0;
+ bool found = false;
- int arity = get_irg_n_callees(n);
- for (i = 0; i < arity; i++) {
- ir_graph *pred = get_irg_callee(n, i);
+ size_t i, n_callees = get_irg_n_callees(n);
+ for (i = 0; i < n_callees; ++i) {
+ const ir_graph *pred = get_irg_callee(n, i);
if (is_irg_callee_backedge(n, i) || !irg_is_in_stack(pred))
continue;
- if (get_irg_dfn(pred) >= limit && (min == -1 || get_irg_dfn(pred) < min)) {
+ if (get_irg_dfn(pred) >= limit && (!found || get_irg_dfn(pred) < min)) {
index = i;
min = get_irg_dfn(pred);
+ found = true;
}
}
- return index;
+ *result = index;
+ return found;
}
-/** Returns index of the predecessor with the largest dfn number. */
-static int largest_dfn_pred(ir_graph *n)
+/** Finds index of the predecessor with the largest dfn number. */
+static bool largest_dfn_pred(const ir_graph *n, size_t *result)
{
- int i, index = -2, max = -1;
+ size_t index = 0, max = 0;
+ bool found = false;
- int arity = get_irg_n_callees(n);
- for (i = 0; i < arity; ++i) {
- ir_graph *pred = get_irg_callee(n, i);
- if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred)) continue;
+ size_t i, n_callees = get_irg_n_callees(n);
+ for (i = 0; i < n_callees; ++i) {
+ const ir_graph *pred = get_irg_callee(n, i);
+ if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred))
+ continue;
+ /* Note: dfn is always > 0 */
if (get_irg_dfn(pred) > max) {
index = i;
max = get_irg_dfn(pred);
+ found = true;
}
}
- return index;
+ *result = index;
+ return found;
}
-static ir_graph *find_tail(ir_graph *n)
+static ir_graph *find_tail(const ir_graph *n)
{
+ bool found = false;
ir_graph *m;
- int i, res_index = -2;
+ size_t i, res_index;
/*
if (!icfg && rm_cyclic_phis && remove_cyclic_phis (n)) return NULL;
*/
- m = stack[tos-1]; /* tos = top of stack */
- if (is_head (m, n)) {
- res_index = smallest_dfn_pred(m, 0);
- if ((res_index == -2) && /* no smallest dfn pred found. */
- (n == m))
+ m = stack[tos - 1]; /* tos = top of stack */
+ if (is_head(m, n)) {
+ found = smallest_dfn_pred(m, 0, &res_index);
+ if (!found && /* no smallest dfn pred found. */
+ n == m)
return NULL;
} else {
if (m == n) return NULL; // Is this to catch Phi - self loops?
- for (i = tos-2; i >= 0; --i) {
- m = stack[i];
+ for (i = tos - 1; i > 0;) {
+ m = stack[--i];
if (is_head(m, n)) {
- res_index = smallest_dfn_pred(m, get_irg_dfn(m) + 1);
- if (res_index == -2) /* no smallest dfn pred found. */
- res_index = largest_dfn_pred(m);
+ found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
+ if (! found) /* no smallest dfn pred found. */
+ found = largest_dfn_pred(m, &res_index);
- if ((m == n) && (res_index == -2)) {
- i = -1;
- }
break;
}
/* We should not walk past our selves on the stack: The upcoming nodes
are not in this loop. We assume a loop not reachable from Start. */
if (m == n) {
- i = -1;
+ found = false;
break;
}
}
- if (i < 0) {
+ if (! found) {
/* A dead loop not reachable from Start. */
- for (i = tos-2; i >= 0; --i) {
- m = stack[i];
+ for (i = tos-1; i > 0;) {
+ m = stack[--i];
if (is_endless_head(m, n)) {
- res_index = smallest_dfn_pred(m, get_irg_dfn(m) + 1);
- if (res_index == -2) /* no smallest dfn pred found. */
- res_index = largest_dfn_pred(m);
+ found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
+ if (!found) /* no smallest dfn pred found. */
+ found = largest_dfn_pred(m, &res_index);
break;
}
- if (m == n) { break; } /* It's not an unreachable loop, either. */
+ /* It's not an unreachable loop, either. */
+ if (m == n)
+ break;
}
//assert(0 && "no head found on stack");
}
}
- assert (res_index > -2);
+ assert(found);
set_irg_callee_backedge(m, res_index);
return get_irg_callee(m, res_index);
static void cgscc(ir_graph *n)
{
- int i, arity;
+ size_t i, n_callees;
if (cg_irg_visited(n)) return;
mark_cg_irg_visited(n);
/* Initialize the node */
set_irg_dfn(n, current_dfn); /* Depth first number for this node */
set_irg_uplink(n, current_dfn); /* ... is default uplink. */
- current_dfn ++;
+ ++current_dfn;
push(n);
- arity = get_irg_n_callees(n);
- for (i = 0; i < arity; i++) {
+ n_callees = get_irg_n_callees(n);
+ for (i = 0; i < n_callees; ++i) {
ir_graph *m;
if (is_irg_callee_backedge(n, i)) continue;
m = get_irg_callee(n, i);
*/
static void reset_isbe(void)
{
- int i, n_irgs = get_irp_n_irgs();
+ size_t i, n_irgs = get_irp_n_irgs();
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
}
}
-/* ----------------------------------------------------------------------------------- */
-/* Another algorithm to compute recursion nesting depth */
-/* Walk the callgraph. For each crossed edge increase the loop depth by the edge */
-/* weight. Assign graphs the maximal depth. */
-/* ----------------------------------------------------------------------------------- */
-
-static void compute_loop_depth(ir_graph *irg, void *env)
-{
- int current_nesting = *(int *) env;
- int old_nesting = irg->callgraph_loop_depth;
- ir_visited_t old_visited = get_cg_irg_visited(irg);
- int i, n_callees;
-
- //return ;
-
- if (cg_irg_visited(irg)) return;
-
- mark_cg_irg_visited(irg);
-
- if (old_nesting < current_nesting)
- irg->callgraph_loop_depth = current_nesting;
-
- if (current_nesting > irp->max_callgraph_loop_depth)
- irp->max_callgraph_loop_depth = current_nesting;
-
- if ((old_visited +1 < get_cg_irg_visited(irg)) || /* not yet visited */
- (old_nesting < current_nesting)) { /* propagate larger nesting */
- /* Don't walk the graph, but a tree that is an unfolded graph. */
- n_callees = get_irg_n_callees(irg);
- for (i = 0; i < n_callees; i++) {
- ir_graph *m = get_irg_callee(irg, i);
- *(int *)env += get_irg_callee_loop_depth(irg, i);
- compute_loop_depth(m, env);
- *(int *)env -= get_irg_callee_loop_depth(irg, i);
- }
- }
-
- set_cg_irg_visited(irg, master_cg_visited-1);
-}
-
-/* ------------------------------------------------------------------------------------ */
-/* Another algorithm to compute recursion nesting depth */
-/* Walk the callgraph. For each crossed loop increase the nesting depth by one. */
-/* Assign graphs the maximal nesting depth. Don't increase if passing loops more than */
-/* once. */
-/* ------------------------------------------------------------------------------------ */
-
-
-/* For callees, we want to remember the Call nodes, too. */
-typedef struct ana_entry2 {
- ir_loop **loop_stack; /**< a stack of ir_loop entries */
- int tos; /**< the top of stack entry */
- int recursion_nesting;
-} ana_entry2;
-
-/**
- * push a loop entry on the stack
- */
-static void push2(ana_entry2 *e, ir_loop *g)
-{
- if (ARR_LEN(e->loop_stack) == e->tos) {
- ARR_APP1(ir_loop *, e->loop_stack, g);
- } else {
- e->loop_stack[e->tos] = g;
- }
- ++e->tos;
-}
-
-/**
- * returns the top of stack and pop it
- */
-static ir_loop *pop2(ana_entry2 *e)
-{
- return e->loop_stack[--e->tos];
-}
-
-/**
- * check if a loop g in on the stack. Did not check the TOS.
- */
-static int in_stack(ana_entry2 *e, ir_loop *g)
-{
- int i;
- for (i = e->tos-1; i >= 0; --i) {
- if (e->loop_stack[i] == g) return 1;
- }
- return 0;
-}
-
-static void compute_rec_depth(ir_graph *irg, void *env)
-{
- ana_entry2 *e = (ana_entry2 *)env;
- ir_loop *l = irg->l;
- int depth, old_depth = irg->callgraph_recursion_depth;
- int i, n_callees;
- int pushed = 0;
-
- if (cg_irg_visited(irg))
- return;
- mark_cg_irg_visited(irg);
-
- /* -- compute and set the new nesting value -- */
- if ((l != irp->outermost_cg_loop) && !in_stack(e, l)) {
- push2(e, l);
- e->recursion_nesting++;
- pushed = 1;
- }
- depth = e->recursion_nesting;
-
- if (old_depth < depth)
- irg->callgraph_recursion_depth = depth;
-
- if (depth > irp->max_callgraph_recursion_depth)
- irp->max_callgraph_recursion_depth = depth;
-
- /* -- spread the nesting value -- */
- if (depth == 0 || old_depth < depth) {
- /* Don't walk the graph, but a tree that is an unfolded graph.
- Therefore we unset the visited flag at the end. */
- n_callees = get_irg_n_callees(irg);
- for (i = 0; i < n_callees; ++i) {
- ir_graph *m = get_irg_callee(irg, i);
- compute_rec_depth(m, env);
- }
- }
-
- /* -- clean up -- */
- if (pushed) {
- pop2(e);
- e->recursion_nesting--;
- }
- set_cg_irg_visited(irg, master_cg_visited-1);
-}
-
-
-/* ----------------------------------------------------------------------------------- */
-/* Another algorithm to compute the execution frequency of methods ignoring recursions. */
-/* Walk the callgraph. Ignore backedges. Use sum of execution frequencies of Call */
-/* nodes to evaluate a callgraph edge. */
-/* ----------------------------------------------------------------------------------- */
-
-/* Returns the method execution frequency of a graph. */
-double get_irg_method_execution_frequency(const ir_graph *irg)
-{
- return irg->method_execution_frequency;
-}
-
-/**
- * Increase the method execution frequency to freq if its current value is
- * smaller then this.
- */
-static void set_irg_method_execution_frequency(ir_graph *irg, double freq)
-{
- irg->method_execution_frequency = freq;
-
- if (irp->max_method_execution_frequency < freq)
- irp->max_method_execution_frequency = freq;
-}
-
-static void compute_method_execution_frequency(ir_graph *irg, void *env)
-{
- int i, n_callers;
- double freq;
- int found_edge;
- int n_callees;
- (void) env;
-
- if (cg_irg_visited(irg))
- return;
-
- /* We need the values of all predecessors (except backedges).
- So they must be marked. Else we will reach the node through
- one of the unmarked ones. */
- n_callers = get_irg_n_callers(irg);
- for (i = 0; i < n_callers; ++i) {
- ir_graph *m = get_irg_caller(irg, i);
- if (is_irg_caller_backedge(irg, i))
- continue;
- if (!cg_irg_visited(m)) {
- return;
- }
- }
- mark_cg_irg_visited(irg);
-
- /* Compute the new frequency. */
- freq = 0;
- found_edge = 0;
- for (i = 0; i < n_callers; i++) {
- if (! is_irg_caller_backedge(irg, i)) {
- double edge_freq = get_irg_caller_method_execution_frequency(irg, i);
- assert(edge_freq >= 0);
- freq += edge_freq;
- found_edge = 1;
- }
- }
-
- if (!found_edge) {
- /* A starting point: method only called from outside,
- or only backedges as predecessors. */
- freq = 1;
- }
-
- set_irg_method_execution_frequency(irg, freq);
-
- /* recur */
- n_callees = get_irg_n_callees(irg);
- for (i = 0; i < n_callees; ++i) {
- compute_method_execution_frequency(get_irg_callee(irg, i), NULL);
- }
-}
-
-
/* ----------------------------------------------------------------------------------- */
/* The recursion stuff driver. */
/* ----------------------------------------------------------------------------------- */
/* Compute the backedges that represent recursions. */
void find_callgraph_recursions(void)
{
- int i, n_irgs;
+ size_t i, n_irgs;
struct obstack temp;
reset_isbe();
/* -- Reverse the backedge information. -- */
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = get_irp_irg(i);
- int j, n_callees = get_irg_n_callees(irg);
+ size_t j, n_callees = get_irg_n_callees(irg);
for (j = 0; j < n_callees; ++j) {
if (is_irg_callee_backedge(irg, j))
set_irg_caller_backedge(get_irg_callee(irg, j), irg);
irp->callgraph_state = irp_callgraph_and_calltree_consistent;
}
-/* Compute interprocedural performance estimates. */
-void compute_performance_estimates(void)
-{
- int i, n_irgs = get_irp_n_irgs();
- int current_nesting;
- ana_entry2 e;
-
- assert(get_irp_exec_freq_state() != exec_freq_none && "execution frequency not calculated");
-
- /* -- compute the loop depth -- */
- current_nesting = 0;
- irp->max_callgraph_loop_depth = 0;
- master_cg_visited += 2;
- compute_loop_depth(get_irp_main_irg(), ¤t_nesting);
- for (i = 0; i < n_irgs; i++) {
- ir_graph *irg = get_irp_irg(i);
- if ((get_cg_irg_visited(irg) < master_cg_visited-1) &&
- get_irg_n_callers(irg) == 0) {
- compute_loop_depth(irg, ¤t_nesting);
- }
- }
- for (i = 0; i < n_irgs; i++) {
- ir_graph *irg = get_irp_irg(i);
- if (get_cg_irg_visited(irg) < master_cg_visited-1) {
- compute_loop_depth(irg, ¤t_nesting);
- }
- }
-
-
- /* -- compute the recursion depth -- */
- e.loop_stack = NEW_ARR_F(ir_loop *, 0);
- e.tos = 0;
- e.recursion_nesting = 0;
-
- irp->max_callgraph_recursion_depth = 0;
-
- master_cg_visited += 2;
- compute_rec_depth(get_irp_main_irg(), &e);
- for (i = 0; i < n_irgs; i++) {
- ir_graph *irg = get_irp_irg(i);
- if ((get_cg_irg_visited(irg) < master_cg_visited-1) &&
- get_irg_n_callers(irg) == 0) {
- compute_rec_depth(irg, &e);
- }
- }
- for (i = 0; i < n_irgs; i++) {
- ir_graph *irg = get_irp_irg(i);
- if (get_cg_irg_visited(irg) < master_cg_visited-1) {
- compute_rec_depth(irg, &e);
- }
- }
-
- DEL_ARR_F(e.loop_stack);
-
- /* -- compute the execution frequency -- */
- irp->max_method_execution_frequency = 0;
- master_cg_visited += 2;
- assert(get_irg_n_callers(get_irp_main_irg()) == 0);
- compute_method_execution_frequency(get_irp_main_irg(), NULL);
- for (i = 0; i < n_irgs; i++) {
- ir_graph *irg = get_irp_irg(i);
- if ((get_cg_irg_visited(irg) < master_cg_visited-1) &&
- get_irg_n_callers(irg) == 0) {
- compute_method_execution_frequency(irg, NULL);
- }
- }
- for (i = 0; i < n_irgs; i++) {
- ir_graph *irg = get_irp_irg(i);
- if (get_cg_irg_visited(irg) < master_cg_visited-1) {
- compute_method_execution_frequency(irg, NULL);
- }
- }
-}
-
/* Returns the maximal loop depth of all paths from an external visible method to
this irg. */
-int get_irg_loop_depth(const ir_graph *irg)
+size_t get_irg_loop_depth(const ir_graph *irg)
{
assert(irp->callgraph_state == irp_callgraph_consistent ||
irp->callgraph_state == irp_callgraph_and_calltree_consistent);
- return irg->callgraph_loop_depth;
+ return irg->callgraph_loop_depth;
}
/* Returns the maximal recursion depth of all paths from an external visible method to
this irg. */
-int get_irg_recursion_depth(const ir_graph *irg)
+size_t get_irg_recursion_depth(const ir_graph *irg)
{
assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
return irg->callgraph_recursion_depth;
/* Computes the interprocedural loop nesting information. */
void analyse_loop_nesting_depth(void)
{
- ir_entity **free_methods = NULL;
- int arr_len;
-
/* establish preconditions. */
if (get_irp_callee_info_state() != irg_callee_info_consistent) {
- cgana(&arr_len, &free_methods);
+ ir_entity **free_methods = NULL;
+
+ cgana(&free_methods);
+ xfree(free_methods);
}
if (irp_callgraph_consistent != get_irp_callgraph_state()) {
find_callgraph_recursions();
- compute_performance_estimates();
-
set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);
}