X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fana%2Fcallgraph.c;h=a4df9f3fa137ac2cc2adefa86f64f901e0211a12;hb=5ea9d0c93d64963b085ff10cde05f739e1c2606b;hp=e7594ac7db17e9cfe557d20b14870490d0c79ef7;hpb=bbcf808fc4c4da0acd43eb3b8d27793bac9c0b6f;p=libfirm
diff --git a/ir/ana/callgraph.c b/ir/ana/callgraph.c
index e7594ac7d..a4df9f3fa 100644
--- a/ir/ana/callgraph.c
+++ b/ir/ana/callgraph.c
@@ -10,14 +10,15 @@
 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
 #ifdef HAVE_CONFIG_H
-#include "config.h"
+# include "config.h"
 #endif
 #ifdef HAVE_STRING_H
-#include <string.h>
+# include <string.h>
 #endif
-
+# ifdef HAVE_STDLIB_H
 #include <stdlib.h>
+#endif
 
 #include "callgraph.h"
 
@@ -27,17 +28,19 @@
 #include "irnode_t.h"
 #include "cgana.h"
 
+#include "execution_frequency.h"
 
 #include "array.h"
 #include "pmap.h"
+#include "hashptr.h"
 
 #include "irgwalk.h"
 
 /* For callees, we want to remember the Call nodes, too. */
 struct ana_entry {
- ir_graph *irg;
- ir_node *call_list;
- int max_depth;
+ ir_graph *irg;
+ ir_node **call_list;
+ int max_depth;
 };
 
 typedef struct ana_entry ana_entry;
 
@@ -86,14 +89,13 @@ int has_irg_caller_backedge(ir_graph *irg) {
 return 0;
 }
 
-int get_irg_caller_loop_depth(ir_graph *irg, int pos) {
- ir_graph *caller = get_irg_caller(irg, pos);
-
+static int reverse_pos(ir_graph *callee, int pos_caller) {
+ ir_graph *caller = get_irg_caller(callee, pos_caller);
 /* search the other relation for the corresponding edge. */
 int pos_callee = -1;
 int i, n_callees = get_irg_n_callees(caller);
 for (i = 0; i < n_callees; ++i) {
- if (get_irg_callee(caller, i) == irg) {
+ if (get_irg_callee(caller, i) == callee) {
 pos_callee = i;
 break;
 }
@@ -101,6 +103,13 @@ int get_irg_caller_loop_depth(ir_graph *irg, int pos) {
 
 assert(pos_callee >= 0);
 
+ return pos_callee;
+}
+
+int get_irg_caller_loop_depth(ir_graph *irg, int pos) {
+ ir_graph *caller = get_irg_caller(irg, pos);
+ int pos_callee = reverse_pos(irg, pos);
+
 return get_irg_callee_loop_depth(caller, pos_callee);
 }
 
@@ -136,10 +145,36 @@ int get_irg_callee_loop_depth(ir_graph *irg, int pos) {
 return -1;
 }
 
-/* --------------------- Compute the callgraph ------------------------ */
 
-/* Hash an address */
-#define HASH_ADDRESS(adr) (((unsigned)(adr)) >> 3)
+
+
+double get_irg_callee_execution_frequency(ir_graph *irg, int pos) {
+ ir_node **arr = ((ana_entry *)irg->callees[pos])->call_list;
+ int i, n_Calls = ARR_LEN(arr);
+ double freq = 0;
+
+ for (i = 0; i < n_Calls; ++i) {
+ freq += get_irn_exec_freq(arr[i]);
+ }
+ return freq;
+}
+
+double get_irg_callee_method_execution_frequency(ir_graph *irg, int pos) {
+ double call_freq = get_irg_callee_execution_frequency(irg, pos);
+ double meth_freq = get_irg_method_execution_frequency(irg);
+ return call_freq * meth_freq;
+}
+
+
+double get_irg_caller_method_execution_frequency(ir_graph *irg, int pos) {
+ ir_graph *caller = get_irg_caller(irg, pos);
+ int pos_callee = reverse_pos(irg, pos);
+
+ return get_irg_callee_method_execution_frequency(caller, pos_callee);
+}
+
+
+
+/* --------------------- Compute the callgraph ------------------------ */
 
 /**
 * Walker called by compute_callgraph()
@@ -155,20 +190,25 @@ static void ana_Call(ir_node *n, void *env) {
 for (i = 0; i < n_callees; ++i) {
 entity *callee_e = get_Call_callee(n, i);
 ir_graph *callee = get_entity_irg(callee_e);
- if (callee) { /* For unknown caller */
+
+ if (callee) {
 ana_entry buf = { callee, NULL, 0};
 ana_entry *found;
 int depth;
 
- pset_insert((pset *)callee->callers, irg, (unsigned)irg >> 3);
- found = pset_find((pset *)irg->callees, &buf, 
HASH_ADDRESS(callee)); + pset_insert((pset *)callee->callers, irg, HASH_PTR(irg)); + found = pset_find((pset *)irg->callees, &buf, HASH_PTR(callee)); if (found) { /* add Call node to list, compute new nesting. */ + ir_node **arr = found->call_list; + ARR_APP1(ir_node *, arr, n); + found->call_list = arr; } else { /* New node, add Call node and init nesting. */ - found = (ana_entry *) obstack_alloc (irg->obst, sizeof (ana_entry)); - found->irg = callee; - found->call_list = NULL; - found->max_depth = 0; - pset_insert((pset *)irg->callees, found, HASH_ADDRESS(callee)); + found = (ana_entry *) obstack_alloc (irg->obst, sizeof (ana_entry)); + found->irg = callee; + found->call_list = NEW_ARR_F(ir_node *, 1); + found->call_list[0] = n; + found->max_depth = 0; + pset_insert((pset *)irg->callees, found, HASH_PTR(callee)); } depth = get_loop_depth(get_irn_loop(get_nodes_block(n))); found->max_depth = (depth > found->max_depth) ? depth : found->max_depth; @@ -202,13 +242,13 @@ void compute_callgraph(void) { assert(get_irg_callee_info_state(irg) == irg_callee_info_consistent); irg->callees = (ir_graph **)new_pset(ana_entry_cmp, 8); irg->callers = (ir_graph **)new_pset(graph_cmp, 8); - construct_cf_backedges(irg); + //construct_cf_backedges(irg); } /* Compute the call graph */ for (i = 0; i < n_irgs; ++i) { ir_graph *irg = get_irp_irg(i); - construct_cf_backedges(irg); + construct_cf_backedges(irg); // We also find the maximal loop depth of a call. irg_walk_graph(irg, ana_Call, NULL, NULL); } @@ -320,7 +360,7 @@ static int current_dfn = 1; /**< Counter to generate depth first numberin /**********************************************************************/ typedef struct scc_info { - bool in_stack; /**< Marks whether node is on the stack. */ + int in_stack; /**< Marks whether node is on the stack. */ int dfn; /**< Depth first search number. */ int uplink; /**< dfn number of ancestor. */ int visited; @@ -363,17 +403,17 @@ static INLINE void mark_irg_in_stack (ir_graph *n) { scc_info *info = get_irg_link(n); assert(info); - info->in_stack = true; + info->in_stack = 1; } static INLINE void mark_irg_not_in_stack (ir_graph *n) { scc_info *info = get_irg_link(n); assert(info); - info->in_stack = false; + info->in_stack = 0; } -static INLINE bool +static INLINE int irg_is_in_stack (ir_graph *n) { scc_info *info = get_irg_link(n); assert(info); @@ -575,12 +615,12 @@ init_scc (void) { } } -/** Returns true if n is a loop header, i.e., it is a Block node +/** Returns non-zero if n is a loop header, i.e., it is a Block node * and has predecessors within the cfloop and out of the cfloop. * * @param root: only needed for assertion. */ -static bool +static int is_head (ir_graph *n, ir_graph *root) { int i, arity; @@ -605,12 +645,12 @@ is_head (ir_graph *n, ir_graph *root) } /** - * Returns true if n is possible loop head of an endless loop. + * Returns non-zero if n is possible loop head of an endless loop. * I.e., it is a Block, Phi or Filter node and has only predecessors * within the loop. * @arg root: only needed for assertion. */ -static bool +static int is_endless_head (ir_graph *n, ir_graph *root) { int i, arity; @@ -640,12 +680,12 @@ is_endless_head (ir_graph *n, ir_graph *root) * Check whether there is a parallel edge in the ip control flow. 
* Only */ -static bool +static int is_ip_head (ir_graph *n, ir_graph *pred) { int is_be = 0; int iv_rem = get_interprocedural_view(); - set_interprocedural_view(true); + set_interprocedural_view(1); { ir_node *sblock = get_irg_start_block(n); int i, arity = get_Block_n_cfgpreds(sblock); @@ -662,7 +702,7 @@ is_ip_head (ir_graph *n, ir_graph *pred) //printf(" "); DDMG(ip_pred); if ((ip_pred == pred) && is_backedge(sblock, i)) { //printf(" found\n"); - is_be = 1; + is_be = 1; } } } @@ -951,7 +991,7 @@ static void reset_isbe(void) { /* weight. Assign graphs the maximal depth. */ /* ----------------------------------------------------------------------------------- */ -void compute_loop_depth (ir_graph *irg, void *env) { +static void compute_loop_depth (ir_graph *irg, void *env) { int current_nesting = *(int *) env; int old_nesting = irg->callgraph_loop_depth; int old_visited = get_cg_irg_visited(irg); @@ -987,7 +1027,6 @@ void compute_loop_depth (ir_graph *irg, void *env) { set_cg_irg_visited(irg, master_cg_visited-1); } - /* ------------------------------------------------------------------------------------ */ /* Another algorithm to compute recursion nesting depth */ /* Walk the callgraph. For each crossed loop increase the nesting depth by one. */ @@ -1059,7 +1098,8 @@ static void compute_rec_depth (ir_graph *irg, void *env) { /* -- spread the nesting value -- */ if (depth == 0 || old_depth < depth) { - /* Don't walk the graph, but a tree that is an unfolded graph. */ + /* Don't walk the graph, but a tree that is an unfolded graph. + Therefore we unset the visited flag at the end. */ n_callees = get_irg_n_callees(irg); for (i = 0; i < n_callees; i++) { ir_graph *m = get_irg_callee(irg, i); @@ -1076,19 +1116,88 @@ static void compute_rec_depth (ir_graph *irg, void *env) { } +/* ----------------------------------------------------------------------------------- */ +/* Another algorithm to compute the execution frequency of methods ignoring recursions. */ +/* Walk the callgraph. Ignore backedges. Use sum of execution frequencies of Call */ +/* nodes to evaluate a callgraph edge. */ +/* ----------------------------------------------------------------------------------- */ + +double get_irg_method_execution_frequency (ir_graph *irg) { + return irg->method_execution_frequency; +} + +void set_irg_method_execution_frequency (ir_graph *irg, double freq) { + irg->method_execution_frequency = freq; + + if (irp->max_method_execution_frequency < freq) + irp->max_method_execution_frequency = freq; +} + +static void compute_method_execution_frequency (ir_graph *irg, void *env) { + int i, n_callers; + double freq; + int found_edge; + int n_callees; + + if (cg_irg_visited(irg)) return; + + /* We need the values of all predecessors (except backedges). + So they must be marked. Else we will reach the node through + one of the unmarked ones. */ + n_callers = get_irg_n_callers(irg); + for (i = 0; i < n_callers; i++) { + ir_graph *m = get_irg_caller(irg, i); + if (is_irg_caller_backedge(irg, i)) continue; + if (!cg_irg_visited(m)) { + return; + } + } + mark_cg_irg_visited(irg); + + /* Compute the new frequency. */ + freq = 0; + found_edge = 0; + for (i = 0; i < n_callers; i++) { + if (! is_irg_caller_backedge(irg, i)) { + double edge_freq = get_irg_caller_method_execution_frequency(irg, i); + assert(edge_freq >= 0); + freq += edge_freq; + found_edge = 1; + } + } + + if (!found_edge) { + /* A starting point: method only called from outside, + or only backedges as predecessors. 
*/ + freq = 1; + } + + set_irg_method_execution_frequency(irg, freq); + + /* recur */ + n_callees = get_irg_n_callees(irg); + for (i = 0; i < n_callees; i++) { + compute_method_execution_frequency (get_irg_callee(irg, i), NULL); + } +} + + +/* ----------------------------------------------------------------------------------- */ +/* The recursion stuff driver. */ +/* ----------------------------------------------------------------------------------- */ + /* Compute the backedges that represent recursions. */ void find_callgraph_recursions(void) { int i, n_irgs = get_irp_n_irgs(); - int current_nesting; - ana_entry2 e; reset_isbe(); - /* -- Compute the looptree -- */ + /* -- compute the looptree. -- */ /* The outermost graph. We start here. Then we start at all functions in irg list that are never called, then at the remaining - unvisited ones. */ + unvisited ones. The third step is needed for functions that are not + reachable from the outermost graph, but call themselves in a cycle. */ assert(get_irp_main_irg()); outermost_ir_graph = get_irp_main_irg(); init_scc(); @@ -1116,10 +1225,18 @@ void find_callgraph_recursions(void) { int j, n_callees = get_irg_n_callees(irg); for (j = 0; j < n_callees; ++j) { if (is_irg_callee_backedge(irg, j)) - set_irg_caller_backedge(get_irg_callee(irg, j), irg); + set_irg_caller_backedge(get_irg_callee(irg, j), irg); } } + irp->callgraph_state = irp_callgraph_and_calltree_consistent; +} + +void compute_performance_estimates(void) { + int i, n_irgs = get_irp_n_irgs(); + int current_nesting; + ana_entry2 e; + /* -- compute the loop depth -- */ current_nesting = 0; irp->max_callgraph_loop_depth = 0; @@ -1142,10 +1259,12 @@ void find_callgraph_recursions(void) { } } + /* -- compute the recursion depth -- */ e.loop_stack = NEW_ARR_F(ir_loop *, 0); e.tos = 0; e.recursion_nesting = 0; + irp->max_callgraph_recursion_depth = 0; master_cg_visited += 2; @@ -1169,10 +1288,24 @@ void find_callgraph_recursions(void) { DEL_ARR_F(e.loop_stack); - //dump_callgraph("-with_nesting"); - - //dump_callgraph_loop_tree(current_loop, "-after_cons"); - irp->callgraph_state = irp_callgraph_and_calltree_consistent; + /* -- compute the execution frequency -- */ + irp->max_method_execution_frequency = 0; + master_cg_visited += 2; + assert(get_irg_n_callers(get_irp_main_irg()) == 0); + compute_method_execution_frequency(get_irp_main_irg(), NULL); + for (i = 0; i < n_irgs; i++) { + ir_graph *irg = get_irp_irg(i); + if ((get_cg_irg_visited(irg) < master_cg_visited-1) && + get_irg_n_callers(irg) == 0) { + compute_method_execution_frequency(irg, NULL); + } + } + for (i = 0; i < n_irgs; i++) { + ir_graph *irg = get_irp_irg(i); + if (get_cg_irg_visited(irg) < master_cg_visited-1) { + compute_method_execution_frequency(irg, NULL); + } + } } /* Maximal loop depth of all paths from an external visible method to @@ -1204,4 +1337,20 @@ void analyse_loop_nesting_depth(void) { } find_callgraph_recursions(); + + compute_performance_estimates(); + + set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent); +} + + +loop_nesting_depth_state get_irp_loop_nesting_depth_state(void) { + return irp->lnd_state; +} +void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s) { + irp->lnd_state = s; +} +void set_irp_loop_nesting_depth_state_inconsistent(void) { + if (irp->lnd_state == loop_nesting_depth_consistent) + irp->lnd_state = loop_nesting_depth_inconsistent; }
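
Usage sketch (not part of the patch): the fragment below illustrates how the analyses added above might be driven and queried. It assumes the new accessors are declared in "callgraph.h", that "irprog.h" provides the irp iteration helpers, and that callee information is already consistent when compute_callgraph() is called (the function asserts irg_callee_info_consistent). The helper name report_callgraph_estimates and the printf reporting are illustrative only.

#include <stdio.h>

#include "irprog.h"      /* get_irp_n_irgs(), get_irp_irg() */
#include "callgraph.h"   /* compute_callgraph(), analyse_loop_nesting_depth(), ... */

static void report_callgraph_estimates(void) {
  int i, n_irgs;

  compute_callgraph();           /* build the caller/callee sets of every irg */
  analyse_loop_nesting_depth();  /* runs find_callgraph_recursions() and
                                    compute_performance_estimates() */

  n_irgs = get_irp_n_irgs();
  for (i = 0; i < n_irgs; ++i) {
    ir_graph *irg = get_irp_irg(i);
    int j, n_callees = get_irg_n_callees(irg);

    printf("irg %d: method execution frequency %g\n",
           i, get_irg_method_execution_frequency(irg));
    for (j = 0; j < n_callees; ++j) {
      printf("  callee %d: loop depth %d, summed Call frequency %g\n",
             j, get_irg_callee_loop_depth(irg, j),
             get_irg_callee_execution_frequency(irg, j));
    }
  }
}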