2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Provides several statistic functions for the backend.
23 * @author Christian Wuerdig
37 #include "dbginfo_t.h"
38 #include "firmstat_t.h"
49 #ifdef FIRM_STATISTICS
/** Per-irg backend statistics record; entries of this type are stored in
 * the global be_stat_data set, keyed by the irg pointer.
 * NOTE(review): the closing "} be_stat_irg_t;" line of this typedef is not
 * visible in this excerpt. */
51 typedef struct _be_stat_irg_t {
52 ir_graph *irg; /**< the irg, the statistic is about */
53 pset *phases; /**< node statistics for each phase */
54 struct obstack obst; /**< the obstack containing the information */
55 const arch_env_t *arch_env; /**< the current arch env */
/** Node-count statistics gathered for one (irg, phase) combination by
 * do_nodes_stat(); filled lazily by be_do_stat_nodes().
 * NOTE(review): the closing "} be_stat_phase_t;" line is not visible in
 * this excerpt. */
58 typedef struct _be_stat_phase_t {
59 const arch_env_t *arch_env; /**< the current arch env */
60 const char *phase; /**< the name of the phase the statistic is about */
61 unsigned long num_nodes; /**< overall number of reachable nodes in the irg */
62 unsigned long num_data; /**< number of data nodes ((mode_datab && ! Proj && ! Phi) || mode_T) */
63 unsigned long num_proj; /**< number of Projs */
64 unsigned long num_phi; /**< number of Phis */
65 unsigned long num_load; /**< number of Loads */
66 unsigned long num_store; /**< number of Stores */
67 unsigned long num_spill; /**< number of Spills */
68 unsigned long num_reload; /**< number of Reloads */
71 static set *be_stat_data = NULL;
73 static int cmp_stat_phase(const void *a, const void *b) {
74 const be_stat_phase_t *p1 = a;
75 const be_stat_phase_t *p2 = b;
77 return p1->phase != p2->phase;
80 static int cmp_stat_data(const void *a, const void *b, size_t len) {
81 const be_stat_irg_t *p1 = a;
82 const be_stat_irg_t *p2 = b;
85 return p1->irg != p2->irg;
/**
 * Looks up the statistics entry for @p irg in the global be_stat_data set.
 * NOTE(review): the initialisation of @c key (presumably "key.irg = irg;")
 * and the surrounding guard/return lines are not visible in this excerpt —
 * confirm against the full file.
 */
88 static be_stat_irg_t *find_stat_irg_entry(ir_graph *irg) {
89 be_stat_irg_t *entry, key;
95 entry = set_find(be_stat_data, &key, sizeof(key), HASH_PTR(irg));
/**
 * Returns the statistics entry for @p irg, inserting a freshly keyed entry
 * into be_stat_data if none exists yet.
 */
100 static be_stat_irg_t *get_stat_irg_entry(ir_graph *irg) {
101 be_stat_irg_t *entry, key;
106 entry = find_stat_irg_entry(irg);
/* not found yet: insert a new entry keyed by the irg pointer
 * (NOTE(review): the enclosing if and the key setup are on lines not
 * visible in this excerpt) */
110 entry = set_insert(be_stat_data, &key, sizeof(key), HASH_PTR(irg));
116 typedef struct pressure_walker_env_t pressure_walker_env_t;
/** Environment for the register pressure block walk.
 * NOTE(review): further members (birg, liveness info, pressure counters —
 * used below as env->birg, env->lv, env->max_pressure, env->regpressure)
 * are declared on lines not visible in this excerpt. */
117 struct pressure_walker_env_t {
123 const arch_register_class_t *cls; /**< class to measure; NULL means measure all classes */
/**
 * Measures the register pressure of one block for one register class:
 * starts from the live-out set of the block and walks the schedule in
 * reverse, transferring liveness across each node. Records the block's
 * maximal pressure via stat_be_block_regpressure(), accumulates the
 * per-node pressure sum into env->regpressure and updates
 * env->max_pressure.
 * NOTE(review): the "ir_node *block" parameter line and the declarations
 * of the locals (irn, cnt, max_live) are not visible in this excerpt.
 */
126 static void check_reg_pressure_class(pressure_walker_env_t *env,
128 const arch_register_class_t *cls)
130 be_irg_t *birg = env->birg;
131 ir_graph *irg = be_get_birg_irg(birg);
132 const arch_env_t *aenv = be_get_birg_arch_env(birg);
134 ir_nodeset_t live_nodes;
/* start with the values live at the end of the block */
137 ir_nodeset_init(&live_nodes);
138 be_liveness_end_of_block(env->lv, aenv, cls, block, &live_nodes);
139 max_live = ir_nodeset_size(&live_nodes);
140 env->regpressure += max_live;
/* walk the schedule backwards, updating the live set per node */
142 sched_foreach_reverse(block, irn) {
148 be_liveness_transfer(aenv, cls, irn, &live_nodes);
149 cnt = ir_nodeset_size(&live_nodes);
/* keep the running maximum and accumulate the per-node pressure */
150 max_live = cnt < max_live ? max_live : cnt;
151 env->regpressure += cnt;
/* remember the highest pressure seen in any block of this walk */
155 if(max_live > env->max_pressure)
156 env->max_pressure = max_live;
/* report this block's maximal pressure to the statistics module */
158 stat_be_block_regpressure(irg, block, max_live, cls->name);
159 ir_nodeset_destroy(&live_nodes);
/**
 * Block walker: collects reg pressure statistics per block and per class.
 * If env->cls is set only that class is measured, otherwise every register
 * class of the ISA is measured.
 */
165 static void stat_reg_pressure_block(ir_node *block, void *data) {
166 pressure_walker_env_t *env = data;
168 if(env->cls != NULL) {
169 check_reg_pressure_class(env, block, env->cls);
/* NOTE(review): the "} else {" line and the declarations of i and n are
 * not visible in this excerpt */
171 const arch_env_t *arch_env = be_get_birg_arch_env(env->birg);
/* measure every register class the ISA provides */
174 n = arch_isa_get_n_reg_class(arch_env->isa);
175 for (i = 0; i < n; i++) {
176 const arch_register_class_t *cls
177 = arch_isa_get_reg_class(arch_env->isa, i);
179 check_reg_pressure_class(env, block, cls);
/**
 * Collects register pressure statistics for the whole irg and emits the
 * average and maximum pressure through the stat_ev event interface.
 * NOTE(review): the initialisation of env.birg, env.regpressure and
 * env.insn_count happens on lines not visible in this excerpt.
 */
184 void be_do_stat_reg_pressure(be_irg_t *birg) {
185 pressure_walker_env_t env;
186 ir_graph *irg = be_get_birg_irg(birg);
187 double average_pressure;
191 env.max_pressure = 0;
/* make sure liveness sets are computed and available for the walk */
193 be_liveness_assure_sets(be_assure_liveness(birg));
194 env.lv = be_get_birg_liveness(birg);
196 // hack for now, TODO: remove me later
/* hard-coded register class index 2 — presumably the general purpose
 * class of this ISA; TODO confirm against the backend's class layout */
200 env.cls = arch_isa_get_reg_class(
201 be_get_birg_arch_env(birg)->isa, 2);
204 /* Collect register pressure information for each block */
205 irg_block_walk_graph(irg, stat_reg_pressure_block, NULL, &env);
/* NOTE(review): if regpressure and insn_count are both integers this is
 * integer division — their declarations are not visible here; confirm */
207 average_pressure = env.regpressure / env.insn_count;
208 stat_ev_emit("average_register_pressure", average_pressure);
209 stat_ev_emit("maximum_register_pressure", env.max_pressure);
213 * Notify statistic module about amount of ready nodes.
215 void be_do_stat_sched_ready(ir_node *block, const ir_nodeset_t *ready_set) {
216 if (stat_is_active()) {
217 stat_be_block_sched_ready(get_irn_irg(block), block, MIN(ir_nodeset_size(ready_set), 5));
222 * Pass information about a perm to the statistic module.
224 void be_do_stat_perm(const char *class_name, int n_regs, ir_node *perm, ir_node *block, int n, int real_size) {
225 if (stat_is_active()) {
226 stat_be_block_stat_perm(class_name, n_regs, perm, block, n, real_size);
231 * Pass information about a cycle or chain in a perm to the statistic module.
233 void be_do_stat_permcycle(const char *class_name, ir_node *perm, ir_node *block, int is_chain, int n_elems, int n_ops) {
234 if (stat_is_active()) {
235 stat_be_block_stat_permcycle(class_name, perm, block, is_chain, n_elems, n_ops);
/**
 * Walker: classifies one node and increments the matching counter of the
 * current phase statistics (data/Proj/Phi/Load/Store/Spill/Reload).
 * NOTE(review): the declarations of mode/opc, the counter-increment
 * statements, and the tail of the ignore-condition are on lines not
 * visible in this excerpt.
 */
242 static void do_nodes_stat(ir_node *irn, void *env) {
243 be_stat_phase_t *phase = env;
246 arch_irn_class_t irn_class;
251 mode = get_irn_mode(irn);
252 opc = get_irn_opcode(irn);
256 /* check for nodes we want to ignore */
257 if (be_is_Keep(irn) ||
258 be_is_CopyKeep(irn) ||
/* classify by node kind: non-X Proj, Phi, data node, then Load/Store */
263 if (is_Proj(irn) && (mode != mode_X)) {
267 else if (is_Phi(irn)) {
271 else if (mode_is_datab(mode) || ((mode == mode_T) && ! is_be_node(irn)) || (is_Proj(irn) && (mode == mode_X)))
276 else if (opc == iro_Store)
/* let the backend classify spills, reloads and stack accesses */
279 irn_class = arch_irn_classify(phase->arch_env, irn);
280 if (irn_class & arch_irn_class_spill)
282 else if (irn_class & arch_irn_class_reload)
284 else if (irn_class & arch_irn_class_stackparam)
286 else if (irn_class & arch_irn_class_load)
288 else if (irn_class & arch_irn_class_store)
/**
 * Collects node statistics for one compilation phase.
 *
 * @param irg the irg to do statistics for
 * @param phase the phase to collect the statistic for
 */
298 void be_do_stat_nodes(ir_graph *irg, const char *phase) {
299 be_stat_irg_t *irg_entry;
300 be_stat_phase_t *phase_entry, phase_key;
302 irg_entry = find_stat_irg_entry(irg);
/* look up the per-phase entry; allocate it on the irg obstack on a miss
 * (NOTE(review): the guard around the allocation is not visible here) */
307 phase_key.phase = phase;
308 phase_entry = pset_find_ptr(irg_entry->phases, &phase_key);
311 phase_entry = obstack_alloc(&irg_entry->obst, sizeof(*phase_entry));
312 phase_entry = pset_insert(irg_entry->phases, phase_entry, HASH_PTR(phase));
/* reset all counters, then (re)bind the phase name and arch env */
314 memset(phase_entry, 0, sizeof(*phase_entry));
316 phase_entry->phase = phase;
317 phase_entry->arch_env = irg_entry->arch_env;
/* count the nodes: blockwise walk with do_nodes_stat as post-callback */
319 irg_walk_blkwise_graph(irg_entry->irg, NULL, do_nodes_stat, phase_entry);
323 * Dumps statistics about nodes (called from dump_snapshot)
325 static void be_dump_node_stat(dumper_t *dmp, graph_entry_t *entry) {
326 be_stat_irg_t *stat_irg = find_stat_irg_entry(entry->irg);
327 be_stat_phase_t *phase;
329 if (! stat_irg || ! stat_irg->phases)
332 fprintf(dmp->f, "===> BE NODE STATISTIC BEGIN <===\n");
334 foreach_pset(stat_irg->phases, phase) {
335 fprintf(dmp->f, "--> Phase: %s\n", phase->phase);
336 fprintf(dmp->f, "# nodes: %ld\n", phase->num_nodes);
337 fprintf(dmp->f, "# data nodes: %ld\n", phase->num_data);
338 fprintf(dmp->f, "# Proj: %ld\n", phase->num_proj);
339 fprintf(dmp->f, "# Phi: %ld\n", phase->num_phi);
340 fprintf(dmp->f, "# Load: %ld\n", phase->num_load);
341 fprintf(dmp->f, "# Store: %ld\n", phase->num_store);
342 fprintf(dmp->f, "# Spill: %ld\n", phase->num_spill);
343 fprintf(dmp->f, "# Reload: %ld\n", phase->num_reload);
346 fprintf(dmp->f, "===> BE NODE STATISTIC END <===\n");
/**
 * Creates and initialises the backend statistics entry for the given irg
 * and, on the very first call, registers the node-statistics dumper.
 * NOTE(review): the guards (active-check around the set creation, the
 * reg_func check before registering the dumper) are on lines not visible
 * in this excerpt.
 */
352 void be_stat_init_irg(const arch_env_t *arch_env, ir_graph *irg) {
/* ensures the dumper below is registered only once per process */
353 static int reg_func = 1;
355 if (stat_is_active()) {
356 be_stat_irg_t *stat_irg;
/* lazily create the global per-irg set on first use */
359 be_stat_data = new_set(cmp_stat_data, 8);
361 stat_irg = get_stat_irg_entry(irg);
363 stat_irg->phases = new_pset(cmp_stat_phase, 8);
364 stat_irg->arch_env = arch_env;
365 obstack_init(&stat_irg->obst);
368 /* first init: register dumper */
369 stat_register_dumper_func(be_dump_node_stat);
374 #endif /* FIRM_STATISTICS */
/** Environment for the irg cost-estimation walker.
 * NOTE(review): the cost accumulator member (used below as env->costs,
 * presumably "double costs;") is declared on a line not visible in this
 * excerpt. */
376 typedef struct _estimate_irg_costs_env_t {
377 const arch_env_t *arch_env;
378 ir_exec_freq *execfreqs;
380 } estimate_irg_costs_env_t;
/**
 * Block walker: sums the backend's estimated cost of every scheduled node
 * in the block and adds it, weighted by the block's execution frequency,
 * to the running total in env->costs.
 * NOTE(review): the declarations of the locals (node, costs) are on lines
 * not visible in this excerpt.
 */
382 static void estimate_block_costs(ir_node *block, void *data)
384 estimate_irg_costs_env_t *env = data;
388 sched_foreach(block, node) {
/* per-operation cost as estimated by the backend */
389 costs += arch_get_op_estimated_cost(env->arch_env, node);
/* weight the block's cost by its execution frequency */
392 env->costs += costs * get_block_execfreq(env->execfreqs, block);
/**
 * Estimates the execution cost of an irg: walks all blocks and sums the
 * execfreq-weighted per-node cost estimates.
 * NOTE(review): the initialisation of env.costs and the return statement
 * are on lines not visible in this excerpt.
 *
 * @param irg the graph to estimate
 * @param arch_env the architecture environment used for cost queries
 * @param execfreqs execution frequencies used as weights
 * @return the accumulated cost estimate
 */
395 double be_estimate_irg_costs(ir_graph *irg, const arch_env_t *arch_env, ir_exec_freq *execfreqs)
397 estimate_irg_costs_env_t env;
399 env.arch_env = arch_env;
400 env.execfreqs = execfreqs;
403 irg_block_walk_graph(irg, estimate_block_costs, NULL, &env);
408 #ifdef FIRM_STATISTICS
411 #else /* FIRM_STATISTICS */
413 void (be_stat_init_irg)(const arch_env_t *arch_env, ir_graph *irg) {}
414 void (be_do_stat_nodes)(ir_graph *irg, const char *phase) {}
415 void (be_do_stat_reg_pressure)(be_irg_t *birg) {}
416 void (be_do_stat_sched_ready)(ir_node *block, ir_nodeset_t *ready_set) {}
417 void (be_do_stat_perm)(const char *class_name, int n_regs, ir_node *perm, ir_node *block, int n, int real_size) {}
419 #endif /* FIRM_STATISTICS */