+ else if(arch_irn_classify(aenv, irn) & arch_irn_class_reload)
+ ++env->stat->n_reloads;
+
+ else if(arch_irn_classify(aenv, irn) & arch_irn_class_copy)
+ ++env->stat->n_copies;
+
+ else if(arch_irn_classify(aenv, irn) & arch_irn_class_perm)
+ ++env->stat->n_perms;
+ }
+
+ /* a mem phi is a PhiM with a mem phi operand or a Spill operand */
+ else if(is_Phi(irn) && get_irn_mode(irn) == mode_M) {
+ int i;
+
+ for(i = get_irn_arity(irn) - 1; i >= 0; --i) {
+ ir_node *op = get_irn_n(irn, i);
+
+ if((is_Phi(op) && bitset_contains_irn(env->mem_phis, op)) || (arch_irn_classify(aenv, op) & arch_irn_class_spill)) {
+ bitset_add_irn(env->mem_phis, irn);
+ env->stat->n_mem_phis++;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Collect per-node statistics for the graph of a chordal environment.
+ *
+ * Zero-initializes @p stat, then walks the whole graph with
+ * node_stat_walker, which accumulates the counts into @p stat.
+ * The mem_phis bitset is scratch state for the walker (it remembers
+ * already-classified memory phis) and is freed again before returning.
+ *
+ * @param cenv  the chordal environment whose irg is examined
+ * @param stat  statistics record filled by this call (fully overwritten)
+ */
+static void node_stats(const be_chordal_env_t *cenv, node_stat_t *stat)
+{
+ struct node_stat_walker env;
+
+ memset(stat, 0, sizeof(stat[0]));
+ env.cenv = cenv;
+ env.mem_phis = bitset_irg_malloc(cenv->irg);
+ env.stat = stat;
+ irg_walk_graph(cenv->irg, NULL, node_stat_walker, &env);
+ bitset_free(env.mem_phis);
+}
+
+/**
+ * Graph-walker callback that counts nodes, skipping pure bookkeeping
+ * nodes (Proj, Phi, Start, End) which do not correspond to emitted
+ * instructions.
+ *
+ * @param irn   the node currently visited by the walker
+ * @param data  pointer to the int counter to increment (cast from void*)
+ */
+static void insn_count_walker(ir_node *irn, void *data)
+{
+ int *cnt = data;
+
+ switch(get_irn_opcode(irn)) {
+ case iro_Proj:
+ case iro_Phi:
+ case iro_Start:
+ case iro_End:
+ break;
+ default:
+ (*cnt)++;
+ }
+}
+
+/**
+ * Count the nodes of @p irg that represent real instructions, i.e.
+ * everything except Proj, Phi, Start and End (see insn_count_walker).
+ *
+ * @param irg  the graph to count
+ * @return     the number of counted nodes
+ *
+ * NOTE(review): the counter is a signed int implicitly converted to the
+ * unsigned return type; fine for realistic graph sizes, but worth
+ * confirming no overflow is possible for very large graphs.
+ */
+static unsigned int count_insns(ir_graph *irg)
+{
+ int cnt = 0;
+ irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
+ return cnt;
+}
+
+#ifdef WITH_LIBCORE
+/**
+ * Initialize all timers.
+ */
+static void be_init_timer(be_options_t *main_opts)
+{