+/**
+ * Builds each available IFG implementation (std, list, clique, pointer)
+ * for the given chordal environment, dumps the sorted consistency check
+ * of each one into its own ".log" file and frees it again.
+ *
+ * @param chordal_env  The chordal environment; its ifg field is used as
+ *                     scratch space and is NULL on return.
+ */
+void check_ifg_implementations(be_chordal_env_t *chordal_env)
+{
+	FILE *f;
+
+	/* NOTE(review): be_chordal_open() results are not checked for NULL
+	 * here — presumably it aborts internally on failure; verify. */
+	f = be_chordal_open(chordal_env, "std", ".log");
+	chordal_env->ifg = be_ifg_std_new(chordal_env);
+	be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+	fclose(f);
+	be_ifg_free(chordal_env->ifg);
+
+	f = be_chordal_open(chordal_env, "list", ".log");
+	chordal_env->ifg = be_ifg_list_new(chordal_env);
+	be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+	fclose(f);
+	be_ifg_free(chordal_env->ifg);
+
+	f = be_chordal_open(chordal_env, "clique", ".log");
+	chordal_env->ifg = be_ifg_clique_new(chordal_env);
+	be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+	fclose(f);
+	be_ifg_free(chordal_env->ifg);
+
+	f = be_chordal_open(chordal_env, "pointer", ".log");
+	chordal_env->ifg = be_ifg_pointer_new(chordal_env);
+	be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+	fclose(f);
+	/* was leaked before: the pointer ifg was never freed */
+	be_ifg_free(chordal_env->ifg);
+
+	chordal_env->ifg = NULL;
+}
+
+/**
+ * Irg walker: checks for every Reload whether its user can perform the
+ * load itself (as a memory operand / address mode) and folds it in where
+ * the architecture allows.  A Reload that ends up without users is
+ * removed from the schedule and its operands are replaced by Bad.
+ *
+ * @param irn  The node currently visited.
+ * @param env  The be_chordal_env_t of the current pass.
+ */
+static void memory_operand_walker(ir_node *irn, void *env) {
+	be_chordal_env_t *cenv = env;
+	const arch_env_t *aenv = cenv->birg->main_env->arch_env;
+	const ir_edge_t *edge, *ne;
+	ir_node *block;
+	ir_node *spill;
+
+	/* only Reload nodes are of interest here */
+	if (! be_is_Reload(irn))
+		return;
+
+	/* always use addressmode, it's good for x86 */
+#if 0
+	/* only use memory operands, if the reload is only used by 1 node */
+	if(get_irn_n_edges(irn) > 1)
+		return;
+#endif
+
+	spill = be_get_Reload_mem(irn);
+	block = get_nodes_block(irn);
+
+	/* use the _safe variant: arch_perform_memory_operand may rewire the
+	 * out edges of irn while we iterate over them */
+	foreach_out_edge_safe(irn, edge, ne) {
+		ir_node *src = get_edge_src_irn(edge);
+		int pos = get_edge_src_pos(edge);
+
+		if (! src)
+			continue;
+
+		/* folding is only done for users in the same block that the
+		 * architecture accepts a memory operand for at this position */
+		if (get_nodes_block(src) == block && arch_possible_memory_operand(aenv, src, pos)) {
+			DBG((cenv->dbg, LEVEL_3, "performing memory operand %+F at %+F\n", irn, src));
+			arch_perform_memory_operand(aenv, src, spill, pos);
+		}
+	}
+
+	/* kill the Reload if all its users now load by themselves */
+	if (get_irn_n_edges(irn) == 0) {
+		sched_remove(irn);
+		set_irn_n(irn, 0, new_Bad());
+		set_irn_n(irn, 1, new_Bad());
+	}
+}
+
+/**
+ * Walks the whole graph and lets memory_operand_walker() fold Reloads
+ * into memory operands of their users where possible.
+ *
+ * @param chordal_env  The chordal environment of the current pass.
+ */
+static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
+	irg_walk_graph(chordal_env->irg, NULL, memory_operand_walker, chordal_env);
+}
+
+/**
+ * Per-graph node statistics, gathered for the current register class
+ * by node_stats().
+ */
+typedef struct _node_stat_t {
+	unsigned int n_phis;      /**< Phis of the current register class. */
+	unsigned int n_mem_phis;  /**< Memory Phis (Phis with spill operands). */
+	unsigned int n_copies;    /**< Copies */
+	unsigned int n_perms;     /**< Perms */
+	unsigned int n_spills;    /**< Spill nodes */
+	unsigned int n_reloads;   /**< Reloads */
+} node_stat_t;
+
+/** Walker environment for node_stat_walker(). */
+struct node_stat_walker {
+	node_stat_t *stat;              /**< The statistics record to fill. */
+	const be_chordal_env_t *cenv;   /**< Chordal env (register class, arch env). */
+	bitset_t *mem_phis;             /**< Set of Phis already identified as memory Phis. */
+};
+
+/**
+ * Irg walker collecting node statistics for the current register class:
+ * counts Phis, spills, reloads, copies and perms, and records memory
+ * Phis (PhiM nodes with a Spill or another memory Phi as operand).
+ *
+ * @param irn   The node currently visited.
+ * @param data  A struct node_stat_walker environment.
+ */
+static void node_stat_walker(ir_node *irn, void *data)
+{
+	struct node_stat_walker *env = data;
+	const arch_env_t *aenv = env->cenv->birg->main_env->arch_env;
+
+	if(arch_irn_consider_in_reg_alloc(aenv, env->cenv->cls, irn)) {
+
+		/* if the node is a normal phi */
+		if(is_Phi(irn)) {
+			env->stat->n_phis++;
+		}
+		else {
+			/* classify once instead of once per tested class */
+			int classes = arch_irn_classify(aenv, irn);
+
+			if(classes & arch_irn_class_spill)
+				++env->stat->n_spills;
+
+			else if(classes & arch_irn_class_reload)
+				++env->stat->n_reloads;
+
+			else if(classes & arch_irn_class_copy)
+				++env->stat->n_copies;
+
+			else if(classes & arch_irn_class_perm)
+				++env->stat->n_perms;
+		}
+	}
+
+	/* a mem phi is a PhiM with a mem phi operand or a Spill operand */
+	else if(is_Phi(irn) && get_irn_mode(irn) == mode_M) {
+		int i;
+
+		for(i = get_irn_arity(irn) - 1; i >= 0; --i) {
+			ir_node *op = get_irn_n(irn, i);
+
+			if((is_Phi(op) && bitset_contains_irn(env->mem_phis, op)) || (arch_irn_classify(aenv, op) & arch_irn_class_spill)) {
+				bitset_add_irn(env->mem_phis, irn);
+				env->stat->n_mem_phis++;
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * Collects the node statistics of an irg for the current register class.
+ *
+ * @param cenv  The chordal environment (provides irg and register class).
+ * @param stat  Statistics record filled by this function; zeroed first.
+ */
+static void node_stats(const be_chordal_env_t *cenv, node_stat_t *stat)
+{
+	struct node_stat_walker walk_env;
+
+	memset(stat, 0, sizeof(*stat));
+
+	walk_env.cenv     = cenv;
+	walk_env.mem_phis = bitset_irg_malloc(cenv->irg);
+	walk_env.stat     = stat;
+
+	irg_walk_graph(cenv->irg, NULL, node_stat_walker, &walk_env);
+
+	bitset_free(walk_env.mem_phis);
+}
+
+/**
+ * Irg walker counting "real" instructions, i.e. everything except
+ * Proj, Phi, Start and End nodes.
+ *
+ * @param irn   The node currently visited.
+ * @param data  Pointer to the int counter to increment.
+ */
+static void insn_count_walker(ir_node *irn, void *data)
+{
+	int *cnt = data;
+	int opcode = get_irn_opcode(irn);
+
+	if(opcode != iro_Proj && opcode != iro_Phi && opcode != iro_Start && opcode != iro_End)
+		++*cnt;
+}
+
+/**
+ * Counts the instructions of an irg, ignoring Proj, Phi, Start and End
+ * nodes (see insn_count_walker()).
+ *
+ * NOTE(review): the counter is an int because insn_count_walker() casts
+ * the walker data to int*; it is implicitly converted to unsigned int on
+ * return, which is fine for any realistic instruction count.
+ *
+ * @param irg  The graph whose instructions are counted.
+ * @return The number of counted instructions.
+ */
+static unsigned int count_insns(ir_graph *irg)
+{
+	int cnt = 0;
+	irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
+	return cnt;
+}
+
+#ifdef WITH_LIBCORE
+/**
+ * Initialize all timers.
+ */
+static void be_init_timer(be_options_t *main_opts)