+ }
+}
+#endif /* WITH_LIBCORE */
+
+/**
+ * Conditionally dump an irg.  The dump happens only when every bit of
+ * @p mask is set in the global dump options.  If a register class is
+ * given, its name is woven into the dump file suffix.
+ *
+ * @param mask       Bits that must all be enabled in options.dump_flags.
+ * @param irg        The graph to dump.
+ * @param cls        Register class used to tag the file name, or NULL.
+ * @param suffix     Suffix for the dump file name.
+ * @param dump_func  The actual dumper to invoke.
+ */
+static void dump(unsigned mask, ir_graph *irg,
+                 const arch_register_class_t *cls,
+                 const char *suffix,
+                 void (*dump_func)(ir_graph *, const char *))
+{
+	char name[256];
+
+	/* guard clause: nothing to do unless all requested bits are on */
+	if ((options.dump_flags & mask) != mask)
+		return;
+
+	if (cls == NULL) {
+		be_dump(irg, suffix, dump_func);
+		return;
+	}
+
+	snprintf(name, sizeof(name), "-%s%s", cls->name, suffix);
+	be_dump(irg, name, dump_func);
+}
+
+/**
+ * Recompute the set of "ignore" colors for the current register class:
+ * clear the bitset, let the ABI add its reserved registers, then add
+ * every register whose type carries the ignore flag.
+ *
+ * @param chordal_env  The chordal environment whose ignore_colors bitset
+ *                     is (re)filled.
+ */
+static void put_ignore_colors(be_chordal_env_t *chordal_env)
+{
+	const arch_register_class_t *cls = chordal_env->cls;
+	int idx;
+
+	bitset_clear_all(chordal_env->ignore_colors);
+	be_abi_put_ignore_regs(chordal_env->birg->abi, cls, chordal_env->ignore_colors);
+
+	for (idx = 0; idx < cls->n_regs; ++idx) {
+		if (arch_register_type_is(&cls->regs[idx], ignore))
+			bitset_set(chordal_env->ignore_colors, idx);
+	}
+}
+
+/**
+ * Open a dump file whose name encodes graph and register class:
+ * "<prefix><irg>_<class-name><suffix>".
+ *
+ * @param env     Chordal environment supplying the irg and register class.
+ * @param prefix  Leading part of the file name.
+ * @param suffix  Trailing part of the file name (e.g. ".log").
+ * @return The stream opened for writing, or NULL if fopen() failed --
+ *         callers must check for NULL.
+ */
+FILE *be_chordal_open(const be_chordal_env_t *env, const char *prefix, const char *suffix)
+{
+ char buf[1024];
+
+ /* ir_snprintf: firm-specific formatter, "%F" presumably prints the irg
+  * name -- confirm against ir_snprintf's documentation. */
+ ir_snprintf(buf, sizeof(buf), "%s%F_%s%s", prefix, env->irg, env->cls->name, suffix);
+ /* NOTE(review): the 't' in mode "wt" is non-standard (Windows text
+  * mode); plain "w" is the portable equivalent -- confirm intent. */
+ return fopen(buf, "wt");
+}
+
+/**
+ * Debugging aid: build the interference graph with each available IFG
+ * implementation (std, list, clique, pointer) in turn and write the
+ * sorted checking output of every implementation to its own log file,
+ * so the results can be diffed against each other.
+ *
+ * On return chordal_env->ifg is NULL and all intermediate graphs have
+ * been freed.
+ *
+ * @param chordal_env  The chordal environment to build the IFGs for.
+ */
+void check_ifg_implementations(be_chordal_env_t *chordal_env)
+{
+	FILE *f;
+
+	chordal_env->ifg = be_ifg_std_new(chordal_env);
+	f = be_chordal_open(chordal_env, "std", ".log");
+	if (f != NULL) {  /* be_chordal_open may fail; skip the check then */
+		be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+		fclose(f);
+	}
+
+	be_ifg_free(chordal_env->ifg);
+	chordal_env->ifg = be_ifg_list_new(chordal_env);
+	f = be_chordal_open(chordal_env, "list", ".log");
+	if (f != NULL) {
+		be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+		fclose(f);
+	}
+
+	be_ifg_free(chordal_env->ifg);
+	chordal_env->ifg = be_ifg_clique_new(chordal_env);
+	f = be_chordal_open(chordal_env, "clique", ".log");
+	if (f != NULL) {
+		be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+		fclose(f);
+	}
+
+	be_ifg_free(chordal_env->ifg);
+	chordal_env->ifg = be_ifg_pointer_new(chordal_env);
+	f = be_chordal_open(chordal_env, "pointer", ".log");
+	if (f != NULL) {
+		be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+		fclose(f);
+	}
+
+	/* free the last IFG too -- it was previously leaked -- before
+	 * resetting the field */
+	be_ifg_free(chordal_env->ifg);
+	chordal_env->ifg = NULL;
+}
+
+/**
+ * Walker: for every Reload node, check whether its (single) user can
+ * perform the load itself as a memory operand.  If so, the reload is
+ * folded into the user; a Reload left without users is removed from the
+ * schedule and its operands are detached via Bad nodes.
+ *
+ * @param irn  The node currently visited by the graph walker.
+ * @param env  The be_chordal_env_t of the current register class.
+ */
+static void memory_operand_walker(ir_node *irn, void *env) {
+ be_chordal_env_t *cenv = env;
+ const arch_env_t *aenv = cenv->birg->main_env->arch_env;
+ const ir_edge_t *edge, *ne;
+ ir_node *block;
+ ir_node *spill;
+
+ /* only Reload nodes are of interest */
+ if (! be_is_Reload(irn))
+ return;
+
+ // only use memory operands, if the reload is only used by 1 node
+ if(get_irn_n_edges(irn) > 1)
+ return;
+
+ spill = be_get_Reload_mem(irn);
+ block = get_nodes_block(irn);
+
+ /* _safe variant: presumably arch_perform_memory_operand() rewires the
+  * out edges while we iterate -- confirm with the edge API. */
+ foreach_out_edge_safe(irn, edge, ne) {
+ ir_node *src = get_edge_src_irn(edge);
+ int pos = get_edge_src_pos(edge);
+
+ if (! src)
+ continue;
+
+ /* fold only within the same block; the backend decides whether
+  * operand `pos` of `src` may be taken directly from memory */
+ if (get_nodes_block(src) == block && arch_possible_memory_operand(aenv, src, pos)) {
+ DBG((cenv->dbg, LEVEL_3, "performing memory operand %+F at %+F\n", irn, src));
+ arch_perform_memory_operand(aenv, src, spill, pos);
+ }
+ }
+
+ /* kill the Reload: all users were folded, so unlink it from the
+  * schedule and replace its two operands with Bad */
+ if (get_irn_n_edges(irn) == 0) {
+ sched_remove(irn);
+ set_irn_n(irn, 0, new_Bad());
+ set_irn_n(irn, 1, new_Bad());
+ }
+}
+
+/**
+ * Walk the whole graph and try to fold Reloads into memory operands of
+ * their users (see memory_operand_walker()).
+ *
+ * @param chordal_env  The chordal environment whose irg is walked.
+ */
+static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
+	ir_graph *irg = chordal_env->irg;
+
+	irg_walk_graph(irg, NULL, memory_operand_walker, chordal_env);
+}
+
+/**
+ * Sorry for doing stats again...
+ *
+ * Per-register-class node counters accumulated during a graph walk.
+ */
+typedef struct _node_stat_t {
+ unsigned int n_phis; /**< Phis of the current register class. */
+ unsigned int n_mem_phis; /**< Memory Phis (Phis with spill operands). */
+ unsigned int n_copies; /**< Copies */
+ unsigned int n_perms; /**< Perms */
+ unsigned int n_spills; /**< Spill nodes */
+ unsigned int n_reloads; /**< Reloads */
+} node_stat_t;
+
+/** Environment passed to the node-statistics walker. */
+struct node_stat_walker {
+ node_stat_t *stat; /**< Counters being accumulated. */
+ const be_chordal_env_t *cenv; /**< Chordal env supplying arch env and register class. */
+ bitset_t *mem_phis; /**< Presumably marks memory Phis -- confirm with the walker body. */
+};
+
+static void node_stat_walker(ir_node *irn, void *data)
+{
+ struct node_stat_walker *env = data;
+ const arch_env_t *aenv = env->cenv->birg->main_env->arch_env;
+
+ if(arch_irn_consider_in_reg_alloc(aenv, env->cenv->cls, irn)) {
+
+ /* if the node is a normal phi */
+ if(is_Phi(irn))
+ env->stat->n_phis++;
+
+ else if(arch_irn_classify(aenv, irn) & arch_irn_class_spill)
+ ++env->stat->n_spills;
+
+ else if(arch_irn_classify(aenv, irn) & arch_irn_class_reload)
+ ++env->stat->n_reloads;
+
+ else if(arch_irn_classify(aenv, irn) & arch_irn_class_copy)
+ ++env->stat->n_copies;