+static void put_ignore_colors(be_chordal_env_t *chordal_env)
+{
+ int n_colors = chordal_env->cls->n_regs;
+ int i;
+
+ bitset_clear_all(chordal_env->ignore_colors);
+ be_abi_put_ignore_regs(chordal_env->birg->abi, chordal_env->cls, chordal_env->ignore_colors);
+ for(i = 0; i < n_colors; ++i)
+ if(arch_register_type_is(&chordal_env->cls->regs[i], ignore))
+ bitset_set(chordal_env->ignore_colors, i);
+}
+
+FILE *be_chordal_open(const be_chordal_env_t *env, const char *prefix, const char *suffix)
+{
+ char buf[1024];
+
+ ir_snprintf(buf, sizeof(buf), "%s%F_%s%s", prefix, env->irg, env->cls->name, suffix);
+ return fopen(buf, "wt");
+}
+
+void check_ifg_implementations(be_chordal_env_t *chordal_env)
+{
+ FILE *f;
+
+ f = be_chordal_open(chordal_env, "std", ".log");
+ chordal_env->ifg = be_ifg_std_new(chordal_env);
+ be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+ fclose(f);
+
+ f = be_chordal_open(chordal_env, "list", ".log");
+ be_ifg_free(chordal_env->ifg);
+ chordal_env->ifg = be_ifg_list_new(chordal_env);
+ be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+ fclose(f);
+
+ f = be_chordal_open(chordal_env, "clique", ".log");
+ be_ifg_free(chordal_env->ifg);
+ chordal_env->ifg = be_ifg_clique_new(chordal_env);
+ be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+ fclose(f);
+
+ f = be_chordal_open(chordal_env, "pointer", ".log");
+ be_ifg_free(chordal_env->ifg);
+ chordal_env->ifg = be_ifg_pointer_new(chordal_env);
+ be_ifg_check_sorted_to_file(chordal_env->ifg, f);
+ fclose(f);
+
+ chordal_env->ifg = NULL;
+};
+
+/**
+ * Checks for every reload if it's user can perform the load on itself.
+ */
+static void memory_operand_walker(ir_node *irn, void *env) {
+ be_chordal_env_t *cenv = env;
+ const arch_env_t *aenv = cenv->birg->main_env->arch_env;
+ const ir_edge_t *edge, *ne;
+ ir_node *block;
+ ir_node *spill;
+
+ if (! be_is_Reload(irn))
+ return;
+
+ /* always use addressmode, it's good for x86 */
+#if 0
+ /* only use memory operands, if the reload is only used by 1 node */
+ if(get_irn_n_edges(irn) > 1)
+ return;
+#endif
+
+ spill = be_get_Reload_mem(irn);
+ block = get_nodes_block(irn);
+
+ foreach_out_edge_safe(irn, edge, ne) {
+ ir_node *src = get_edge_src_irn(edge);
+ int pos = get_edge_src_pos(edge);
+
+ if (! src)
+ continue;
+
+ if (get_nodes_block(src) == block && arch_possible_memory_operand(aenv, src, pos)) {
+ DBG((cenv->dbg, LEVEL_3, "performing memory operand %+F at %+F\n", irn, src));
+ arch_perform_memory_operand(aenv, src, spill, pos);
+ }
+ }
+
+ /* kill the Reload */
+ if (get_irn_n_edges(irn) == 0) {
+ sched_remove(irn);
+ set_irn_n(irn, 0, new_Bad());
+ set_irn_n(irn, 1, new_Bad());
+ }
+}
+
/**
 * Starts a walk for memory operands if supported by the backend.
 *
 * Walks the whole irg with memory_operand_walker; the walker itself decides
 * per Reload node whether folding into a user is possible.
 */
static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
	irg_walk_graph(chordal_env->irg, NULL, memory_operand_walker, chordal_env);
}
+
/**
 * Per-irg / per-class node counters gathered by node_stats().
 * (Original author's note: "Sorry for doing stats again...")
 */
typedef struct _node_stat_t {
	unsigned int n_phis;      /**< Phis of the current register class. */
	unsigned int n_mem_phis;  /**< Memory Phis (PhiM nodes fed by a Spill or another memory Phi). */
	unsigned int n_copies;    /**< Nodes classified as copies by the backend. */
	unsigned int n_perms;     /**< Nodes classified as Perms by the backend. */
	unsigned int n_spills;    /**< Spill nodes. */
	unsigned int n_reloads;   /**< Reload nodes. */
} node_stat_t;
+
/** Walker environment for node_stat_walker(). */
struct node_stat_walker {
	node_stat_t *stat;              /**< the counters being filled in */
	const be_chordal_env_t *cenv;   /**< current chordal environment (irg, class, arch env) */
	bitset_t *mem_phis;             /**< marks PhiM nodes already recognized as memory Phis */
};
+
/**
 * Graph walker collecting node statistics for one register class.
 *
 * Nodes that take part in register allocation for the current class are
 * sorted into the phi/spill/reload/copy/perm counters via the backend's
 * node classification.  Memory Phis (PhiM nodes with a Spill or another
 * memory Phi as operand) are counted separately and remembered in
 * env->mem_phis so chains of memory Phis are recognized.
 */
static void node_stat_walker(ir_node *irn, void *data)
{
	struct node_stat_walker *env = data;
	const arch_env_t *aenv = env->cenv->birg->main_env->arch_env;

	if(arch_irn_consider_in_reg_alloc(aenv, env->cenv->cls, irn)) {

		/* if the node is a normal phi */
		if(is_Phi(irn))
			env->stat->n_phis++;

		else if(arch_irn_classify(aenv, irn) & arch_irn_class_spill)
			++env->stat->n_spills;

		else if(arch_irn_classify(aenv, irn) & arch_irn_class_reload)
			++env->stat->n_reloads;

		else if(arch_irn_classify(aenv, irn) & arch_irn_class_copy)
			++env->stat->n_copies;

		else if(arch_irn_classify(aenv, irn) & arch_irn_class_perm)
			++env->stat->n_perms;
	}

	/* a mem phi is a PhiM with a mem phi operand or a Spill operand */
	else if(is_Phi(irn) && get_irn_mode(irn) == mode_M) {
		int i;

		/* NOTE(review): detecting a Phi-of-memory-Phi relies on the operand
		 * having been visited (and marked in mem_phis) before this node;
		 * cyclic PhiM chains may therefore be undercounted — confirm. */
		for(i = get_irn_arity(irn) - 1; i >= 0; --i) {
			ir_node *op = get_irn_n(irn, i);

			if((is_Phi(op) && bitset_contains_irn(env->mem_phis, op)) || (arch_irn_classify(aenv, op) & arch_irn_class_spill)) {
				bitset_add_irn(env->mem_phis, irn);
				env->stat->n_mem_phis++;
				break;
			}
		}
	}
}
+
+static void node_stats(const be_chordal_env_t *cenv, node_stat_t *stat)
+{
+ struct node_stat_walker env;
+
+ memset(stat, 0, sizeof(stat[0]));
+ env.cenv = cenv;
+ env.mem_phis = bitset_irg_malloc(cenv->irg);
+ env.stat = stat;
+ irg_walk_graph(cenv->irg, NULL, node_stat_walker, &env);
+ bitset_free(env.mem_phis);
+}
+
+static void insn_count_walker(ir_node *irn, void *data)
+{
+ int *cnt = data;
+
+ switch(get_irn_opcode(irn)) {
+ case iro_Proj:
+ case iro_Phi:
+ case iro_Start:
+ case iro_End:
+ break;
+ default:
+ (*cnt)++;
+ }
+}
+
+static unsigned int count_insns(ir_graph *irg)
+{
+ int cnt = 0;
+ irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
+ return cnt;
+}
+
+#ifdef WITH_LIBCORE
/**
 * Initialize all register-allocation phase timers.
 *
 * Registers one named timer per allocator phase in the global ra_timer
 * struct and resets each of them.  Does nothing unless timing was enabled
 * via the backend options (main_opts->timing == BE_TIME_ON).
 */
static void be_init_timer(be_options_t *main_opts)
{
	if (main_opts->timing == BE_TIME_ON) {
		ra_timer.t_prolog = lc_timer_register("ra_prolog", "regalloc prolog");
		ra_timer.t_epilog = lc_timer_register("ra_epilog", "regalloc epilog");
		ra_timer.t_live = lc_timer_register("ra_liveness", "be liveness");
		ra_timer.t_spill = lc_timer_register("ra_spill", "spiller");
		ra_timer.t_spillslots = lc_timer_register("ra_spillslots", "spillslots");
		ra_timer.t_color = lc_timer_register("ra_color", "graph coloring");
		ra_timer.t_ifg = lc_timer_register("ra_ifg", "interference graph");
		ra_timer.t_copymin = lc_timer_register("ra_copymin", "copy minimization");
		ra_timer.t_ssa = lc_timer_register("ra_ssadestr", "ssa destruction");
		ra_timer.t_verify = lc_timer_register("ra_verify", "graph verification");
		ra_timer.t_other = lc_timer_register("ra_other", "other time");

		/* make sure no timer carries state from a previous run */
		LC_STOP_AND_RESET_TIMER(ra_timer.t_prolog);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_epilog);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_live);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_spill);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_spillslots);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_color);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_ifg);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_copymin);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_ssa);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_verify);
		LC_STOP_AND_RESET_TIMER(ra_timer.t_other);
	}
}
+
+#define BE_TIMER_INIT(main_opts) be_init_timer(main_opts)
+
/* Push a phase timer onto the timer stack.  Complains — assert or warning,
 * depending on the verify option — if the timer is already running.
 * Wrapped in do { } while (0) so the macro behaves like a single statement
 * and cannot break an unbraced if/else at the call site (the previous
 * expansion was a bare `if` statement — a dangling-else hazard). */
#define BE_TIMER_PUSH(timer)                                                            \
	do {                                                                                \
		if (main_opts->timing == BE_TIME_ON) {                                          \
			if (! lc_timer_push(timer)) {                                               \
				if (options.vrfy_option == BE_CH_VRFY_ASSERT)                           \
					assert(!"Timer already on stack, cannot be pushed twice.");         \
				else if (options.vrfy_option == BE_CH_VRFY_WARN)                        \
					fprintf(stderr, "Timer %s already on stack, cannot be pushed twice.\n", \
						lc_timer_get_name(timer));                                      \
			}                                                                           \
		}                                                                               \
	} while (0)

/* Pop the top timer from the timer stack and verify it is the expected one;
 * mismatch triggers an assert or a warning depending on the verify option.
 * Same do { } while (0) wrapping as BE_TIMER_PUSH for statement safety. */
#define BE_TIMER_POP(timer)                                                             \
	do {                                                                                \
		if (main_opts->timing == BE_TIME_ON) {                                          \
			lc_timer_t *tmp = lc_timer_pop();                                           \
			if (options.vrfy_option == BE_CH_VRFY_ASSERT)                               \
				assert(tmp == timer && "Attempt to pop wrong timer.");                  \
			else if (options.vrfy_option == BE_CH_VRFY_WARN && tmp != timer)            \
				fprintf(stderr, "Attempt to pop wrong timer. %s is on stack, trying to pop %s.\n", \
					lc_timer_get_name(tmp), lc_timer_get_name(timer));                  \
			timer = tmp;                                                                \
		}                                                                               \
	} while (0)
+#else
+
+#define BE_TIMER_INIT(main_opts)
+#define BE_TIMER_PUSH(timer)
+#define BE_TIMER_POP(timer)
+
+#endif /* WITH_LIBCORE */
+
/**
 * Indices of the tags attached to emitted statistics records
 * (presumably positions in a lc_stat tag vector — confirm against the
 * statistics emitter using these constants).
 */
enum {
	STAT_TAG_FILE = 0,  /**< tag: source file name */
	STAT_TAG_TIME = 1,  /**< tag: time stamp */
	STAT_TAG_IRG = 2,   /**< tag: irg name */
	STAT_TAG_CLS = 3,   /**< tag: register class name */
	STAT_TAG_LAST       /**< number of tags */
};
+
+/**
+ * Performs chordal register allocation for each register class on given irg.
+ *
+ * @param bi Backend irg object
+ * @return Structure containing timer for the single phases or NULL if no timing requested.
+ */
+static be_ra_timer_t *be_ra_chordal_main(const be_irg_t *bi)