+ if (! arch_code_generator_has_spiller(birg->cg)) {
+ /* use one of the generic spiller */
+
+ /* Perform the following for each register class. */
+ for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
+ post_spill_env_t pse;
+ const arch_register_class_t *cls
+ = arch_env_get_reg_class(arch_env, j);
+
+ /* classes flagged for manual register allocation are not spilled here */
+ if(arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
+ continue;
+
+
+ /* NOTE(review): this push is unconditional, but the matching
+  * stat_ev_ctx_pop below sits inside a stat_ev_if block -- the
+  * push/pop look asymmetric; confirm both are no-ops when stat
+  * events are disabled, otherwise the context stack leaks. */
+ stat_ev_ctx_push_str("bechordal_cls", cls->name);
+
+ stat_ev_if {
+ be_do_stat_reg_pressure(birg, cls);
+ }
+
+ /* set up the per-class post-spill environment; pre_spill()
+  * presumably records cls into pse -- see pse.cls use below */
+ memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
+ pse.birg = birg;
+ pre_spill(&pse, cls);
+
+ BE_TIMER_PUSH(t_ra_spill);
+ be_do_spill(birg, cls);
+ BE_TIMER_POP(t_ra_spill);
+
+ dump(BE_CH_DUMP_SPILL, irg, pse.cls, "-spill",
+ dump_ir_block_graph_sched);
+
+ post_spill(&pse, 0);
+
+ /* emit per-class node statistics as a delta against the previous
+  * iteration's snapshot */
+ stat_ev_if {
+ be_node_stats_t node_stats;
+
+ be_collect_node_stats(&node_stats, birg);
+ be_subtract_node_stats(&node_stats, &last_node_stats);
+ be_emit_node_stats(&node_stats, "bechordal_");
+
+ be_copy_node_stats(&last_node_stats, &node_stats);
+ stat_ev_ctx_pop("bechordal_cls");
+ }
+ }
+ } else {
+ post_spill_env_t *pse;
+
+ /* the backend has its own spiller */
+ m = arch_env_get_n_reg_class(arch_env);
+
+ /* NOTE(review): unchecked alloca of m environments -- fine while the
+  * number of register classes stays small; confirm m is bounded. */
+ pse = alloca(m * sizeof(pse[0]));
+
+ for (j = 0; j < m; ++j) {
+ memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
+ pse[j].birg = birg;
+ /* NOTE(review): pse[j].cls is read here before anything visible
+  * initializes it (only cenv and birg were set above) -- reading an
+  * indeterminate value is UB. The generic-spiller branch passes
+  * arch_env_get_reg_class(arch_env, j) instead; this likely should
+  * do the same -- confirm against pre_spill()'s contract. */
+ pre_spill(&pse[j], pse[j].cls);
+ }
+
+ BE_TIMER_PUSH(t_ra_spill);
+ arch_code_generator_spill(birg->cg, birg);
+ BE_TIMER_POP(t_ra_spill);
+ dump(BE_CH_DUMP_SPILL, irg, NULL, "-spill", dump_ir_block_graph_sched);
+
+ for (j = 0; j < m; ++j) {
+ post_spill(&pse[j], j);
+ }
+ }