+ /* Compute loop nesting information (for weighting copies) */
+ /* NOTE(review): the comment above does not match the dump() call below —
+  * presumably left over from a removed loop-analysis call; confirm against
+  * the full function. */
+ dump(DUMP_PREPARED, irg, "-prepared", dump_ir_block_graph);
+ BE_TIMER_ONLY(num_nodes_r = get_num_reachable_nodes(irg));
+
+ /* Graph sanity checks: WARN mode merely runs the verifiers (they report
+  * on their own), ASSERT mode aborts on failure.
+  * NOTE(review): in ASSERT mode the verifier calls live inside assert(),
+  * so with NDEBUG defined no verification runs at all — presumably
+  * intentional (release builds skip checks); confirm. */
+ if (be_options.vrfy_option == BE_VRFY_WARN) {
+ be_check_dominance(irg);
+ be_verify_out_edges(irg);
+ }
+ else if (be_options.vrfy_option == BE_VRFY_ASSERT) {
+ assert(be_verify_out_edges(irg));
+ assert(be_check_dominance(irg) && "Dominance verification failed");
+ }
+
+ /*
+ * Create execution frequencies from profile data or estimate some
+ */
+ if (be_profile_has_data()) {
+ birg->exec_freq = be_create_execfreqs_from_profile(irg);
+ } else {
+ /* 10: presumably the per-loop-nesting weight factor for the
+  * frequency estimate — TODO confirm against compute_execfreq(). */
+ birg->exec_freq = compute_execfreq(irg, 10);
+ }
+
+ /* let backend prepare scheduling */
+ BE_TIMER_PUSH(t_codegen);
+ arch_code_generator_before_sched(birg->cg);
+ BE_TIMER_POP(t_codegen);
+
+ /* schedule the irg */
+ BE_TIMER_PUSH(t_sched);
+ switch (be_options.scheduler) {
+ default:
+ fprintf(stderr, "Warning: invalid scheduler (%d) selected, falling back to list scheduler.\n", be_options.scheduler);
+ /* fall through: unknown scheduler degrades to the list scheduler */
+ case BE_SCHED_LIST:
+ list_sched(birg, &be_options);
+ break;
+#ifdef WITH_ILP
+ case BE_SCHED_ILP:
+ be_ilp_sched(birg);
+ //fprintf(stderr, "Warning: ILP scheduler not yet fully implemented, falling back to list scheduler.\n");
+ //list_sched(birg, &be_options);
+ break;
+#endif /* WITH_ILP */
+ }; /* NOTE(review): stray ';' after the switch brace — harmless empty statement */
+ BE_TIMER_POP(t_sched);
+
+ dump(DUMP_SCHED, irg, "-sched", dump_ir_block_graph_sched);
+
+ /* check schedule */
+ BE_TIMER_PUSH(t_verify);
+ be_sched_vrfy(irg, be_options.vrfy_option);
+ BE_TIMER_POP(t_verify);
+
+ be_do_stat_nodes(irg, "04 Schedule");
+
+ /* introduce patterns to assure constraints */
+ BE_TIMER_PUSH(t_constr);
+ /* we switch off optimizations here, because they might cause trouble */
+ save_optimization_state(&state);
+ set_optimize(0);
+ set_opt_normalize(0);
+
+ /* add Keeps for should_be_different constrained nodes */
+ /* beware: needs schedule due to usage of be_ssa_constr */
+ assure_constraints(birg);
+ BE_TIMER_POP(t_constr);
+
+ dump(DUMP_SCHED, irg, "-assured", dump_ir_block_graph_sched);
+ be_do_stat_nodes(irg, "05 Constraints");
+
+ /* connect all stack modifying nodes together (see beabi.c) */
+ BE_TIMER_PUSH(t_abi);
+ be_abi_fix_stack_nodes(birg->abi, NULL);
+ BE_TIMER_POP(t_abi);
+
+ dump(DUMP_SCHED, irg, "-fix_stack", dump_ir_block_graph_sched);
+
+ /* re-check the schedule after the stack-node fixup above */
+ BE_TIMER_PUSH(t_verify);
+ be_sched_vrfy(irg, be_options.vrfy_option);
+ BE_TIMER_POP(t_verify);
+
+ /* do some statistics */
+ be_do_stat_reg_pressure(birg);
+
+ /* stuff needs to be done after scheduling but before register allocation */
+ BE_TIMER_PUSH(t_codegen);
+ arch_code_generator_before_ra(birg->cg);
+ BE_TIMER_POP(t_codegen);
+
+#ifdef FIRM_STATISTICS
+ /* record estimated cost of the graph before register allocation */
+ if(be_stat_ev_is_active()) {
+ be_stat_ev_l("costs_before_ra",
+ (long) be_estimate_irg_costs(irg, env.arch_env, birg->exec_freq));
+ }
+#endif
+
+ /* Do register allocation */
+ BE_TIMER_PUSH(t_regalloc);
+ be_allocate_registers(birg);
+ BE_TIMER_POP(t_regalloc);
+
+#ifdef FIRM_STATISTICS
+ /* record estimated cost again so the RA's effect can be compared */
+ if(be_stat_ev_is_active()) {
+ be_stat_ev_l("costs_after_ra",
+ (long) be_estimate_irg_costs(irg, env.arch_env, birg->exec_freq));
+ }
+#endif
+
+ dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);
+ be_do_stat_nodes(irg, "06 Register Allocation");
+
+ /* let the code generator prepare the graph for emitter */
+ BE_TIMER_PUSH(t_finish);
+ arch_code_generator_after_ra(birg->cg);
+ BE_TIMER_POP(t_finish);
+
+ /* fix stack offsets: re-link stack nodes, drop dead nodes from the
+  * schedule, then materialize the final stack biases */
+ BE_TIMER_PUSH(t_abi);
+ be_abi_fix_stack_nodes(birg->abi, NULL);
+ be_remove_dead_nodes_from_schedule(irg);
+ be_abi_fix_stack_bias(birg->abi);
+ BE_TIMER_POP(t_abi);
+
+ dump(DUMP_SCHED, irg, "-fix_stack_after_ra", dump_ir_block_graph_sched);
+
+ BE_TIMER_PUSH(t_finish);
+ arch_code_generator_finish(birg->cg);
+ BE_TIMER_POP(t_finish);
+
+ dump(DUMP_FINAL, irg, "-finish", dump_ir_block_graph_sched);
+
+ /* check schedule and register allocation
+  * NOTE(review): as above, the ASSERT branch compiles to nothing under
+  * NDEBUG — presumably intentional; confirm. */
+ BE_TIMER_PUSH(t_verify);
+ if (be_options.vrfy_option == BE_VRFY_WARN) {
+ //irg_verify(irg, VRFY_ENFORCE_SSA);
+ be_check_dominance(irg);
+ be_verify_out_edges(irg);
+ be_verify_schedule(irg);
+ be_verify_register_allocation(env.arch_env, irg);
+ }
+ else if (be_options.vrfy_option == BE_VRFY_ASSERT) {
+ //assert(irg_verify(irg, VRFY_ENFORCE_SSA) && "irg verification failed");
+ assert(be_verify_out_edges(irg));
+ assert(be_check_dominance(irg) && "Dominance verification failed");
+ assert(be_verify_schedule(irg) && "Schedule verification failed");
+ assert(be_verify_register_allocation(env.arch_env, irg)
+ && "register allocation verification failed");
+ }
+ BE_TIMER_POP(t_verify);
+
+ /* emit assembler code */
+ BE_TIMER_PUSH(t_emit);
+ arch_code_generator_done(birg->cg);
+ BE_TIMER_POP(t_emit);
+
+ dump(DUMP_FINAL, irg, "-end", dump_ir_block_graph_sched);
+
+ /* the ABI environment is no longer needed once code is emitted */
+ BE_TIMER_PUSH(t_abi);
+ be_abi_free(birg->abi);
+ BE_TIMER_POP(t_abi);
+
+ be_do_stat_nodes(irg, "07 Final");
+ restore_optimization_state(&state);
+
+ BE_TIMER_ONLY(num_nodes_a = get_num_reachable_nodes(irg));
+ BE_TIMER_POP(t_other);
+
+/* Emit one timer's result: human-readable to stdout, or as a stat event
+ * when the stat-event backend is active. (Macro body continues beyond
+ * this hunk.) */
+#define LC_EMIT(timer) \
+ if(!be_stat_ev_is_active()) { \
+ printf("%-20s: %.3lf msec\n", lc_timer_get_description(timer), (double)lc_timer_elapsed_usec(timer) / 1000.0); \
+ } else { \
+ be_stat_ev_l(lc_timer_get_name(timer), lc_timer_elapsed_msec(timer)); \