+ /*
+ * Backend pipeline fragment (this hunk starts mid-function): builds the
+ * instruction schedule, enforces register constraints, runs register
+ * allocation and the finish phase for one irg, verifying in between.
+ * NOTE(review): the enclosing function begins before this hunk and
+ * continues after it — see the surrounding file for setup/teardown.
+ */
+ /*
+ * Since the code generator made a lot of new nodes and skipped
+ * a lot of old ones, we should do dead node elimination here.
+ * Note that this requires disabling the edges here.
+ */
+ edges_deactivate(irg);
+ /* NOTE(review): dead node elimination is currently disabled —
+ * presumably it conflicts with backend-specific node data; confirm
+ * before re-enabling. */
+ //dead_node_elimination(irg);
+ edges_activate(irg);
+
+ /* Compute loop nesting information (for weighting copies) */
+ construct_cf_backedges(irg);
+ dump(DUMP_PREPARED, irg, "-prepared", dump_ir_block_graph);
+ /* record reachable-node count for the timing statistics only */
+ BE_TIMER_ONLY(num_nodes_r = get_num_reachable_nodes(irg));
+
+ /* let backend prepare scheduling */
+ BE_TIMER_PUSH(t_codegen);
+ arch_code_generator_before_sched(birg.cg);
+ BE_TIMER_POP(t_codegen);
+
+ /* schedule the irg */
+ BE_TIMER_PUSH(t_sched);
+ list_sched(&birg, &be_options);
+ BE_TIMER_POP(t_sched);
+
+ dump(DUMP_SCHED, irg, "-sched", dump_ir_block_graph_sched);
+
+ /* check schedule */
+ BE_TIMER_PUSH(t_verify);
+ be_sched_vrfy(birg.irg, vrfy_option);
+ BE_TIMER_POP(t_verify);
+
+ be_do_stat_nodes(irg, "04 Schedule");
+
+ /* introduce patterns to assure constraints */
+ BE_TIMER_PUSH(t_constr);
+ /* we switch off optimizations here, because they might cause trouble */
+ save_optimization_state(&state);
+ set_optimize(0);
+ set_opt_normalize(0);
+
+ /* add Keeps for should_be_different constrained nodes */
+ /* beware: needs schedule due to usage of be_ssa_constr */
+ assure_constraints(&birg);
+ BE_TIMER_POP(t_constr);
+
+ dump(DUMP_SCHED, irg, "-assured", dump_ir_block_graph_sched);
+ be_do_stat_nodes(irg, "05 Constraints");
+
+ /* connect all stack modifying nodes together (see beabi.c) */
+ BE_TIMER_PUSH(t_abi);
+ be_abi_fix_stack_nodes(birg.abi, NULL);
+ BE_TIMER_POP(t_abi);
+
+ dump(DUMP_SCHED, irg, "-fix_stack", dump_ir_block_graph_sched);
+
+ /* check schedule */
+ BE_TIMER_PUSH(t_verify);
+ be_sched_vrfy(birg.irg, vrfy_option);
+ BE_TIMER_POP(t_verify);
+
+ /* do some statistics */
+ be_do_stat_reg_pressure(&birg);
+
+ /* stuff needs to be done after scheduling but before register allocation */
+ BE_TIMER_PUSH(t_codegen);
+ arch_code_generator_before_ra(birg.cg);
+ BE_TIMER_POP(t_codegen);
+
+ /* Do register allocation */
+ /* NOTE(review): raw lc_timer_start/stop is used here instead of
+ * BE_TIMER_PUSH/POP — presumably because ra->allocate manages its own
+ * nested timers (it returns ra_timer); verify against the timer macros. */
+ BE_TIMER_ONLY(lc_timer_start(t_regalloc));
+ ra_timer = ra->allocate(&birg);
+ BE_TIMER_ONLY(lc_timer_stop(t_regalloc));
+
+ dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);
+ be_do_stat_nodes(irg, "06 Register Allocation");
+
+ /* let the codegenerator prepare the graph for emitter */
+ BE_TIMER_PUSH(t_finish);
+ arch_code_generator_after_ra(birg.cg);
+ BE_TIMER_POP(t_finish);
+
+ /* fix stack offsets */
+ /* NOTE(review): the bias fix is disabled at this point; the actual
+ * stack-offset fixup happens after arch_code_generator_finish below —
+ * this empty timed section looks like it could be removed entirely. */
+ BE_TIMER_PUSH(t_abi);
+ //be_abi_fix_stack_bias(birg.abi);
+ BE_TIMER_POP(t_abi);
+
+ BE_TIMER_PUSH(t_finish);
+ arch_code_generator_finish(birg.cg);
+ BE_TIMER_POP(t_finish);
+
+ /* fix stack nodes and offsets now that the finish phase is done */
+ BE_TIMER_PUSH(t_abi);
+ be_abi_fix_stack_nodes(birg.abi, NULL);
+ be_remove_dead_nodes_from_schedule(birg.irg);
+ be_abi_fix_stack_bias(birg.abi);
+ BE_TIMER_POP(t_abi);
+
+ dump(DUMP_FINAL, irg, "-finish", dump_ir_block_graph_sched);
+
+ /* check schedule and register allocation */
+ BE_TIMER_PUSH(t_verify);
+ if (vrfy_option == BE_VRFY_WARN) {
+ /* NOTE(review): SSA verification is disabled here — confirm whether
+ * the post-RA graph is expected to violate SSA form. */
+ //irg_verify(birg.irg, VRFY_ENFORCE_SSA);
+ be_check_dominance(birg.irg);
+ be_verify_schedule(birg.irg);
+ be_verify_register_allocation(env.arch_env, birg.irg);