3 * @author Sebastian Hack
13 #include <libcore/lc_opts.h>
14 #include <libcore/lc_opts_enum.h>
15 #endif /* WITH_LIBCORE */
26 #include "iredges_t.h"
32 #include "firm/bearch_firm.h"
33 #include "ia32/bearch_ia32.h"
34 #include "arm/bearch_arm.h"
35 #include "ppc32/bearch_ppc32.h"
36 #include "mips/bearch_mips.h"
43 #include "besched_t.h"
44 #include "belistsched.h"
46 #include "bespillilp.h"
47 #include "bespillbelady.h"
49 #include "beraextern.h"
50 #include "bechordal_t.h"
52 #include "beifg_impl.h"
53 #include "becopyopt.h"
54 #include "becopystat.h"
55 #include "bessadestr.h"
58 #include "beschedmris.h"
/*
 * Bit flags selecting the points in the backend pipeline at which the
 * current irg is dumped for debugging (consumed by dump() further below
 * via the dump_flags mask).
 */
62 #define DUMP_INITIAL (1 << 0)
63 #define DUMP_ABI (1 << 1)
64 #define DUMP_SCHED (1 << 2)
65 #define DUMP_PREPARED (1 << 3)
66 #define DUMP_RA (1 << 4)
67 #define DUMP_FINAL (1 << 5)
69 /* options visible for anyone */
70 static be_options_t be_options = {
/* default ilp server host name -- NOTE(review): remaining initializers are on elided lines */
72 "i44pc52.info.uni-karlsruhe.de",
/* mask of DUMP_* bits; default 0 = dump nothing (set via the "dump" option) */
79 static unsigned dump_flags = 0;
81 /* register allocator to use. */
82 static const be_ra_t *ra = &be_ra_chordal_allocator;
84 /* back end instruction set architecture to use */
85 static const arch_isa_if_t *isa_if = &ia32_isa_if;
/* non-zero disables the mris schedule preparation (see "nomris" option and list_sched() call) */
87 static int be_disable_mris = 0;
/* root entry of the "be" option group; initialized in be_opt_register() */
91 static lc_opt_entry_t *be_grp_root = NULL;
93 /* possible dumping options */
94 static const lc_opt_enum_mask_items_t dump_items[] = {
96 { "initial", DUMP_INITIAL },
98 { "sched", DUMP_SCHED },
99 { "prepared", DUMP_PREPARED },
100 { "regalloc", DUMP_RA },
101 { "final", DUMP_FINAL },
/* "all" = every DUMP_* bit up to and including DUMP_FINAL */
102 { "all", 2 * DUMP_FINAL - 1 },
106 /* register allocators */
107 static const lc_opt_enum_const_ptr_items_t ra_items[] = {
108 { "chordal", &be_ra_chordal_allocator },
109 { "external", &be_ra_external_allocator },
113 /* instruction set architectures. */
114 static const lc_opt_enum_const_ptr_items_t isa_items[] = {
115 { "ia32", &ia32_isa_if },
117 { "arm", &arm_isa_if },
118 { "ppc32", &ppc32_isa_if },
119 { "mips", &mips_isa_if },
/* variable bindings connecting the enum tables above to the option entries below */
124 static lc_opt_enum_mask_var_t dump_var = {
125 &dump_flags, dump_items
128 static lc_opt_enum_const_ptr_var_t ra_var = {
129 (const void **) &ra, ra_items
132 static lc_opt_enum_const_ptr_var_t isa_var = {
133 (const void **) &isa_if, isa_items
/* main backend option table, registered under the "be" group in be_opt_register() */
136 static const lc_opt_table_entry_t be_main_options[] = {
137 LC_OPT_ENT_ENUM_MASK("dump", "dump irg on several occasions", &dump_var),
138 LC_OPT_ENT_ENUM_PTR ("ra", "register allocator", &ra_var),
139 LC_OPT_ENT_ENUM_PTR ("isa", "the instruction set architecture", &isa_var),
140 LC_OPT_ENT_NEGBOOL ("noomitfp", "do not omit frame pointer", &be_omit_fp),
141 LC_OPT_ENT_NEGBOOL ("nomris", "disable mris schedule preparation", &be_disable_mris),
/* ILP options only exist when built WITH_ILP */
144 LC_OPT_ENT_STR ("ilp.server", "the ilp server name", be_options.ilp_server, sizeof(be_options.ilp_server)),
145 LC_OPT_ENT_STR ("ilp.solver", "the ilp solver name", be_options.ilp_solver, sizeof(be_options.ilp_solver)),
146 #endif /* WITH_ILP */
150 #endif /* WITH_LIBCORE */
/*
 * Register all backend options with the libcore option facility:
 * creates the "be" group (and its "ra" subgroup), adds the main option
 * table, then lets every register allocator and every ISA contribute
 * its own options.
 * NOTE(review): the listing elides lines here (opening brace, the
 * declaration of 'i', the run_once guard check, loop closing braces).
 */
152 void be_opt_register(void)
156 lc_opt_entry_t *be_grp_ra;
/* guard so the options are registered only once -- check presumably on an elided line */
157 static int run_once = 0;
161 be_grp_root = lc_opt_get_grp(firm_opt_get_root(), "be");
162 be_grp_ra = lc_opt_get_grp(be_grp_root, "ra");
164 lc_opt_add_table(be_grp_root, be_main_options);
166 /* register allocator options */
167 for(i = 0; ra_items[i].name != NULL; ++i) {
/* each allocator hooks its sub-options into the "be.ra" group */
168 const be_ra_t *ra = ra_items[i].value;
169 ra->register_options(be_grp_ra);
172 /* register isa options */
173 for(i = 0; isa_items[i].name != NULL; ++i) {
/* each ISA hooks its options directly into the "be" group */
174 const arch_isa_if_t *isa = isa_items[i].value;
175 isa->register_options(be_grp_root);
178 #endif /* WITH_LIBCORE */
181 /* Parse one argument. */
182 int be_parse_arg(const char *arg) {
/* "help" or a lone "?" prints the option help for the whole "be" group */
184 if (strcmp(arg, "help") == 0 || (arg[0] == '?' && arg[1] == '\0')) {
185 lc_opt_print_help(be_grp_root, stdout);
/* any other argument is handed to libcore's single-argument option parser;
   its return value is passed through to the caller */
188 return lc_opt_from_single_arg(be_grp_root, NULL, arg, NULL);
191 #endif /* WITH_LIBCORE */
194 /** The be parameters returned by default, all off. */
195 const static backend_params be_params = {
202 /* Initialize the Firm backend. Must be run BEFORE init_firm()! */
203 const backend_params *be_init(void)
/* if the selected ISA provides its own parameters, return those;
   otherwise presumably &be_params is returned on an elided line */
214 if (isa_if->get_params)
215 return isa_if->get_params();
/*
 * Initialize the backend main environment: zero it, set up its obstack,
 * allocate and initialize the arch environment for the selected ISA, and
 * stack the irn handlers (architecture handler first if present, then the
 * generic backend node handler, then the phi handler on top).
 * Returns 'env' (return statement presumably on an elided line).
 */
219 static be_main_env_t *be_init_env(be_main_env_t *env, FILE *file_handle)
221 memset(env, 0, sizeof(*env));
222 obstack_init(&env->obst);
/* arch_env lives on the env's own obstack, freed together in be_done_env() */
223 env->arch_env = obstack_alloc(&env->obst, sizeof(env->arch_env[0]));
224 env->options = &be_options;
225 FIRM_DBG_REGISTER(env->dbg, "be.main");
227 arch_env_init(env->arch_env, isa_if, file_handle);
229 /* Register the irn handler of the architecture */
230 if (arch_isa_get_irn_handler(env->arch_env->isa))
231 arch_env_push_irn_handler(env->arch_env, arch_isa_get_irn_handler(env->arch_env->isa));
234 * Register the node handler of the back end infrastructure.
235 * This irn handler takes care of the platform independent
236 * spill, reload and perm nodes.
238 arch_env_push_irn_handler(env->arch_env, &be_node_irn_handler);
239 env->phi_handler = be_phi_handler_new(env->arch_env);
240 arch_env_push_irn_handler(env->arch_env, env->phi_handler);
/*
 * Tear down a backend main environment created by be_init_env():
 * let the ISA implementation clean up, free the phi handler, and release
 * the obstack (which also frees env->arch_env allocated on it).
 */
245 static void be_done_env(be_main_env_t *env)
247 env->arch_env->isa->impl->done(env->arch_env->isa);
248 be_phi_handler_free(env->phi_handler);
249 obstack_free(&env->obst, NULL);
/*
 * Dump 'irg' via 'dumper' with the given filename suffix, but only if the
 * corresponding DUMP_* bit in 'mask' is enabled in the global dump_flags
 * (set through the "dump" command line option).
 */
252 static void dump(int mask, ir_graph *irg, const char *suffix,
253 void (*dumper)(ir_graph *, const char *))
255 if(dump_flags & mask)
256 be_dump(irg, suffix, dumper);
260 * Prepare a backend graph for code generation.
262 static void prepare_graph(be_irg_t *birg)
264 ir_graph *irg = birg->irg;
266 /* Normalize proj nodes. */
267 normalize_proj_nodes(irg);
269 /* Make just one return node. */
270 normalize_one_return(irg);
272 /* Remove critical edges */
273 remove_critical_cf_edges(irg);
275 /* Compute the dominance information. */
/* NOTE(review): the actual dominance computation call is on elided lines 276-278 */
279 /* Ensure, that the ir_edges are computed. */
/* NOTE(review): the edge activation call is on elided lines 280-281 */
282 /* check, if the dominance property is fulfilled. */
283 be_check_dominance(irg);
285 /* reset the phi handler. */
286 be_phi_handler_reset(birg->main_env->phi_handler);
290 * The Firm backend main loop.
291 * Do architecture specific lowering for all graphs
292 * and call the architecture specific code generator.
/*
 * Pipeline per irg (order matters and must not be changed):
 *   prepare -> before_abi -> ABI introduction -> prepare_graph (cg) ->
 *   loop info -> scheduling -> constraints -> stack fixup ->
 *   register allocation -> after_ra -> stack bias -> emit/done.
 * NOTE(review): the listing elides declarations (env, isa, birg, i, n),
 * braces and some calls; comments below only describe visible lines.
 */
294 static void be_main_loop(FILE *file_handle)
300 be_init_env(&env, file_handle);
302 isa = arch_env_get_isa(env.arch_env);
304 // /* for debugging, anchors helps */
305 // dump_all_anchors(1);
/* process every graph in the program independently */
308 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
309 ir_graph *irg = get_irp_irg(i);
310 const arch_code_generator_if_t *cg_if;
/* optimization flags are saved here and restored at the end of the iteration */
312 int save_optimize, save_normalize;
315 birg.main_env = &env;
317 DBG((env.dbg, LEVEL_2, "====> IRG: %F\n", irg));
318 dump(DUMP_INITIAL, irg, "-begin", dump_ir_block_graph);
320 be_stat_init_irg(env.arch_env, irg);
321 be_do_stat_nodes(irg, "01 Begin");
323 /* set the current graph (this is important for several firm functions) */
324 current_ir_graph = birg.irg;
326 /* Get the code generator interface. */
327 cg_if = isa->impl->get_code_generator_if(isa);
329 /* get a code generator for this graph. */
330 birg.cg = cg_if->init(&birg);
332 /* create the code generator and generate code. */
333 prepare_graph(&birg);
335 /* some transformations need to be done before abi introduce */
336 arch_code_generator_before_abi(birg.cg);
338 /* implement the ABI conventions. */
339 birg.abi = be_abi_introduce(&birg);
340 dump(DUMP_ABI, irg, "-abi", dump_ir_block_graph);
342 be_do_stat_nodes(irg, "02 Abi");
/* architecture specific graph preparation (lowering to target nodes) */
345 arch_code_generator_prepare_graph(birg.cg);
347 be_do_stat_nodes(irg, "03 Prepare");
350 * Since the code generator made a lot of new nodes and skipped
351 * a lot of old ones, we should do dead node elimination here.
352 * Note that this requires disabling the edges here.
354 edges_deactivate(irg);
355 //dead_node_elimination(irg);
358 /* Compute loop nesting information (for weighting copies) */
359 construct_cf_backedges(irg);
361 dump(DUMP_PREPARED, irg, "-prepared", dump_ir_block_graph);
363 /* Schedule the graphs. */
364 arch_code_generator_before_sched(birg.cg);
/* be_disable_mris comes from the "nomris" command line option */
365 list_sched(&birg, be_disable_mris);
366 dump(DUMP_SCHED, irg, "-sched", dump_ir_block_graph_sched);
/* schedule verification is active only in assert-enabled builds */
368 assert(be_verify_schedule(birg.irg));
370 be_do_stat_nodes(irg, "04 Schedule");
372 /* we switch off optimizations here, because they might cause trouble */
373 save_optimize = get_optimize();
374 save_normalize = get_opt_normalize();
376 set_opt_normalize(0);
378 /* add Keeps for should_be_different constrained nodes */
379 /* beware: needs schedule due to usage of be_ssa_constr */
380 assure_constraints(&birg);
381 dump(DUMP_SCHED, irg, "-assured", dump_ir_block_graph_sched);
383 be_do_stat_nodes(irg, "05 Constraints");
385 /* connect all stack modifying nodes together (see beabi.c) */
386 be_abi_fix_stack_nodes(birg.abi);
387 dump(DUMP_SCHED, irg, "-fix_stack", dump_ir_block_graph_sched);
389 /* Verify the schedule */
390 assert(sched_verify_irg(irg));
392 /* do some statistics */
393 be_do_stat_reg_pressure(&birg);
395 /* Do register allocation */
396 arch_code_generator_before_ra(birg.cg);
/* NOTE(review): the actual allocator invocation (ra->...) is on elided lines */
398 dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);
400 be_do_stat_nodes(irg, "06 Register Allocation");
402 arch_code_generator_after_ra(birg.cg);
/* fix stack offsets now that all spill slots are known */
403 be_abi_fix_stack_bias(birg.abi);
405 assert(be_verify_schedule(birg.irg));
/* emit the code and let the code generator clean up */
407 arch_code_generator_done(birg.cg);
408 dump(DUMP_FINAL, irg, "-end", dump_ir_extblock_graph_sched);
409 be_abi_free(birg.abi);
411 be_do_stat_nodes(irg, "07 Final");
413 /* reset the optimizations */
414 set_optimize(save_optimize);
415 set_opt_normalize(save_normalize);
417 /* switched of due to statistics (statistic module needs all irgs) */
418 // free_ir_graph(irg);
423 /* Main interface to the frontend. */
424 void be_main(FILE *file_handle)
426 /* never build code for pseudo irgs */
427 set_visit_pseudo_irgs(0);
/* run the complete backend pipeline over all graphs, emitting to file_handle */
430 be_main_loop(file_handle);
433 /** The debug info retriever function. */
434 static retrieve_dbg_func retrieve_dbg = NULL;
436 /* Sets a debug info retriever. */
/* stores 'func' into the static retrieve_dbg hook (assignment presumably on an elided line) */
437 void be_set_debug_retrieve(retrieve_dbg_func func) {
441 /* Retrieve the debug info. */
442 const char *be_retrieve_dbg_info(const dbg_info *dbg, unsigned *line) {
/* forwards to the registered hook; a NULL check for retrieve_dbg is
   presumably on an elided line -- verify before relying on it */
444 return retrieve_dbg(dbg, line);