3 * @author Sebastian Hack
13 #include <libcore/lc_opts.h>
14 #include <libcore/lc_opts_enum.h>
15 #endif /* WITH_LIBCORE */
26 #include "iredges_t.h"
32 #include "firm/bearch_firm.h"
33 #include "ia32/bearch_ia32.h"
40 #include "besched_t.h"
41 #include "belistsched.h"
43 #include "bespillilp.h"
44 #include "bespillbelady.h"
46 #include "beraextern.h"
47 #include "bechordal_t.h"
49 #include "beifg_impl.h"
50 #include "becopyopt.h"
51 #include "becopystat.h"
52 #include "bessadestr.h"
/*
 * Bitmask flags selecting at which backend phases the irg is dumped;
 * tested by dump() against the user-settable dump_flags mask below.
 */
56 #define DUMP_INITIAL (1 << 0)
57 #define DUMP_ABI (1 << 1)
58 #define DUMP_SCHED (1 << 2)
59 #define DUMP_PREPARED (1 << 3)
60 #define DUMP_RA (1 << 4)
61 #define DUMP_FINAL (1 << 5)
63 /* options visible for anyone */
/* NOTE(review): initializer lines are missing from this excerpt (only the
 * ilp server host string is visible) — confirm against the full file. */
64 static be_options_t be_options = {
66 "i44pc52.info.uni-karlsruhe.de",

/* Active dump mask; 2 * DUMP_FINAL - 1 sets all flag bits, i.e. dump everything. */
73 static unsigned dump_flags = 2 * DUMP_FINAL - 1;

75 /* register allocator to use. */
76 static const be_ra_t *ra = &be_ra_chordal_allocator;

78 /* back end instruction set architecture to use */
79 static const arch_isa_if_t *isa_if = &ia32_isa_if;

/* Root of the backend's option group tree; filled in by be_opt_register(). */
82 static lc_opt_entry_t *be_grp_root = NULL;

84 /* possible dumping options */
/* NOTE(review): the "abi" item and the NULL terminator entry are not visible
 * in this excerpt; the for-loops below rely on a NULL-name sentinel. */
85 static const lc_opt_enum_mask_items_t dump_items[] = {
87 { "initial", DUMP_INITIAL },
89 { "sched", DUMP_SCHED },
90 { "prepared", DUMP_PREPARED },
91 { "regalloc", DUMP_RA },
92 { "final", DUMP_FINAL },
93 { "all", 2 * DUMP_FINAL - 1 },

97 /* register allocators */
98 static const lc_opt_enum_const_ptr_items_t ra_items[] = {
99 { "chordal", &be_ra_chordal_allocator },
100 { "external", &be_ra_external_allocator },

104 /* instruction set architectures. */
105 static const lc_opt_enum_const_ptr_items_t isa_items[] = {
106 { "firm", &firm_isa },
107 { "ia32", &ia32_isa_if },

/* Glue records binding the option variables above to their item tables,
 * as required by the lc_opts enum option machinery. */
111 static lc_opt_enum_mask_var_t dump_var = {
112 &dump_flags, dump_items

115 static lc_opt_enum_const_ptr_var_t ra_var = {
116 (const void **) &ra, ra_items

119 static lc_opt_enum_const_ptr_var_t isa_var = {
120 (const void **) &isa_if, isa_items

/* The backend's main option table, registered under the "be" group.
 * The ilp.* string options are only compiled in under WITH_ILP (the opening
 * #ifdef is missing from this excerpt; only the #endif is visible). */
123 static const lc_opt_table_entry_t be_main_options[] = {
124 LC_OPT_ENT_ENUM_MASK("dump", "dump irg on several occasions", &dump_var),
125 LC_OPT_ENT_ENUM_PTR("ra", "register allocator", &ra_var),
126 LC_OPT_ENT_ENUM_PTR("isa", "the instruction set architecture", &isa_var),
129 LC_OPT_ENT_STR ("ilp.server", "the ilp server name", be_options.ilp_server, sizeof(be_options.ilp_server)),
130 LC_OPT_ENT_STR ("ilp.solver", "the ilp solver name", be_options.ilp_solver, sizeof(be_options.ilp_solver)),
131 #endif /* WITH_ILP */
135 #endif /* WITH_LIBCORE */
/*
 * Register all backend options with the libcore option framework:
 * creates the "be" and "be/ra" option groups, installs the main option
 * table, then lets every known register allocator and ISA add its own
 * options. Only active when compiled with WITH_LIBCORE (the opening
 * #ifdef and the loop counter declaration are missing from this excerpt).
 */
137 void be_opt_register(void)
141 lc_opt_entry_t *be_grp_ra;

143 be_grp_root = lc_opt_get_grp(firm_opt_get_root(), "be");
144 be_grp_ra = lc_opt_get_grp(be_grp_root, "ra");

146 lc_opt_add_table(be_grp_root, be_main_options);

148 /* register allocator options */
/* iterate until the NULL-name sentinel entry of ra_items */
149 for(i = 0; ra_items[i].name != NULL; ++i) {
150 const be_ra_t *ra = ra_items[i].value;
151 ra->register_options(be_grp_ra);

154 /* register isa options */
155 for(i = 0; isa_items[i].name != NULL; ++i) {
156 const arch_isa_if_t *isa = isa_items[i].value;
157 isa->register_options(be_grp_root);
159 #endif /* WITH_LIBCORE */
/*
 * Initialize a backend main environment: zero it, set up its obstack,
 * debug module and architecture environment, and push the irn handlers
 * (ISA-specific handler if present, then the generic backend node handler,
 * then the Phi handler). Returns env (return statement not visible in
 * this excerpt).
 */
175 static be_main_env_t *be_init_env(be_main_env_t *env)
177 memset(env, 0, sizeof(*env));
178 obstack_init(&env->obst);
179 env->dbg = firm_dbg_register("be.main");
/* arch_env is allocated on the env's own obstack, so it is freed
 * together with the obstack in be_done_env(). */
180 env->arch_env = obstack_alloc(&env->obst, sizeof(env->arch_env[0]));
181 env->options = &be_options;

183 arch_env_init(env->arch_env, isa_if);

185 /* Register the irn handler of the architecture */
186 if (arch_isa_get_irn_handler(env->arch_env->isa))
187 arch_env_push_irn_handler(env->arch_env, arch_isa_get_irn_handler(env->arch_env->isa));

190 * Register the node handler of the back end infrastructure.
191 * This irn handler takes care of the platform independent
192 * spill, reload and perm nodes.
194 arch_env_push_irn_handler(env->arch_env, &be_node_irn_handler);
/* The Phi handler is pushed last and therefore consulted first. */
195 env->phi_handler = be_phi_handler_new(env->arch_env);
196 arch_env_push_irn_handler(env->arch_env, env->phi_handler);
/*
 * Tear down a backend main environment created by be_init_env():
 * finalize the ISA, free the Phi handler, and release everything
 * allocated on the env's obstack (including arch_env).
 */
201 static void be_done_env(be_main_env_t *env)
203 env->arch_env->isa->impl->done(env->arch_env->isa);
204 be_phi_handler_free(env->phi_handler);
205 obstack_free(&env->obst, NULL);
/*
 * Conditionally dump an irg: invoke the given dumper with the suffix
 * only if the corresponding DUMP_* bit is set in the global dump_flags.
 */
208 static void dump(int mask, ir_graph *irg, const char *suffix,
209 void (*dumper)(ir_graph *, const char *))
211 if(dump_flags & mask)
212 be_dump(irg, suffix, dumper);
216 * Prepare a backend graph for code generation.
/* Normalizes the graph (Proj nodes, single return), removes critical
 * control-flow edges, and resets the Phi handler. NOTE(review): the calls
 * computing dominance and activating the ir_edges are missing from this
 * excerpt (only their comments remain) — see the full file. */
218 static void prepare_graph(be_irg_t *birg)
220 ir_graph *irg = birg->irg;

222 /* Normalize proj nodes. */
223 normalize_proj_nodes(irg);

225 /* Make just one return node. */
226 normalize_one_return(irg);

228 /* Remove critical edges */
229 remove_critical_cf_edges(irg);

231 /* Compute the dominance information. */

235 /* Ensure, that the ir_edges are computed. */

238 /* check, if the dominance property is fulfilled. */
239 be_check_dominance(irg);

241 /* reset the phi handler. */
242 be_phi_handler_reset(birg->main_env->phi_handler);
246 * The Firm backend main loop.
247 * Do architecture specific lowering for all graphs
248 * and call the architecture specific code generator.
/* Pipeline per irg: init codegen -> prepare_graph -> ABI introduction ->
 * codegen graph preparation -> dead node elimination -> scheduling ->
 * register allocation -> stack bias fixup -> emit -> cleanup.
 * NOTE(review): the declarations of env, birg, i, n and isa, the
 * be_init_env()/be_done_env() calls, and the actual register allocator
 * invocation (via the global `ra`) are missing from this excerpt. */
250 static void be_main_loop(FILE *file_handle)
258 isa = arch_env_get_isa(env.arch_env);

/* process every graph in the program */
261 for(i = 0, n = get_irp_n_irgs(); i < n; ++i) {
262 ir_graph *irg = get_irp_irg(i);
263 const arch_code_generator_if_t *cg_if;

267 birg.main_env = &env;

269 DBG((env.dbg, LEVEL_2, "====> IRG: %F\n", irg));
270 dump(DUMP_INITIAL, irg, "-begin", dump_ir_block_graph);

272 /* set the current graph (this is important for several firm functions) */
273 current_ir_graph = birg.irg;

275 /* Get the code generator interface. */
276 cg_if = isa->impl->get_code_generator_if(isa);

278 /* get a code generator for this graph. */
279 birg.cg = cg_if->init(file_handle, &birg);

281 /* create the code generator and generate code. */
282 prepare_graph(&birg);

284 /* some transformations need to be done before abi introduce */
285 arch_code_generator_before_abi(birg.cg);

287 /* implement the ABI conventions. */
288 birg.abi = be_abi_introduce(&birg);
289 dump(DUMP_ABI, irg, "-abi", dump_ir_block_graph);

/* let the code generator transform the graph (instruction selection etc.) */
292 arch_code_generator_prepare_graph(birg.cg);

295 * Since the code generator made a lot of new nodes and skipped
296 * a lot of old ones, we should do dead node elimination here.
297 * Note that this requires disabling the edges here.
299 edges_deactivate(irg);
300 dead_node_elimination(irg);

303 /* Compute loop nesting information (for weighting copies) */
304 construct_cf_backedges(irg);

306 dump(DUMP_PREPARED, irg, "-prepared", dump_ir_block_graph);

308 /* add Keeps for should_be_different constrained nodes */
309 // assure_constraints(&birg);
310 dump(DUMP_PREPARED, irg, "-assured", dump_ir_block_graph);

312 /* Schedule the graphs. */
313 arch_code_generator_before_sched(birg.cg);
314 list_sched(env.arch_env, irg);

316 /* connect all stack modifying nodes together (see beabi.c) */
317 be_abi_fix_stack_nodes(birg.abi);
318 dump(DUMP_SCHED, irg, "-sched", dump_ir_block_graph_sched);

320 /* Verify the schedule */
321 sched_verify_irg(irg);

323 /* Do register allocation */
324 arch_code_generator_before_ra(birg.cg);
/* NOTE(review): the call performing register allocation itself
 * (presumably through the selected `ra`) is not visible here. */

326 dump(DUMP_RA, irg, "-ra", dump_ir_block_graph_sched);

328 arch_code_generator_after_ra(birg.cg);
/* now that spill slots are final, fix the stack frame offsets */
329 be_abi_fix_stack_bias(birg.abi);

/* emit code and release per-graph resources */
331 arch_code_generator_done(birg.cg);
332 dump(DUMP_FINAL, irg, "-end", dump_ir_block_graph_sched);
333 be_abi_free(birg.abi);
339 /* Main interface to the frontend. */
/* Disables pseudo irg visiting, then runs the backend main loop which
 * writes the generated code to file_handle. */
340 void be_main(FILE *file_handle)
342 /* never build code for pseudo irgs */
343 set_visit_pseudo_irgs(0);

346 be_main_loop(file_handle);
349 /** The debug info retriever function. */
350 static retrieve_dbg_func retrieve_dbg = NULL;

352 /* Sets a debug info retriever. */
/* NOTE(review): the assignment body (presumably `retrieve_dbg = func;`)
 * is missing from this excerpt. */
353 void be_set_debug_retrieve(retrieve_dbg_func func) {

357 /* Retrieve the debug info. */
/* Delegates to the registered retriever. NOTE(review): a NULL guard on
 * retrieve_dbg appears to be missing from this excerpt — as shown, calling
 * this before be_set_debug_retrieve() would dereference NULL; confirm
 * against the full file. */
358 const char *be_retrieve_dbg_info(const dbg_info *dbg, unsigned *line) {
360 return retrieve_dbg(dbg, line);