3 * @file firm_opt.c -- Firm-generating back end optimizations.
5 * (C) 2005-2010 Michael Beck beck@ipd.info.uni-karlsruhe.de
17 #include <libfirm/firm.h>
20 #include "firm_codegen.h"
21 #include "firm_cmdline.h"
22 #include "firm_timing.h"
#if defined(_DEBUG) || defined(FIRM_DEBUG)
/** Debug build: DBG((...)) forwards its argument list to dbg_printf. */
#define DBG(x) dbg_printf x
#else
/** Release build: DBG() expands to a no-op expression. */
#define DBG(x) ((void)0)
#endif /* _DEBUG || FIRM_DEBUG */
/* Timer accounting for vcg graph dumping. */
31 static ir_timer_t *t_vcg_dump;
/* Timer accounting for graph verification passes. */
32 static ir_timer_t *t_verify;
/* Timer accounting for the complete optimization phase. */
33 static ir_timer_t *t_all_opt;
/* Forward declaration: run the named optimization on one graph (defined below);
   returns true when the pass was enabled and executed. */
34 static bool do_irg_opt(ir_graph *irg, const char *name);
36 /** dump all the graphs depending on cond */
38 static void dump_all(const char *suffix) {
39 if (firm_dump.ir_graph) {
40 timer_push(t_vcg_dump);
41 if (firm_dump.no_blocks)
42 dump_all_ir_graphs(dump_ir_graph, suffix);
43 else if (firm_dump.extbb)
44 dump_all_ir_graphs(dump_ir_extblock_graph, suffix);
46 dump_all_ir_graphs(dump_ir_block_graph, suffix);
47 timer_pop(t_vcg_dump);
51 /* set by the backend parameters */
/* Architecture-dependent parameters supplied by the backend (NULL until set). */
52 static const ir_settings_arch_dep_t *ad_param = NULL;
/* Backend factory for intrinsic lowering functions, plus its context. */
53 static create_intrinsic_fkt *arch_create_intrinsic = NULL;
54 static void *create_intrinsic_ctx = NULL;
/* If-conversion settings supplied by the backend. */
55 static const ir_settings_if_conv_t *if_conv_info = NULL;
57 /* entities of runtime functions */
58 ir_entity_ptr rts_entities[rts_max];
61 * factory for setting architecture dependent parameters
63 static const ir_settings_arch_dep_t *arch_factory(void)
65 static const ir_settings_arch_dep_t param = {
66 1, /* also use subs */
67 4, /* maximum shifts */
68 31, /* maximum shift amount */
69 NULL, /* use default evaluator */
73 32 /* Mulh allowed up to 32 bit */
76 return ad_param ? ad_param : ¶m;
80 * Map runtime functions.
82 static void rts_map(void) {
84 ir_entity_ptr *ent; /**< address of the rts entity */
85 i_mapper_func func; /**< mapper function. */
88 { &rts_entities[rts_abs], i_mapper_abs },
89 { &rts_entities[rts_labs], i_mapper_abs },
90 { &rts_entities[rts_llabs], i_mapper_abs },
91 { &rts_entities[rts_imaxabs], i_mapper_abs },
93 /* double -> double */
94 { &rts_entities[rts_fabs], i_mapper_abs },
95 { &rts_entities[rts_sqrt], i_mapper_sqrt },
96 { &rts_entities[rts_cbrt], i_mapper_cbrt },
97 { &rts_entities[rts_pow], i_mapper_pow },
98 { &rts_entities[rts_exp], i_mapper_exp },
99 { &rts_entities[rts_exp2], i_mapper_exp },
100 { &rts_entities[rts_exp10], i_mapper_exp },
101 { &rts_entities[rts_log], i_mapper_log },
102 { &rts_entities[rts_log2], i_mapper_log2 },
103 { &rts_entities[rts_log10], i_mapper_log10 },
104 { &rts_entities[rts_sin], i_mapper_sin },
105 { &rts_entities[rts_cos], i_mapper_cos },
106 { &rts_entities[rts_tan], i_mapper_tan },
107 { &rts_entities[rts_asin], i_mapper_asin },
108 { &rts_entities[rts_acos], i_mapper_acos },
109 { &rts_entities[rts_atan], i_mapper_atan },
110 { &rts_entities[rts_sinh], i_mapper_sinh },
111 { &rts_entities[rts_cosh], i_mapper_cosh },
112 { &rts_entities[rts_tanh], i_mapper_tanh },
115 { &rts_entities[rts_fabsf], i_mapper_abs },
116 { &rts_entities[rts_sqrtf], i_mapper_sqrt },
117 { &rts_entities[rts_cbrtf], i_mapper_cbrt },
118 { &rts_entities[rts_powf], i_mapper_pow },
119 { &rts_entities[rts_expf], i_mapper_exp },
120 { &rts_entities[rts_exp2f], i_mapper_exp },
121 { &rts_entities[rts_exp10f], i_mapper_exp },
122 { &rts_entities[rts_logf], i_mapper_log },
123 { &rts_entities[rts_log2f], i_mapper_log2 },
124 { &rts_entities[rts_log10f], i_mapper_log10 },
125 { &rts_entities[rts_sinf], i_mapper_sin },
126 { &rts_entities[rts_cosf], i_mapper_cos },
127 { &rts_entities[rts_tanf], i_mapper_tan },
128 { &rts_entities[rts_asinf], i_mapper_asin },
129 { &rts_entities[rts_acosf], i_mapper_acos },
130 { &rts_entities[rts_atanf], i_mapper_atan },
131 { &rts_entities[rts_sinhf], i_mapper_sinh },
132 { &rts_entities[rts_coshf], i_mapper_cosh },
133 { &rts_entities[rts_tanhf], i_mapper_tanh },
135 /* long double -> long double */
136 { &rts_entities[rts_fabsl], i_mapper_abs },
137 { &rts_entities[rts_sqrtl], i_mapper_sqrt },
138 { &rts_entities[rts_cbrtl], i_mapper_cbrt },
139 { &rts_entities[rts_powl], i_mapper_pow },
140 { &rts_entities[rts_expl], i_mapper_exp },
141 { &rts_entities[rts_exp2l], i_mapper_exp },
142 { &rts_entities[rts_exp10l], i_mapper_exp },
143 { &rts_entities[rts_logl], i_mapper_log },
144 { &rts_entities[rts_log2l], i_mapper_log2 },
145 { &rts_entities[rts_log10l], i_mapper_log10 },
146 { &rts_entities[rts_sinl], i_mapper_sin },
147 { &rts_entities[rts_cosl], i_mapper_cos },
148 { &rts_entities[rts_tanl], i_mapper_tan },
149 { &rts_entities[rts_asinl], i_mapper_asin },
150 { &rts_entities[rts_acosl], i_mapper_acos },
151 { &rts_entities[rts_atanl], i_mapper_atan },
152 { &rts_entities[rts_sinhl], i_mapper_sinh },
153 { &rts_entities[rts_coshl], i_mapper_cosh },
154 { &rts_entities[rts_tanhl], i_mapper_tanh },
157 { &rts_entities[rts_strcmp], i_mapper_strcmp },
158 { &rts_entities[rts_strncmp], i_mapper_strncmp },
159 { &rts_entities[rts_strcpy], i_mapper_strcpy },
160 { &rts_entities[rts_strlen], i_mapper_strlen },
161 { &rts_entities[rts_memcpy], i_mapper_memcpy },
162 { &rts_entities[rts_mempcpy], i_mapper_mempcpy },
163 { &rts_entities[rts_memmove], i_mapper_memmove },
164 { &rts_entities[rts_memset], i_mapper_memset },
165 { &rts_entities[rts_memcmp], i_mapper_memcmp }
167 i_record rec[sizeof(mapper)/sizeof(mapper[0])];
170 for (i = n_map = 0; i < sizeof(mapper)/sizeof(mapper[0]); ++i) {
171 if (*mapper[i].ent != NULL) {
172 rec[n_map].i_call.kind = INTRINSIC_CALL;
173 rec[n_map].i_call.i_ent = *mapper[i].ent;
174 rec[n_map].i_call.i_mapper = mapper[i].func;
175 rec[n_map].i_call.ctx = NULL;
176 rec[n_map].i_call.link = NULL;
182 lower_intrinsics(rec, n_map, /* part_block_used=*/0);
185 static int *irg_dump_no;
187 static void dump_graph_count(ir_graph *const irg, const char *const suffix)
190 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++,
193 timer_push(t_vcg_dump);
194 if (firm_dump.no_blocks)
195 dump_ir_graph(irg, name);
196 else if (firm_dump.extbb)
197 dump_ir_extblock_graph(irg, name);
199 dump_ir_block_graph(irg, name);
200 timer_pop(t_vcg_dump);
203 static void remove_unused_functions(void)
205 ir_entity **keep_methods;
208 /* Analysis that finds the free methods,
209 i.e. methods that are dereferenced.
210 Optimizes polymorphic calls :-). */
211 cgana(&arr_len, &keep_methods);
213 /* Remove methods that are never called. */
214 gc_irgs(arr_len, keep_methods);
218 static int firm_const_exists;
220 static void do_optimize_funccalls(void)
222 optimize_funccalls(firm_const_exists, NULL);
/* Global CSE wrapper: temporarily enables the global-CSE flag, runs the
   local dataflow optimizer under it, then restores the default.
   NOTE(review): one statement between the optimizer call and the flag
   reset is not visible in this excerpt -- confirm against the full file. */
225 static void do_gcse(ir_graph *irg)
227 set_opt_global_cse(1);
228 optimize_graph_df(irg);
230 set_opt_global_cse(0);
233 static void do_lower_highlevel(ir_graph *irg)
235 lower_highlevel_graph(irg, firm_opt.lower_bitfields);
238 static void do_if_conv(ir_graph *irg)
240 opt_if_conv(irg, if_conv_info);
243 static void do_stred(ir_graph *irg)
245 opt_osr(irg, osr_flag_default | osr_flag_keep_reg_pressure | osr_flag_ignore_x86_shift);
248 static void after_inline_opt(ir_graph *irg)
250 do_irg_opt(irg, "scalar-replace");
251 do_irg_opt(irg, "local");
252 do_irg_opt(irg, "control-flow");
253 do_irg_opt(irg, "combo");
/* Inlining driver; size/benefit thresholds come from the command-line
   controlled firm_opt settings.
   NOTE(review): the trailing arguments of inline_functions() are not
   visible in this excerpt (presumably the after_inline_opt callback) --
   confirm against the full file. */
256 static void do_inline(void)
258 inline_functions(firm_opt.inline_maxsize, firm_opt.inline_threshold,
262 static void do_cloning(void)
264 proc_cloning((float) firm_opt.clone_threshold);
267 static void do_lower_switch(ir_graph *irg)
269 lower_switch(irg, firm_opt.spare_size);
272 static void do_lower_mux(ir_graph *irg)
274 lower_mux(irg, NULL);
/* Lower double-word (64-bit) operations to register-sized operations.
   Builds the lowering parameter set from the atomic modes and, when the
   backend supplied its own intrinsic factory, overrides the default
   creator/context with the backend's versions.
   NOTE(review): the first initializer fields of lwrdw_param_t and the
   final lower_dw_ops() call are not visible in this excerpt -- confirm
   against the full file. */
277 static void do_lower_dw_ops(void)
279 lwrdw_param_t init = {
282 get_atomic_mode(ATOMIC_TYPE_LONGLONG),
283 get_atomic_mode(ATOMIC_TYPE_ULONGLONG),
284 get_atomic_mode(ATOMIC_TYPE_INT),
285 get_atomic_mode(ATOMIC_TYPE_UINT),
/* default intrinsic factory; may be replaced by the backend below */
286 def_create_intrinsic_fkt,
290 if (arch_create_intrinsic) {
291 init.create_intrinsic = arch_create_intrinsic;
292 init.ctx = create_intrinsic_ctx;
/* Whether an optimization operates on one graph or the whole program. */
297 typedef enum opt_target {
298 OPT_TARGET_IRG, /**< optimization function works on a single graph */
299 OPT_TARGET_IRP /**< optimization function works on the complete program */
/* Per-optimization behaviour flags (bitset). */
302 typedef enum opt_flags {
304 OPT_FLAG_ENABLED = 1 << 0, /**< enable the optimization */
305 OPT_FLAG_NO_DUMP = 1 << 1, /**< don't dump after transformation */
306 OPT_FLAG_NO_VERIFY = 1 << 2, /**< don't verify after transformation */
307 OPT_FLAG_HIDE_OPTIONS = 1 << 3, /**< do not automatically process
308 -foptions for this transformation */
309 OPT_FLAG_ESSENTIAL = 1 << 4, /**< output won't work without this pass
310 so we need it even with -O0 */
/* Function-pointer flavours stored (type-erased) in opt_config_t.func. */
313 typedef void (*transform_irg_func)(ir_graph *irg);
314 typedef void (*transform_irp_func)(void);
315 typedef void (*func_ptr_t)(void);
/* Fragment of the opt_config_t struct definition; the surrounding fields
   (target, name, func, flags) are not visible in this excerpt. */
321 const char *description;
325 static opt_config_t opts[] = {
326 { OPT_TARGET_IRP, "rts", (func_ptr_t) rts_map, "optimization of known library functions", OPT_FLAG_HIDE_OPTIONS },
327 { OPT_TARGET_IRG, "combo", (func_ptr_t) combo, "combined CCE, UCE and GVN", OPT_FLAG_NONE},
328 { OPT_TARGET_IRG, "control-flow", (func_ptr_t) optimize_cf, "optimization of control-flow", OPT_FLAG_HIDE_OPTIONS },
329 { OPT_TARGET_IRG, "local", (func_ptr_t) optimize_graph_df, "local graph optimizations", OPT_FLAG_HIDE_OPTIONS },
330 { OPT_TARGET_IRP, "remove-unused", (func_ptr_t) remove_unused_functions, "removal of unused functions", OPT_FLAG_NO_DUMP | OPT_FLAG_NO_VERIFY },
331 { OPT_TARGET_IRP, "opt-tail-rec", (func_ptr_t) opt_tail_recursion, "tail-recursion eliminiation", OPT_FLAG_NONE },
332 { OPT_TARGET_IRP, "opt-func-call", (func_ptr_t) do_optimize_funccalls, "function call optimization", OPT_FLAG_NONE },
333 { OPT_TARGET_IRG, "lower", (func_ptr_t) do_lower_highlevel, "lowering", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_ESSENTIAL },
334 { OPT_TARGET_IRP, "lower-const", (func_ptr_t) lower_const_code, "lowering of constant code", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_NO_DUMP | OPT_FLAG_NO_VERIFY | OPT_FLAG_ESSENTIAL },
335 { OPT_TARGET_IRP, "lower-dw", (func_ptr_t) do_lower_dw_ops, "lowering of doubleword operations", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_ESSENTIAL },
336 { OPT_TARGET_IRG, "lower-switch", (func_ptr_t) do_lower_switch, "switch lowering", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_ESSENTIAL },
337 { OPT_TARGET_IRG, "one-return", (func_ptr_t) normalize_one_return, "normalisation to 1 return", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_NO_DUMP | OPT_FLAG_NO_VERIFY },
338 { OPT_TARGET_IRG, "scalar-replace", (func_ptr_t) scalar_replacement_opt, "scalar replacement", OPT_FLAG_NONE },
339 { OPT_TARGET_IRG, "reassociation", (func_ptr_t) optimize_reassociation, "reassociation", OPT_FLAG_NONE },
340 { OPT_TARGET_IRG, "gcse", (func_ptr_t) do_gcse, "global common subexpression elimination", OPT_FLAG_NONE },
341 { OPT_TARGET_IRG, "place", (func_ptr_t) place_code, "code placement", OPT_FLAG_NONE },
342 { OPT_TARGET_IRG, "confirm", (func_ptr_t) construct_confirms, "confirm optimisation", OPT_FLAG_HIDE_OPTIONS },
343 { OPT_TARGET_IRG, "opt-load-store", (func_ptr_t) optimize_load_store, "load store optimization", OPT_FLAG_NONE },
344 { OPT_TARGET_IRG, "parallelize-mem", (func_ptr_t) opt_parallelize_mem, "parallelize memory", OPT_FLAG_NONE },
345 { OPT_TARGET_IRG, "deconv", (func_ptr_t) conv_opt, "conv node elimination", OPT_FLAG_NONE },
346 { OPT_TARGET_IRG, "thread-jumps", (func_ptr_t) opt_jumpthreading, "path-sensitive jumpthreading", OPT_FLAG_NONE },
347 { OPT_TARGET_IRG, "remove-confirms", (func_ptr_t) remove_confirms, "confirm removal", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_NO_DUMP | OPT_FLAG_NO_VERIFY },
348 { OPT_TARGET_IRG, "gvn-pre", (func_ptr_t) do_gvn_pre, "global value numbering partial redundancy elimination", OPT_FLAG_NONE },
349 { OPT_TARGET_IRG, "if-conversion", (func_ptr_t) do_if_conv, "if-conversion", OPT_FLAG_NONE },
350 { OPT_TARGET_IRG, "bool", (func_ptr_t) opt_bool, "bool simplification", OPT_FLAG_NONE },
351 { OPT_TARGET_IRG, "shape-blocks", (func_ptr_t) shape_blocks, "block shaping", OPT_FLAG_NONE },
352 { OPT_TARGET_IRG, "ivopts", (func_ptr_t) do_stred, "induction variable strength reduction", OPT_FLAG_NONE },
353 { OPT_TARGET_IRG, "remove-phi-cycles", (func_ptr_t) remove_phi_cycles, "removal of phi cycles", OPT_FLAG_HIDE_OPTIONS },
354 { OPT_TARGET_IRG, "dead", (func_ptr_t) dead_node_elimination, "dead node elimination", OPT_FLAG_HIDE_OPTIONS | OPT_FLAG_NO_DUMP | OPT_FLAG_NO_VERIFY },
355 { OPT_TARGET_IRP, "inline", (func_ptr_t) do_inline, "inlining", OPT_FLAG_NONE },
356 { OPT_TARGET_IRP, "opt-proc-clone", (func_ptr_t) do_cloning, "procedure cloning", OPT_FLAG_NONE },
357 { OPT_TARGET_IRG, "invert-loops", (func_ptr_t) do_loop_inversion, "loop inversion", OPT_FLAG_NONE },
358 { OPT_TARGET_IRG, "peel-loops", (func_ptr_t) do_loop_peeling, "loop peeling", OPT_FLAG_NONE },
359 { OPT_TARGET_IRG, "lower-mux", (func_ptr_t) do_lower_mux, "mux lowering", OPT_FLAG_NONE },
361 static const int n_opts = sizeof(opts) / sizeof(opts[0]);
362 ir_timer_t *timers[sizeof(opts)/sizeof(opts[0])];
364 static opt_config_t *get_opt(const char *name)
367 for (i = 0; i < n_opts; ++i) {
368 opt_config_t *config = &opts[i];
369 if (strcmp(config->name, name) == 0)
376 static void set_opt_enabled(const char *name, bool enabled)
378 opt_config_t *config = get_opt(name);
379 config->flags = (config->flags & ~OPT_FLAG_ENABLED)
380 | (enabled ? OPT_FLAG_ENABLED : 0);
383 static bool get_opt_enabled(const char *name)
385 opt_config_t *config = get_opt(name);
386 return (config->flags & OPT_FLAG_ENABLED) != 0;
390 * perform an optimisation on a single graph
392 * @return true if something changed, false otherwise
394 static bool do_irg_opt(ir_graph *irg, const char *name)
396 transform_irg_func func;
398 opt_config_t *config = get_opt(name);
399 size_t n = config - opts;
400 assert(config != NULL);
401 assert(config->target == OPT_TARGET_IRG);
402 if (! (config->flags & OPT_FLAG_ENABLED))
406 old_irg = current_ir_graph;
407 current_ir_graph = irg;
409 func = (transform_irg_func) config->func;
411 timer_push(timers[n]);
413 timer_pop(timers[n]);
415 if (firm_dump.all_phases && firm_dump.ir_graph) {
416 dump_graph_count(irg, name);
419 if (firm_opt.check_all) {
420 timer_push(t_verify);
421 irg_verify(irg, VRFY_ENFORCE_SSA);
425 current_ir_graph = old_irg;
429 static void do_irp_opt(const char *name)
431 transform_irp_func func;
432 opt_config_t *config = get_opt(name);
433 size_t n = config - opts;
434 assert(config->target == OPT_TARGET_IRP);
435 if (! (config->flags & OPT_FLAG_ENABLED))
438 func = (transform_irp_func) config->func;
440 timer_push(timers[n]);
442 timer_pop(timers[n]);
444 if (firm_dump.ir_graph && firm_dump.all_phases) {
446 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
447 ir_graph *irg = get_irp_irg(i);
448 dump_graph_count(irg, name);
452 if (firm_opt.check_all) {
454 timer_push(t_verify);
455 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
456 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA);
/**
 * Enable transformations which should be always safe (and cheap) to perform.
 */
static void enable_safe_defaults(void)
{
  set_opt_enabled("remove-unused",     true);
  set_opt_enabled("opt-tail-rec",      true);
  set_opt_enabled("opt-func-call",     true);
  set_opt_enabled("reassociation",     true);
  set_opt_enabled("control-flow",      true);
  set_opt_enabled("local",             true);
  set_opt_enabled("lower-const",       true);
  set_opt_enabled("scalar-replace",    true);
  set_opt_enabled("place",             true);
  set_opt_enabled("confirm",           true);
  set_opt_enabled("opt-load-store",    true);
  set_opt_enabled("lower",             true);
  set_opt_enabled("deconv",            true);
  set_opt_enabled("remove-confirms",   true);
  set_opt_enabled("ivopts",            true);
  set_opt_enabled("dead",              true);
  set_opt_enabled("lower-switch",      true);
  set_opt_enabled("remove-phi-cycles", true);
}
488 * run all the Firm optimizations
490 * @param input_filename the name of the (main) source file
492 static void do_firm_optimizations(const char *input_filename)
/* configure alias analysis from the command-line settings */
497 set_opt_alias_analysis(firm_opt.alias_analysis);
499 aa_opt = aa_opt_no_opt;
500 if (firm_opt.strict_alias)
501 aa_opt |= aa_opt_type_based | aa_opt_byte_type_may_alias;
502 if (firm_opt.no_alias)
503 aa_opt = aa_opt_no_alias;
505 set_irp_memory_disambiguator_options(aa_opt);
507 /* parameter passing code should set them directly sometime... */
508 set_opt_enabled("rts", !firm_opt.freestanding);
509 set_opt_enabled("gcse", firm_opt.gcse);
/* gcse already performs placement, so "place" is only used without gcse */
510 set_opt_enabled("place", !firm_opt.gcse);
511 set_opt_enabled("confirm", firm_opt.confirm);
512 set_opt_enabled("remove-confirms", firm_opt.confirm);
514 /* osr supersedes remove_phi_cycles */
515 if (get_opt_enabled("ivopts"))
516 set_opt_enabled("remove-phi-cycles", false);
518 timer_start(t_all_opt);
522 /* first step: kill dead code */
523 for (i = 0; i < get_irp_n_irgs(); i++) {
524 ir_graph *irg = get_irp_irg(i);
525 do_irg_opt(irg, "combo");
526 do_irg_opt(irg, "local");
527 do_irg_opt(irg, "control-flow");
/* whole-program passes before the per-graph pipeline */
530 do_irp_opt("remove-unused");
531 do_irp_opt("opt-tail-rec");
532 do_irp_opt("opt-func-call");
533 do_irp_opt("lower-const");
/* main per-graph optimization pipeline */
535 for (i = 0; i < get_irp_n_irgs(); i++) {
536 ir_graph *irg = get_irp_irg(i);
538 do_irg_opt(irg, "scalar-replace");
539 do_irg_opt(irg, "invert-loops");
540 do_irg_opt(irg, "local");
541 do_irg_opt(irg, "reassociation");
542 do_irg_opt(irg, "local");
543 do_irg_opt(irg, "gcse");
545 if (firm_opt.confirm) {
546 /* Confirm construction currently can only handle blocks with only
547 one control flow predecessor. Calling optimize_cf here removes
548 Bad predecessors and help the optimization of switch constructs.
550 do_irg_opt(irg, "control-flow");
551 do_irg_opt(irg, "confirm");
552 do_irg_opt(irg, "local");
555 do_irg_opt(irg, "control-flow");
556 do_irg_opt(irg, "opt-load-store");
557 do_irg_opt(irg, "lower");
558 do_irg_opt(irg, "deconv");
559 do_irg_opt(irg, "thread-jumps");
560 do_irg_opt(irg, "remove-confirms");
561 do_irg_opt(irg, "gvn-pre");
562 do_irg_opt(irg, "place");
563 do_irg_opt(irg, "control-flow");
/* if-conversion changed the graph: clean up locally afterwards */
565 if (do_irg_opt(irg, "if-conversion")) {
566 do_irg_opt(irg, "local");
567 do_irg_opt(irg, "control-flow");
569 /* this doesn't make too much sense but tests the mux destruction... */
570 do_irg_opt(irg, "lower-mux");
572 do_irg_opt(irg, "bool");
573 do_irg_opt(irg, "shape-blocks");
574 do_irg_opt(irg, "lower-switch");
575 do_irg_opt(irg, "ivopts");
576 do_irg_opt(irg, "local");
577 do_irg_opt(irg, "dead");
/* inter-procedural passes, followed by a cleanup round per graph */
580 do_irp_opt("inline");
581 do_irp_opt("opt-proc-clone");
583 for (i = 0; i < get_irp_n_irgs(); i++) {
584 ir_graph *irg = get_irp_irg(i);
585 do_irg_opt(irg, "local");
586 do_irg_opt(irg, "control-flow");
587 do_irg_opt(irg, "thread-jumps");
588 do_irg_opt(irg, "local");
589 do_irg_opt(irg, "control-flow");
592 if (firm_dump.ir_graph) {
593 /* recompute backedges for nicer dumps */
594 for (i = 0; i < get_irp_n_irgs(); i++)
595 construct_cf_backedges(get_irp_irg(i));
598 do_irp_opt("remove-unused");
602 if (firm_dump.statistic & STAT_AFTER_OPT)
603 stat_dump_snapshot(input_filename, "opt");
605 timer_stop(t_all_opt);
611 * @param input_filename the name of the (main) source file
/* Lower the program representation towards the backend: doubleword
   lowering, a post-lowering optimization round, and the phase switch
   of all graphs (and the irp) to "low". */
613 static void do_firm_lowering(const char *input_filename)
617 do_irp_opt("lower-dw");
619 if (firm_dump.statistic & STAT_AFTER_LOWER)
620 stat_dump_snapshot(input_filename, "low");
624 if (firm_opt.enabled) {
625 timer_start(t_all_opt);
627 /* run reassociation first on all graphs BEFORE the architecture
628 dependent optimizations are enabled */
629 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
630 ir_graph *irg = get_irp_irg(i);
631 do_irg_opt(irg, "reassociation");
634 /* enable architecture dependent optimizations */
635 arch_dep_set_opts((arch_dep_opts_t)
636 ((firm_opt.muls ? arch_dep_mul_to_shift : arch_dep_none) |
637 (firm_opt.divs ? arch_dep_div_by_const : arch_dep_none) |
638 (firm_opt.mods ? arch_dep_mod_by_const : arch_dep_none) ));
/* second optimization round, now with arch-dependent rules active */
640 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
641 ir_graph *irg = get_irp_irg(i);
643 current_ir_graph = irg;
645 do_irg_opt(irg, "local");
646 do_irg_opt(irg, "gcse");
647 do_irg_opt(irg, "opt-load-store");
648 do_irg_opt(irg, "local");
649 do_irg_opt(irg, "control-flow");
651 if (do_irg_opt(irg, "if-conversion")) {
652 do_irg_opt(irg, "local");
653 do_irg_opt(irg, "control-flow");
656 do_irg_opt(irg, "parallelize-mem");
658 timer_stop(t_all_opt);
660 dump_all("-low-opt");
664 mark_private_methods();
666 /* set the phase to low */
667 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
668 set_irg_phase_low(get_irp_irg(i));
670 /* all graphs are lowered, set the irp phase to low */
671 set_irp_phase_state(phase_low);
673 if (firm_dump.statistic & STAT_FINAL) {
674 stat_dump_snapshot(input_filename, "final");
679 * Initialize for the Firm-generating back end.
/* Creates and registers the pass/verify/dump timers, configures
   statistics collection, picks up backend parameters, and sets the
   global libfirm optimization switches from firm_opt. */
681 void gen_firm_init(void)
683 firm_parameter_t params;
684 unsigned pattern = 0;
687 for (i = 0; i < n_opts; ++i) {
688 timers[i] = ir_timer_new();
689 timer_register(timers[i], opts[i].description);
691 t_verify = ir_timer_new();
692 timer_register(t_verify, "Firm: verify pass");
693 t_vcg_dump = ir_timer_new();
694 timer_register(t_vcg_dump, "Firm: vcg dumping");
695 t_all_opt = ir_timer_new();
696 timer_register(t_all_opt, "Firm: all optimizations");
698 if (firm_dump.stat_pattern)
699 pattern |= FIRMSTAT_PATTERN_ENABLED;
701 if (firm_dump.stat_dag)
702 pattern |= FIRMSTAT_COUNT_DAG;
/* bug fix: the source contained the mojibake "¶ms" (HTML entity
   corruption of "&params") -- restored below */
704 memset(&params, 0, sizeof(params));
705 params.size = sizeof(params);
706 params.enable_statistics = firm_dump.statistic == STAT_NONE ? 0 :
707 FIRMSTAT_ENABLED | FIRMSTAT_COUNT_STRONG_OP | FIRMSTAT_COUNT_CONSTS
709 params.initialize_local_func = uninitialized_local_var;
710 params.cc_mask = 0; /* no regparam, cdecl */
/* pull settings supplied by the selected backend */
714 if (firm_be_opt.selection == BE_FIRM_BE) {
715 const backend_params *be_params = be_get_backend_param();
717 if (be_params->do_dw_lowering)
718 set_opt_enabled("lower-dw", true);
720 arch_create_intrinsic = be_params->arch_create_intrinsic_fkt;
721 create_intrinsic_ctx = be_params->create_intrinsic_ctx;
723 ad_param = be_params->dep_param;
724 if_conv_info = be_params->if_conv_info;
727 edges_init_dbg(firm_opt.vrfy_edges);
729 /* Sel node cannot produce NULL pointers */
730 set_opt_sel_based_null_check_elim(1);
732 /* dynamic dispatch works currently only if whole world scenarios */
733 set_opt_dyn_meth_dispatch(0);
735 arch_dep_init(arch_factory);
737 /* do not run architecture dependent optimizations in building phase */
738 arch_dep_set_opts(arch_dep_none);
740 do_node_verification((firm_verification_t) firm_opt.vrfy);
741 if (firm_dump.filter)
742 only_dump_method_with_name(new_id_from_str(firm_dump.filter));
/* global libfirm optimization switches from the command line */
744 if (firm_opt.enabled) {
746 set_opt_constant_folding(firm_opt.const_folding);
747 set_opt_algebraic_simplification(firm_opt.const_folding);
748 set_opt_cse(firm_opt.cse);
749 set_opt_global_cse(0);
750 set_opt_unreachable_code(1);
751 set_opt_control_flow(firm_opt.control_flow);
752 set_opt_control_flow_weak_simplification(1);
753 set_opt_control_flow_strong_simplification(1);
758 /* do not dump entity ld names */
763 * Called, after the Firm generation is completed,
764 * do all optimizations and backend call here.
766 * @param out a file handle for the output, may be NULL
767 * @param input_filename the name of the (main) source file
768 * @param c_mode non-zero if "C" was compiled
769 * @param new_firm_const_exists non-zero, if the const attribute was used on functions
771 void gen_firm_finish(FILE *out, const char *input_filename, int c_mode,
772 int new_firm_const_exists)
/* optional statistics-event stream, named after the input file */
777 if (firm_opt.enable_statev) {
779 snprintf(buf, sizeof(buf), "%s.ev", input_filename);
780 ir_stat_ev_begin(input_filename, firm_opt.statev_filter);
781 ir_stat_ev_compilation_unit(input_filename);
785 firm_const_exists = new_firm_const_exists;
787 /* the general for dumping option must be set, or the others will not work*/
789 = (a_byte) (firm_dump.ir_graph | firm_dump.all_phases | firm_dump.extbb);
791 dump_keepalive_edges(1);
792 dump_consts_local(1);
793 dump_dominator_information(1);
794 dump_loop_information(0);
796 if (!firm_dump.edge_labels)
797 turn_off_edge_labels();
799 /* FIXME: cloning might ADD new graphs. */
/* per-graph dump counters, indexed by graph index (used by dump_graph_count) */
800 irg_dump_no = calloc(get_irp_last_idx(), sizeof(*irg_dump_no));
802 if (firm_dump.all_types) {
805 dump_class_hierarchy(0, "");
806 dump_class_hierarchy(1, "-with-entities");
810 /* finalize all graphs */
811 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
812 ir_graph *irg = get_irp_irg(i);
813 irg_finalize_cons(irg);
817 timer_push(t_verify);
821 /* all graphs are finalized, set the irp phase to high */
822 set_irp_phase_state(phase_high);
824 /* BEWARE: kill unreachable code before doing compound lowering */
825 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
826 ir_graph *irg = get_irp_irg(i);
827 do_irg_opt(irg, "control-flow");
830 /* lower all compound call return values */
831 lower_compound_params();
833 /* lower copyb nodes */
834 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
835 ir_graph *irg = get_irp_irg(i);
836 lower_CopyB(irg, 128, 4);
839 if (firm_dump.statistic & STAT_BEFORE_OPT) {
840 stat_dump_snapshot(input_filename, "noopt");
/* optimize, then lower towards the backend */
843 if (firm_opt.enabled)
844 do_firm_optimizations(input_filename);
847 do_firm_lowering(input_filename);
849 /* set the phase to low */
850 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
851 set_irg_phase_low(get_irp_irg(i));
853 if (firm_dump.statistic & STAT_FINAL_IR)
854 stat_dump_snapshot(input_filename, "final-ir");
856 /* run the code generator */
857 if (firm_be_opt.selection != BE_NONE)
858 do_codegen(out, input_filename);
860 if (firm_dump.statistic & STAT_FINAL)
861 stat_dump_snapshot(input_filename, "final");
864 void disable_all_opts(void)
866 for (int i = 0; i < n_opts; ++i) {
867 opt_config_t *config = &opts[i];
868 if (config->flags & OPT_FLAG_ESSENTIAL) {
869 config->flags |= OPT_FLAG_ENABLED;
871 config->flags &= ~OPT_FLAG_ENABLED;
876 int firm_opt_option(const char *opt)
879 if (strncmp(opt, "no-", 3) == 0) {
884 opt_config_t *config = get_opt(opt);
885 if (config == NULL || (config->flags & OPT_FLAG_HIDE_OPTIONS))
888 config->flags &= ~OPT_FLAG_ENABLED;
889 config->flags |= enable ? OPT_FLAG_ENABLED : 0;
893 void firm_opt_option_help(void)
897 for (i = 0; i < n_opts; ++i) {
901 const opt_config_t *config = &opts[i];
902 if (config->flags & OPT_FLAG_HIDE_OPTIONS)
905 snprintf(buf2, sizeof(buf2), "firm: enable %s", config->description);
906 print_option_help(config->name, buf2);
907 snprintf(buf, sizeof(buf), "no-%s", config->name);
908 snprintf(buf2, sizeof(buf2), "firm: disable %s", config->description);
909 print_option_help(buf, buf2);
914 * Do very early initializations
916 void firm_early_init(void)
918 /* arg: need this here for command line options */
921 enable_safe_defaults();