3 * @file firm_opt.c -- Firm-generating back end optimizations.
5 * (C) 2005-2007 Michael Beck beck@ipd.info.uni-karlsruhe.de
11 #include <libfirm/firm.h>
14 #include <libfirm/be.h>
15 #endif /* FIRM_BACKEND */
19 #include "firm_codegen.h"
20 #include "firm_cmdline.h"
21 #include "firm_timing.h"
23 #if defined(_DEBUG) || defined(FIRM_DEBUG)
24 #define DBG(x) dbg_printf x
27 #endif /* _DEBUG || FIRM_DEBUG */
30 /** dump all the graphs depending on cond */
/* NOTE(review): dumps every graph in the program; the dump variant is chosen
   by the firm_dump flags (no_blocks / extbb), timed under TV_VCG_DUMP. */
31 #define DUMP_ALL(cond, suffix) \
34 timer_push(TV_VCG_DUMP); \
35 if (firm_dump.no_blocks) \
36 dump_all_ir_graphs(dump_ir_graph, suffix); \
37 else if (firm_dump.extbb) \
38 dump_all_ir_graphs(dump_ir_extblock_graph, suffix);\
40 dump_all_ir_graphs(dump_ir_block_graph, suffix); \
45 /** dump all control flow graphs depending on cond */
/* NOTE(review): CFG-only dump of every graph, also timed under TV_VCG_DUMP. */
46 #define DUMP_ALL_CFG(cond, suffix) \
49 timer_push(TV_VCG_DUMP); \
50 dump_all_ir_graphs(dump_cfg, suffix); \
55 /** check all graphs depending on cond */
/* NOTE(review): verifies SSA form of every graph under the TV_VERIFY timer. */
56 #define CHECK_ALL(cond) \
60 timer_push(TV_VERIFY); \
61 for (i = get_irp_n_irgs() - 1; i >= 0; --i) \
62 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA); \
69 /** dump graphs irg depending on cond */
/* NOTE(review): single-graph counterpart of DUMP_ALL. */
70 #define DUMP_ONE(cond, irg, suffix) \
73 timer_push(TV_VCG_DUMP); \
74 if (firm_dump.no_blocks) \
75 dump_ir_graph(irg, suffix); \
76 else if (firm_dump.extbb) \
77 dump_ir_extblock_graph(irg, suffix); \
79 dump_ir_block_graph(irg, suffix); \
84 /** dump control flow graph irg depending on cond */
/* NOTE(review): single-graph counterpart of DUMP_ALL_CFG. */
85 #define DUMP_ONE_CFG(cond, irg, suffix) \
88 timer_push(TV_VCG_DUMP); \
89 dump_cfg(irg, suffix); \
94 /** check a graph irg depending on cond */
/* NOTE(review): single-graph counterpart of CHECK_ALL. */
95 #define CHECK_ONE(cond, irg) \
98 timer_push(TV_VERIFY); \
99 irg_verify(irg, VRFY_ENFORCE_SSA); \
105 /* set by the backend parameters */
/* NOTE(review): the statics below are filled in by gen_firm_init() from the
   selected backend's backend_params and consumed by the optimization and
   lowering phases in this file. */
106 static const ir_settings_arch_dep_t *ad_param = NULL;
/* backend-provided factory for architecture dependent intrinsics (may stay NULL) */
107 static create_intrinsic_fkt *arch_create_intrinsic = NULL;
/* opaque context handed to arch_create_intrinsic by the dword lowering setup */
108 static void *create_intrinsic_ctx = NULL;
/* backend parameters for if-conversion, passed to opt_if_conv() */
109 static const ir_settings_if_conv_t *if_conv_info = NULL;
/* non-zero when the selected backend supports inline assembler */
110 static unsigned char be_support_inline_asm = FALSE;
112 /* entities of runtime functions */
113 ir_entity_ptr rts_entities[rts_max];
116 * factory for setting architecture dependent parameters
118 static const ir_settings_arch_dep_t *arch_factory(void)
/* generic default parameters, used only when the backend supplied none */
120 static const ir_settings_arch_dep_t param = {
121 1, /* also use subs */
122 4, /* maximum shifts */
123 31, /* maximum shift amount */
124 NULL, /* use default evaluator */
128 32 /* Mulh allowed up to 32 bit */
/* prefer the backend's parameters (ad_param, set in gen_firm_init());
 * FIX: restored "&param" — the source contained the mojibake "¶m",
 * an encoding corruption of the address-of expression */
131 return ad_param ? ad_param : &param;
135 * Map runtime functions.
137 static void rts_map(void) {
138 static const struct {
139 ir_entity_ptr *ent; /**< address of the rts entity */
140 i_mapper_func func; /**< mapper function. */
/* NOTE(review): table pairing each known C runtime entity with the libFirm
   intrinsic mapper that can fold or simplify calls to it. */
143 { &rts_entities[rts_abs], i_mapper_abs },
144 { &rts_entities[rts_labs], i_mapper_abs },
145 { &rts_entities[rts_llabs], i_mapper_abs },
146 { &rts_entities[rts_imaxabs], i_mapper_abs },
148 /* double -> double */
149 { &rts_entities[rts_fabs], i_mapper_abs },
150 { &rts_entities[rts_sqrt], i_mapper_sqrt },
151 { &rts_entities[rts_cbrt], i_mapper_cbrt },
152 { &rts_entities[rts_pow], i_mapper_pow },
153 { &rts_entities[rts_exp], i_mapper_exp },
154 { &rts_entities[rts_exp2], i_mapper_exp },
155 { &rts_entities[rts_exp10], i_mapper_exp },
156 { &rts_entities[rts_log], i_mapper_log },
157 { &rts_entities[rts_log2], i_mapper_log2 },
158 { &rts_entities[rts_log10], i_mapper_log10 },
159 { &rts_entities[rts_sin], i_mapper_sin },
160 { &rts_entities[rts_cos], i_mapper_cos },
161 { &rts_entities[rts_tan], i_mapper_tan },
162 { &rts_entities[rts_asin], i_mapper_asin },
163 { &rts_entities[rts_acos], i_mapper_acos },
164 { &rts_entities[rts_atan], i_mapper_atan },
165 { &rts_entities[rts_sinh], i_mapper_sinh },
166 { &rts_entities[rts_cosh], i_mapper_cosh },
167 { &rts_entities[rts_tanh], i_mapper_tanh },
/* float -> float (the f-suffixed C99 math variants) */
170 { &rts_entities[rts_fabsf], i_mapper_abs },
171 { &rts_entities[rts_sqrtf], i_mapper_sqrt },
172 { &rts_entities[rts_cbrtf], i_mapper_cbrt },
173 { &rts_entities[rts_powf], i_mapper_pow },
174 { &rts_entities[rts_expf], i_mapper_exp },
175 { &rts_entities[rts_exp2f], i_mapper_exp },
176 { &rts_entities[rts_exp10f], i_mapper_exp },
177 { &rts_entities[rts_logf], i_mapper_log },
178 { &rts_entities[rts_log2f], i_mapper_log2 },
179 { &rts_entities[rts_log10f], i_mapper_log10 },
180 { &rts_entities[rts_sinf], i_mapper_sin },
181 { &rts_entities[rts_cosf], i_mapper_cos },
182 { &rts_entities[rts_tanf], i_mapper_tan },
183 { &rts_entities[rts_asinf], i_mapper_asin },
184 { &rts_entities[rts_acosf], i_mapper_acos },
185 { &rts_entities[rts_atanf], i_mapper_atan },
186 { &rts_entities[rts_sinhf], i_mapper_sinh },
187 { &rts_entities[rts_coshf], i_mapper_cosh },
188 { &rts_entities[rts_tanhf], i_mapper_tanh },
190 /* long double -> long double */
191 { &rts_entities[rts_fabsl], i_mapper_abs },
192 { &rts_entities[rts_sqrtl], i_mapper_sqrt },
193 { &rts_entities[rts_cbrtl], i_mapper_cbrt },
194 { &rts_entities[rts_powl], i_mapper_pow },
195 { &rts_entities[rts_expl], i_mapper_exp },
196 { &rts_entities[rts_exp2l], i_mapper_exp },
197 { &rts_entities[rts_exp10l], i_mapper_exp },
198 { &rts_entities[rts_logl], i_mapper_log },
199 { &rts_entities[rts_log2l], i_mapper_log2 },
200 { &rts_entities[rts_log10l], i_mapper_log10 },
201 { &rts_entities[rts_sinl], i_mapper_sin },
202 { &rts_entities[rts_cosl], i_mapper_cos },
203 { &rts_entities[rts_tanl], i_mapper_tan },
204 { &rts_entities[rts_asinl], i_mapper_asin },
205 { &rts_entities[rts_acosl], i_mapper_acos },
206 { &rts_entities[rts_atanl], i_mapper_atan },
207 { &rts_entities[rts_sinhl], i_mapper_sinh },
208 { &rts_entities[rts_coshl], i_mapper_cosh },
209 { &rts_entities[rts_tanhl], i_mapper_tanh },
/* string / memory runtime functions */
212 { &rts_entities[rts_memcpy], i_mapper_memcpy },
213 { &rts_entities[rts_memset], i_mapper_memset },
214 { &rts_entities[rts_strcmp], i_mapper_strcmp },
215 { &rts_entities[rts_strncmp], i_mapper_strncmp },
216 { &rts_entities[rts_strlen], i_mapper_strlen }
218 i_record rec[sizeof(mapper)/sizeof(mapper[0])];
/* collect only the runtime entities that were actually created for this
   translation unit (non-NULL entries) */
221 for (i = n_map = 0; i < sizeof(mapper)/sizeof(mapper[0]); ++i)
222 if (*mapper[i].ent != NULL) {
223 rec[n_map].i_call.kind = INTRINSIC_CALL;
224 rec[n_map].i_call.i_ent = *mapper[i].ent;
225 rec[n_map].i_call.i_mapper = mapper[i].func;
226 rec[n_map].i_call.ctx = NULL;
227 rec[n_map].i_call.link = NULL;
/* replace calls to the collected entities by their intrinsic mappers */
231 lower_intrinsics(rec, n_map, /* part_block_used=*/0);
234 static int *irg_dump_no;
/* NOTE(review): per-graph dump counter indexed by get_irg_idx(); allocated
   with calloc() in do_firm_optimizations(). */
236 static void dump_graph_count(ir_graph *const irg, const char *const suffix)
/* prefix the suffix with the graph's running dump number, e.g. "-03_lopt" */
239 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
240 DUMP_ONE(1, irg, name);
243 static void dump_graph_cfg_count(ir_graph *const irg, const char *const suffix)
/* CFG variant of dump_graph_count(): same per-graph numbering scheme,
   dumps through DUMP_ONE_CFG instead of DUMP_ONE */
246 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
247 DUMP_ONE_CFG(1, irg, name);
250 static void dump_all_count(const char *const suffix)
/* dump every graph in the program, each with its own running dump number */
252 const int n_irgs = get_irp_n_irgs();
255 for (i = 0; i < n_irgs; ++i)
256 dump_graph_count(get_irp_irg(i), suffix);
/* Counted variants of the dump macros above: the "_C" versions prepend a
   per-graph sequence number to the dump suffix (see dump_graph_count), so
   repeated dumps of the same graph during the pipeline stay ordered. */
259 #define DUMP_ONE_C(cond, irg, suffix) \
262 dump_graph_count((irg), (suffix)); \
266 #define DUMP_ONE_CFG_C(cond, irg, suffix) \
269 dump_graph_cfg_count((irg), (suffix)); \
273 #define DUMP_ALL_C(cond, suffix) \
276 dump_all_count((suffix)); \
281 * run all the Firm optimizations
283 * @param input_filename the name of the (main) source file
284 * @param firm_const_exists non-zero, if the const attribute was used on functions
286 static void do_firm_optimizations(const char *input_filename, int firm_const_exists)
288 ir_entity **keep_methods;
/* one dump counter per graph index, used by the DUMP_*_C macros */
293 irg_dump_no = calloc(get_irp_last_idx(), sizeof(*irg_dump_no));
/* forward the user's optimization switches into libFirm */
295 set_opt_strength_red(firm_opt.strength_red);
296 set_opt_scalar_replacement(firm_opt.scalar_replace);
297 set_opt_auto_create_sync(firm_opt.auto_sync);
298 set_opt_alias_analysis(firm_opt.alias_analysis);
/* configure the memory disambiguator from the alias-analysis options;
   no_alias overrides strict_alias entirely */
300 aa_opt = aa_opt_no_opt;
301 if (firm_opt.strict_alias)
302 aa_opt |= aa_opt_type_based | aa_opt_byte_type_may_alias;
303 if (firm_opt.no_alias)
304 aa_opt = aa_opt_no_alias;
306 set_irp_memory_disambiguator_options(aa_opt);
308 timer_start(TV_ALL_OPT);
310 if (firm_opt.remove_unused) {
311 /* Analysis that finds the free methods,
312 i.e. methods that are dereferenced.
313 Optimizes polymorphic calls :-). */
314 cgana(&arr_len, &keep_methods);
316 /* Remove methods that are never called. */
317 gc_irgs(arr_len, keep_methods);
322 if (! firm_opt.freestanding) {
324 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "rts");
325 CHECK_ALL(firm_opt.check_all);
/* interprocedural passes: tail recursion, function-call optimization,
   inlining — each guarded by its command-line switch */
328 if (firm_opt.tail_rec) {
329 timer_push(TV_TAIL_REC);
330 opt_tail_recursion();
333 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "tail_rec");
334 CHECK_ALL(firm_opt.check_all);
337 if (firm_opt.func_calls) {
338 timer_push(TV_REAL_FUNC_CALL);
339 optimize_funccalls(firm_const_exists);
341 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "func_call");
342 CHECK_ALL(firm_opt.check_all);
345 if (firm_opt.do_inline) {
346 timer_push(TV_INLINE);
347 inline_leave_functions(500, 80, 30, FALSE);
349 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "inl");
350 CHECK_ALL(firm_opt.check_all);
/* per-graph (intraprocedural) optimization pipeline; each pass dumps and
   verifies the graph when the corresponding flags are set */
353 for (i = 0; i < get_irp_n_irgs(); i++) {
354 irg = current_ir_graph = get_irp_irg(i);
358 /* If SIMD optimization is on, make sure we have only 1 return */
359 if (firm_ext_grs.create_pattern || firm_ext_grs.simd_opt)
360 normalize_one_return(irg);
365 if (firm_opt.modes) {
366 /* convert all modes into integer if possible */
367 arch_mode_conversion(irg, predefs.mode_uint);
370 timer_push(TV_SCALAR_REPLACE);
371 scalar_replacement_opt(irg);
373 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "scalar");
374 CHECK_ONE(firm_opt.check_all, irg);
376 timer_push(TV_LOCAL_OPT);
377 optimize_graph_df(irg);
379 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
380 CHECK_ONE(firm_opt.check_all, irg);
382 timer_push(TV_REASSOCIATION);
383 optimize_reassociation(irg);
385 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "reassoc");
386 CHECK_ONE(firm_opt.check_all, irg);
388 if (firm_opt.confirm) {
389 /* Confirm construction currently can only handle blocks with only one control
390 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
391 the optimization of switch constructs. */
392 timer_push(TV_CF_OPT);
395 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
396 CHECK_ONE(firm_opt.check_all, irg);
397 timer_push(TV_CONFIRM_CREATE);
398 construct_confirms(irg);
400 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "confirms");
401 CHECK_ONE(firm_opt.check_all, irg);
404 timer_push(TV_LOCAL_OPT);
405 optimize_graph_df(irg);
407 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
408 CHECK_ONE(firm_opt.check_all, irg);
411 CHECK_ONE(firm_opt.check_all, irg);
/* global code placement: temporarily enables global CSE for the dataflow
   optimizer, then switches it off again */
413 if (firm_opt.code_place) {
414 timer_push(TV_CODE_PLACE);
415 set_opt_global_cse(1);
416 optimize_graph_df(irg);
418 set_opt_global_cse(0);
420 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "place");
421 CHECK_ONE(firm_opt.check_all, irg);
424 if (firm_opt.luffig) {
426 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "ldst2");
429 timer_push(TV_CF_OPT);
432 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
433 CHECK_ONE(firm_opt.check_all, irg);
435 /* should we really remove the Confirm here? */
436 if (firm_opt.confirm) {
437 timer_push(TV_CONFIRM_CREATE);
438 remove_confirms(irg);
442 irg_verify(irg, VRFY_ENFORCE_SSA);
443 if (firm_opt.gvn_pre) {
445 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "pre");
446 CHECK_ONE(firm_opt.check_all, irg);
447 irg_verify(irg, VRFY_ENFORCE_SSA);
450 if (firm_opt.loop_unrolling) {
451 timer_push(TV_LOOP_UNROLL);
452 optimize_loop_unrolling(irg);
454 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "loop");
455 CHECK_ONE(firm_opt.check_all, irg);
458 if (firm_opt.load_store) {
459 timer_push(TV_LOAD_STORE);
460 optimize_load_store(irg);
462 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "ldst");
463 CHECK_ONE(firm_opt.check_all, irg);
/* lower high-level constructs (Sel, member access, ...) before the
   remaining scalar passes */
466 lower_highlevel_graph(irg);
468 if (firm_opt.deconv) {
469 timer_push(TV_DECONV);
472 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "deconv");
473 CHECK_ONE(firm_opt.check_all, irg);
476 if (firm_opt.cond_eval) {
477 timer_push(TV_COND_EVAL);
480 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cond_eval");
481 CHECK_ONE(firm_opt.check_all, irg);
485 compute_postdoms(irg);
486 DUMP_ONE_CFG_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "dom");
487 CHECK_ONE(firm_opt.check_all, irg);
489 construct_backedges(irg);
491 timer_push(TV_CF_OPT);
494 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
495 CHECK_ONE(firm_opt.check_all, irg);
/* if-conversion followed by a local-opt + control-flow cleanup round */
497 if (firm_opt.if_conversion) {
498 timer_push(TV_IF_CONV);
499 opt_if_conv(current_ir_graph, if_conv_info);
501 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "if");
502 CHECK_ONE(firm_opt.check_all, current_ir_graph);
504 timer_push(TV_LOCAL_OPT);
505 optimize_graph_df(current_ir_graph);
507 timer_push(TV_CF_OPT);
508 optimize_cf(current_ir_graph);
510 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "after_if");
511 CHECK_ONE(firm_opt.check_all, current_ir_graph);
514 if (firm_opt.bool_opt) {
516 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "bool");
517 CHECK_ONE(firm_opt.check_all, irg);
/* operator strength reduction */
521 opt_osr(current_ir_graph, osr_flag_default /*| osr_flag_ignore_x86_shift*/);
523 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "stred");
524 CHECK_ONE(firm_opt.check_all, irg);
526 timer_push(TV_LOCAL_OPT);
527 optimize_graph_df(irg);
529 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
530 CHECK_ONE(firm_opt.check_all, irg);
/* final per-graph cleanup: drop edges and dead nodes */
532 edges_deactivate(irg);
533 timer_push(TV_DEAD_NODE);
534 dead_node_elimination(irg);
536 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "dead");
537 CHECK_ONE(firm_opt.check_all, irg);
540 if (firm_opt.cloning) {
541 proc_cloning((float)firm_opt.clone_threshold);
542 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "clone");
545 if (firm_dump.ir_graph) {
546 /* recompute backedges for nicer dumps */
547 for (i = 0; i < get_irp_n_irgs(); i++)
548 construct_cf_backedges(get_irp_irg(i));
/* second dead-method collection: cloning/inlining may have orphaned graphs */
551 if (firm_opt.remove_unused) {
552 ir_entity **keep_methods;
555 /* Analysis that finds the free methods,
556 i.e. methods that are dereferenced.
557 Optimizes polymorphic calls :-). */
558 cgana(&arr_len, &keep_methods);
560 /* Remove methods that are never called. */
561 gc_irgs(arr_len, keep_methods);
567 DUMP_ALL(firm_dump.ir_graph, "-opt");
569 /* verify optimized graphs */
570 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
571 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA);
573 if (firm_dump.statistic & STAT_AFTER_OPT)
574 stat_dump_snapshot(input_filename, "opt");
576 timer_stop(TV_ALL_OPT);
577 } /* do_firm_optimizations */
580 * compute the size of a type (do implicit lowering)
582 * @param ty a Firm type
/* NOTE(review): returns non-zero on success; recursion on member/element
   types can fail when a size is not yet computable (see the array case). */
584 static int compute_type_size(ir_type *ty)
586 optimization_state_t state;
587 unsigned align_all = 1;
588 int n, size = 0, set = 0;
591 if (get_type_state(ty) == layout_fixed) {
592 /* do not layout already layouted types again */
596 if (is_Method_type(ty) || ty == get_glob_type()) {
597 /* no need for size calculation for method types or the global type */
601 DBG(("compute type size visiting: %s\n", get_type_name(ty)));
603 switch (get_type_tpop_code(ty)) {
/* compound (struct/class) case: lay members out sequentially, padding each
   to its own alignment and tracking the largest alignment seen */
606 for (i = 0, n = get_compound_n_members(ty); i < n; ++i) {
607 ir_entity *ent = get_compound_member(ty, i);
608 ir_type *ent_ty = get_entity_type(ent);
609 unsigned align, misalign;
611 /* compute member types */
612 if (! compute_type_size(ent_ty))
615 align = get_type_alignment_bytes(ent_ty);
616 align_all = align > align_all ? align : align_all;
617 misalign = (align ? size % align : 0);
618 size += (misalign ? align - misalign : 0);
620 set_entity_offset(ent, size);
621 size += get_type_size_bytes(ent_ty);
623 DBG((" member %s %s -> (size: %u, align: %u)\n",
624 get_type_name(ent_ty), get_entity_name(ent),
625 get_type_size_bytes(ent_ty), get_type_alignment_bytes(ent_ty)));
/* pad the whole struct to a multiple of its strictest member alignment */
627 if (align_all > 0 && size % align_all) {
628 DBG(("align of the struct member: %u, type size: %d\n", align_all, size));
629 size += align_all - (size % align_all);
630 DBG(("correcting type-size to %d\n", size));
632 set_type_alignment_bytes(ty, align_all);
/* union case: all members at offset 0, size is the largest member */
637 for (i = 0, n = get_union_n_members(ty); i < n; ++i) {
638 ir_entity *ent = get_union_member(ty, i);
640 if (! compute_type_size(get_entity_type(ent)))
642 s = get_type_size_bytes(get_entity_type(ent));
644 set_entity_offset(ent, 0);
645 size = (s > size ? s : size);
/* array case: fold the bound expressions to constants to get the element
   count per dimension, then multiply by the element size */
651 dims = get_array_n_dimensions(ty);
653 if (! compute_type_size(get_array_element_type(ty)))
658 save_optimization_state(&state);
660 set_opt_constant_folding(1);
662 for (i = 0; i < dims; ++i) {
663 ir_node *lower = get_array_lower_bound(ty, i);
664 ir_node *upper = get_array_upper_bound(ty, i);
665 ir_graph *rem = current_ir_graph;
666 tarval *tv_lower, *tv_upper;
/* fold the bounds inside the const-code graph, then restore */
668 current_ir_graph = get_const_code_irg();
669 local_optimize_node(lower);
670 local_optimize_node(upper);
671 current_ir_graph = rem;
673 tv_lower = computed_value(lower);
674 tv_upper = computed_value(upper);
676 if (tv_lower == tarval_bad || tv_upper == tarval_bad) {
678 * we cannot calculate the size of this array yet, it
679 * even might be unknown until the end, like argv[]
681 restore_optimization_state(&state);
685 size *= get_tarval_long(tv_upper) - get_tarval_long(tv_lower);
687 restore_optimization_state(&state);
689 DBG(("array %s -> (elements: %d, element type size: %d)\n",
691 size, get_type_size_bytes(get_array_element_type(ty))));
692 size *= get_type_size_bytes(get_array_element_type(ty));
/* commit the computed layout */
701 set_type_size_bytes(ty, size);
702 set_type_state(ty, layout_fixed);
705 DBG(("size: %d\n", get_type_size_bytes(ty)));
708 } /* compute_type_size */
711 * layout all types of the Firm graph
713 static void compute_type_sizes(void)
719 /* all frame types */
720 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
721 irg = get_irp_irg(i);
722 /* do not optimize away variables in debug mode */
723 if (firm_opt.debug_mode == DBG_MODE_NONE)
725 compute_type_size(get_irg_frame_type(irg));
728 /* all other types */
729 for (i = get_irp_n_types() - 1; i >= 0; --i) {
730 tp = get_irp_type(i);
731 compute_type_size(tp);
/* method types may carry a compound value-result type that needs layout too */
733 if (is_Method_type(tp)) {
734 tp = get_method_value_res_type(tp);
737 /* we have a value result type for this method, lower */
738 compute_type_size(tp);
742 } /* compute_type_sizes */
747 * @param input_filename the name of the (main) source file
749 static void do_firm_lowering(const char *input_filename)
/* Lowers the IR to backend level (dword lowering etc.) and runs a second,
   post-lowering optimization round over all graphs. */
753 /* do class lowering and vtbl creation */
754 // lower_classes_to_struct("vtbl", "m");
757 timer_push(TV_LOWER);
/* 64-bit (double-word) lowering, configured from the backend if it
   provided an intrinsic factory (see gen_firm_init) */
762 if (firm_opt.lower_ll) {
763 lwrdw_param_t init = {
768 def_create_intrinsic_fkt,
773 if (arch_create_intrinsic) {
774 init.create_intrinsic = arch_create_intrinsic;
775 init.ctx = create_intrinsic_ctx;
777 timer_push(TV_DW_LOWER);
779 DUMP_ALL(firm_dump.ir_graph, "-dw");
783 if (firm_dump.statistic & STAT_AFTER_LOWER)
784 stat_dump_snapshot(input_filename, "low");
786 /* verify lowered graphs */
787 timer_push(TV_VERIFY);
788 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
789 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA);
792 DUMP_ALL(firm_dump.ir_graph, "-low");
794 if (firm_opt.enabled) {
795 timer_start(TV_ALL_OPT);
797 /* run reassociation first on all graphs BEFORE the architecture dependent optimizations
799 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
800 current_ir_graph = get_irp_irg(i);
802 timer_push(TV_REASSOCIATION);
803 optimize_reassociation(current_ir_graph);
805 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "reassoc");
806 CHECK_ONE(firm_opt.check_all, current_ir_graph);
/* per-graph post-lowering pipeline: local opts (optionally with global CSE
   for code placement), load/store, control flow, if-conversion */
809 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
810 current_ir_graph = get_irp_irg(i);
812 if (firm_opt.code_place)
813 set_opt_global_cse(1);
815 timer_push(TV_LOCAL_OPT);
816 optimize_graph_df(current_ir_graph);
818 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
/* with global CSE active the graph is not in a verifiable state until
   place_code() has run, hence the check is skipped here */
819 if (! firm_opt.code_place)
820 CHECK_ONE(firm_opt.check_all, current_ir_graph);
822 if (firm_opt.code_place) {
823 timer_push(TV_CODE_PLACE);
824 place_code(current_ir_graph);
825 set_opt_global_cse(0);
827 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "place");
828 CHECK_ONE(firm_opt.check_all, current_ir_graph);
831 // set_opt_global_cse(0);
832 timer_push(TV_LOAD_STORE);
833 optimize_load_store(current_ir_graph);
835 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "ldst");
836 CHECK_ONE(firm_opt.check_all, current_ir_graph);
839 timer_push(TV_LOCAL_OPT);
840 optimize_graph_df(current_ir_graph);
842 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
844 timer_push(TV_CF_OPT);
845 optimize_cf(current_ir_graph);
847 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "cf");
848 CHECK_ONE(firm_opt.check_all, current_ir_graph);
850 if (firm_opt.if_conversion) {
851 timer_push(TV_IF_CONV);
852 opt_if_conv(current_ir_graph, if_conv_info);
854 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "if");
855 CHECK_ONE(firm_opt.check_all, current_ir_graph);
857 timer_push(TV_LOCAL_OPT);
858 optimize_graph_df(current_ir_graph);
860 timer_push(TV_CF_OPT);
861 optimize_cf(current_ir_graph);
863 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "after_if");
864 CHECK_ONE(firm_opt.check_all, current_ir_graph);
867 timer_stop(TV_ALL_OPT);
869 DUMP_ALL(firm_dump.ir_graph, "-low-opt");
873 mark_private_methods();
875 /* set the phase to low */
876 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
877 set_irg_phase_low(get_irp_irg(i));
879 /* all graphs are lowered, set the irp phase to low */
880 set_irp_phase_state(phase_low);
882 if (firm_dump.statistic & STAT_FINAL) {
883 stat_dump_snapshot(input_filename, "final");
885 } /* do_firm_lowering */
888 * Initialize for the Firm-generating back end.
890 void gen_firm_init(void)
/* Collects the statistics/backend parameters, initializes libFirm and
 * forwards the command-line optimization switches. Must run before any
 * IR is constructed. */
892 firm_parameter_t params;
894 unsigned pattern = 0;
896 /* the automatic state is only set if inlining is enabled */
897 firm_opt.auto_inline = firm_opt.do_inline;
899 if (firm_dump.stat_pattern)
900 pattern |= FIRMSTAT_PATTERN_ENABLED;
902 if (firm_dump.stat_dag)
903 pattern |= FIRMSTAT_COUNT_DAG;
/* FIX: restored "&params" — the source contained the mojibake "¶ms",
 * an encoding corruption of the address-of expression */
905 memset(&params, 0, sizeof(params));
906 params.size = sizeof(params);
907 params.enable_statistics = firm_dump.statistic == STAT_NONE ? 0 :
908 FIRMSTAT_ENABLED | FIRMSTAT_COUNT_STRONG_OP | FIRMSTAT_COUNT_CONSTS | pattern;
909 params.initialize_local_func = uninitialized_local_var;
910 params.cc_mask = 0; /* no regparam, cdecl */
911 params.builtin_dbg = NULL;
/* pull the architecture parameters out of the selected backend and stash
 * them in the file-level statics used by the optimization/lowering phases */
914 if (firm_be_opt.selection == BE_FIRM_BE) {
915 const backend_params *be_params = be_init();
917 be_support_inline_asm = be_params->support_inline_asm;
919 firm_opt.lower_ll = be_params->do_dw_lowering;
920 params.arch_op_settings = be_params->arch_op_settings;
922 arch_create_intrinsic = be_params->arch_create_intrinsic_fkt;
923 create_intrinsic_ctx = be_params->create_intrinsic_ctx;
925 ad_param = be_params->dep_param;
926 if_conv_info = be_params->if_conv_info;
928 #endif /* FIRM_BACKEND */
931 /* Activate Graph rewriting if SIMD optimization is turned on */
932 /* This has to be done before init_firm() is called! */
933 if (firm_ext_grs.simd_opt)
938 dbg_init(NULL, NULL, dbg_snprint);
939 edges_init_dbg(firm_opt.vrfy_edges);
940 //cbackend_set_debug_retrieve(dbg_retrieve);
942 set_opt_precise_exc_context(firm_opt.precise_exc);
943 set_opt_fragile_ops(firm_opt.fragile_ops);
945 /* dynamic dispatch works currently only if whole world scenarios */
946 set_opt_dyn_meth_dispatch(0);
948 arch_dep_init(arch_factory);
950 /* do not run architecture dependent optimizations in building phase */
951 arch_dep_set_opts(arch_dep_none);
953 do_node_verification(firm_opt.vrfy);
954 if (firm_dump.filter)
955 only_dump_method_with_name(new_id_from_str(firm_dump.filter));
/* forward the general optimization switches into libFirm */
957 if (firm_opt.enabled) {
959 set_opt_constant_folding(firm_opt.const_folding);
960 set_opt_cse(firm_opt.cse);
961 set_opt_global_cse (0);
962 set_opt_unreachable_code(1);
963 set_opt_control_flow(firm_opt.control_flow);
964 set_opt_control_flow_weak_simplification(1);
965 set_opt_control_flow_strong_simplification(1);
/* an environment variable may further restrict which methods get dumped */
970 dump_filter = getenv("FIRM_DUMP_FILTER");
972 only_dump_method_with_name(new_id_from_str(dump_filter));
974 /* do not dump entity ld names */
978 /* init the ycomp debugger extension */
979 if (firm_opt.ycomp_dbg)
980 firm_init_ycomp_debugger(firm_opt.ycomp_host, firm_opt.ycomp_port);
982 } /* gen_firm_init */
985 * Called, after the Firm generation is completed,
986 * do all optimizations and backend call here.
988 * @param out a file handle for the output, may be NULL
989 * @param input_filename the name of the (main) source file
990 * @param c_mode non-zero if "C" was compiled
991 * @param firm_const_exists non-zero, if the const attribute was used on functions
993 void gen_firm_finish(FILE *out, const char *input_filename, int c_mode, int firm_const_exists)
/* NOTE(review): c_mode is not referenced in the visible part of this
   function — confirm whether it is still needed. */
997 /* the general for dumping option must be set, or the others will not work */
998 firm_dump.ir_graph |= firm_dump.all_phases | firm_dump.extbb;
/* configure the VCG dumper */
1000 dump_keepalive_edges(1);
1001 dump_consts_local(1);
1002 dump_dominator_information(1);
1003 dump_loop_information(0);
1005 if (!firm_dump.edge_labels)
1006 turn_off_edge_labels();
1008 if (firm_dump.all_types) {
1011 dump_class_hierarchy(0, "");
1012 dump_class_hierarchy(1, "-with-entities");
1016 /* finalize all graphs */
1017 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1018 ir_graph *irg = get_irp_irg(i);
1020 irg_finalize_cons(irg);
1021 DUMP_ONE(firm_dump.ir_graph, irg, "");
1023 /* verify the graph */
1024 timer_push(TV_VERIFY);
1025 irg_verify(irg, VRFY_ENFORCE_SSA);
1029 timer_push(TV_VERIFY);
1033 /* all graphs are finalized, set the irp phase to high */
1034 set_irp_phase_state(phase_high);
1036 /* lower all compound call return values */
1037 lower_compound_params();
1039 /* computes the sizes of all types that are still not computed */
1040 compute_type_sizes();
1042 if (firm_dump.statistic & STAT_BEFORE_OPT) {
1043 stat_dump_snapshot(input_filename, "noopt");
/* high-level optimization round, then lowering to backend level */
1046 if (firm_opt.enabled)
1047 do_firm_optimizations(input_filename, firm_const_exists);
1049 if (firm_dump.gen_firm_asm) {
1050 timer_push(TV_FIRM_ASM);
1051 gen_Firm_assembler(input_filename);
1057 do_firm_lowering(input_filename);
1059 /* set the phase to low */
1060 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
1061 set_irg_phase_low(get_irp_irg(i));
1065 /** SIMD Optimization Extensions **/
1067 /* Pattern creation step. No code has to be generated, so
1068 exit after pattern creation */
1069 if (firm_ext_grs.create_pattern) {
1070 ext_grs_create_pattern();
1074 /* SIMD optimization step. Uses graph patterns to find
1075 rich instructions and rewrite */
1076 if (firm_ext_grs.simd_opt)
1080 /* enable architecture dependent optimizations */
1081 arch_dep_set_opts((firm_opt.muls ? arch_dep_mul_to_shift : arch_dep_none) |
1082 (firm_opt.divs ? arch_dep_div_by_const : arch_dep_none) |
1083 (firm_opt.mods ? arch_dep_mod_by_const : arch_dep_none) );
1086 if (firm_dump.statistic & STAT_FINAL_IR)
1087 stat_dump_snapshot(input_filename, "final-ir");
1089 /* run the code generator */
1090 if (firm_be_opt.selection != BE_NONE)
1091 do_codegen(out, input_filename);
1093 if (firm_dump.statistic & STAT_FINAL)
1094 stat_dump_snapshot(input_filename, "final");
/* shut down the ycomp debugger if it was started in gen_firm_init() */
1097 if (firm_opt.ycomp_dbg)
1098 firm_finish_ycomp_debugger();
1100 } /* gen_firm_finish */
1103 * Do very early initializations
1105 void firm_early_init(void) {
/* initialize libFirm's option machinery before command-line parsing */
1107 /* arg: need this here for command line options */
1110 firm_init_options(NULL, 0, NULL);
1111 } /* firm_early_init */