/**
 * @file firm_opt.c -- Firm-generating back end optimizations.
 *
 * (C) 2005-2007 Michael Beck beck@ipd.info.uni-karlsruhe.de
 */
11 #include <libfirm/firm.h>
14 #include <libfirm/be.h>
15 #endif /* FIRM_BACKEND */
19 #include "firm_codegen.h"
20 #include "firm_cmdline.h"
21 #include "firm_timing.h"
24 #define snprintf _snprintf
27 #if defined(_DEBUG) || defined(FIRM_DEBUG)
28 #define DBG(x) dbg_printf x
31 #endif /* _DEBUG || FIRM_DEBUG */
/* NOTE(review): this listing has gaps — the do/while wrappers, else branches
   and timer_pop lines of these macros are among the elided original lines. */
34 /** If cond holds, dump every graph of the program with the given dump-file suffix; picks plain, extended-basic-block or per-block dumper from firm_dump flags (timed under TV_VCG_DUMP). */
35 #define DUMP_ALL(cond, suffix) \
38 timer_push(TV_VCG_DUMP); \
39 if (firm_dump.no_blocks) \
40 dump_all_ir_graphs(dump_ir_graph, suffix); \
41 else if (firm_dump.extbb) \
42 dump_all_ir_graphs(dump_ir_extblock_graph, suffix);\
44 dump_all_ir_graphs(dump_ir_block_graph, suffix); \
49 /** If cond holds, dump the control-flow graph of every program graph with the given suffix (timed under TV_VCG_DUMP). */
50 #define DUMP_ALL_CFG(cond, suffix) \
53 timer_push(TV_VCG_DUMP); \
54 dump_all_ir_graphs(dump_cfg, suffix); \
59 /** If cond holds, run the SSA-enforcing verifier over every graph of the program (timed under TV_VERIFY). */
60 #define CHECK_ALL(cond) \
64 timer_push(TV_VERIFY); \
65 for (ii = get_irp_n_irgs() - 1; ii >= 0; --ii) \
66 irg_verify(get_irp_irg(ii), VRFY_ENFORCE_SSA); \
73 /** If cond holds, dump the single graph irg with the given suffix; dump style chosen from firm_dump flags as in DUMP_ALL (timed under TV_VCG_DUMP). */
74 #define DUMP_ONE(cond, irg, suffix) \
77 timer_push(TV_VCG_DUMP); \
78 if (firm_dump.no_blocks) \
79 dump_ir_graph(irg, suffix); \
80 else if (firm_dump.extbb) \
81 dump_ir_extblock_graph(irg, suffix); \
83 dump_ir_block_graph(irg, suffix); \
88 /** If cond holds, dump the control-flow graph of the single graph irg with the given suffix (timed under TV_VCG_DUMP). */
89 #define DUMP_ONE_CFG(cond, irg, suffix) \
92 timer_push(TV_VCG_DUMP); \
93 dump_cfg(irg, suffix); \
98 /** If cond holds, run the SSA-enforcing verifier on the single graph irg (timed under TV_VERIFY). */
99 #define CHECK_ONE(cond, irg) \
102 timer_push(TV_VERIFY); \
103 irg_verify(irg, VRFY_ENFORCE_SSA); \
109 /* Backend-supplied configuration, filled in by gen_firm_init() from be_init()'s
   backend_params; all stay NULL when no Firm backend is selected. */
110 static const ir_settings_arch_dep_t *ad_param = NULL;
111 static create_intrinsic_fkt *arch_create_intrinsic = NULL;
112 static void *create_intrinsic_ctx = NULL;
113 static const ir_settings_if_conv_t *if_conv_info = NULL;
115 /* Entities of the C runtime functions recognized by rts_map(); indexed by the
   rts_* enum, one slot per mappable function (abs/sqrt/... in several widths). */
116 ir_entity_ptr rts_entities[rts_max];
119 * factory for setting architecture dependent parameters
121 static const ir_settings_arch_dep_t *arch_factory(void)
123 static const ir_settings_arch_dep_t param = {
124 1, /* also use subs */
125 4, /* maximum shifts */
126 31, /* maximum shift amount */
127 NULL, /* use default evaluator */
131 32 /* Mulh allowed up to 32 bit */
134 return ad_param ? ad_param : ¶m;
138 * Map runtime functions.
/* Registers intrinsic lowerings for well-known C runtime functions: every
   rts_entities[] slot that the front end actually resolved gets an i_record
   pairing the entity with its i_mapper_* handler, and lower_intrinsics()
   then replaces calls to those functions by Firm graph code.
   NOTE(review): the listing elides the struct/array closers, the loop-index
   declarations and the block-closing braces. */
140 static void rts_map(void) {
141 static const struct {
142 ir_entity_ptr *ent; /**< address of the rts entity */
143 i_mapper_func func; /**< mapper function. */
/* integer absolute-value functions (abs, labs, llabs, imaxabs) */
146 { &rts_entities[rts_abs], i_mapper_abs },
147 { &rts_entities[rts_labs], i_mapper_abs },
148 { &rts_entities[rts_llabs], i_mapper_abs },
149 { &rts_entities[rts_imaxabs], i_mapper_abs },
151 /* double -> double */
152 { &rts_entities[rts_fabs], i_mapper_abs },
153 { &rts_entities[rts_sqrt], i_mapper_sqrt },
154 { &rts_entities[rts_cbrt], i_mapper_cbrt },
155 { &rts_entities[rts_pow], i_mapper_pow },
156 { &rts_entities[rts_exp], i_mapper_exp },
157 { &rts_entities[rts_exp2], i_mapper_exp },
158 { &rts_entities[rts_exp10], i_mapper_exp },
159 { &rts_entities[rts_log], i_mapper_log },
160 { &rts_entities[rts_log2], i_mapper_log2 },
161 { &rts_entities[rts_log10], i_mapper_log10 },
162 { &rts_entities[rts_sin], i_mapper_sin },
163 { &rts_entities[rts_cos], i_mapper_cos },
164 { &rts_entities[rts_tan], i_mapper_tan },
165 { &rts_entities[rts_asin], i_mapper_asin },
166 { &rts_entities[rts_acos], i_mapper_acos },
167 { &rts_entities[rts_atan], i_mapper_atan },
168 { &rts_entities[rts_sinh], i_mapper_sinh },
169 { &rts_entities[rts_cosh], i_mapper_cosh },
170 { &rts_entities[rts_tanh], i_mapper_tanh },
/* the "f"-suffixed (float) variants of the functions above */
173 { &rts_entities[rts_fabsf], i_mapper_abs },
174 { &rts_entities[rts_sqrtf], i_mapper_sqrt },
175 { &rts_entities[rts_cbrtf], i_mapper_cbrt },
176 { &rts_entities[rts_powf], i_mapper_pow },
177 { &rts_entities[rts_expf], i_mapper_exp },
178 { &rts_entities[rts_exp2f], i_mapper_exp },
179 { &rts_entities[rts_exp10f], i_mapper_exp },
180 { &rts_entities[rts_logf], i_mapper_log },
181 { &rts_entities[rts_log2f], i_mapper_log2 },
182 { &rts_entities[rts_log10f], i_mapper_log10 },
183 { &rts_entities[rts_sinf], i_mapper_sin },
184 { &rts_entities[rts_cosf], i_mapper_cos },
185 { &rts_entities[rts_tanf], i_mapper_tan },
186 { &rts_entities[rts_asinf], i_mapper_asin },
187 { &rts_entities[rts_acosf], i_mapper_acos },
188 { &rts_entities[rts_atanf], i_mapper_atan },
189 { &rts_entities[rts_sinhf], i_mapper_sinh },
190 { &rts_entities[rts_coshf], i_mapper_cosh },
191 { &rts_entities[rts_tanhf], i_mapper_tanh },
193 /* long double -> long double */
194 { &rts_entities[rts_fabsl], i_mapper_abs },
195 { &rts_entities[rts_sqrtl], i_mapper_sqrt },
196 { &rts_entities[rts_cbrtl], i_mapper_cbrt },
197 { &rts_entities[rts_powl], i_mapper_pow },
198 { &rts_entities[rts_expl], i_mapper_exp },
199 { &rts_entities[rts_exp2l], i_mapper_exp },
200 { &rts_entities[rts_exp10l], i_mapper_exp },
201 { &rts_entities[rts_logl], i_mapper_log },
202 { &rts_entities[rts_log2l], i_mapper_log2 },
203 { &rts_entities[rts_log10l], i_mapper_log10 },
204 { &rts_entities[rts_sinl], i_mapper_sin },
205 { &rts_entities[rts_cosl], i_mapper_cos },
206 { &rts_entities[rts_tanl], i_mapper_tan },
207 { &rts_entities[rts_asinl], i_mapper_asin },
208 { &rts_entities[rts_acosl], i_mapper_acos },
209 { &rts_entities[rts_atanl], i_mapper_atan },
210 { &rts_entities[rts_sinhl], i_mapper_sinh },
211 { &rts_entities[rts_coshl], i_mapper_cosh },
212 { &rts_entities[rts_tanhl], i_mapper_tanh },
/* string/memory functions */
215 { &rts_entities[rts_memcpy], i_mapper_memcpy },
216 { &rts_entities[rts_memset], i_mapper_memset },
217 { &rts_entities[rts_strcmp], i_mapper_strcmp },
218 { &rts_entities[rts_strncmp], i_mapper_strncmp },
219 { &rts_entities[rts_strlen], i_mapper_strlen }
/* rec[] holds one i_record per *resolved* entity; n_map counts how many
   entries were actually filled (entities never referenced stay NULL). */
221 i_record rec[sizeof(mapper)/sizeof(mapper[0])];
224 for (i = n_map = 0; i < sizeof(mapper)/sizeof(mapper[0]); ++i)
225 if (*mapper[i].ent != NULL) {
226 rec[n_map].i_call.kind = INTRINSIC_CALL;
227 rec[n_map].i_call.i_ent = *mapper[i].ent;
228 rec[n_map].i_call.i_mapper = mapper[i].func;
229 rec[n_map].i_call.ctx = NULL;
230 rec[n_map].i_call.link = NULL;
/* replace calls to the collected entities in all graphs */
234 lower_intrinsics(rec, n_map, /* part_block_used=*/0);
237 static int *irg_dump_no;
/* Dumps irg with a suffix prefixed by a per-graph running phase counter
   ("-NN_suffix"), so successive phase dumps of the same graph sort in order.
   irg_dump_no[] is indexed by get_irg_idx() and allocated (calloc'd to zero)
   in do_firm_optimizations().
   NOTE(review): the opening brace and the local name[] buffer declaration
   are among the lines elided from this listing. */
239 static void dump_graph_count(ir_graph *const irg, const char *const suffix)
242 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
243 DUMP_ONE(1, irg, name);
/* Like dump_graph_count(), but dumps the control-flow graph of irg;
   shares the same per-graph counter so CFG and full dumps interleave
   in phase order. */
246 static void dump_graph_cfg_count(ir_graph *const irg, const char *const suffix)
249 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
250 DUMP_ONE_CFG(1, irg, name);
/* Applies dump_graph_count() (counted phase dump) to every graph of the
   program under the given suffix. */
253 static void dump_all_count(const char *const suffix)
255 const int n_irgs = get_irp_n_irgs();
258 for (i = 0; i < n_irgs; ++i)
259 dump_graph_count(get_irp_irg(i), suffix);
/** If cond holds, dump irg through the counted dumper (phase-number prefix). */
262 #define DUMP_ONE_C(cond, irg, suffix) \
265 dump_graph_count((irg), (suffix)); \
/** If cond holds, dump irg's CFG through the counted dumper. */
269 #define DUMP_ONE_CFG_C(cond, irg, suffix) \
272 dump_graph_cfg_count((irg), (suffix)); \
/** If cond holds, dump all graphs through the counted dumper. */
276 #define DUMP_ALL_C(cond, suffix) \
279 dump_all_count((suffix)); \
284 * run all the Firm optimizations
286 * @param input_filename the name of the (main) source file
287 * @param firm_const_exists non-zero, if the const attribute was used on functions
/* Runs the whole middle-end pipeline: whole-program passes (unused-method
   removal, function-call optimization), then a long per-graph pass sequence,
   then interprocedural passes (inlining, cloning, tail recursion), and a final
   verify + optional statistics snapshot. All timed under TV_ALL_OPT.
   NOTE(review): this listing has gaps — timer_pop() calls, some pass
   invocations and closing braces are among the elided original lines. */
289 static void do_firm_optimizations(const char *input_filename, int firm_const_exists)
295 /* FIXME: cloning might ADD new graphs. */
296 irg_dump_no = calloc(get_irp_last_idx(), sizeof(*irg_dump_no));
/* propagate command-line switches into libfirm's global option state */
298 set_opt_strength_red(firm_opt.strength_red);
299 set_opt_scalar_replacement(firm_opt.scalar_replace);
300 set_opt_auto_create_sync(firm_opt.auto_sync);
301 set_opt_alias_analysis(firm_opt.alias_analysis);
/* build the alias-analysis option mask; no_alias overrides strict_alias */
303 aa_opt = aa_opt_no_opt;
304 if (firm_opt.strict_alias)
305 aa_opt |= aa_opt_type_based | aa_opt_byte_type_may_alias;
306 if (firm_opt.no_alias)
307 aa_opt = aa_opt_no_alias;
309 set_irp_memory_disambiguator_options(aa_opt);
311 timer_start(TV_ALL_OPT);
313 if (firm_opt.remove_unused) {
314 ir_entity **keep_methods;
317 /* Analysis that finds the free methods,
318 i.e. methods that are dereferenced.
319 Optimizes polymorphic calls :-). */
320 cgana(&arr_len, &keep_methods);
322 /* Remove methods that are never called. */
323 gc_irgs(arr_len, keep_methods);
/* hosted (non-freestanding) mode: map C runtime calls to intrinsics */
328 if (! firm_opt.freestanding) {
330 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "rts");
331 CHECK_ALL(firm_opt.check_all);
334 /* first step: kill dead code */
335 for (i = 0; i < get_irp_n_irgs(); i++) {
336 irg = current_ir_graph = get_irp_irg(i);
337 /* Confirm construction currently can only handle blocks with only one control
338 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
339 the optimization of switch constructs. */
340 timer_push(TV_CF_OPT);
341 optimize_graph_df(irg);
346 if (firm_opt.func_calls) {
347 timer_push(TV_REAL_FUNC_CALL);
348 optimize_funccalls(firm_const_exists, NULL);
350 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "func_call");
351 CHECK_ALL(firm_opt.check_all);
354 /* do lowering on the const code irg */
/* main per-graph pass loop: each pass is timed, optionally dumped
   ("all_phases") and optionally verified ("check_all") */
357 for (i = 0; i < get_irp_n_irgs(); i++) {
358 irg = current_ir_graph = get_irp_irg(i);
361 /* If SIMD optimization is on, make sure we have only 1 return */
362 if (firm_ext_grs.create_pattern || firm_ext_grs.simd_opt)
363 normalize_one_return(irg);
368 if (firm_opt.modes) {
369 /* convert all modes into integer if possible */
370 arch_mode_conversion(irg, predefs.mode_uint);
373 timer_push(TV_SCALAR_REPLACE);
374 scalar_replacement_opt(irg);
376 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "scalar");
377 CHECK_ONE(firm_opt.check_all, irg);
379 timer_push(TV_LOCAL_OPT);
380 optimize_graph_df(irg);
382 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
383 CHECK_ONE(firm_opt.check_all, irg);
385 timer_push(TV_REASSOCIATION);
386 optimize_reassociation(irg);
388 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "reassoc");
389 CHECK_ONE(firm_opt.check_all, irg);
391 if (firm_opt.confirm) {
392 /* Confirm construction currently can only handle blocks with only one control
393 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
394 the optimization of switch constructs. */
395 timer_push(TV_CF_OPT);
398 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
399 CHECK_ONE(firm_opt.check_all, irg);
400 timer_push(TV_CONFIRM_CREATE);
401 construct_confirms(irg);
403 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "confirms");
404 CHECK_ONE(firm_opt.check_all, irg);
407 timer_push(TV_LOCAL_OPT);
408 optimize_graph_df(irg);
410 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
411 CHECK_ONE(firm_opt.check_all, irg);
414 CHECK_ONE(firm_opt.check_all, irg);
/* code placement: global CSE + dataflow opt, then place_code (elided) */
416 if (firm_opt.code_place) {
417 timer_push(TV_CODE_PLACE);
418 set_opt_global_cse(1);
419 optimize_graph_df(irg);
421 set_opt_global_cse(0);
423 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "place");
424 CHECK_ONE(firm_opt.check_all, irg);
427 if (firm_opt.luffig) {
429 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "ldst2");
430 CHECK_ONE(firm_opt.check_all, irg);
433 timer_push(TV_CF_OPT);
436 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
437 CHECK_ONE(firm_opt.check_all, irg);
439 /* should we really remove the Confirm here? */
440 if (firm_opt.confirm) {
441 timer_push(TV_CONFIRM_CREATE);
442 remove_confirms(irg);
446 irg_verify(irg, VRFY_ENFORCE_SSA);
447 if (firm_opt.gvn_pre) {
449 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "pre");
450 CHECK_ONE(firm_opt.check_all, irg);
451 irg_verify(irg, VRFY_ENFORCE_SSA);
454 if (firm_opt.load_store) {
455 timer_push(TV_LOAD_STORE);
456 optimize_load_store(irg);
458 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "ldst");
459 CHECK_ONE(firm_opt.check_all, irg);
462 lower_highlevel_graph(irg, firm_opt.lower_bitfields);
464 if (firm_opt.deconv) {
465 timer_push(TV_DECONV);
468 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "deconv");
469 CHECK_ONE(firm_opt.check_all, irg);
472 if (firm_opt.cond_eval) {
473 timer_push(TV_COND_EVAL);
476 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cond_eval");
477 CHECK_ONE(firm_opt.check_all, irg);
481 compute_postdoms(irg);
482 DUMP_ONE_CFG_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "dom");
483 CHECK_ONE(firm_opt.check_all, irg);
485 construct_backedges(irg);
487 timer_push(TV_CF_OPT);
490 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
491 CHECK_ONE(firm_opt.check_all, irg);
493 if (firm_opt.if_conversion) {
494 timer_push(TV_IF_CONV);
495 opt_if_conv(current_ir_graph, if_conv_info);
497 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "if");
498 CHECK_ONE(firm_opt.check_all, current_ir_graph);
500 timer_push(TV_LOCAL_OPT);
501 optimize_graph_df(current_ir_graph);
503 timer_push(TV_CF_OPT);
504 optimize_cf(current_ir_graph);
506 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "after_if");
507 CHECK_ONE(firm_opt.check_all, current_ir_graph);
510 if (firm_opt.bool_opt) {
512 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "bool");
513 CHECK_ONE(firm_opt.check_all, irg);
/* operator strength reduction (guarded by an elided condition) */
517 opt_osr(current_ir_graph, osr_flag_default | osr_flag_keep_reg_pressure | osr_flag_ignore_x86_shift);
519 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "stred");
520 CHECK_ONE(firm_opt.check_all, irg);
522 timer_push(TV_LOCAL_OPT);
523 optimize_graph_df(irg);
525 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
526 CHECK_ONE(firm_opt.check_all, irg);
528 edges_deactivate(irg);
529 timer_push(TV_DEAD_NODE);
530 dead_node_elimination(irg);
532 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "dead");
533 CHECK_ONE(firm_opt.check_all, irg);
/* interprocedural passes after the per-graph loop */
536 if (firm_opt.do_inline) {
537 inline_functions(500, 50);
538 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "inl");
539 CHECK_ALL(firm_opt.check_all);
541 if (firm_opt.cloning) {
542 proc_cloning((float)firm_opt.clone_threshold);
543 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "clone");
544 CHECK_ALL(firm_opt.check_all);
546 if (firm_opt.tail_rec) {
547 timer_push(TV_TAIL_REC);
548 opt_tail_recursion();
551 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "tail_rec");
552 CHECK_ALL(firm_opt.check_all);
555 if (firm_dump.ir_graph) {
556 /* recompute backedges for nicer dumps */
557 for (i = 0; i < get_irp_n_irgs(); i++)
558 construct_cf_backedges(get_irp_irg(i));
/* second unused-method sweep: inlining/cloning may have orphaned graphs */
561 if (firm_opt.remove_unused) {
562 ir_entity **keep_methods;
565 /* Analysis that finds the free methods,
566 i.e. methods that are dereferenced.
567 Optimizes polymorphic calls :-). */
568 cgana(&arr_len, &keep_methods);
570 /* Remove methods that are never called. */
571 gc_irgs(arr_len, keep_methods);
577 DUMP_ALL(firm_dump.ir_graph, "-opt");
579 /* verify optimized graphs */
580 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
581 ir_graph *irg = get_irp_irg(i);
582 irg_verify(irg, VRFY_ENFORCE_SSA);
585 if (firm_dump.statistic & STAT_AFTER_OPT)
586 stat_dump_snapshot(input_filename, "opt");
588 timer_stop(TV_ALL_OPT);
589 } /* do_firm_optimizations */
592 * compute the size of a type (do implicit lowering)
594 * @param ty a Firm type
/* Recursively lays out ty: computes member offsets, total size and alignment
   for compound (struct/class) types, the max-member size for unions, and
   bound-difference * element-size for arrays, then marks ty layout_fixed.
   Returns non-zero on success; returns early (via elided lines) for
   already-fixed layouts, method types, the global type, and arrays whose
   bounds cannot be evaluated yet (e.g. argv[]).
   NOTE(review): the switch's case labels, break statements and several
   closing braces are among the lines elided from this listing. */
596 static int compute_type_size(ir_type *ty)
598 optimization_state_t state;
599 unsigned align_all = 1;
600 int n, size = 0, set = 0;
603 if (get_type_state(ty) == layout_fixed) {
604 /* do not layout already layouted types again */
608 if (is_Method_type(ty) || ty == get_glob_type()) {
609 /* no need for size calculation for method types or the global type */
613 DBG(("compute type size visiting: %s\n", get_type_name(ty)));
615 switch (get_type_tpop_code(ty)) {
/* compound (struct/class) case: lay out each member at the next
   properly-aligned offset, tracking the largest member alignment */
618 for (i = 0, n = get_compound_n_members(ty); i < n; ++i) {
619 ir_entity *ent = get_compound_member(ty, i);
620 ir_type *ent_ty = get_entity_type(ent);
621 unsigned align, misalign;
623 /* compute member types */
624 if (! compute_type_size(ent_ty))
627 align = get_type_alignment_bytes(ent_ty);
628 align_all = align > align_all ? align : align_all;
/* pad up to the member's alignment before placing it */
629 misalign = (align ? size % align : 0);
630 size += (misalign ? align - misalign : 0);
632 set_entity_offset(ent, size);
633 size += get_type_size_bytes(ent_ty);
635 DBG((" member %s %s -> (size: %u, align: %u)\n",
636 get_type_name(ent_ty), get_entity_name(ent),
637 get_type_size_bytes(ent_ty), get_type_alignment_bytes(ent_ty)));
/* tail padding: round the struct size up to its overall alignment */
639 if (align_all > 0 && size % align_all) {
640 DBG(("align of the struct member: %u, type size: %d\n", align_all, size));
641 size += align_all - (size % align_all);
642 DBG(("correcting type-size to %d\n", size));
644 set_type_alignment_bytes(ty, align_all);
/* union case: all members at offset 0, size is the largest member */
649 for (i = 0, n = get_union_n_members(ty); i < n; ++i) {
650 ir_entity *ent = get_union_member(ty, i);
652 if (! compute_type_size(get_entity_type(ent)))
654 s = get_type_size_bytes(get_entity_type(ent));
656 set_entity_offset(ent, 0);
657 size = (s > size ? s : size);
/* array case: evaluate the bound expressions with constant folding
   enabled, then multiply the dimension extents by the element size */
663 dims = get_array_n_dimensions(ty);
665 if (! compute_type_size(get_array_element_type(ty)))
670 save_optimization_state(&state);
672 set_opt_constant_folding(1);
674 for (i = 0; i < dims; ++i) {
675 ir_node *lower = get_array_lower_bound(ty, i);
676 ir_node *upper = get_array_upper_bound(ty, i);
677 ir_graph *rem = current_ir_graph;
678 tarval *tv_lower, *tv_upper;
679 long val_lower, val_upper;
/* bound nodes live in the const-code irg; fold them there */
681 current_ir_graph = get_const_code_irg();
682 local_optimize_node(lower);
683 local_optimize_node(upper);
684 current_ir_graph = rem;
686 tv_lower = computed_value(lower);
687 tv_upper = computed_value(upper);
689 if (tv_lower == tarval_bad || tv_upper == tarval_bad) {
691 * we cannot calculate the size of this array yet, it
692 * even might be unknown until the end, like argv[]
694 restore_optimization_state(&state);
698 val_upper = get_tarval_long(tv_upper);
699 val_lower = get_tarval_long(tv_lower);
700 size *= val_upper - val_lower;
702 restore_optimization_state(&state);
704 DBG(("array %s -> (elements: %d, element type size: %d)\n",
706 size, get_type_size_bytes(get_array_element_type(ty))));
707 size *= get_type_size_bytes(get_array_element_type(ty));
/* commit: record the computed layout and freeze it */
716 set_type_size_bytes(ty, size);
717 set_type_state(ty, layout_fixed);
720 DBG(("size: %d\n", get_type_size_bytes(ty)));
723 } /* compute_type_size */
726 * layout all non-frame types of the Firm graph
/* Walks every program type, laying each out via compute_type_size(); for
   method types it additionally lays out the value-result type (used for
   compound return values), when present (guard elided from this listing). */
728 static void compute_type_sizes(void)
733 /* all non-frame other types */
734 for (i = get_irp_n_types() - 1; i >= 0; --i) {
735 tp = get_irp_type(i);
736 compute_type_size(tp);
738 if (is_Method_type(tp)) {
739 tp = get_method_value_res_type(tp);
742 /* we have a value result type for this method, lower */
743 compute_type_size(tp);
747 } /* compute_type_sizes */
750 * layout all frame-types of the Firm graph
/* Lays out the stack-frame type of every graph — skipped in debug mode so
   local variables are not optimized away. */
752 static void compute_frame_type_sizes(void)
757 /* all frame types */
758 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
759 irg = get_irp_irg(i);
760 /* do not optimize away variables in debug mode */
761 if (firm_opt.debug_mode == DBG_MODE_NONE)
763 compute_type_size(get_irg_frame_type(irg));
765 } /* compute_frame_type_sizes */
770 * @param input_filename the name of the (main) source file
/* Lowers the IR towards the target: optional double-word (64-bit) lowering
   with backend-supplied intrinsics, verification and dumps, then — if
   optimization is enabled — a post-lowering optimization round, and finally
   marks all graphs and the program as phase_low.
   NOTE(review): gaps in this listing elide the lwrdw_param_t field list,
   the lower_dw_ops() call, timer_pop()s and several closing braces. */
772 static void do_firm_lowering(const char *input_filename)
776 /* do class lowering and vtbl creation */
777 // lower_classes_to_struct("vtbl", "m");
780 timer_push(TV_LOWER);
785 if (firm_opt.lower_ll) {
786 lwrdw_param_t init = {
791 def_create_intrinsic_fkt,
/* prefer the backend's intrinsic factory over the default one */
796 if (arch_create_intrinsic) {
797 init.create_intrinsic = arch_create_intrinsic;
798 init.ctx = create_intrinsic_ctx;
800 timer_push(TV_DW_LOWER);
805 if (firm_dump.statistic & STAT_AFTER_LOWER)
806 stat_dump_snapshot(input_filename, "low");
808 /* verify lowered graphs */
809 timer_push(TV_VERIFY);
810 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
811 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA);
814 DUMP_ALL(firm_dump.ir_graph, "-low");
/* post-lowering optimization round (only when optimization is on) */
816 if (firm_opt.enabled) {
817 timer_start(TV_ALL_OPT);
819 /* run reassociation first on all graphs BEFORE the architecture dependent optimizations
821 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
822 current_ir_graph = get_irp_irg(i);
824 timer_push(TV_REASSOCIATION);
825 optimize_reassociation(current_ir_graph);
827 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "reassoc");
828 CHECK_ONE(firm_opt.check_all, current_ir_graph);
831 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
832 current_ir_graph = get_irp_irg(i);
/* with code placement, run local opt under global CSE; the verifier
   is skipped until place_code() has moved nodes into valid blocks */
834 if (firm_opt.code_place)
835 set_opt_global_cse(1);
837 timer_push(TV_LOCAL_OPT);
838 optimize_graph_df(current_ir_graph);
840 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
841 if (! firm_opt.code_place)
842 CHECK_ONE(firm_opt.check_all, current_ir_graph);
844 if (firm_opt.code_place) {
845 timer_push(TV_CODE_PLACE);
846 place_code(current_ir_graph);
847 set_opt_global_cse(0);
849 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "place");
850 CHECK_ONE(firm_opt.check_all, current_ir_graph);
853 timer_push(TV_LOAD_STORE);
854 optimize_load_store(current_ir_graph);
856 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "ldst");
857 CHECK_ONE(firm_opt.check_all, current_ir_graph);
860 timer_push(TV_LOCAL_OPT);
861 optimize_graph_df(current_ir_graph);
863 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
865 timer_push(TV_CF_OPT);
866 optimize_cf(current_ir_graph);
868 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "cf");
869 CHECK_ONE(firm_opt.check_all, current_ir_graph);
871 if (firm_opt.if_conversion) {
872 timer_push(TV_IF_CONV);
873 opt_if_conv(current_ir_graph, if_conv_info);
875 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "if");
876 CHECK_ONE(firm_opt.check_all, current_ir_graph);
878 timer_push(TV_LOCAL_OPT);
879 optimize_graph_df(current_ir_graph);
881 timer_push(TV_CF_OPT);
882 optimize_cf(current_ir_graph);
884 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "after_if");
885 CHECK_ONE(firm_opt.check_all, current_ir_graph);
888 timer_stop(TV_ALL_OPT);
890 DUMP_ALL(firm_dump.ir_graph, "-low-opt");
894 mark_private_methods();
896 /* set the phase to low */
897 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
898 set_irg_phase_low(get_irp_irg(i));
900 /* all graphs are lowered, set the irp phase to low */
901 set_irp_phase_state(phase_low);
903 if (firm_dump.statistic & STAT_FINAL) {
904 stat_dump_snapshot(input_filename, "final");
906 } /* do_firm_lowering */
909 * Initialize for the Firm-generating back end.
/* One-time libfirm initialization: assembles firm_parameter_t from the
   command-line option structs, pulls backend parameters from be_init() when
   the Firm backend is selected, calls init_firm() (elided), and sets the
   global optimization/verification/dump options used by all later phases. */
911 void gen_firm_init(void)
913 firm_parameter_t params;
915 unsigned pattern = 0;
917 /* the automatic state is only set if inlining is enabled */
918 firm_opt.auto_inline = firm_opt.do_inline;
/* build the statistics-enable bit mask from the dump options */
920 if (firm_dump.stat_pattern)
921 pattern |= FIRMSTAT_PATTERN_ENABLED;
923 if (firm_dump.stat_dag)
924 pattern |= FIRMSTAT_COUNT_DAG;
/* NOTE(review): "¶ms" below is mojibake — encoding corruption of
   "&params" (the '&' was swallowed by an HTML &para; entity). */
926 memset(¶ms, 0, sizeof(params));
927 params.size = sizeof(params);
928 params.enable_statistics = firm_dump.statistic == STAT_NONE ? 0 :
929 FIRMSTAT_ENABLED | FIRMSTAT_COUNT_STRONG_OP | FIRMSTAT_COUNT_CONSTS | pattern;
930 params.initialize_local_func = uninitialized_local_var;
931 params.cc_mask = 0; /* no regparam, cdecl */
932 params.builtin_dbg = NULL;
/* pull the architecture-specific settings from the Firm backend */
935 if (firm_be_opt.selection == BE_FIRM_BE) {
936 const backend_params *be_params = be_init();
938 firm_opt.lower_ll = (a_byte) be_params->do_dw_lowering;
939 params.arch_op_settings = be_params->arch_op_settings;
941 arch_create_intrinsic = be_params->arch_create_intrinsic_fkt;
942 create_intrinsic_ctx = be_params->create_intrinsic_ctx;
944 ad_param = be_params->dep_param;
945 if_conv_info = be_params->if_conv_info;
947 #endif /* FIRM_BACKEND */
950 /* Activate Graph rewriting if SIMD optimization is turned on */
951 /* This has to be done before init_firm() is called! */
952 if (firm_ext_grs.simd_opt)
957 dbg_init(NULL, NULL, dbg_snprint);
958 edges_init_dbg(firm_opt.vrfy_edges);
959 //cbackend_set_debug_retrieve(dbg_retrieve);
961 set_opt_precise_exc_context(firm_opt.precise_exc);
962 set_opt_fragile_ops(firm_opt.fragile_ops);
964 /* Sel node cannot produce NULL pointers */
965 set_opt_sel_based_null_check_elim(1);
967 /* dynamic dispatch works currently only if whole world scenarios */
968 set_opt_dyn_meth_dispatch(0);
970 arch_dep_init(arch_factory);
972 /* do not run architecture dependent optimizations in building phase */
973 arch_dep_set_opts(arch_dep_none);
975 do_node_verification((firm_verification_t) firm_opt.vrfy);
976 if (firm_dump.filter)
977 only_dump_method_with_name(new_id_from_str(firm_dump.filter));
/* enable the individual optimizations per the command line */
979 if (firm_opt.enabled) {
981 set_opt_constant_folding(firm_opt.const_folding);
982 set_opt_cse(firm_opt.cse);
983 set_opt_global_cse (0);
984 set_opt_unreachable_code(1);
985 set_opt_control_flow(firm_opt.control_flow);
986 set_opt_control_flow_weak_simplification(1);
987 set_opt_control_flow_strong_simplification(1);
/* environment variable may additionally restrict which methods get dumped */
992 dump_filter = getenv("FIRM_DUMP_FILTER");
994 only_dump_method_with_name(new_id_from_str(dump_filter));
996 /* do not dump entity ld names */
1000 /* init the ycomp debugger extension */
1001 if (firm_opt.ycomp_dbg)
1002 firm_init_ycomp_debugger(firm_opt.ycomp_host, firm_opt.ycomp_port);
1004 } /* gen_firm_init */
1007 * Called, after the Firm generation is completed,
1008 * do all optimizations and backend call here.
1010 * @param out a file handle for the output, may be NULL
1011 * @param input_filename the name of the (main) source file
1012 * @param c_mode non-zero if "C" was compiled
1013 * @param firm_const_exists non-zero, if the const attribute was used on functions
/* Driver for everything after IR construction: configure dumping, finalize
   and verify all graphs, lower compound returns / CopyB / types, run
   do_firm_optimizations() and do_firm_lowering(), optional SIMD extension
   steps, enable arch-dependent opts, then invoke the code generator.
   NOTE(review): gaps in this listing elide timer_pop()s, several closing
   braces and some guarded calls (e.g. the SIMD rewrite invocation). */
1015 void gen_firm_finish(FILE *out, const char *input_filename, int c_mode, int firm_const_exists)
1019 /* the general for dumping option must be set, or the others will not work */
1021 = (a_byte) (firm_dump.ir_graph | firm_dump.all_phases | firm_dump.extbb);
1023 dump_keepalive_edges(1);
1024 dump_consts_local(1);
1025 dump_dominator_information(1);
1026 dump_loop_information(0);
1028 if (!firm_dump.edge_labels)
1029 turn_off_edge_labels();
1031 if (firm_dump.all_types) {
1034 dump_class_hierarchy(0, "");
1035 dump_class_hierarchy(1, "-with-entities");
1039 /* finalize all graphs */
1040 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1041 ir_graph *irg = get_irp_irg(i);
1043 irg_finalize_cons(irg);
1044 DUMP_ONE(firm_dump.ir_graph, irg, "");
1046 /* verify the graph */
1047 timer_push(TV_VERIFY);
1048 irg_verify(irg, VRFY_ENFORCE_SSA);
1052 timer_push(TV_VERIFY);
1056 /* all graphs are finalized, set the irp phase to high */
1057 set_irp_phase_state(phase_high);
1059 /* lower all compound call return values */
1060 lower_compound_params();
1062 /* computes the sizes of all types that are still not computed */
1063 compute_type_sizes();
1065 /* lower copyb nodes */
1066 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1067 ir_graph *irg = get_irp_irg(i);
1068 lower_CopyB(irg, 128, 4);
1071 if (firm_dump.statistic & STAT_BEFORE_OPT) {
1072 stat_dump_snapshot(input_filename, "noopt");
1075 if (firm_opt.enabled)
1076 do_firm_optimizations(input_filename, firm_const_exists);
1078 if (firm_dump.gen_firm_asm) {
1079 timer_push(TV_FIRM_ASM);
1080 gen_Firm_assembler(input_filename);
1086 do_firm_lowering(input_filename);
1088 /* computes the sizes of all frame types */
1089 compute_frame_type_sizes();
1091 /* set the phase to low */
1092 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
1093 set_irg_phase_low(get_irp_irg(i));
1097 /** SIMD Optimization Extensions **/
1099 /* Pattern creation step. No code has to be generated, so
1100 exit after pattern creation */
1101 if (firm_ext_grs.create_pattern) {
1102 ext_grs_create_pattern();
1106 /* SIMD optimization step. Uses graph patterns to find
1107 rich instructions and rewrite */
1108 if (firm_ext_grs.simd_opt)
1112 /* enable architecture dependent optimizations */
1113 arch_dep_set_opts((arch_dep_opts_t)
1114 ((firm_opt.muls ? arch_dep_mul_to_shift : arch_dep_none) |
1115 (firm_opt.divs ? arch_dep_div_by_const : arch_dep_none) |
1116 (firm_opt.mods ? arch_dep_mod_by_const : arch_dep_none) ));
1119 if (firm_dump.statistic & STAT_FINAL_IR)
1120 stat_dump_snapshot(input_filename, "final-ir");
1122 /* run the code generator */
1123 if (firm_be_opt.selection != BE_NONE)
1124 do_codegen(out, input_filename);
1126 if (firm_dump.statistic & STAT_FINAL)
1127 stat_dump_snapshot(input_filename, "final");
/* shut down the ycomp debugger connection if it was opened */
1130 if (firm_opt.ycomp_dbg)
1131 firm_finish_ycomp_debugger();
1133 } /* gen_firm_finish */
1136 * Do very early initializations
/* Runs before command-line parsing: registers libfirm's option handling so
   Firm-related flags are recognized.
   NOTE(review): interior lines (likely the backend option registration) are
   elided from this listing. */
1138 void firm_early_init(void) {
1140 /* arg: need this here for command line options */
1143 firm_init_options(NULL, 0, NULL);
1144 } /* firm_early_init */