3 * @file firm_opt.c -- Firm-generating back end optimizations.
5 * (C) 2005-2007 Michael Beck beck@ipd.info.uni-karlsruhe.de
11 #include <libfirm/firm.h>
12 #include <libfirm/be.h>
16 #include "firm_codegen.h"
17 #include "firm_cmdline.h"
18 #include "firm_timing.h"
21 #define snprintf _snprintf
24 #if defined(_DEBUG) || defined(FIRM_DEBUG)
25 #define DBG(x) dbg_printf x
28 #endif /* _DEBUG || FIRM_DEBUG */
31 /** dump all the graphs depending on cond */
32 #define DUMP_ALL(cond, suffix) \
35 timer_push(TV_VCG_DUMP); \
36 if (firm_dump.no_blocks) \
37 dump_all_ir_graphs(dump_ir_graph, suffix); \
38 else if (firm_dump.extbb) \
39 dump_all_ir_graphs(dump_ir_extblock_graph, suffix);\
41 dump_all_ir_graphs(dump_ir_block_graph, suffix); \
46 /** dump all control flow graphs depending on cond */
47 #define DUMP_ALL_CFG(cond, suffix) \
50 timer_push(TV_VCG_DUMP); \
51 dump_all_ir_graphs(dump_cfg, suffix); \
56 /** check all graphs depending on cond */
57 #define CHECK_ALL(cond) \
61 timer_push(TV_VERIFY); \
62 for (ii = get_irp_n_irgs() - 1; ii >= 0; --ii) \
63 irg_verify(get_irp_irg(ii), VRFY_ENFORCE_SSA); \
70 /** dump graphs irg depending on cond */
71 #define DUMP_ONE(cond, irg, suffix) \
74 timer_push(TV_VCG_DUMP); \
75 if (firm_dump.no_blocks) \
76 dump_ir_graph(irg, suffix); \
77 else if (firm_dump.extbb) \
78 dump_ir_extblock_graph(irg, suffix); \
80 dump_ir_block_graph(irg, suffix); \
85 /** dump control flow graph irg depending on cond */
86 #define DUMP_ONE_CFG(cond, irg, suffix) \
89 timer_push(TV_VCG_DUMP); \
90 dump_cfg(irg, suffix); \
95 /** check a graph irg depending on cond */
96 #define CHECK_ONE(cond, irg) \
99 timer_push(TV_VERIFY); \
100 irg_verify(irg, VRFY_ENFORCE_SSA); \
106 /* set by the backend parameters */
107 static const ir_settings_arch_dep_t *ad_param = NULL;
108 static create_intrinsic_fkt *arch_create_intrinsic = NULL;
109 static void *create_intrinsic_ctx = NULL;
110 static const ir_settings_if_conv_t *if_conv_info = NULL;
112 ir_mode *firm_imm_fp_mode = NULL;
114 /* entities of runtime functions */
115 ir_entity_ptr rts_entities[rts_max];
118 * factory for setting architecture dependent parameters
/*
 * Factory callback for arch_dep_init(): returns the architecture dependent
 * parameters for mul/div/mod strength reduction.  If the selected backend
 * supplied its own settings (ad_param, set in gen_firm_init), those win;
 * otherwise a conservative static default is returned.
 * NOTE(review): extracted fragment -- several struct fields are missing here,
 * and "¶m" below looks like mojibake for "&param"; verify against the
 * original source.
 */
120 static const ir_settings_arch_dep_t *arch_factory(void)
122 static const ir_settings_arch_dep_t param = {
123 1, /* also use subs */
124 4, /* maximum shifts */
125 31, /* maximum shift amount */
126 NULL, /* use default evaluator */
130 32 /* Mulh allowed up to 32 bit */
/* prefer the backend-provided settings when available */
133 return ad_param ? ad_param : ¶m;
137 * Map runtime functions.
/*
 * Map well-known C runtime functions onto libfirm intrinsic mappers, so
 * calls like abs(), sqrt(), memcpy() etc. can be optimized by the middle
 * end.  Only entities that were actually created by the front end
 * (non-NULL slots in rts_entities[]) are registered; the resulting table
 * is handed to lower_intrinsics().
 * NOTE(review): extracted fragment -- some lines (e.g. the closing "};" of
 * the table and parts of the loop) are missing.
 */
139 static void rts_map(void) {
140 static const struct {
141 ir_entity_ptr *ent; /**< address of the rts entity */
142 i_mapper_func func; /**< mapper function. */
/* integer absolute-value family */
145 { &rts_entities[rts_abs], i_mapper_abs },
146 { &rts_entities[rts_labs], i_mapper_abs },
147 { &rts_entities[rts_llabs], i_mapper_abs },
148 { &rts_entities[rts_imaxabs], i_mapper_abs },
150 /* double -> double */
151 { &rts_entities[rts_fabs], i_mapper_abs },
152 { &rts_entities[rts_sqrt], i_mapper_sqrt },
153 { &rts_entities[rts_cbrt], i_mapper_cbrt },
154 { &rts_entities[rts_pow], i_mapper_pow },
155 { &rts_entities[rts_exp], i_mapper_exp },
156 { &rts_entities[rts_exp2], i_mapper_exp },
157 { &rts_entities[rts_exp10], i_mapper_exp },
158 { &rts_entities[rts_log], i_mapper_log },
159 { &rts_entities[rts_log2], i_mapper_log2 },
160 { &rts_entities[rts_log10], i_mapper_log10 },
161 { &rts_entities[rts_sin], i_mapper_sin },
162 { &rts_entities[rts_cos], i_mapper_cos },
163 { &rts_entities[rts_tan], i_mapper_tan },
164 { &rts_entities[rts_asin], i_mapper_asin },
165 { &rts_entities[rts_acos], i_mapper_acos },
166 { &rts_entities[rts_atan], i_mapper_atan },
167 { &rts_entities[rts_sinh], i_mapper_sinh },
168 { &rts_entities[rts_cosh], i_mapper_cosh },
169 { &rts_entities[rts_tanh], i_mapper_tanh },
/* float -> float variants (original "float -> float" comment presumably
   lost in extraction) */
172 { &rts_entities[rts_fabsf], i_mapper_abs },
173 { &rts_entities[rts_sqrtf], i_mapper_sqrt },
174 { &rts_entities[rts_cbrtf], i_mapper_cbrt },
175 { &rts_entities[rts_powf], i_mapper_pow },
176 { &rts_entities[rts_expf], i_mapper_exp },
177 { &rts_entities[rts_exp2f], i_mapper_exp },
178 { &rts_entities[rts_exp10f], i_mapper_exp },
179 { &rts_entities[rts_logf], i_mapper_log },
180 { &rts_entities[rts_log2f], i_mapper_log2 },
181 { &rts_entities[rts_log10f], i_mapper_log10 },
182 { &rts_entities[rts_sinf], i_mapper_sin },
183 { &rts_entities[rts_cosf], i_mapper_cos },
184 { &rts_entities[rts_tanf], i_mapper_tan },
185 { &rts_entities[rts_asinf], i_mapper_asin },
186 { &rts_entities[rts_acosf], i_mapper_acos },
187 { &rts_entities[rts_atanf], i_mapper_atan },
188 { &rts_entities[rts_sinhf], i_mapper_sinh },
189 { &rts_entities[rts_coshf], i_mapper_cosh },
190 { &rts_entities[rts_tanhf], i_mapper_tanh },
192 /* long double -> long double */
193 { &rts_entities[rts_fabsl], i_mapper_abs },
194 { &rts_entities[rts_sqrtl], i_mapper_sqrt },
195 { &rts_entities[rts_cbrtl], i_mapper_cbrt },
196 { &rts_entities[rts_powl], i_mapper_pow },
197 { &rts_entities[rts_expl], i_mapper_exp },
198 { &rts_entities[rts_exp2l], i_mapper_exp },
199 { &rts_entities[rts_exp10l], i_mapper_exp },
200 { &rts_entities[rts_logl], i_mapper_log },
201 { &rts_entities[rts_log2l], i_mapper_log2 },
202 { &rts_entities[rts_log10l], i_mapper_log10 },
203 { &rts_entities[rts_sinl], i_mapper_sin },
204 { &rts_entities[rts_cosl], i_mapper_cos },
205 { &rts_entities[rts_tanl], i_mapper_tan },
206 { &rts_entities[rts_asinl], i_mapper_asin },
207 { &rts_entities[rts_acosl], i_mapper_acos },
208 { &rts_entities[rts_atanl], i_mapper_atan },
209 { &rts_entities[rts_sinhl], i_mapper_sinh },
210 { &rts_entities[rts_coshl], i_mapper_cosh },
211 { &rts_entities[rts_tanhl], i_mapper_tanh },
/* string / memory runtime helpers */
214 { &rts_entities[rts_memcpy], i_mapper_memcpy },
215 { &rts_entities[rts_memset], i_mapper_memset },
216 { &rts_entities[rts_strcmp], i_mapper_strcmp },
217 { &rts_entities[rts_strncmp], i_mapper_strncmp },
218 { &rts_entities[rts_strlen], i_mapper_strlen }
/* build the intrinsic record list from the entries that actually exist */
220 i_record rec[sizeof(mapper)/sizeof(mapper[0])];
223 for (i = n_map = 0; i < sizeof(mapper)/sizeof(mapper[0]); ++i)
224 if (*mapper[i].ent != NULL) {
225 rec[n_map].i_call.kind = INTRINSIC_CALL;
226 rec[n_map].i_call.i_ent = *mapper[i].ent;
227 rec[n_map].i_call.i_mapper = mapper[i].func;
228 rec[n_map].i_call.ctx = NULL;
229 rec[n_map].i_call.link = NULL;
/* replace the collected calls by their intrinsic implementations */
233 lower_intrinsics(rec, n_map, /* part_block_used=*/0);
236 static int *irg_dump_no;
/*
 * Dump one graph with a per-graph running counter prefixed to the suffix
 * ("-NN_suffix"), so successive phase dumps of the same irg sort in order.
 * Uses the irg_dump_no[] counter indexed by the graph's index.
 * NOTE(review): the declaration of the local "name" buffer is missing in
 * this extracted fragment.
 */
238 static void dump_graph_count(ir_graph *const irg, const char *const suffix)
241 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
242 DUMP_ONE(1, irg, name);
/*
 * Like dump_graph_count() but dumps the control flow graph only.
 * NOTE(review): the declaration of the local "name" buffer is missing in
 * this extracted fragment.
 */
245 static void dump_graph_cfg_count(ir_graph *const irg, const char *const suffix)
248 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
249 DUMP_ONE_CFG(1, irg, name);
/*
 * Dump every graph of the program with the per-graph phase counter
 * (see dump_graph_count()).
 */
252 static void dump_all_count(const char *const suffix)
254 const int n_irgs = get_irp_n_irgs();
257 for (i = 0; i < n_irgs; ++i)
258 dump_graph_count(get_irp_irg(i), suffix);
261 #define DUMP_ONE_C(cond, irg, suffix) \
264 dump_graph_count((irg), (suffix)); \
268 #define DUMP_ONE_CFG_C(cond, irg, suffix) \
271 dump_graph_cfg_count((irg), (suffix)); \
275 #define DUMP_ALL_C(cond, suffix) \
278 dump_all_count((suffix)); \
283 * run all the Firm optimizations
285 * @param input_filename the name of the (main) source file
286 * @param firm_const_exists non-zero, if the const attribute was used on functions
/*
 * Run the complete middle-end optimization pipeline over all graphs.
 *
 * The pass order is deliberate and order-sensitive: global analyses first
 * (combo, dead code, unused-method removal, tail recursion, func-call
 * optimization), then a long per-graph loop (scalar replacement, local
 * opts, reassociation, code placement, Confirm-based opts, load/store,
 * high-level lowering, condition evaluation, GVN-PRE, if-conversion,
 * OSR, dead node elimination), then whole-program inlining/cloning and a
 * final cleanup loop.  Each phase can be dumped/verified depending on the
 * firm_dump/firm_opt flags.
 *
 * @param input_filename    name of the (main) source file, for statistics
 * @param firm_const_exists non-zero if the const attribute was used on
 *                          functions (enables stronger func-call opts)
 *
 * NOTE(review): extracted fragment -- local declarations (i, irg, aa_opt,
 * arr_len, ...) and many closing braces are missing; do not edit logic
 * from this view.
 */
288 static void do_firm_optimizations(const char *input_filename, int firm_const_exists)
294 /* FIXME: cloning might ADD new graphs. */
295 irg_dump_no = calloc(get_irp_last_idx(), sizeof(*irg_dump_no));
/* propagate the command line optimization switches into libfirm */
297 set_opt_strength_red(firm_opt.strength_red);
298 set_opt_scalar_replacement(firm_opt.scalar_replace);
299 set_opt_auto_create_sync(firm_opt.auto_sync);
300 set_opt_alias_analysis(firm_opt.alias_analysis);
/* configure the memory disambiguator: strict aliasing adds type-based
   rules; no_alias overrides everything */
302 aa_opt = aa_opt_no_opt;
303 if (firm_opt.strict_alias)
304 aa_opt |= aa_opt_type_based | aa_opt_byte_type_may_alias;
305 if (firm_opt.no_alias)
306 aa_opt = aa_opt_no_alias;
308 set_irp_memory_disambiguator_options(aa_opt);
310 timer_start(TV_ALL_OPT);
/* map libc calls to intrinsics only for hosted (non-freestanding) code */
312 if (! firm_opt.freestanding) {
314 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "rts");
315 CHECK_ALL(firm_opt.check_all);
/* combined constant propagation / value numbering, per graph */
318 if (firm_opt.combo) {
319 for (i = 0; i < get_irp_n_irgs(); i++) {
320 timer_push(TV_COMBO);
321 irg = get_irp_irg(i);
324 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "combo");
325 CHECK_ONE(firm_opt.check_all, irg);
329 /* first step: kill dead code */
330 for (i = 0; i < get_irp_n_irgs(); i++) {
331 irg = current_ir_graph = get_irp_irg(i);
332 /* Confirm construction currently can only handle blocks with only one control
333 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
334 the optimization of switch constructs. */
335 timer_push(TV_CF_OPT);
336 optimize_graph_df(irg);
341 if (firm_opt.remove_unused) {
342 ir_entity **keep_methods;
345 timer_push(TV_CGANA);
346 /* Analysis that finds the free methods,
347 i.e. methods that are dereferenced.
348 Optimizes polymorphic calls :-). */
349 cgana(&arr_len, &keep_methods);
351 /* Remove methods that are never called. */
352 gc_irgs(arr_len, keep_methods);
358 if (firm_opt.tail_rec) {
359 timer_push(TV_TAIL_REC);
360 opt_tail_recursion();
363 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "tail_rec");
364 CHECK_ALL(firm_opt.check_all);
366 if (firm_opt.func_calls) {
367 timer_push(TV_REAL_FUNC_CALL);
368 optimize_funccalls(firm_const_exists, NULL);
370 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "func_call");
371 CHECK_ALL(firm_opt.check_all);
374 /* do lowering on the const code irg */
/* --- main per-graph optimization loop --- */
377 for (i = 0; i < get_irp_n_irgs(); i++) {
378 irg = current_ir_graph = get_irp_irg(i);
381 /* If SIMD optimization is on, make sure we have only 1 return */
382 if (firm_ext_grs.create_pattern || firm_ext_grs.simd_opt)
383 normalize_one_return(irg);
388 if (firm_opt.modes) {
389 /* convert all modes into integer if possible */
390 arch_mode_conversion(irg, predefs.mode_uint);
393 timer_push(TV_SCALAR_REPLACE);
394 scalar_replacement_opt(irg);
396 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "scalar");
397 CHECK_ONE(firm_opt.check_all, irg);
399 timer_push(TV_LOCAL_OPT);
400 optimize_graph_df(irg);
402 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
403 CHECK_ONE(firm_opt.check_all, irg);
405 timer_push(TV_REASSOCIATION);
406 optimize_reassociation(irg);
408 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "reassoc");
409 CHECK_ONE(firm_opt.check_all, irg);
412 CHECK_ONE(firm_opt.check_all, irg);
/* global CSE pass: temporarily enable global CSE around the dataflow opt */
415 timer_push(TV_CODE_PLACE);
416 set_opt_global_cse(1);
417 optimize_graph_df(irg);
419 set_opt_global_cse(0);
421 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "place");
422 CHECK_ONE(firm_opt.check_all, irg);
425 if (firm_opt.confirm) {
426 /* Confirm construction currently can only handle blocks with only one control
427 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
428 the optimization of switch constructs. */
429 timer_push(TV_CF_OPT);
432 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
433 CHECK_ONE(firm_opt.check_all, irg);
434 timer_push(TV_CONFIRM_CREATE);
435 construct_confirms(irg);
437 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "confirms");
438 CHECK_ONE(firm_opt.check_all, irg);
440 timer_push(TV_LOCAL_OPT);
441 optimize_graph_df(irg);
443 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
444 CHECK_ONE(firm_opt.check_all, irg);
447 if (firm_opt.luffig) {
449 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "ldst2");
450 CHECK_ONE(firm_opt.check_all, irg);
453 timer_push(TV_CF_OPT);
456 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
457 CHECK_ONE(firm_opt.check_all, irg);
459 if (firm_opt.load_store) {
460 timer_push(TV_LOAD_STORE);
461 optimize_load_store(irg);
463 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "ldst");
464 CHECK_ONE(firm_opt.check_all, irg);
/* lower high-level constructs (Sel, bitfields, ...) to target level */
467 lower_highlevel_graph(irg, firm_opt.lower_bitfields);
469 if (firm_opt.deconv) {
470 timer_push(TV_DECONV);
473 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "deconv");
474 CHECK_ONE(firm_opt.check_all, irg);
477 if (firm_opt.cond_eval) {
478 timer_push(TV_COND_EVAL);
481 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cond_eval");
482 CHECK_ONE(firm_opt.check_all, irg);
485 #ifdef THIS_NEEDS_FURTHER_TESTING_AND_A_PARAMETER_FOR_SPARE_SIZE
486 lower_Switch(irg, 128);
487 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lower_switch");
490 /* should we really remove the Confirm here? */
491 if (firm_opt.confirm) {
492 timer_push(TV_CONFIRM_CREATE);
493 remove_confirms(irg);
497 if (firm_opt.gvn_pre) {
499 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "pre");
500 CHECK_ONE(firm_opt.check_all, irg);
501 irg_verify(irg, VRFY_ENFORCE_SSA);
504 if (! firm_opt.gcse) {
505 timer_push(TV_CODE_PLACE);
506 optimize_graph_df(irg);
509 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "place");
510 CHECK_ONE(firm_opt.check_all, irg);
/* (post)dominance and backedge info needed by the following passes */
514 compute_postdoms(irg);
515 DUMP_ONE_CFG_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "dom");
516 CHECK_ONE(firm_opt.check_all, irg);
518 construct_backedges(irg);
520 timer_push(TV_CF_OPT);
523 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
524 CHECK_ONE(firm_opt.check_all, irg);
526 if (firm_opt.if_conversion) {
527 timer_push(TV_IF_CONV);
528 opt_if_conv(current_ir_graph, if_conv_info);
530 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "if");
531 CHECK_ONE(firm_opt.check_all, current_ir_graph);
533 timer_push(TV_LOCAL_OPT);
534 optimize_graph_df(current_ir_graph);
536 timer_push(TV_CF_OPT);
537 optimize_cf(current_ir_graph);
539 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "after_if");
540 CHECK_ONE(firm_opt.check_all, current_ir_graph);
543 if (firm_opt.bool_opt) {
545 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "bool");
546 CHECK_ONE(firm_opt.check_all, irg);
/* operator strength reduction on induction variables */
550 opt_osr(current_ir_graph, osr_flag_default | osr_flag_keep_reg_pressure | osr_flag_ignore_x86_shift);
552 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "stred");
553 CHECK_ONE(firm_opt.check_all, irg);
555 timer_push(TV_LOCAL_OPT);
556 optimize_graph_df(irg);
558 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "lopt");
559 CHECK_ONE(firm_opt.check_all, irg);
561 edges_deactivate(irg);
562 timer_push(TV_DEAD_NODE);
563 dead_node_elimination(irg);
565 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "dead");
566 CHECK_ONE(firm_opt.check_all, irg);
/* --- whole-program transformations after the per-graph loop --- */
569 if (firm_opt.do_inline) {
570 inline_functions(500, 50);
571 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "inl");
572 CHECK_ALL(firm_opt.check_all);
574 if (firm_opt.cloning) {
575 proc_cloning((float)firm_opt.clone_threshold);
576 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, "clone");
577 CHECK_ALL(firm_opt.check_all);
580 if (firm_opt.cond_eval) {
581 for (i = 0; i < get_irp_n_irgs(); i++) {
582 irg = get_irp_irg(i);
583 timer_push(TV_COND_EVAL);
586 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cond_eval");
587 CHECK_ONE(firm_opt.check_all, irg);
591 /* final run of local optimizations */
592 for (i = 0; i < get_irp_n_irgs(); i++) {
593 irg = get_irp_irg(i);
594 timer_push(TV_LOCAL_OPT);
595 optimize_graph_df(irg);
597 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "local_opt");
598 CHECK_ONE(firm_opt.check_all, irg);
600 timer_push(TV_CF_OPT);
603 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, "cfopt");
604 CHECK_ONE(firm_opt.check_all, irg);
607 if (firm_dump.ir_graph) {
608 /* recompute backedges for nicer dumps */
609 for (i = 0; i < get_irp_n_irgs(); i++)
610 construct_cf_backedges(get_irp_irg(i));
/* inlining/cloning may have orphaned methods: collect garbage again */
613 if (firm_opt.remove_unused) {
614 ir_entity **keep_methods;
617 timer_push(TV_CGANA);
618 /* Analysis that finds the free methods,
619 i.e. methods that are dereferenced.
620 Optimizes polymorphic calls :-). */
621 cgana(&arr_len, &keep_methods);
623 /* Remove methods that are never called. */
624 gc_irgs(arr_len, keep_methods);
631 DUMP_ALL(firm_dump.ir_graph, "-opt");
633 /* verify optimized graphs */
634 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
635 ir_graph *irg = get_irp_irg(i);
636 irg_verify(irg, VRFY_ENFORCE_SSA);
639 if (firm_dump.statistic & STAT_AFTER_OPT)
640 stat_dump_snapshot(input_filename, "opt");
642 timer_stop(TV_ALL_OPT);
643 } /* do_firm_optimizations */
646 * compute the size of a type (do implicit lowering)
648 * @param ty a Firm type
/*
 * Compute (lay out) the size and alignment of a Firm type, recursing into
 * member/element types as needed.
 *
 * - compound (struct/class): members are placed at aligned offsets, the
 *   overall size is padded up to the largest member alignment;
 * - union: members all at offset 0, size is the maximum member size;
 * - array: element type laid out first, then bounds are constant-folded
 *   to obtain the element count (fails gracefully for open arrays like
 *   argv[]).
 * Already-fixed layouts and method/global types are skipped.
 *
 * @param ty  a Firm type
 * @return presumably non-zero on success, 0 if the size could not be
 *         computed yet (return statements are missing in this extracted
 *         fragment -- verify against the original source)
 */
650 static int compute_type_size(ir_type *ty)
652 optimization_state_t state;
653 unsigned align_all = 1;
654 int n, size = 0, set = 0;
657 if (get_type_state(ty) == layout_fixed) {
658 /* do not layout already layouted types again */
662 if (is_Method_type(ty) || ty == get_glob_type()) {
663 /* no need for size calculation for method types or the global type */
667 DBG(("compute type size visiting: %s\n", get_type_name(ty)));
669 switch (get_type_tpop_code(ty)) {
/* compound case: sequential member placement with alignment padding */
672 for (i = 0, n = get_compound_n_members(ty); i < n; ++i) {
673 ir_entity *ent = get_compound_member(ty, i);
674 ir_type *ent_ty = get_entity_type(ent);
675 unsigned align, misalign;
677 /* compute member types */
678 if (! compute_type_size(ent_ty))
/* pad the running offset up to this member's alignment */
681 align = get_type_alignment_bytes(ent_ty);
682 align_all = align > align_all ? align : align_all;
683 misalign = (align ? size % align : 0);
684 size += (misalign ? align - misalign : 0);
686 set_entity_offset(ent, size);
687 size += get_type_size_bytes(ent_ty);
689 DBG((" member %s %s -> (size: %u, align: %u)\n",
690 get_type_name(ent_ty), get_entity_name(ent),
691 get_type_size_bytes(ent_ty), get_type_alignment_bytes(ent_ty)));
/* pad the total size to a multiple of the strictest member alignment */
693 if (align_all > 0 && size % align_all) {
694 DBG(("align of the struct member: %u, type size: %d\n", align_all, size));
695 size += align_all - (size % align_all);
696 DBG(("correcting type-size to %d\n", size));
698 set_type_alignment_bytes(ty, align_all);
/* union case: all members overlap at offset 0 */
703 for (i = 0, n = get_union_n_members(ty); i < n; ++i) {
704 ir_entity *ent = get_union_member(ty, i);
706 if (! compute_type_size(get_entity_type(ent)))
708 s = get_type_size_bytes(get_entity_type(ent));
710 set_entity_offset(ent, 0);
711 size = (s > size ? s : size);
/* array case: multiply element size by each dimension's extent */
717 dims = get_array_n_dimensions(ty);
719 if (! compute_type_size(get_array_element_type(ty)))
/* temporarily force constant folding so the bound expressions reduce
   to tarvals; state is restored on every exit path below */
724 save_optimization_state(&state);
726 set_opt_constant_folding(1);
727 set_opt_algebraic_simplification(1);
729 for (i = 0; i < dims; ++i) {
730 ir_node *lower = get_array_lower_bound(ty, i);
731 ir_node *upper = get_array_upper_bound(ty, i);
732 ir_graph *rem = current_ir_graph;
733 tarval *tv_lower, *tv_upper;
734 long val_lower, val_upper;
/* bound expressions live in the const-code irg */
736 current_ir_graph = get_const_code_irg();
737 local_optimize_node(lower);
738 local_optimize_node(upper);
739 current_ir_graph = rem;
741 tv_lower = computed_value(lower);
742 tv_upper = computed_value(upper);
744 if (tv_lower == tarval_bad || tv_upper == tarval_bad) {
746 * we cannot calculate the size of this array yet, it
747 * even might be unknown until the end, like argv[]
749 restore_optimization_state(&state);
753 val_upper = get_tarval_long(tv_upper);
754 val_lower = get_tarval_long(tv_lower);
755 size *= val_upper - val_lower;
757 restore_optimization_state(&state);
759 DBG(("array %s -> (elements: %d, element type size: %d)\n",
761 size, get_type_size_bytes(get_array_element_type(ty))));
762 size *= get_type_size_bytes(get_array_element_type(ty));
/* commit the computed layout */
771 set_type_size_bytes(ty, size);
772 set_type_state(ty, layout_fixed);
775 DBG(("size: %d\n", get_type_size_bytes(ty)));
778 } /* compute_type_size */
781 * layout all non-frame types of the Firm graph
/*
 * Lay out all non-frame types of the program: iterate over every type in
 * the irp and compute its size; for method types additionally lay out the
 * value result type if one exists.
 */
783 static void compute_type_sizes(void)
788 /* all non-frame other types */
789 for (i = get_irp_n_types() - 1; i >= 0; --i) {
790 tp = get_irp_type(i);
791 compute_type_size(tp);
793 if (is_Method_type(tp)) {
794 tp = get_method_value_res_type(tp);
797 /* we have a value result type for this method, lower */
798 compute_type_size(tp);
802 } /* compute_type_sizes */
805 * layout all frame-types of the Firm graph
/*
 * Lay out the frame type of every graph.  In debug mode frame layout is
 * skipped so local variables are not optimized away.
 */
807 static void compute_frame_type_sizes(void)
812 /* all frame types */
813 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
814 irg = get_irp_irg(i);
815 /* do not optimize away variables in debug mode */
816 if (firm_opt.debug_mode == DBG_MODE_NONE)
818 compute_type_size(get_irg_frame_type(irg));
820 } /* compute_frame_type_sizes */
825 * @param input_filename the name of the (main) source file
/*
 * Lower all graphs to target level and run the post-lowering
 * optimizations: optional double-word lowering (with backend-provided
 * intrinsic factory), verification, then per-graph reassociation,
 * architecture dependent mul/div/mod strength reduction, local opts,
 * code placement, load/store opts, CF opts and if-conversion.  Finally
 * all graphs and the irp are switched to the "low" phase.
 *
 * @param input_filename  name of the (main) source file, for statistics
 *
 * NOTE(review): extracted fragment -- local declarations and several
 * closing braces/initializer fields are missing from this view.
 */
827 static void do_firm_lowering(const char *input_filename)
831 /* do class lowering and vtbl creation */
832 // lower_classes_to_struct("vtbl", "m");
835 timer_push(TV_LOWER);
/* double-word (e.g. 64bit on 32bit targets) lowering, if requested */
840 if (firm_opt.lower_ll) {
841 lwrdw_param_t init = {
846 def_create_intrinsic_fkt,
/* prefer the backend's intrinsic factory when one was registered */
851 if (arch_create_intrinsic) {
852 init.create_intrinsic = arch_create_intrinsic;
853 init.ctx = create_intrinsic_ctx;
855 timer_push(TV_DW_LOWER);
860 if (firm_dump.statistic & STAT_AFTER_LOWER)
861 stat_dump_snapshot(input_filename, "low");
863 /* verify lowered graphs */
864 timer_push(TV_VERIFY);
865 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
866 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA);
869 DUMP_ALL(firm_dump.ir_graph, "-low");
871 if (firm_opt.enabled) {
872 timer_start(TV_ALL_OPT);
874 /* run reassociation first on all graphs BEFORE the architecture dependent optimizations
876 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
877 current_ir_graph = get_irp_irg(i);
879 timer_push(TV_REASSOCIATION);
880 optimize_reassociation(current_ir_graph);
882 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "reassoc");
883 CHECK_ONE(firm_opt.check_all, current_ir_graph);
886 /* enable architecture dependent optimizations */
887 arch_dep_set_opts((arch_dep_opts_t)
888 ((firm_opt.muls ? arch_dep_mul_to_shift : arch_dep_none) |
889 (firm_opt.divs ? arch_dep_div_by_const : arch_dep_none) |
890 (firm_opt.mods ? arch_dep_mod_by_const : arch_dep_none) ));
/* per-graph post-lowering optimization loop */
892 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
893 current_ir_graph = get_irp_irg(i);
896 set_opt_global_cse(1);
898 timer_push(TV_LOCAL_OPT);
899 optimize_graph_df(current_ir_graph);
901 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
903 CHECK_ONE(firm_opt.check_all, current_ir_graph);
906 timer_push(TV_CODE_PLACE);
907 place_code(current_ir_graph);
908 set_opt_global_cse(0);
910 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "place");
911 CHECK_ONE(firm_opt.check_all, current_ir_graph);
914 timer_push(TV_LOAD_STORE);
915 optimize_load_store(current_ir_graph);
917 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "ldst");
918 CHECK_ONE(firm_opt.check_all, current_ir_graph);
921 timer_push(TV_LOCAL_OPT);
922 optimize_graph_df(current_ir_graph);
924 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
926 timer_push(TV_CF_OPT);
927 optimize_cf(current_ir_graph);
929 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "cf");
930 CHECK_ONE(firm_opt.check_all, current_ir_graph);
932 if (firm_opt.if_conversion) {
933 timer_push(TV_IF_CONV);
934 opt_if_conv(current_ir_graph, if_conv_info);
936 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "if");
937 CHECK_ONE(firm_opt.check_all, current_ir_graph);
939 timer_push(TV_LOCAL_OPT);
940 optimize_graph_df(current_ir_graph);
942 timer_push(TV_CF_OPT);
943 optimize_cf(current_ir_graph);
945 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "after_if");
946 CHECK_ONE(firm_opt.check_all, current_ir_graph);
949 timer_stop(TV_ALL_OPT);
951 DUMP_ALL(firm_dump.ir_graph, "-low-opt");
955 mark_private_methods();
957 /* set the phase to low */
958 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
959 set_irg_phase_low(get_irp_irg(i));
961 /* all graphs are lowered, set the irp phase to low */
962 set_irp_phase_state(phase_low);
964 if (firm_dump.statistic & STAT_FINAL) {
965 stat_dump_snapshot(input_filename, "final");
967 } /* do_firm_lowering */
970 * Initialize for the Firm-generating back end.
/*
 * Initialize the Firm-generating back end: set up statistics, debugging,
 * backend-specific parameters (double-word lowering, intrinsics, arch
 * settings, immediate FP mode, OS/gas mode), global optimization switches
 * and the optional ycomp debugger connection.  Must run before any IR is
 * constructed.
 *
 * NOTE(review): extracted fragment -- "¶ms" below looks like mojibake
 * for "&params", and several lines (init_firm call, switch breaks,
 * closing braces) are missing; verify against the original source.
 */
972 void gen_firm_init(void)
974 firm_parameter_t params;
975 unsigned pattern = 0;
977 /* the automatic state is only set if inlining is enabled */
978 firm_opt.auto_inline = firm_opt.do_inline;
980 if (firm_dump.stat_pattern)
981 pattern |= FIRMSTAT_PATTERN_ENABLED;
983 if (firm_dump.stat_dag)
984 pattern |= FIRMSTAT_COUNT_DAG;
/* assemble the libfirm initialization parameters */
986 memset(¶ms, 0, sizeof(params));
987 params.size = sizeof(params);
988 params.enable_statistics = firm_dump.statistic == STAT_NONE ? 0 :
989 FIRMSTAT_ENABLED | FIRMSTAT_COUNT_STRONG_OP | FIRMSTAT_COUNT_CONSTS | pattern;
990 params.initialize_local_func = uninitialized_local_var;
991 params.cc_mask = 0; /* no regparam, cdecl */
992 params.builtin_dbg = NULL;
995 /* Activate Graph rewriting if SIMD optimization is turned on */
996 /* This has to be done before init_firm() is called! */
997 if (firm_ext_grs.simd_opt)
/* pull backend-specific settings into this module's statics */
1003 if (firm_be_opt.selection == BE_FIRM_BE) {
1004 const backend_params *be_params = be_get_backend_param();
1006 firm_opt.lower_ll = (a_byte) be_params->do_dw_lowering;
1007 params.arch_op_settings = be_params->arch_op_settings;
1009 arch_create_intrinsic = be_params->arch_create_intrinsic_fkt;
1010 create_intrinsic_ctx = be_params->create_intrinsic_ctx;
1012 ad_param = be_params->dep_param;
1013 if_conv_info = be_params->if_conv_info;
1015 if (be_params->has_imm_fp_mode)
1016 firm_imm_fp_mode = be_params->imm_fp_mode;
1018 /* OS option must be set to the backend */
1019 switch (firm_opt.os_support) {
1020 case OS_SUPPORT_MINGW:
1021 firm_be_option("ia32-gasmode=mingw");
1023 case OS_SUPPORT_MACHO:
1024 firm_be_option("ia32-gasmode=macho");
1026 case OS_SUPPORT_LINUX:
1028 firm_be_option("ia32-gasmode=linux");
1032 dbg_init(NULL, NULL, dbg_snprint);
1033 edges_init_dbg(firm_opt.vrfy_edges);
1034 //cbackend_set_debug_retrieve(dbg_retrieve);
1036 set_opt_precise_exc_context(firm_opt.precise_exc);
1037 set_opt_fragile_ops(firm_opt.fragile_ops);
1039 /* Sel node cannot produce NULL pointers */
1040 set_opt_sel_based_null_check_elim(1);
1042 /* dynamic dispatch works currently only if whole world scenarios */
1043 set_opt_dyn_meth_dispatch(0);
1045 arch_dep_init(arch_factory);
1047 /* do not run architecture dependent optimizations in building phase */
1048 arch_dep_set_opts(arch_dep_none);
1050 do_node_verification((firm_verification_t) firm_opt.vrfy);
1051 if (firm_dump.filter)
1052 only_dump_method_with_name(new_id_from_str(firm_dump.filter));
/* enable the requested global optimization switches */
1054 if (firm_opt.enabled) {
1056 set_opt_constant_folding(firm_opt.const_folding);
1057 set_opt_algebraic_simplification(firm_opt.const_folding);
1058 set_opt_cse(firm_opt.cse);
1059 set_opt_global_cse(0);
1060 set_opt_unreachable_code(1);
1061 set_opt_control_flow(firm_opt.control_flow);
1062 set_opt_control_flow_weak_simplification(1);
1063 set_opt_control_flow_strong_simplification(1);
1068 /* do not dump entity ld names */
1072 /* init the ycomp debugger extension */
1073 if (firm_opt.ycomp_dbg)
1074 firm_init_ycomp_debugger(firm_opt.ycomp_host, firm_opt.ycomp_port);
1076 } /* gen_firm_init */
1079 * Called, after the Firm generation is completed,
1080 * do all optimizations and backend call here.
1082 * @param out a file handle for the output, may be NULL
1083 * @param input_filename the name of the (main) source file
1084 * @param c_mode non-zero if "C" was compiled
1085 * @param firm_const_exists non-zero, if the const attribute was used on functions
/*
 * Entry point called after Firm IR generation is complete: configure
 * dumping, finalize and verify all graphs, lower compound parameters and
 * CopyB nodes, run the optimization and lowering pipelines, the optional
 * SIMD extension steps, and finally the code generator.
 *
 * @param out               a file handle for the output, may be NULL
 * @param input_filename    the name of the (main) source file
 * @param c_mode            non-zero if "C" was compiled
 * @param firm_const_exists non-zero, if the const attribute was used on
 *                          functions
 *
 * NOTE(review): extracted fragment -- local declarations and several
 * statements/braces are missing from this view.
 */
1087 void gen_firm_finish(FILE *out, const char *input_filename, int c_mode, int firm_const_exists)
1091 /* the general for dumping option must be set, or the others will not work */
1093 = (a_byte) (firm_dump.ir_graph | firm_dump.all_phases | firm_dump.extbb);
/* global dumper configuration */
1095 dump_keepalive_edges(1);
1096 dump_consts_local(1);
1097 dump_dominator_information(1);
1098 dump_loop_information(0);
1100 if (!firm_dump.edge_labels)
1101 turn_off_edge_labels();
1103 if (firm_dump.all_types) {
1106 dump_class_hierarchy(0, "");
1107 dump_class_hierarchy(1, "-with-entities");
1111 /* finalize all graphs */
1112 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1113 ir_graph *irg = get_irp_irg(i);
1115 irg_finalize_cons(irg);
1116 DUMP_ONE(firm_dump.ir_graph, irg, "");
1118 /* verify the graph */
1119 timer_push(TV_VERIFY);
1120 irg_verify(irg, VRFY_ENFORCE_SSA);
1124 timer_push(TV_VERIFY);
1128 /* all graphs are finalized, set the irp phase to high */
1129 set_irp_phase_state(phase_high);
1131 /* BEWARE: kill unreachable code before doing compound lowering */
1132 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1133 ir_graph *irg = get_irp_irg(i);
1137 /* lower all compound call return values */
1138 lower_compound_params();
1140 /* computes the sizes of all types that are still not computed */
1141 compute_type_sizes();
1143 /* lower copyb nodes */
1144 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1145 ir_graph *irg = get_irp_irg(i);
1146 lower_CopyB(irg, 128, 4);
1149 if (firm_dump.statistic & STAT_BEFORE_OPT) {
1150 stat_dump_snapshot(input_filename, "noopt");
1153 if (firm_opt.enabled)
1154 do_firm_optimizations(input_filename, firm_const_exists);
1156 if (firm_dump.gen_firm_asm) {
1157 timer_push(TV_FIRM_ASM);
1158 gen_Firm_assembler(input_filename);
1164 do_firm_lowering(input_filename);
1166 /* computes the sizes of all frame types */
1167 compute_frame_type_sizes();
1169 /* set the phase to low */
1170 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
1171 set_irg_phase_low(get_irp_irg(i));
1175 /** SIMD Optimization Extensions **/
1177 /* Pattern creation step. No code has to be generated, so
1178 exit after pattern creation */
1179 if (firm_ext_grs.create_pattern) {
1180 ext_grs_create_pattern();
1184 /* SIMD optimization step. Uses graph patterns to find
1185 rich instructions and rewrite */
1186 if (firm_ext_grs.simd_opt)
1190 if (firm_dump.statistic & STAT_FINAL_IR)
1191 stat_dump_snapshot(input_filename, "final-ir");
1193 /* run the code generator */
1194 if (firm_be_opt.selection != BE_NONE)
1195 do_codegen(out, input_filename);
1197 if (firm_dump.statistic & STAT_FINAL)
1198 stat_dump_snapshot(input_filename, "final");
/* shut down the ycomp debugger connection if it was enabled */
1201 if (firm_opt.ycomp_dbg)
1202 firm_finish_ycomp_debugger();
1204 } /* gen_firm_finish */
1207 * Do very early initializations
1209 void firm_early_init(void) {
1210 /* arg: need this here for command line options */
1212 firm_init_options(NULL, 0, NULL);
1213 } /* firm_early_init */