3 * @file firm_opt.c -- Firm-generating back end optimizations.
5 * (C) 2005-2007 Michael Beck beck@ipd.info.uni-karlsruhe.de
14 #include <libfirm/firm.h>
15 #include <libfirm/be.h>
19 #include "firm_codegen.h"
20 #include "firm_cmdline.h"
21 #include "firm_timing.h"
24 #define snprintf _snprintf
27 #if defined(_DEBUG) || defined(FIRM_DEBUG)
28 #define DBG(x) dbg_printf x
31 #endif /* _DEBUG || FIRM_DEBUG */
34 /** dump all the graphs depending on cond */
35 #define DUMP_ALL(cond, suffix) \
38 timer_push(TV_VCG_DUMP); \
39 if (firm_dump.no_blocks) \
40 dump_all_ir_graphs(dump_ir_graph, suffix); \
41 else if (firm_dump.extbb) \
42 dump_all_ir_graphs(dump_ir_extblock_graph, suffix);\
44 dump_all_ir_graphs(dump_ir_block_graph, suffix); \
49 /** dump all control flow graphs depending on cond */
50 #define DUMP_ALL_CFG(cond, suffix) \
53 timer_push(TV_VCG_DUMP); \
54 dump_all_ir_graphs(dump_cfg, suffix); \
59 /** check all graphs depending on cond */
60 #define CHECK_ALL(cond) \
64 timer_push(TV_VERIFY); \
65 for (ii = get_irp_n_irgs() - 1; ii >= 0; --ii) \
66 irg_verify(get_irp_irg(ii), VRFY_ENFORCE_SSA); \
73 /** dump graphs irg depending on cond */
74 #define DUMP_ONE(cond, irg, suffix) \
77 timer_push(TV_VCG_DUMP); \
78 if (firm_dump.no_blocks) \
79 dump_ir_graph(irg, suffix); \
80 else if (firm_dump.extbb) \
81 dump_ir_extblock_graph(irg, suffix); \
83 dump_ir_block_graph(irg, suffix); \
88 /** dump control flow graph irg depending on cond */
89 #define DUMP_ONE_CFG(cond, irg, suffix) \
92 timer_push(TV_VCG_DUMP); \
93 dump_cfg(irg, suffix); \
98 /** check a graph irg depending on cond */
99 #define CHECK_ONE(cond, irg) \
102 timer_push(TV_VERIFY); \
103 irg_verify(irg, VRFY_ENFORCE_SSA); \
109 /* set by the backend parameters */
110 static const ir_settings_arch_dep_t *ad_param = NULL;
111 static create_intrinsic_fkt *arch_create_intrinsic = NULL;
112 static void *create_intrinsic_ctx = NULL;
113 static const ir_settings_if_conv_t *if_conv_info = NULL;
115 ir_mode *firm_imm_fp_mode = NULL;
117 /* entities of runtime functions */
118 ir_entity_ptr rts_entities[rts_max];
121 * factory for setting architecture dependent parameters
/* Factory callback handed to arch_dep_init(): returns the backend-supplied
 * architecture-dependent settings when available, otherwise a conservative
 * built-in default parameter table. */
123 static const ir_settings_arch_dep_t *arch_factory(void)
125 static const ir_settings_arch_dep_t param = {
126 1, /* also use subs */
127 4, /* maximum shifts */
128 31, /* maximum shift amount */
129 NULL, /* use default evaluator */
133 32 /* Mulh allowed up to 32 bit */
/* FIX: "&param" had been corrupted to the mojibake "¶m" (HTML entity
 * "&para;" rendered as the pilcrow sign); restored the address-of operator. */
136 return ad_param ? ad_param : &param;
140 * Map runtime functions.
/* Register intrinsic lowerings for all recognized C runtime functions:
 * every rts entity the front end actually created is paired with its
 * libFirm i_mapper_* function and handed to lower_intrinsics(), so calls
 * like fabs()/sqrt()/memcpy() can be replaced by Firm nodes. */
142 static void rts_map(void) {
/* Static table pairing each possible runtime entity with its mapper. */
143 static const struct {
144 ir_entity_ptr *ent; /**< address of the rts entity */
145 i_mapper_func func; /**< mapper function. */
/* int/long/long long -> absolute value family */
148 { &rts_entities[rts_abs], i_mapper_abs },
149 { &rts_entities[rts_labs], i_mapper_abs },
150 { &rts_entities[rts_llabs], i_mapper_abs },
151 { &rts_entities[rts_imaxabs], i_mapper_abs },
153 /* double -> double */
154 { &rts_entities[rts_fabs], i_mapper_abs },
155 { &rts_entities[rts_sqrt], i_mapper_sqrt },
156 { &rts_entities[rts_cbrt], i_mapper_cbrt },
157 { &rts_entities[rts_pow], i_mapper_pow },
158 { &rts_entities[rts_exp], i_mapper_exp },
159 { &rts_entities[rts_exp2], i_mapper_exp },
160 { &rts_entities[rts_exp10], i_mapper_exp },
161 { &rts_entities[rts_log], i_mapper_log },
162 { &rts_entities[rts_log2], i_mapper_log2 },
163 { &rts_entities[rts_log10], i_mapper_log10 },
164 { &rts_entities[rts_sin], i_mapper_sin },
165 { &rts_entities[rts_cos], i_mapper_cos },
166 { &rts_entities[rts_tan], i_mapper_tan },
167 { &rts_entities[rts_asin], i_mapper_asin },
168 { &rts_entities[rts_acos], i_mapper_acos },
169 { &rts_entities[rts_atan], i_mapper_atan },
170 { &rts_entities[rts_sinh], i_mapper_sinh },
171 { &rts_entities[rts_cosh], i_mapper_cosh },
172 { &rts_entities[rts_tanh], i_mapper_tanh },
/* float -> float (f-suffixed variants share the same mappers) */
175 { &rts_entities[rts_fabsf], i_mapper_abs },
176 { &rts_entities[rts_sqrtf], i_mapper_sqrt },
177 { &rts_entities[rts_cbrtf], i_mapper_cbrt },
178 { &rts_entities[rts_powf], i_mapper_pow },
179 { &rts_entities[rts_expf], i_mapper_exp },
180 { &rts_entities[rts_exp2f], i_mapper_exp },
181 { &rts_entities[rts_exp10f], i_mapper_exp },
182 { &rts_entities[rts_logf], i_mapper_log },
183 { &rts_entities[rts_log2f], i_mapper_log2 },
184 { &rts_entities[rts_log10f], i_mapper_log10 },
185 { &rts_entities[rts_sinf], i_mapper_sin },
186 { &rts_entities[rts_cosf], i_mapper_cos },
187 { &rts_entities[rts_tanf], i_mapper_tan },
188 { &rts_entities[rts_asinf], i_mapper_asin },
189 { &rts_entities[rts_acosf], i_mapper_acos },
190 { &rts_entities[rts_atanf], i_mapper_atan },
191 { &rts_entities[rts_sinhf], i_mapper_sinh },
192 { &rts_entities[rts_coshf], i_mapper_cosh },
193 { &rts_entities[rts_tanhf], i_mapper_tanh },
195 /* long double -> long double */
196 { &rts_entities[rts_fabsl], i_mapper_abs },
197 { &rts_entities[rts_sqrtl], i_mapper_sqrt },
198 { &rts_entities[rts_cbrtl], i_mapper_cbrt },
199 { &rts_entities[rts_powl], i_mapper_pow },
200 { &rts_entities[rts_expl], i_mapper_exp },
201 { &rts_entities[rts_exp2l], i_mapper_exp },
202 { &rts_entities[rts_exp10l], i_mapper_exp },
203 { &rts_entities[rts_logl], i_mapper_log },
204 { &rts_entities[rts_log2l], i_mapper_log2 },
205 { &rts_entities[rts_log10l], i_mapper_log10 },
206 { &rts_entities[rts_sinl], i_mapper_sin },
207 { &rts_entities[rts_cosl], i_mapper_cos },
208 { &rts_entities[rts_tanl], i_mapper_tan },
209 { &rts_entities[rts_asinl], i_mapper_asin },
210 { &rts_entities[rts_acosl], i_mapper_acos },
211 { &rts_entities[rts_atanl], i_mapper_atan },
212 { &rts_entities[rts_sinhl], i_mapper_sinh },
213 { &rts_entities[rts_coshl], i_mapper_cosh },
214 { &rts_entities[rts_tanhl], i_mapper_tanh },
/* string / memory runtime functions */
217 { &rts_entities[rts_memcpy], i_mapper_memcpy },
218 { &rts_entities[rts_memset], i_mapper_memset },
219 { &rts_entities[rts_strcmp], i_mapper_strcmp },
220 { &rts_entities[rts_strncmp], i_mapper_strncmp },
221 { &rts_entities[rts_strlen], i_mapper_strlen }
/* Worst case: every table entry yields one intrinsic record. */
223 i_record rec[sizeof(mapper)/sizeof(mapper[0])];
/* Collect records only for entities the front end actually created
 * (NULL entries mean the runtime function was never referenced). */
226 for (i = n_map = 0; i < sizeof(mapper)/sizeof(mapper[0]); ++i)
227 if (*mapper[i].ent != NULL) {
228 rec[n_map].i_call.kind = INTRINSIC_CALL;
229 rec[n_map].i_call.i_ent = *mapper[i].ent;
230 rec[n_map].i_call.i_mapper = mapper[i].func;
231 rec[n_map].i_call.ctx = NULL;
232 rec[n_map].i_call.link = NULL;
/* part_block_used=0: none of the registered mappers splits blocks. */
236 lower_intrinsics(rec, n_map, /* part_block_used=*/0);
/* Per-graph dump counters, indexed by irg index; allocated in
 * do_firm_optimizations(). */
239 static int *irg_dump_no;
/* Dump one graph with a per-graph running number in the suffix so the
 * dumps of successive phases sort in execution order.
 * (The local "name" buffer declaration is on a line not shown here.) */
241 static void dump_graph_count(ir_graph *const irg, const char *const suffix)
244 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
245 DUMP_ONE(1, irg, name);
/* Like dump_graph_count(), but dumps the control flow graph only.
 * Shares the same per-graph counter, so CFG and full dumps interleave
 * in one numbered sequence. */
249 static void dump_graph_cfg_count(ir_graph *const irg, const char *const suffix)
252 snprintf(name, sizeof(name), "-%02d_%s", irg_dump_no[get_irg_idx(irg)]++, suffix);
253 DUMP_ONE_CFG(1, irg, name);
/* Dump every graph of the program with numbered suffixes (see
 * dump_graph_count()). */
257 static void dump_all_count(const char *const suffix)
259 const int n_irgs = get_irp_n_irgs();
262 for (i = 0; i < n_irgs; ++i)
263 dump_graph_count(get_irp_irg(i), suffix);
266 #define DUMP_ONE_C(cond, irg, suffix) \
269 dump_graph_count((irg), (suffix)); \
273 #define DUMP_ONE_CFG_C(cond, irg, suffix) \
276 dump_graph_cfg_count((irg), (suffix)); \
280 #define DUMP_ALL_C(cond, suffix) \
283 dump_all_count((suffix)); \
/* Garbage-collect IR graphs: compute the set of methods that may still be
 * reached (called or address-taken) via call graph analysis, then delete
 * every graph outside that set.
 * NOTE(review): cgana() allocates keep_methods; the matching free() is on
 * a line not shown in this excerpt — confirm it is present. */
287 static void remove_unused_functions(void)
289 ir_entity **keep_methods;
292 /* Analysis that finds the free methods,
293 i.e. methods that are dereferenced.
294 Optimizes polymorphic calls :-). */
295 cgana(&arr_len, &keep_methods);
297 /* Remove methods that are never called. */
298 gc_irgs(arr_len, keep_methods);
/* Non-zero if any function was declared with the "const" attribute;
 * set from gen_firm_finish()'s new_firm_const_exists parameter. */
302 static int firm_const_exists;
/* Adapter with the transform_irp_func signature: runs the function-call
 * optimization, allowing const-function detection only when the source
 * actually used the const attribute. */
304 static void do_optimize_funccalls(void)
306 optimize_funccalls(firm_const_exists, NULL);
/* Run dataflow optimization with global CSE temporarily enabled, then
 * restore the flag.
 * NOTE(review): the actual optimization call between the two toggles
 * (original line 312) is not visible in this excerpt — presumably
 * optimize_graph_df(irg); confirm against the full source. */
309 static void do_gcse(ir_graph *irg)
311 set_opt_global_cse(1);
313 set_opt_global_cse(0);
/* The following small wrappers bind extra arguments so each pass fits the
 * transform_irg_func / transform_irp_func signature used by the opts[]
 * registry below. */

/* Lower high-level constructs of one graph (bitfield lowering per option). */
316 static void do_lower_highlevel(ir_graph *irg)
318 lower_highlevel_graph(irg, firm_opt.lower_bitfields);
/* If-conversion using the backend-provided cost information. */
321 static void do_if_conv(ir_graph *irg)
323 opt_if_conv(irg, if_conv_info);
/* Operator strength reduction with register-pressure-aware flags. */
326 static void do_stred(ir_graph *irg)
328 opt_osr(irg, osr_flag_default | osr_flag_keep_reg_pressure | osr_flag_ignore_x86_shift);
/* Procedure inlining.
 * NOTE(review): 500/50 are inlining thresholds — presumably max size and
 * benefice; confirm against inline_functions() docs and consider naming
 * these constants. */
331 static void do_inline(void)
333 inline_functions(500, 50);
/* Procedure cloning driven by the user-selected threshold. */
336 static void do_cloning(void)
338 proc_cloning((float) firm_opt.clone_threshold);
/* Lower switch constructs; 128 is the small-switch limit passed to
 * lower_switch() — confirm exact semantics in the libFirm docs. */
341 static void do_lower_switch(ir_graph *irg)
343 lower_switch(irg, 128);
/* Whether a pass works on a single graph or the whole program. */
346 typedef enum opt_target {
/* Signatures for per-graph and whole-program passes; func_ptr_t is the
 * common storage type used in the table below. */
351 typedef void (*transform_irg_func)(ir_graph *irg);
352 typedef void (*transform_irp_func)(void);
353 typedef void (*func_ptr_t)(void);
/* Registry of all passes driven by do_irg_opt()/do_irp_opt().
 * Columns: target kind, user-visible name, entry point, three flags (the
 * first is read back as ->enabled below; the meaning of the other two is
 * given by the opt_config_t definition, which is not visible in this
 * excerpt) and the timer id (-1 = untimed).
 * NOTE(review): "gcse" reuses TV_CODE_PLACE rather than an own timer. */
365 static opt_config_t opts[] = {
366 { OPT_TARGET_IRP, "rts", (func_ptr_t) rts_map, true, true, true, -1 },
367 { OPT_TARGET_IRG, "combo", (func_ptr_t) combo, true, true, true, TV_COMBO },
368 { OPT_TARGET_IRG, "controlflow", (func_ptr_t) optimize_cf, true, true, true, TV_CF_OPT },
369 { OPT_TARGET_IRG, "local", (func_ptr_t) optimize_graph_df, true, true, true, TV_LOCAL_OPT },
370 { OPT_TARGET_IRP, "gc_irgs", (func_ptr_t) remove_unused_functions, true, false, false, TV_CGANA },
371 { OPT_TARGET_IRP, "tailrec", (func_ptr_t) opt_tail_recursion, true, true, true, TV_TAIL_REC },
372 { OPT_TARGET_IRP, "funccalls", (func_ptr_t) do_optimize_funccalls, true, true, true, TV_REAL_FUNC_CALL },
373 { OPT_TARGET_IRP, "lowerconst", (func_ptr_t) lower_const_code, true, false, false, -1 },
374 { OPT_TARGET_IRG, "onereturn", (func_ptr_t) normalize_one_return, true, false, false, -1 },
375 { OPT_TARGET_IRG, "scalar", (func_ptr_t) scalar_replacement_opt, true, true, true, TV_SCALAR_REPLACE },
376 { OPT_TARGET_IRG, "reassoc", (func_ptr_t) optimize_reassociation, true, true, true, TV_REASSOCIATION },
377 { OPT_TARGET_IRG, "gcse", (func_ptr_t) do_gcse, true, true, true, TV_CODE_PLACE },
378 { OPT_TARGET_IRG, "place", (func_ptr_t) place_code, true, true, true, TV_CODE_PLACE },
379 { OPT_TARGET_IRG, "confirm", (func_ptr_t) construct_confirms, true, true, true, TV_CONFIRM_CREATE },
380 { OPT_TARGET_IRG, "ldst", (func_ptr_t) optimize_load_store, true, true, true, TV_LOAD_STORE },
381 { OPT_TARGET_IRG, "ldst2", (func_ptr_t) opt_ldst2, true, true, true, -1 },
382 { OPT_TARGET_IRG, "lower", (func_ptr_t) do_lower_highlevel, true, true, true, -1 },
383 { OPT_TARGET_IRG, "deconv", (func_ptr_t) conv_opt, true, true, true, TV_DECONV },
384 { OPT_TARGET_IRG, "condeval", (func_ptr_t) opt_cond_eval, true, true, true, TV_COND_EVAL },
385 { OPT_TARGET_IRG, "remove_confirms", (func_ptr_t) remove_confirms, true, false, false, TV_CONFIRM_CREATE },
386 { OPT_TARGET_IRG, "gvnpre", (func_ptr_t) do_gvn_pre, true, true, true, -1 },
387 { OPT_TARGET_IRG, "ifconv", (func_ptr_t) do_if_conv, true, true, true, TV_IF_CONV },
388 { OPT_TARGET_IRG, "bool", (func_ptr_t) opt_bool, true, true, true, -1 },
389 { OPT_TARGET_IRG, "stred", (func_ptr_t) do_stred, true, true, true, TV_OSR },
390 { OPT_TARGET_IRG, "dead", (func_ptr_t) dead_node_elimination, true, false, true, TV_DEAD_NODE },
391 { OPT_TARGET_IRP, "inline", (func_ptr_t) do_inline, true, true, true, -1 },
392 { OPT_TARGET_IRP, "clone", (func_ptr_t) do_cloning, true, true, true, -1 },
393 { OPT_TARGET_IRG, "lower_switch", (func_ptr_t) do_lower_switch, true, true, true, -1 },
395 static const int n_opts = sizeof(opts) / sizeof(opts[0]);
/* Look up a pass configuration in opts[] by its user-visible name.
 * Linear search; the return statements (match and not-found) are on lines
 * not shown in this excerpt. Callers such as do_irg_opt() dereference the
 * result unchecked, so pass names must stay in sync with the table. */
397 static opt_config_t *get_opt(const char *name)
400 for (i = 0; i < n_opts; ++i) {
401 opt_config_t *config = &opts[i];
402 if (strcmp(config->name, name) == 0)
410 static void set_opt_enabled(const char *name, bool enabled)
412 opt_config_t *config = get_opt(name);
413 config->enabled = enabled;
/* Run the named per-graph pass on irg if it is enabled: push its timer,
 * temporarily make irg the current_ir_graph, invoke the pass (the actual
 * func(irg) call is on a line not shown here), then dump/verify per the
 * global options and restore current_ir_graph. */
416 static void do_irg_opt(ir_graph *irg, const char *name)
418 transform_irg_func func;
420 opt_config_t *config = get_opt(name);
421 assert(config->target == OPT_TARGET_IRG);
422 if (!config->enabled)
425 if (config->timer != -1)
426 timer_push(config->timer);
/* Passes may rely on the current_ir_graph global; save and set it. */
428 old_irg = current_ir_graph;
429 current_ir_graph = irg;
431 func = (transform_irg_func) config->func;
434 if (config->timer != -1)
/* Optionally dump the graph after the phase and verify SSA form. */
437 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, irg, config->name);
438 CHECK_ONE(firm_opt.check_all, irg);
440 current_ir_graph = old_irg;
/* Run the named whole-program pass if it is enabled, with timing, then
 * dump all graphs and verify per the global options. The actual func()
 * invocation and the matching timer_pop are on lines not shown here. */
443 static void do_irp_opt(const char *name)
445 transform_irp_func func;
446 opt_config_t *config = get_opt(name);
447 assert(config->target == OPT_TARGET_IRP);
448 if (!config->enabled)
451 if (config->timer != -1)
452 timer_push(config->timer);
454 func = (transform_irp_func) config->func;
457 DUMP_ALL_C(firm_dump.ir_graph && firm_dump.all_phases, config->name);
458 CHECK_ALL(firm_opt.check_all);
460 if (config->timer != -1)
465 * run all the Firm optimizations
467 * @param input_filename the name of the (main) source file
/* Run the complete mid-end optimization pipeline over all graphs:
 * configure global optimization/alias-analysis flags, translate command
 * line switches into enabled passes, then execute the phase sequence and
 * finally verify every graph. */
469 static void do_firm_optimizations(const char *input_filename)
474 /* FIXME: cloning might ADD new graphs. */
/* NOTE(review): calloc result is used unchecked by dump_graph_count(). */
475 irg_dump_no = calloc(get_irp_last_idx(), sizeof(*irg_dump_no));
477 set_opt_strength_red(firm_opt.strength_red);
478 set_opt_scalar_replacement(firm_opt.scalar_replace);
479 set_opt_auto_create_sync(firm_opt.auto_sync);
480 set_opt_alias_analysis(firm_opt.alias_analysis);
/* Build the memory disambiguator options from the alias switches;
 * no_alias overrides strict type-based aliasing entirely. */
482 aa_opt = aa_opt_no_opt;
483 if (firm_opt.strict_alias)
484 aa_opt |= aa_opt_type_based | aa_opt_byte_type_may_alias;
485 if (firm_opt.no_alias)
486 aa_opt = aa_opt_no_alias;
488 set_irp_memory_disambiguator_options(aa_opt);
490 /* parameter passing code should set them directly sometime... */
/* NOTE(review): enabling "rts" mapping when firm_opt.freestanding is set
 * looks inverted — in a freestanding environment the standard runtime
 * functions may not have their usual semantics; confirm whether this
 * should be !firm_opt.freestanding. */
491 set_opt_enabled("rts", firm_opt.freestanding);
492 set_opt_enabled("gc_irgs", firm_opt.remove_unused);
493 set_opt_enabled("tailrec", firm_opt.tail_rec);
494 set_opt_enabled("funccalls", firm_opt.func_calls);
495 set_opt_enabled("gcse", firm_opt.gcse);
/* place and gcse are mutually exclusive: gcse already places code. */
496 set_opt_enabled("place", !firm_opt.gcse);
497 set_opt_enabled("confirm", firm_opt.confirm);
498 set_opt_enabled("remove_confirms", firm_opt.confirm);
499 set_opt_enabled("ldst2", firm_opt.luffig);
500 set_opt_enabled("ldst", firm_opt.load_store);
501 set_opt_enabled("deconv", firm_opt.deconv);
502 set_opt_enabled("condeval", firm_opt.cond_eval);
503 set_opt_enabled("gvnpre", firm_opt.gvn_pre);
504 set_opt_enabled("ifconv", firm_opt.if_conversion);
505 set_opt_enabled("bool", firm_opt.bool_opt);
506 set_opt_enabled("inline", firm_opt.do_inline);
507 set_opt_enabled("clone", firm_opt.cloning);
508 set_opt_enabled("combo", firm_opt.combo);
510 timer_start(TV_ALL_OPT);
/* First cleanup round on every graph. */
514 for (i = 0; i < get_irp_n_irgs(); i++) {
515 ir_graph *irg = get_irp_irg(i);
516 do_irg_opt(irg, "combo");
517 do_irg_opt(irg, "local");
519 /* Confirm construction currently can only handle blocks with only one control
520 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
521 the optimization of switch constructs. */
522 do_irg_opt(irg, "controlflow");
/* Whole-program passes between the per-graph rounds. */
525 do_irp_opt("gc_irgs");
526 do_irp_opt("tailrec");
527 do_irp_opt("funccalls");
528 do_irp_opt("lowerconst");
/* Main per-graph optimization sequence. */
530 for (i = 0; i < get_irp_n_irgs(); i++) {
531 ir_graph *irg = get_irp_irg(i);
534 /* If SIMD optimization is on, make sure we have only 1 return */
535 if (firm_ext_grs.create_pattern || firm_ext_grs.simd_opt)
/* NOTE(review): this call is missing the irg argument — every other
 * call site is do_irg_opt(irg, name), and do_irg_opt() takes two
 * parameters; should be do_irg_opt(irg, "onereturn"). */
536 do_irg_opt("onereturn");
539 do_irg_opt(irg, "scalar");
540 do_irg_opt(irg, "local");
541 do_irg_opt(irg, "reassoc");
542 do_irg_opt(irg, "local");
543 do_irg_opt(irg, "gcse");
545 if (firm_opt.confirm) {
546 /* Confirm construction currently can only handle blocks with only one control
547 flow predecessor. Calling optimize_cf here removes Bad predecessors and help
548 the optimization of switch constructs. */
549 do_irg_opt(irg, "controlflow");
550 do_irg_opt(irg, "confirm");
551 do_irg_opt(irg, "local");
554 do_irg_opt(irg, "ldst2");
555 do_irg_opt(irg, "controlflow");
556 do_irg_opt(irg, "ldst");
557 do_irg_opt(irg, "lower");
558 do_irg_opt(irg, "deconv");
559 do_irg_opt(irg, "condeval");
560 do_irg_opt(irg, "lower_switch");
561 do_irg_opt(irg, "remove_confirms");
562 do_irg_opt(irg, "gvnpre");
563 do_irg_opt(irg, "place");
564 do_irg_opt(irg, "controlflow");
566 if (firm_opt.if_conversion) {
567 do_irg_opt(irg, "ifconv");
568 do_irg_opt(irg, "local");
569 do_irg_opt(irg, "controlflow");
572 do_irg_opt(irg, "bool");
573 do_irg_opt(irg, "stred");
574 do_irg_opt(irg, "local");
575 do_irg_opt(irg, "dead");
578 do_irp_opt("inline");
/* Cleanup round after inlining (and cloning, on lines not shown). */
581 for (i = 0; i < get_irp_n_irgs(); i++) {
582 ir_graph *irg = get_irp_irg(i);
583 do_irg_opt(irg, "local");
584 do_irg_opt(irg, "controlflow");
585 do_irg_opt(irg, "condeval");
586 do_irg_opt(irg, "local");
587 do_irg_opt(irg, "controlflow");
590 if (firm_dump.ir_graph) {
591 /* recompute backedges for nicer dumps */
592 for (i = 0; i < get_irp_n_irgs(); i++)
593 construct_cf_backedges(get_irp_irg(i));
596 do_irp_opt("gc_irgs");
598 DUMP_ALL(firm_dump.ir_graph, "-opt");
599 /* verify optimized graphs */
600 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
601 ir_graph *irg = get_irp_irg(i);
602 irg_verify(irg, VRFY_ENFORCE_SSA);
605 if (firm_dump.statistic & STAT_AFTER_OPT)
606 stat_dump_snapshot(input_filename, "opt");
608 timer_stop(TV_ALL_OPT);
609 } /* do_firm_optimizations */
612 * compute the size of a type (do implicit lowering)
614 * @param ty a Firm type
/* Compute and fix the layout (size, alignment, member offsets) of a type,
 * recursing into member/element types; marks the type layout_fixed on
 * success. Returns non-zero on success, zero when the size cannot be
 * determined yet (the early-return lines are not visible in this
 * excerpt). Temporarily toggles global optimization flags to fold array
 * bound expressions. */
616 static int compute_type_size(ir_type *ty)
618 optimization_state_t state;
619 unsigned align_all = 1;
620 int n, size = 0, set = 0;
623 if (get_type_state(ty) == layout_fixed) {
624 /* do not layout already layouted types again */
628 if (is_Method_type(ty) || ty == get_glob_type()) {
629 /* no need for size calculation for method types or the global type */
633 DBG(("compute type size visiting: %s\n", get_type_name(ty)));
635 switch (get_type_tpop_code(ty)) {
/* Compound (struct/class) case: lay out members sequentially with
 * per-member alignment padding. */
638 for (i = 0, n = get_compound_n_members(ty); i < n; ++i) {
639 ir_entity *ent = get_compound_member(ty, i);
640 ir_type *ent_ty = get_entity_type(ent);
641 unsigned align, misalign;
643 /* compute member types */
644 if (! compute_type_size(ent_ty))
/* Track the strictest member alignment and pad to it. */
647 align = get_type_alignment_bytes(ent_ty);
648 align_all = align > align_all ? align : align_all;
649 misalign = (align ? size % align : 0);
650 size += (misalign ? align - misalign : 0);
652 set_entity_offset(ent, size);
653 size += get_type_size_bytes(ent_ty);
655 DBG((" member %s %s -> (size: %u, align: %u)\n",
656 get_type_name(ent_ty), get_entity_name(ent),
657 get_type_size_bytes(ent_ty), get_type_alignment_bytes(ent_ty)));
/* Tail padding: round the struct size up to its overall alignment. */
659 if (align_all > 0 && size % align_all) {
660 DBG(("align of the struct member: %u, type size: %d\n", align_all, size));
661 size += align_all - (size % align_all);
662 DBG(("correcting type-size to %d\n", size));
664 set_type_alignment_bytes(ty, align_all);
/* Union case: all members at offset 0, size is the maximum member size. */
669 for (i = 0, n = get_union_n_members(ty); i < n; ++i) {
670 ir_entity *ent = get_union_member(ty, i);
672 if (! compute_type_size(get_entity_type(ent)))
674 s = get_type_size_bytes(get_entity_type(ent));
676 set_entity_offset(ent, 0);
677 size = (s > size ? s : size);
/* Array case: multiply folded (upper - lower) bounds of every dimension
 * by the element size. */
683 dims = get_array_n_dimensions(ty);
685 if (! compute_type_size(get_array_element_type(ty)))
/* Enable folding globally so the bound expressions can be evaluated;
 * state is restored on every exit path below. */
690 save_optimization_state(&state);
692 set_opt_constant_folding(1);
693 set_opt_algebraic_simplification(1);
695 for (i = 0; i < dims; ++i) {
696 ir_node *lower = get_array_lower_bound(ty, i);
697 ir_node *upper = get_array_upper_bound(ty, i);
698 ir_graph *rem = current_ir_graph;
699 tarval *tv_lower, *tv_upper;
700 long val_lower, val_upper;
/* Fold the bound nodes inside the const-code graph. */
702 current_ir_graph = get_const_code_irg();
703 local_optimize_node(lower);
704 local_optimize_node(upper);
705 current_ir_graph = rem;
707 tv_lower = computed_value(lower);
708 tv_upper = computed_value(upper);
710 if (tv_lower == tarval_bad || tv_upper == tarval_bad) {
712 * we cannot calculate the size of this array yet, it
713 * even might be unknown until the end, like argv[]
715 restore_optimization_state(&state);
719 val_upper = get_tarval_long(tv_upper);
720 val_lower = get_tarval_long(tv_lower);
721 size *= val_upper - val_lower;
723 restore_optimization_state(&state);
725 DBG(("array %s -> (elements: %d, element type size: %d)\n",
727 size, get_type_size_bytes(get_array_element_type(ty))));
728 size *= get_type_size_bytes(get_array_element_type(ty));
/* Commit the computed layout. */
737 set_type_size_bytes(ty, size);
738 set_type_state(ty, layout_fixed);
741 DBG(("size: %d\n", get_type_size_bytes(ty)));
744 } /* compute_type_size */
747 * layout all non-frame types of the Firm graph
/* Lay out all non-frame types of the program, including the hidden
 * value-result type of methods that return compounds by value. */
749 static void compute_type_sizes(void)
754 /* all non-frame other types */
755 for (i = get_irp_n_types() - 1; i >= 0; --i) {
756 tp = get_irp_type(i);
757 compute_type_size(tp);
759 if (is_Method_type(tp)) {
760 tp = get_method_value_res_type(tp);
763 /* we have a value result type for this method, lower */
764 compute_type_size(tp);
768 } /* compute_type_sizes */
771 * layout all frame-types of the Firm graph
/* Lay out the frame type of every graph.
 * NOTE(review): per the guard below, the layout appears to run only when
 * debug mode is off (so local variables are not optimized away when
 * debugging); the line between the if and the call is not visible —
 * confirm the intended condition. */
773 static void compute_frame_type_sizes(void)
778 /* all frame types */
779 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
780 irg = get_irp_irg(i);
781 /* do not optimize away variables in debug mode */
782 if (firm_opt.debug_mode == DBG_MODE_NONE)
784 compute_type_size(get_irg_frame_type(irg));
786 } /* compute_frame_type_sizes */
791 * @param input_filename the name of the (main) source file
/* Lower the IR to backend level: optional double-word lowering with
 * backend-supplied intrinsics, verification and dumps, a post-lowering
 * optimization round, and finally marking all graphs/irp as phase_low. */
793 static void do_firm_lowering(const char *input_filename)
797 /* do class lowering and vtbl creation */
798 // lower_classes_to_struct("vtbl", "m");
801 timer_push(TV_LOWER);
/* Double-word lowering (e.g. 64-bit ops on 32-bit targets); lower_ll is
 * set from the backend parameters in gen_firm_init(). */
806 if (firm_opt.lower_ll) {
807 lwrdw_param_t init = {
812 def_create_intrinsic_fkt,
/* Prefer the backend's intrinsic factory over the default one. */
817 if (arch_create_intrinsic) {
818 init.create_intrinsic = arch_create_intrinsic;
819 init.ctx = create_intrinsic_ctx;
821 timer_push(TV_DW_LOWER);
826 if (firm_dump.statistic & STAT_AFTER_LOWER)
827 stat_dump_snapshot(input_filename, "low");
829 /* verify lowered graphs */
830 timer_push(TV_VERIFY);
831 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
832 irg_verify(get_irp_irg(i), VRFY_ENFORCE_SSA);
835 DUMP_ALL(firm_dump.ir_graph, "-low");
/* Post-lowering optimization round (only when optimizations are on). */
837 if (firm_opt.enabled) {
838 timer_start(TV_ALL_OPT);
840 /* run reassociation first on all graphs BEFORE the architecture dependent optimizations
842 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
843 current_ir_graph = get_irp_irg(i);
845 timer_push(TV_REASSOCIATION);
846 optimize_reassociation(current_ir_graph);
848 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "reassoc");
849 CHECK_ONE(firm_opt.check_all, current_ir_graph);
852 /* enable architecture dependent optimizations */
853 arch_dep_set_opts((arch_dep_opts_t)
854 ((firm_opt.muls ? arch_dep_mul_to_shift : arch_dep_none) |
855 (firm_opt.divs ? arch_dep_div_by_const : arch_dep_none) |
856 (firm_opt.mods ? arch_dep_mod_by_const : arch_dep_none) ));
858 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
859 current_ir_graph = get_irp_irg(i);
/* Global CSE stays on through local opt and code placement. */
862 set_opt_global_cse(1);
864 timer_push(TV_LOCAL_OPT);
865 optimize_graph_df(current_ir_graph);
867 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
869 CHECK_ONE(firm_opt.check_all, current_ir_graph);
872 timer_push(TV_CODE_PLACE);
873 place_code(current_ir_graph);
874 set_opt_global_cse(0);
876 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "place");
877 CHECK_ONE(firm_opt.check_all, current_ir_graph);
880 timer_push(TV_LOAD_STORE);
881 optimize_load_store(current_ir_graph);
883 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "ldst");
884 CHECK_ONE(firm_opt.check_all, current_ir_graph);
887 timer_push(TV_LOCAL_OPT);
888 optimize_graph_df(current_ir_graph);
890 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "lopt");
892 timer_push(TV_CF_OPT);
893 optimize_cf(current_ir_graph);
895 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "cf");
896 CHECK_ONE(firm_opt.check_all, current_ir_graph);
/* Second if-conversion pass now that lowering exposed new chances. */
898 if (firm_opt.if_conversion) {
899 timer_push(TV_IF_CONV);
900 opt_if_conv(current_ir_graph, if_conv_info);
902 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "if");
903 CHECK_ONE(firm_opt.check_all, current_ir_graph);
905 timer_push(TV_LOCAL_OPT);
906 optimize_graph_df(current_ir_graph);
908 timer_push(TV_CF_OPT);
909 optimize_cf(current_ir_graph);
911 DUMP_ONE_C(firm_dump.ir_graph && firm_dump.all_phases, current_ir_graph, "after_if");
912 CHECK_ONE(firm_opt.check_all, current_ir_graph);
915 timer_stop(TV_ALL_OPT);
917 DUMP_ALL(firm_dump.ir_graph, "-low-opt");
921 mark_private_methods();
923 /* set the phase to low */
924 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
925 set_irg_phase_low(get_irp_irg(i));
927 /* all graphs are lowered, set the irp phase to low */
928 set_irp_phase_state(phase_low);
930 if (firm_dump.statistic & STAT_FINAL) {
931 stat_dump_snapshot(input_filename, "final");
933 } /* do_firm_lowering */
936 * Initialize for the Firm-generating back end.
/* Initialize the Firm-generating back end: build the libFirm init
 * parameters (statistics, local-variable initializer, calling
 * convention), pull architecture settings from the selected backend,
 * and configure debugging, verification and the optimization flags. */
938 void gen_firm_init(void)
940 firm_parameter_t params;
941 unsigned pattern = 0;
943 /* the automatic state is only set if inlining is enabled */
944 firm_opt.auto_inline = firm_opt.do_inline;
/* Statistics pattern flags are accumulated from the dump options. */
946 if (firm_dump.stat_pattern)
947 pattern |= FIRMSTAT_PATTERN_ENABLED;
949 if (firm_dump.stat_dag)
950 pattern |= FIRMSTAT_COUNT_DAG;
/* FIX: "&params" had been corrupted to the mojibake "¶ms" (HTML
 * entity "&para;" rendered as the pilcrow sign); restored the
 * address-of operator. */
952 memset(&params, 0, sizeof(params));
953 params.size = sizeof(params);
954 params.enable_statistics = firm_dump.statistic == STAT_NONE ? 0 :
955 FIRMSTAT_ENABLED | FIRMSTAT_COUNT_STRONG_OP | FIRMSTAT_COUNT_CONSTS | pattern;
956 params.initialize_local_func = uninitialized_local_var;
957 params.cc_mask = 0; /* no regparam, cdecl */
958 params.builtin_dbg = NULL;
961 /* Activate Graph rewriting if SIMD optimization is turned on */
962 /* This has to be done before init_firm() is called! */
963 if (firm_ext_grs.simd_opt)
/* Copy the backend-provided settings into the module-level state used
 * by the lowering/optimization phases. */
969 if (firm_be_opt.selection == BE_FIRM_BE) {
970 const backend_params *be_params = be_get_backend_param();
972 firm_opt.lower_ll = (a_byte) be_params->do_dw_lowering;
973 params.arch_op_settings = be_params->arch_op_settings;
975 arch_create_intrinsic = be_params->arch_create_intrinsic_fkt;
976 create_intrinsic_ctx = be_params->create_intrinsic_ctx;
978 ad_param = be_params->dep_param;
979 if_conv_info = be_params->if_conv_info;
981 if (be_params->has_imm_fp_mode)
982 firm_imm_fp_mode = be_params->imm_fp_mode;
984 /* OS option must be set to the backend */
985 switch (firm_opt.os_support) {
986 case OS_SUPPORT_MINGW:
987 firm_be_option("ia32-gasmode=mingw");
989 case OS_SUPPORT_MACHO:
990 firm_be_option("ia32-gasmode=macho");
992 case OS_SUPPORT_LINUX:
994 firm_be_option("ia32-gasmode=linux");
998 dbg_init(NULL, NULL, dbg_snprint);
999 edges_init_dbg(firm_opt.vrfy_edges);
1000 //cbackend_set_debug_retrieve(dbg_retrieve);
1002 set_opt_precise_exc_context(firm_opt.precise_exc);
1003 set_opt_fragile_ops(firm_opt.fragile_ops);
1005 /* Sel node cannot produce NULL pointers */
1006 set_opt_sel_based_null_check_elim(1);
1008 /* dynamic dispatch works currently only if whole world scenarios */
1009 set_opt_dyn_meth_dispatch(0);
/* Register the architecture parameter factory; arch-dependent
 * optimizations themselves stay off during IR construction. */
1011 arch_dep_init(arch_factory);
1013 /* do not run architecture dependent optimizations in building phase */
1014 arch_dep_set_opts(arch_dep_none);
1016 do_node_verification((firm_verification_t) firm_opt.vrfy);
1017 if (firm_dump.filter)
1018 only_dump_method_with_name(new_id_from_str(firm_dump.filter));
/* Translate the user-level optimization switches into libFirm flags. */
1020 if (firm_opt.enabled) {
1022 set_opt_constant_folding(firm_opt.const_folding);
1023 set_opt_algebraic_simplification(firm_opt.const_folding);
1024 set_opt_cse(firm_opt.cse);
1025 set_opt_global_cse(0);
1026 set_opt_unreachable_code(1);
1027 set_opt_control_flow(firm_opt.control_flow);
1028 set_opt_control_flow_weak_simplification(1);
1029 set_opt_control_flow_strong_simplification(1);
1034 /* do not dump entity ld names */
1036 } /* gen_firm_init */
1039 * Called, after the Firm generation is completed,
1040 * do all optimizations and backend call here.
1042 * @param out a file handle for the output, may be NULL
1043 * @param input_filename the name of the (main) source file
1044 * @param c_mode non-zero if "C" was compiled
1045 * @param new_firm_const_exists non-zero, if the const attribute was used on functions
/* Driver entry after IR construction: finalize and verify all graphs,
 * lower compounds/CopyB, run the optimization and lowering pipelines,
 * emit statistics snapshots, and invoke the code generator.
 * NOTE(review): the c_mode parameter is not referenced on any line
 * visible in this excerpt — confirm it is used in the elided parts. */
1047 void gen_firm_finish(FILE *out, const char *input_filename, int c_mode, int new_firm_const_exists)
1051 firm_const_exists = new_firm_const_exists;
1053 /* the general for dumping option must be set, or the others will not work */
1055 = (a_byte) (firm_dump.ir_graph | firm_dump.all_phases | firm_dump.extbb);
/* Configure the VCG dumper. */
1057 dump_keepalive_edges(1);
1058 dump_consts_local(1);
1059 dump_dominator_information(1);
1060 dump_loop_information(0);
1062 if (!firm_dump.edge_labels)
1063 turn_off_edge_labels();
1065 if (firm_dump.all_types) {
1068 dump_class_hierarchy(0, "");
1069 dump_class_hierarchy(1, "-with-entities");
1073 /* finalize all graphs */
1074 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1075 ir_graph *irg = get_irp_irg(i);
1077 irg_finalize_cons(irg);
1078 DUMP_ONE(firm_dump.ir_graph, irg, "");
1080 /* verify the graph */
1081 timer_push(TV_VERIFY);
1082 irg_verify(irg, VRFY_ENFORCE_SSA);
1086 timer_push(TV_VERIFY);
1090 /* all graphs are finalized, set the irp phase to high */
1091 set_irp_phase_state(phase_high);
1093 /* BEWARE: kill unreachable code before doing compound lowering */
1094 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1095 ir_graph *irg = get_irp_irg(i);
1099 /* lower all compound call return values */
1100 lower_compound_params();
1102 /* computes the sizes of all types that are still not computed */
1103 compute_type_sizes();
1105 /* lower copyb nodes */
1106 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1107 ir_graph *irg = get_irp_irg(i);
1108 lower_CopyB(irg, 128, 4);
1111 if (firm_dump.statistic & STAT_BEFORE_OPT) {
1112 stat_dump_snapshot(input_filename, "noopt");
1115 if (firm_opt.enabled)
1116 do_firm_optimizations(input_filename);
1118 if (firm_dump.gen_firm_asm) {
1119 timer_push(TV_FIRM_ASM);
1120 gen_Firm_assembler(input_filename);
1126 do_firm_lowering(input_filename);
1128 /* computes the sizes of all frame types */
1129 compute_frame_type_sizes();
1131 /* set the phase to low */
1132 for (i = get_irp_n_irgs() - 1; i >= 0; --i)
1133 set_irg_phase_low(get_irp_irg(i));
1137 /** SIMD Optimization Extensions **/
1139 /* Pattern creation step. No code has to be generated, so
1140 exit after pattern creation */
1141 if (firm_ext_grs.create_pattern) {
1142 ext_grs_create_pattern();
1146 /* SIMD optimization step. Uses graph patterns to find
1147 rich instructions and rewrite */
1148 if (firm_ext_grs.simd_opt)
1152 if (firm_dump.statistic & STAT_FINAL_IR)
1153 stat_dump_snapshot(input_filename, "final-ir");
1155 /* run the code generator */
1156 if (firm_be_opt.selection != BE_NONE)
1157 do_codegen(out, input_filename);
1159 if (firm_dump.statistic & STAT_FINAL)
1160 stat_dump_snapshot(input_filename, "final");
1163 if (firm_opt.ycomp_dbg)
1164 firm_finish_ycomp_debugger();
1166 } /* gen_firm_finish */
1169 * Do very early initializations
/* Register Firm's command line options; must run before argument
 * parsing, hence "early". */
1171 void firm_early_init(void) {
1172 /* arg: need this here for command line options */
1174 firm_init_options(NULL, 0, NULL);
1175 } /* firm_early_init */