/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief Available Optimisations of libFirm.
 */
#ifndef FIRM_IROPTIMIZE_H
#define FIRM_IROPTIMIZE_H

#include "firm_types.h"

/**
 * Control flow optimization.
 *
 * Removes empty blocks, performs if simplification and loop simplification.
 * A block is empty if it contains only a Jmp node and Phi nodes.
 * Merges single entry single exit blocks with their predecessor
 * and propagates dead control flow by calling equivalent_node().
 * Independent of compiler flags, it removes Tuples from control flow edges,
 * Bad predecessors from Blocks and Phis, and unnecessary predecessors of End.
 *
 * @bug So far destroys backedge information.
 * @bug Chokes on Id nodes if called in a certain order with other
 *      optimizations. Call local_optimize_graph() beforehand to remove
 *      Id nodes.
 */
void optimize_cf(ir_graph *irg);
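
/*
 * Usage sketch (illustrative, not a prescribed sequence): because of the
 * Id node bug above, drivers typically run local_optimize_graph() first:
 *
 *   local_optimize_graph(irg);
 *   optimize_cf(irg);
 */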

/**
 * Creates an ir_graph pass for optimize_cf().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *optimize_cf_pass(const char *name, int verify, int dump);

/**
 * Perform path-sensitive jump threading on the given graph.
 *
 * @param irg the graph
 */
void opt_jumpthreading(ir_graph *irg);

/**
 * Creates an ir_graph pass for opt_jumpthreading().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *opt_jumpthreading_pass(const char *name, int verify, int dump);

/**
 * Try to simplify boolean expressions in the given ir graph,
 * e.g. x < 5 && x < 6 becomes x < 5.
 *
 * @param irg the graph
 */
void opt_bool(ir_graph *irg);

/**
 * Creates an ir_graph pass for opt_bool().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *opt_bool_pass(const char *name, int verify, int dump);

/**
 * Try to reduce the number of Conv nodes in the given ir graph.
 *
 * @param irg the graph
 *
 * @return non-zero if the optimization could be applied, 0 else
 */
int conv_opt(ir_graph *irg);
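
/*
 * Usage sketch (illustrative): the return value reports whether anything
 * changed, so one possible pattern is to iterate until a fixpoint:
 *
 *   while (conv_opt(irg)) {
 *     // repeat until no more Conv nodes can be removed
 *   }
 */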

/**
 * Creates an ir_graph pass for conv_opt().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *conv_opt_pass(const char *name, int verify, int dump);

/**
 * Do the scalar replacement optimization.
 * Performs a data flow analysis and splits local compound
 * values into atomic scalar values where possible.
 *
 * @param irg the graph which should be optimized
 */
void data_flow_scalar_replacement_opt(ir_graph *irg);

/**
 * A callback that checks whether an entity represents
 * an allocation call.
 */
typedef int (*check_alloc_entity_func)(ir_entity *ent);

/**
 * Do simple and fast escape analysis for one graph.
 *
 * @param irg      the graph
 * @param callback a callback function to check whether a
 *                 given entity is an allocation call
 */
void escape_analysis_irg(ir_graph *irg, check_alloc_entity_func callback);

/**
 * Do simple and fast escape analysis for all graphs.
 *
 * This optimization implements a simple and fast but inexact
 * escape analysis. Some addresses might be marked as 'escaped' even
 * if they do not escape.
 * The advantage is a low memory footprint and fast speed.
 *
 * @param run_scalar_replace if this flag is non-zero, the scalar
 *                           replacement optimization is run on graphs with
 *                           removed allocations
 * @param callback           a callback function to check whether a
 *                           given entity is an allocation call
 *
 * This optimization removes allocations which are not used (rare) and replaces
 * allocations that can be proven dead at the end of the graph with stack variables.
 *
 * The creation of stack variables allows scalar replacement to be run only
 * on those graphs that have been changed.
 *
 * This is most effective on Java, where no other stack variables exist.
 */
void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback);
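
/*
 * Example (illustrative): a frontend might recognize its allocator by
 * entity name. my_is_alloc_entity() is a hypothetical callback, not part
 * of the libFirm API:
 *
 *   static int my_is_alloc_entity(ir_entity *ent) {
 *     return strcmp(get_entity_name(ent), "malloc") == 0;
 *   }
 *
 *   escape_analysis(1, my_is_alloc_entity);
 */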

/**
 * Optimize function calls by handling const functions.
 *
 * This optimization first detects all "const functions", i.e.,
 * IR graphs that neither read nor write memory (and hence do
 * not create exceptions, as these use memory in Firm).
 *
 * The result of calls to such functions depends only on their
 * arguments, hence those calls are no longer pinned.
 *
 * This is a rather strong criterion, so do not expect that a
 * lot of functions will be found. Moreover, all of them might
 * already be inlined if inlining is activated.
 * Anyway, it might be good for handling builtins or pseudo-graphs,
 * even if the latter read/write memory (but we know how).
 *
 * This optimization reads the irg_const_function property of
 * entities and sets the irg_const_function property of graphs.
 *
 * If callee information is valid, we also optimize polymorphic Calls.
 *
 * @param force_run if non-zero, an optimization run is started even
 *                  if no const function graph was detected.
 *                  Else calls are only optimized if at least one
 *                  const function graph was detected.
 * @param callback  a callback function to check whether a
 *                  given entity is an allocation call
 *
 * If the frontend created external entities with the irg_const_function
 * property set, the force_run parameter should be set; otherwise calls
 * to those entities will not be optimized.
 *
 * @note This optimization destroys the link fields of nodes.
 */
void optimize_funccalls(int force_run, check_alloc_entity_func callback);
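
/*
 * Usage sketch (illustrative; assumes a NULL callback is acceptable when
 * no allocation entities need to be recognized):
 *
 *   optimize_funccalls(0, NULL);
 */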

/**
 * Creates an ir_prog pass for optimize_funccalls().
 *
 * @param name      the name of this pass or NULL
 * @param verify    should this pass be verified?
 * @param dump      should this pass result be dumped?
 * @param force_run if non-zero, an optimization run is started even
 *                  if no const function graph was detected.
 *                  Else calls are only optimized if at least one
 *                  const function graph was detected.
 * @param callback  a callback function to check whether a
 *                  given entity is an allocation call
 *
 * @return the newly created ir_prog pass
 */
ir_prog_pass_t *optimize_funccalls_pass(
	const char *name, int verify, int dump,
	int force_run, check_alloc_entity_func callback);

/**
 * Does Partial Redundancy Elimination combined with
 * Global Value Numbering.
 * Can be used to replace place_code() completely.
 *
 * Based on VanDrunen and Hosking 2004.
 *
 * @param irg the graph
 */
void do_gvn_pre(ir_graph *irg);

/**
 * Creates an ir_graph pass for do_gvn_pre().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *do_gvn_pre_pass(const char *name, int verify, int dump);

/**
 * This function is called to evaluate whether a Mux can be built
 * for the current architecture.
 * If it returns non-zero, a Mux is created, else the code
 * is left unchanged.
 *
 * @param sel      A selector of a Cond.
 * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next() field)
 * @param i        First data predecessor involved in if conversion
 * @param j        Second data predecessor involved in if conversion
 */
typedef int (*arch_allow_ifconv_func)(ir_node *sel, ir_node *phi_list, int i, int j);

/**
 * The parameters structure.
 */
struct ir_settings_if_conv_t {
	int max_depth;                       /**< The maximum depth up to which expressions
	                                          are examined when it has to be decided if they
	                                          can be placed into another block. */
	arch_allow_ifconv_func allow_ifconv; /**< Evaluator function; if not set, all possible Psi
	                                          nodes will be created. */
};

/**
 * Perform If conversion on a graph.
 *
 * @param irg    The graph.
 * @param params The parameters for the if conversion.
 *
 * Cannot handle blocks with Bad control predecessors, so call it after
 * control flow optimization.
 */
void opt_if_conv(ir_graph *irg, const ir_settings_if_conv_t *params);
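
/*
 * Usage sketch (illustrative): allow_all_ifconv() is a hypothetical
 * evaluator that accepts every candidate:
 *
 *   static int allow_all_ifconv(ir_node *sel, ir_node *phi_list, int i, int j) {
 *     (void)sel; (void)phi_list; (void)i; (void)j;
 *     return 1;
 *   }
 *
 *   ir_settings_if_conv_t params = { 4, allow_all_ifconv };
 *   opt_if_conv(irg, &params);
 */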

/**
 * Creates an ir_graph pass for opt_if_conv().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 * @param params The parameters for the if conversion.
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *opt_if_conv_pass(
	const char *name, int verify, int dump, const ir_settings_if_conv_t *params);

/**
 * Tries to reduce dependencies for memory nodes where Sync nodes are
 * created.
 *
 * @param irg the graph in which Sync nodes should be optimized
 */
void opt_sync(ir_graph *irg);

/**
 * Check if we can replace the load by a given const from
 * the const code irg.
 *
 * @param load the load to replace
 * @param c    the constant
 *
 * @return if the modes match or can be transformed using a reinterpret cast,
 *         a copy of the constant (possibly Conv'ed); NULL otherwise
 */
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c);

/**
 * Load/Store optimization.
 *
 * Removes redundant non-volatile Loads and Stores.
 * May introduce Bad nodes if exceptional control flow
 * is removed. The following cases are optimized:
 *
 * Load without result: A Load which has only a memory use
 *   is removed.
 *
 * Load after Store: A Load after a Store is removed, if
 *   the Load doesn't have an exception handler OR is in
 *   the same block as the Store.
 *
 * Load after Load: A Load after a Load is removed, if the
 *   Load doesn't have an exception handler OR is in the
 *   same block as the previous Load.
 *
 * Store before Store: A Store immediately before another
 *   Store in the same block is removed, if the Store doesn't
 *   have an exception handler.
 *
 * Store after Load: A Store after a Load is removed, if the
 *   Store doesn't have an exception handler.
 *
 * @return non-zero if the optimization could be applied, 0 else
 */
int optimize_load_store(ir_graph *irg);
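
/*
 * Source-level illustration of the cases above (C pseudocode, not API
 * calls):
 *
 *   *p = x; y = *p;    // Load after Store:   y = x, the Load is removed
 *   y = *p; z = *p;    // Load after Load:    z = y, second Load removed
 *   *p = y; *p = z;    // Store before Store: first Store removed
 *   y = *p; *p = y;    // Store after Load:   the Store is removed
 */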

/**
 * New experimental alternative to optimize_load_store().
 * Based on a dataflow analysis, so Loads and Stores are moved out of
 * loops where possible.
 */
int opt_ldst(ir_graph *irg);

/**
 * Do Loop unrolling in the given graph.
 */
void optimize_loop_unrolling(ir_graph *irg);

/**
 * Optimize the frame type of an irg by removing
 * never touched entities.
 *
 * @param irg the graph whose frame type will be optimized
 *
 * This function does not change the graph, only its frame type.
 * The layout state of the frame type will be set to layout_undefined
 * if entities were removed.
 */
void opt_frame_irg(ir_graph *irg);

/** Possible flags for the Operator Scalar Replacement. */
typedef enum osr_flags {
	osr_flag_none               = 0, /**< no additional flags */
	osr_flag_lftr_with_ov_check = 1, /**< do linear function test replacement
	                                      only if no overflow can occur. */
	osr_flag_ignore_x86_shift   = 2, /**< ignore multiplications by 2, 4, 8 */
	osr_flag_keep_reg_pressure  = 4  /**< do NOT increase register pressure by
	                                      introducing new induction variables. */
} osr_flags;

/* FirmJNI cannot handle identical enum values... */

/** default setting */
#define osr_flag_default osr_flag_lftr_with_ov_check

/**
 * Do the Operator Scalar Replacement optimization and linear
 * function test replacement for loop control.
 * Can be switched off using the set_opt_strength_red() flag.
 * In that case, only remove_phi_cycles() is executed.
 *
 * @param irg   the graph which should be optimized
 * @param flags set of osr_flags
 *
 * The linear function test replacement is controlled by the flags.
 * If osr_flag_lftr_with_ov_check is set, the replacement is only
 * done if no overflow can occur.
 * Otherwise it is ALWAYS done, which might be unsafe.
 *
 * For instance:
 *
 * for (i = 0; i < 100; ++i)
 *
 * might be replaced by
 *
 * for (i = 0; i < 400; i += 4)
 *
 * But
 *
 * for (i = 0; i < 0x7FFFFFFF; ++i)
 *
 * will not be replaced by
 *
 * for (i = 0; i < 0xFFFFFFFC; i += 4)
 *
 * because of overflow.
 *
 * More bad cases:
 *
 * for (i = 0; i <= 0xF; ++i)
 *
 * will NOT be transformed into
 *
 * for (i = 0xFFFFFFF0; i <= 0xFFFFFFFF; ++i)
 *
 * although there is no direct overflow. The overflow occurs when the ++i
 * is executed (and would create an endless loop here!).
 *
 * For the same reason, a loop
 *
 * for (i = 0; i <= 9; i += x)
 *
 * will NOT be transformed because we cannot estimate whether an overflow
 * might happen adding x.
 *
 * Note that i < a + 400 is also not possible with the current implementation
 * although this might be allowed by other compilers...
 *
 * Note further that tests for equality can be handled somewhat more simply
 * (but are not implemented yet).
 *
 * This algorithm destroys the link field of nodes.
 */
void opt_osr(ir_graph *irg, unsigned flags);
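
/*
 * Usage sketch (illustrative flag combination):
 *
 *   opt_osr(irg, osr_flag_default | osr_flag_ignore_x86_shift);
 */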

/**
 * Removes useless Phi cycles, i.e. cycles of Phi nodes with only one
 * non-Phi input.
 *
 * This is automatically done in opt_osr(), so there is no need to call it
 * separately.
 *
 * @param irg the graph which should be optimized
 *
 * This algorithm destroys the link field of nodes.
 */
void remove_phi_cycles(ir_graph *irg);

/** A default threshold. */
#define DEFAULT_CLONE_THRESHOLD 300

/**
 * Do procedure cloning. Evaluate a heuristic weight for every
 * Call(..., Const, ...). If the weight is bigger than the threshold,
 * clone the entity and fix the calls.
 *
 * @param threshold the threshold for cloning
 *
 * The threshold is an estimation of how many instructions are saved
 * when executing a cloned method. If threshold is 0.0, every possible
 * call is cloned.
 */
void proc_cloning(float threshold);
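
/*
 * Source-level illustration (C pseudocode; the specialized name is
 * invented for the example): a frequent call f(x, 3) may be redirected
 * to a clone with the constant folded in:
 *
 *   int f(int x, int k) { return x << k; }
 *   // f(x, 3) becomes f_3(x), where
 *   int f_3(int x) { return x << 3; }
 */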

/**
 * Applies Reassociation rules to integer expressions.
 * Beware: Works only if integer overflow might be ignored, as for C, Java
 * and for address expressions.
 * Works only if Constant folding is activated.
 *
 * Uses loop information to detect loop-invariant (i.e. constant
 * inside the loop) values.
 *
 * See Muchnick 12.3.1 Algebraic Simplification and Reassociation of
 * Addressing Expressions.
 *
 * @return non-zero if the optimization could be applied, 0 else
 */
int optimize_reassociation(ir_graph *irg);
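
/*
 * Source-level illustration (C pseudocode): reassociation groups the
 * constants together so constant folding can combine them:
 *
 *   (x + 1) + (y + 2)   -->   (x + y) + 3
 */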

/**
 * Normalize the Returns of a graph by creating a new End block
 * with one Return(Phi).
 * This is the preferred input for the if-conversion.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 *
 * is transformed into
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 */
void normalize_one_return(ir_graph *irg);

/**
 * Normalize the Returns of a graph by moving
 * the Returns upwards as much as possible.
 * This might be preferred for code generation.
 *
 * In pseudocode, it means:
 *
 * if (a)
 *   res = b;
 * else
 *   res = c;
 * return res;
 *
 * is transformed into
 *
 * if (a)
 *   return b;
 * else
 *   return c;
 */
void normalize_n_returns(ir_graph *irg);

/**
 * Do the scalar replacement optimization.
 * Replace local compound entities (like structures and arrays)
 * with atomic values if possible. Does not handle classes yet.
 *
 * @param irg the graph which should be optimized
 *
 * @return non-zero, if at least one entity was replaced
 */
int scalar_replacement_opt(ir_graph *irg);
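
/*
 * Source-level illustration (C pseudocode; the scalar names are invented
 * for the example): a local struct whose address does not escape is split
 * into independent scalars:
 *
 *   struct { int a, b; } s;           int s_a, s_b;
 *   s.a = 1; s.b = 2;         -->     s_a = 1; s_b = 2;
 *   return s.a + s.b;                 return s_a + s_b;
 */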

/** Performs strength reduction for the passed graph. */
void reduce_strength(ir_graph *irg);

/**
 * Optimizes tail-recursion calls by converting them into loops.
 * Depends on the flag opt_tail_recursion.
 * Currently supports the following forms:
 *   - return func();
 *   - return x + func();
 *   - return func() - x;
 *   - return x * func();
 *   - return -func();
 *
 * Does not work for Calls that use the exception stuff.
 *
 * @param irg the graph to be optimized
 *
 * @return non-zero if the optimization could be applied, 0 else
 */
int opt_tail_rec_irg(ir_graph *irg);

/**
 * Optimize tail-recursion calls for all IR-Graphs.
 * Can currently handle:
 * - direct return value, i.e. return func().
 * - additive return value, i.e. return x +/- func()
 * - multiplicative return value, i.e. return x * func() or return -func()
 *
 * The current implementation must be run before optimize_funccalls(),
 * because it expects the memory edges pointing to calls, which might be
 * removed by optimize_funccalls().
 */
void opt_tail_recursion(void);
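
/*
 * Source-level illustration (C pseudocode): an additive tail recursion
 *
 *   int sum(int n) { return n == 0 ? 0 : n + sum(n - 1); }
 *
 * is converted into a loop that accumulates the additions, roughly:
 *
 *   int sum(int n) {
 *     int acc = 0;
 *     for (; n != 0; --n)
 *       acc += n;
 *     return acc;
 *   }
 */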

/** This is the type for a method that returns a pointer type to
 *  tp. This is needed in the normalization. */
typedef ir_type *(*gen_pointer_type_to_func)(ir_type *tp);

/** Insert Casts so that class type casts conform exactly with the type hierarchy.
 *
 *  Formulated in Java, this achieves the following:
 *
 *  For a class hierarchy
 *    class A {}
 *    class B extends A {}
 *    class C extends B {}
 *  we transform a cast
 *    (A)new C()
 *  into
 *    (A)((B)new C()).
 *
 *  The algorithm works for Casts with class types, but also for Casts
 *  with all pointer types that point (over several indirections,
 *  i.e. ***A) to a class type. Normalizes all graphs. Computes type
 *  information (@see irtypeinfo.h) if not available.
 *  Invalidates trout information as new casts are generated.
 *
 *  @param gppt_fct A function that returns a pointer type that points
 *    to the type given as argument. If this parameter is NULL, a default
 *    function is used that either uses trout information or performs an O(n)
 *    search to find an existing pointer type. If it cannot find a type,
 *    it generates a pointer type with mode_P_mach and suffix "cc_ptr_tp".
 */
void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct);

/** Insert Casts so that class type casts conform exactly with the type hierarchy
 *  in the given graph.
 *
 *  For more details see normalize_irp_class_casts().
 *
 *  This transformation requires that type information is computed. @see irtypeinfo.h.
 */
void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct);

/** Optimize casting between class types.
 *
 *    class A { m(); }
 *    class B extends A { }
 *    class C extends B {}
 *  Performs the following transformations:
 *    C c = (C)(B)(A)(B)new C()  --> C c = (C)(B)new C()  --> C c = new C()
 *    (Optimizing downcasts as A a = (A)(B)(new A())  --> A a = new A() can
 *     be suppressed by setting the flag opt_suppress_downcast_optimization.
 *     Downcasting A to B might cause an exception. It is not clear
 *     whether this is modeled by the Firm Cast node, as it has no exception
 *     outputs.)
 *  If there is inh_m() that overwrites m() in B:
 *    ((A)new B()).m()  --> (new B()).inh_m()
 *  Phi((A)x, (A)y)  --> (A) Phi(x, y)  if (A) is an upcast.
 *
 *  Computes type information if not available. @see irtypeinfo.h.
 *  Type information is valid after optimization.
 *  Invalidates trout information.
 */
void optimize_class_casts(void);

/**
 * Cliff Click's combo algorithm from "Combining Analyses, Combining Optimizations".
 *
 * Does conditional constant propagation, unreachable code elimination and optimistic
 * global value numbering at once.
 *
 * @param irg the graph to run on
 */
void combo(ir_graph *irg);

/**
 * Creates an ir_graph pass for combo().
 *
 * @param name   the name of this pass or NULL
 * @param verify should this pass be verified?
 * @param dump   should this pass result be dumped?
 *
 * @return the newly created ir_graph pass
 */
ir_graph_pass_t *combo_pass(const char *name, int verify, int dump);

/** Inlines all small methods at call sites where the called address comes
 *  from a SymConst node that references the entity representing the called
 *  method.
 *
 *  The size argument is a rough measure for the code size of the method:
 *  Methods where the obstack containing the firm graph is smaller than
 *  size are inlined. Further, only a limited number of calls are inlined.
 *  If the method contains more than 1024 inlineable calls none will be
 *  inlined.
 *  Inlining is only performed if the flags `optimize' and `inlining' are set.
 *  The graph may not be in state phase_building.
 *  It is recommended to call local_optimize_graph() after inlining as this
 *  function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
 *  combination as control flow operation.
 */
void inline_small_irgs(ir_graph *irg, int size);
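
/*
 * Usage sketch (illustrative; the size bound is an arbitrary example):
 *
 *   inline_small_irgs(irg, 40);
 *   local_optimize_graph(irg);  // clean up the Tuple nodes left behind
 */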

/** Inlining with a different heuristic than inline_small_irgs().
 *
 *  Inlines leave functions. If inlining creates new leave
 *  functions, these are inlined too. (If g calls f, and f calls leave h,
 *  h is first inlined in f and then f in g.)
 *
 *  Then inlines all small functions (this is not recursive).
 *
 *  For a heuristic this inlining uses firm node counts. It does
 *  not count auxiliary nodes such as Proj, Tuple, End, Start, Id, Sync.
 *  If the ignore_runtime flag is set, calls to functions marked with the
 *  mtp_property_runtime property are ignored.
 *
 *  @param maxsize        Do not inline any calls if a method has more than
 *                        maxsize firm nodes. It may reach this limit by
 *                        inlining.
 *  @param leavesize      Inline leave functions if they have less than leavesize
 *                        firm nodes.
 *  @param size           Inline all functions smaller than size.
 *  @param ignore_runtime If non-zero, calls to functions marked with the
 *                        mtp_property_runtime property are ignored.
 */
void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                            unsigned size, int ignore_runtime);

/**
 * Heuristic inliner. Calculates a benefice value for every call and inlines
 * those calls with a value higher than the threshold.
 *
 * @param maxsize          Do not inline any calls if a method has more than
 *                         maxsize firm nodes. It may reach this limit by
 *                         inlining.
 * @param inline_threshold the inlining threshold
 */
void inline_functions(unsigned maxsize, int inline_threshold);

/**
 * Combines congruent blocks into one.
 *
 * @param irg The IR-graph to optimize.
 *
 * @return non-zero if the graph was transformed
 */
int shape_blocks(ir_graph *irg);