/**
* @file
* @brief Available Optimisations of libFirm.
- * @version $Id: cfopt.h 13543 2007-04-29 19:29:02Z beck $
+ * @version $Id$
*/
#ifndef FIRM_IROPTIMIZE_H
#define FIRM_IROPTIMIZE_H
* Try to reduce the number of conv nodes in the given ir graph.
*
* @param irg the graph
+ *
+ * @return non-zero if the optimization could be applied, 0 else
*/
-void conv_opt(ir_graph *irg);
+int conv_opt(ir_graph *irg);
/**
* Do the scalar replacement optimization.
* Based on VanDrunen and Hosking 2004.
*
* @param irg the graph
- *
- * @note
- * Currently completely broken because the used sets do NOT
- * preserve the topological sort of its elements.
*/
void do_gvn_pre(ir_graph *irg);
* If it returns non-zero, a mux is created, else the code
* is not modified.
* @param sel A selector of a Cond.
- * @param phi_list List of Phi nodes about to be converted (linked via link field)
+ * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next() field)
* @param i First data predecessor involved in if conversion
* @param j Second data predecessor involved in if conversion
*/
*/
void opt_if_conv(ir_graph *irg, const ir_settings_if_conv_t *params);
-void opt_ldst2(ir_graph *irg);
+void opt_sync(ir_graph *irg);
+
+/**
+ * Check if we can replace the load by a given const from
+ * the const code irg.
+ *
+ * @param load the load to replace
+ * @param c the constant
+ *
+ * @return if the modes match or can be transformed using a reinterpret cast
+ * returns a copy of the constant (possibly Conv'ed) on the
+ * current_ir_graph
+ */
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c);
/**
* Load/Store optimization.
*
* Store after Load: A Store after a Load is removed, if the
* Store doesn't have an exception handler.
+ *
+ * @return non-zero if the optimization could be applied, 0 else
*/
-void optimize_load_store(ir_graph *irg);
+int optimize_load_store(ir_graph *irg);
/**
* Do Loop unrolling in the given graph.
osr_flag_none = 0, /**< no additional flags */
osr_flag_lftr_with_ov_check = 1, /**< do linear function test replacement
only if no overflow can occur. */
- osr_flag_ignore_x86_shift = 2 /**< ignore Multiplications by 2, 4, 8 */
+ osr_flag_ignore_x86_shift = 2, /**< ignore Multiplications by 2, 4, 8 */
+ osr_flag_keep_reg_pressure = 4 /**< do NOT increase register pressure by introducing new
+ induction variables. */
} osr_flags;
/* FirmJNI cannot handle identical enum values... */
* See Muchnik 12.3.1 Algebraic Simplification and Reassociation of
* Addressing Expressions.
*
- *
+ * @return non-zero if the optimization could be applied, 0 else
*/
-void optimize_reassociation(ir_graph *irg);
+int optimize_reassociation(ir_graph *irg);
/**
* Normalize the Returns of a graph by creating a new End block
* with atomic values if possible. Does not handle classes yet.
*
* @param irg the graph which should be optimized
+ *
+ * @return non-zero, if at least one entity was replaced
*/
-void scalar_replacement_opt(ir_graph *irg);
+int scalar_replacement_opt(ir_graph *irg);
/** Performs strength reduction for the passed graph. */
void reduce_strength(ir_graph *irg);
/**
- * Optimizes simple tail-recursion calls by
- * converting them into loops. Depends on the flag opt_tail_recursion.
+ * Optimizes tail-recursion calls by converting them into loops.
+ * Depends on the flag opt_tail_recursion.
+ * Currently supports the following forms:
+ * - return func();
+ * - return x + func();
+ * - return func() - x;
+ * - return x * func();
+ * - return -func();
*
* Does not work for Calls that use the exception stuff.
*
*/
int opt_tail_rec_irg(ir_graph *irg);
-/*
+/**
* Optimize tail-recursion calls for all IR-Graphs.
- * Depends on the flag opt_tail_recursion.
+ * Can currently handle:
+ * - direct return value, i.e. return func().
+ * - additive return value, i.e. return x +/- func()
+ * - multiplicative return value, i.e. return x * func() or return -func()
+ *
+ * The current implementation must be run before optimize_funccalls(),
+ * because it expects the memory edges pointing to calls, which might be
+ * removed by optimize_funccalls().
*/
void opt_tail_recursion(void);
*/
void optimize_class_casts(void);
+/**
+ * Cliff Click's combo algorithm from "Combining Analyses, combining Optimizations".
+ *
+ * Does conditional constant propagation, unreachable code elimination and optimistic
+ * global value numbering at once.
+ *
+ * @param irg the graph to run on
+ */
+void combo(ir_graph *irg);
+
+/** Inlines all small methods at call sites where the called address comes
+ * from a SymConst node that references the entity representing the called
+ * method.
+ *
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined. Further only a limited number of calls are inlined.
+ * If the method contains more than 1024 inlineable calls none will be
+ * inlined.
+ * Inlining is only performed if flags `optimize' and `inlining' are set.
+ * The graph may not be in state phase_building.
+ * It is recommended to call local_optimize_graph() after inlining as this
+ * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+ * combination as control flow operation.
+ */
+void inline_small_irgs(ir_graph *irg, int size);
+
+
+/** Inlining with a different heuristic than inline_small_irgs().
+ *
+ * Inlines leave functions. If inlining creates new leave
+ * functions, these are inlined too. (If g calls f, and f calls leave h,
+ * h is first inlined in f and then f in g.)
+ *
+ * Then inlines all small functions (this is not recursive).
+ *
+ * For a heuristic this inlining uses firm node counts. It does
+ * not count auxiliary nodes as Proj, Tuple, End, Start, Id, Sync.
+ * If the ignore_runtime flag is set, calls to functions marked with the
+ * mtp_property_runtime property are ignored.
+ *
+ * @param maxsize Do not inline any calls if a method has more than
+ * maxsize firm nodes. It may reach this limit by
+ * inlining.
+ * @param leavesize Inline leave functions if they have less than leavesize
+ * nodes.
+ * @param size Inline all function smaller than size.
+ * @param ignore_runtime if non-zero, calls to runtime functions are ignored,
+ * so a function calling only runtime functions counts as leave
+ */
+void inline_leave_functions(unsigned maxsize, unsigned leavesize,
+ unsigned size, int ignore_runtime);
+
+/**
+ * Heuristic inliner. Calculates a benefice value for every call and inlines
+ * those calls with a value higher than the threshold.
+ *
+ * @param maxsize Do not inline any calls if a method has more than
+ * maxsize firm nodes. It may reach this limit by
+ * inlining.
+ * @param inline_threshold inlining threshold
+ */
+void inline_functions(unsigned maxsize, int inline_threshold);
+
+/**
+ * Combines congruent blocks into one.
+ *
+ * @param irg The IR-graph to optimize.
+ *
+ * @return non-zero if the graph was transformed
+ */
+int shape_blocks(ir_graph *irg);
+
#endif