X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=include%2Flibfirm%2Firoptimize.h;h=61103da18d9b3bff575f9c0088118c4842c19e87;hb=fa9c992b5f23e7a059ac91fdd04b409b951ebcc4;hp=1b8dfb8af22a171105fb7545134d954a99dcb65b;hpb=037b862d722d4392d020639e00d4018c072f3fe2;p=libfirm

diff --git a/include/libfirm/iroptimize.h b/include/libfirm/iroptimize.h
index 1b8dfb8af..61103da18 100644
--- a/include/libfirm/iroptimize.h
+++ b/include/libfirm/iroptimize.h
@@ -20,7 +20,7 @@
 /**
  * @file
  * @brief Available Optimisations of libFirm.
- * @version $Id: cfopt.h 13543 2007-04-29 19:29:02Z beck $
+ * @version $Id$
  */
 #ifndef FIRM_IROPTIMIZE_H
 #define FIRM_IROPTIMIZE_H
@@ -63,8 +63,10 @@ void opt_bool(ir_graph *irg);
  * Try to reduce the number of conv nodes in the given ir graph.
  *
  * @param irg the graph
+ *
+ * @return non-zero if the optimization could be applied, 0 otherwise
  */
-void conv_opt(ir_graph *irg);
+int conv_opt(ir_graph *irg);

 /**
  * Do the scalar replacement optimization.
@@ -159,10 +161,6 @@ void optimize_funccalls(int force_run, check_alloc_entity_func callback);
  * Based on VanDrunen and Hosking 2004.
  *
  * @param irg the graph
- *
- * @note
- * Currently completely broken because the used sets do NOT
- * preserve the topological sort of its elements.
  */
 void do_gvn_pre(ir_graph *irg);

@@ -172,7 +170,7 @@ void do_gvn_pre(ir_graph *irg);
  * If it returns non-zero, a mux is created, else the code
  * is not modified.
  * @param sel A selector of a Cond.
- * @param phi_list List of Phi nodes about to be converted (linked via link field)
+ * @param phi_list List of Phi nodes about to be converted (linked via get_Phi_next())
  * @param i First data predecessor involved in if conversion
  * @param j Second data predecessor involved in if conversion
  */
@@ -200,7 +198,20 @@ struct ir_settings_if_conv_t {
  */
 void opt_if_conv(ir_graph *irg, const ir_settings_if_conv_t *params);

-void opt_ldst2(ir_graph *irg);
+void opt_sync(ir_graph *irg);
+
+/**
+ * Check if we can replace the load by a given const from
+ * the const code irg.
+ *
+ * @param load the load to replace
+ * @param c    the constant
+ *
+ * @return if the modes match or can be transformed using a reinterpret cast,
+ *         returns a copy of the constant (possibly Conv'ed) on the
+ *         current_ir_graph
+ */
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c);

 /**
  * Load/Store optimization.
@@ -226,8 +237,10 @@ void opt_ldst2(ir_graph *irg);
  *
  * Store after Load: A Store after a Load is removed if the
  * Store doesn't have an exception handler.
+ *
+ * @return non-zero if the optimization could be applied, 0 otherwise
  */
-void optimize_load_store(ir_graph *irg);
+int optimize_load_store(ir_graph *irg);

 /**
  * Do Loop unrolling in the given graph.
@@ -363,9 +376,9 @@ void proc_cloning(float threshold);
  * See Muchnik 12.3.1 Algebraic Simplification and Reassociation of
  * Addressing Expressions.
  *
- *
+ * @return non-zero if the optimization could be applied, 0 otherwise
  */
-void optimize_reassociation(ir_graph *irg);
+int optimize_reassociation(ir_graph *irg);

 /**
  * Normalize the Returns of a graph by creating a new End block
@@ -417,15 +430,23 @@ void normalize_n_returns(ir_graph *irg);
  * with atomic values if possible. Does not handle classes yet.
  *
  * @param irg the graph which should be optimized
+ *
+ * @return non-zero, if at least one entity was replaced
  */
-void scalar_replacement_opt(ir_graph *irg);
+int scalar_replacement_opt(ir_graph *irg);

 /** Performs strength reduction for the passed graph. */
 void reduce_strength(ir_graph *irg);

 /**
- * Optimizes simple tail-recursion calls by
- * converting them into loops. Depends on the flag opt_tail_recursion.
+ * Optimizes tail-recursion calls by converting them into loops.
+ * Depends on the flag opt_tail_recursion.
+ * Currently supports the following forms:
+ *  - return func();
+ *  - return x + func();
+ *  - return func() - x;
+ *  - return x * func();
+ *  - return -func();
  *
  * Does not work for Calls that use the exception stuff.
  *
@@ -435,9 +456,16 @@ void reduce_strength(ir_graph *irg);
  */
 int opt_tail_rec_irg(ir_graph *irg);

-/*
+/**
  * Optimize tail-recursion calls for all IR-Graphs.
- * Depends on the flag opt_tail_recursion.
+ * Can currently handle:
+ * - direct return value, i.e. return func().
+ * - additive return value, i.e. return x +/- func()
+ * - multiplicative return value, i.e. return x * func() or return -func()
+ *
+ * The current implementation must be run before optimize_funccalls(),
+ * because it expects the memory edges to point to the calls; these edges
+ * might be removed by optimize_funccalls().
  */
 void opt_tail_recursion(void);

@@ -505,4 +533,68 @@ void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct)
  */
 void optimize_class_casts(void);

+/**
+ * Cliff Click's combo algorithm from "Combining Analyses, Combining Optimizations".
+ *
+ * Does conditional constant propagation, unreachable code elimination and optimistic
+ * global value numbering at once.
+ *
+ * @param irg the graph to run on
+ */
+void combo(ir_graph *irg);
+
+/** Inlines all small methods at call sites where the called address comes
+ * from a SymConst node that references the entity representing the called
+ * method.
+ *
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined. Further only a limited number of calls are inlined.
+ * If the method contains more than 1024 inlineable calls, none will be
+ * inlined.
+ * Inlining is only performed if flags `optimize' and `inlineing' are set.
+ * The graph may not be in state phase_building.
+ * It is recommended to call local_optimize_graph() after inlining as this
+ * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+ * combination as a control flow operation.
+ */
+void inline_small_irgs(ir_graph *irg, int size);
+
+
+/** Inlining with a different heuristic than inline_small_irgs().
+ *
+ * Inlines leave functions. If inlining creates new leave
+ * functions, these are inlined as well. (If g calls f, and f calls leave h,
+ * h is first inlined in f and then f in g.)
+ *
+ * Then inlines all small functions (this is not recursive).
+ *
+ * As a heuristic this inlining uses firm node counts. It does
+ * not count auxiliary nodes such as Proj, Tuple, End, Start, Id, Sync.
+ * If the ignore_runtime flag is set, calls to functions marked with the
+ * mtp_property_runtime property are ignored.
+ *
+ * @param maxsize        Do not inline any calls if a method has more than
+ *                       maxsize firm nodes. It may reach this limit by
+ *                       inlining.
+ * @param leavesize      Inline leave functions if they have less than leavesize
+ *                       nodes.
+ * @param size           Inline all functions smaller than size.
+ * @param ignore_runtime if set, count a function that only calls runtime
+ *                       functions as a leave function
+ */
+void inline_leave_functions(unsigned maxsize, unsigned leavesize,
+                            unsigned size, int ignore_runtime);
+
+/**
+ * Heuristic inliner. Calculates a benefice value for every call and inlines
+ * those calls with a value higher than the threshold.
+ *
+ * @param maxsize          Do not inline any calls if a method has more than
+ *                         maxsize firm nodes. It may reach this limit by
+ *                         inlining.
+ * @param inline_threshold inlining threshold
+ */
+void inline_functions(unsigned maxsize, int inline_threshold);
+
 #endif
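
For orientation, a minimal usage sketch follows; it is an editorial illustration, not part of the patch. It shows how a pass driver might consume the int return values introduced by this change. The driver function run_scalar_passes() and the umbrella header <libfirm/firm.h> are assumptions; conv_opt(), optimize_load_store(), optimize_reassociation(), scalar_replacement_opt() and local_optimize_graph() are the entry points documented above.

#include <libfirm/firm.h>

/* Hypothetical driver (sketch): run the passes whose signatures changed in
 * this patch and re-run local optimizations only if one of them reports
 * that it modified the graph. Assumes irg is a fully constructed graph
 * (not in state phase_building). */
static void run_scalar_passes(ir_graph *irg)
{
	int changed = 0;

	changed |= conv_opt(irg);               /* non-zero if conv nodes were reduced */
	changed |= optimize_load_store(irg);    /* non-zero if loads/stores were optimized */
	changed |= optimize_reassociation(irg); /* non-zero if expressions were reassociated */
	changed |= scalar_replacement_opt(irg); /* non-zero if an entity was replaced */

	if (changed)
		local_optimize_graph(irg);          /* local cleanup after the graph changed */
}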