X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=include%2Flibfirm%2Firoptimize.h;h=61103da18d9b3bff575f9c0088118c4842c19e87;hb=fa9c992b5f23e7a059ac91fdd04b409b951ebcc4;hp=41e09aed5b593d319dd56334a58a11ff569bd9a1;hpb=485a16d5b15766a1b42d6cd2a58c7a749ac0de40;p=libfirm

diff --git a/include/libfirm/iroptimize.h b/include/libfirm/iroptimize.h
index 41e09aed5..61103da18 100644
--- a/include/libfirm/iroptimize.h
+++ b/include/libfirm/iroptimize.h
@@ -20,7 +20,7 @@
 /**
  * @file
  * @brief Available Optimisations of libFirm.
- * @version $Id: cfopt.h 13543 2007-04-29 19:29:02Z beck $
+ * @version $Id$
  */
 #ifndef FIRM_IROPTIMIZE_H
 #define FIRM_IROPTIMIZE_H
@@ -63,8 +63,10 @@ void opt_bool(ir_graph *irg);
 * Try to reduce the number of conv nodes in the given ir graph.
 *
 * @param irg the graph
+ *
+ * @return non-zero if the optimization could be applied, 0 otherwise
 */
-void conv_opt(ir_graph *irg);
+int conv_opt(ir_graph *irg);

 /**
  * Do the scalar replacement optimization.
@@ -196,7 +198,20 @@ struct ir_settings_if_conv_t {
 */
 void opt_if_conv(ir_graph *irg, const ir_settings_if_conv_t *params);

-void opt_ldst2(ir_graph *irg);
+void opt_sync(ir_graph *irg);
+
+/*
+ * Check if we can replace the Load by a given constant from
+ * the const code irg.
+
+ * @param load the Load to replace
+ * @param c    the constant
+
+ * @return if the modes match or can be transformed using a reinterpret cast,
+ *         returns a copy of the constant (possibly Conv'ed) on the
+ *         current_ir_graph
+ */
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c);

 /**
  * Load/Store optimization.
@@ -222,8 +237,10 @@ void opt_ldst2(ir_graph *irg);
 *
 * Store after Load: A Store after a Load is removed, if the
 * Store doesn't have an exception handler.
+ *
+ * @return non-zero if the optimization could be applied, 0 otherwise
 */
-void optimize_load_store(ir_graph *irg);
+int optimize_load_store(ir_graph *irg);

 /**
  * Do Loop unrolling in the given graph.
@@ -359,9 +376,9 @@ void proc_cloning(float threshold);
 * See Muchnik 12.3.1 Algebraic Simplification and Reassociation of
 * Addressing Expressions.
 *
- *
+ * @return non-zero if the optimization could be applied, 0 otherwise
 */
-void optimize_reassociation(ir_graph *irg);
+int optimize_reassociation(ir_graph *irg);

 /**
  * Normalize the Returns of a graph by creating a new End block
@@ -439,9 +456,16 @@ void reduce_strength(ir_graph *irg);
 */
 int opt_tail_rec_irg(ir_graph *irg);

-/*
+/**
 * Optimize tail-recursion calls for all IR-Graphs.
- * Depends on the flag opt_tail_recursion.
+ * Can currently handle:
+ * - direct return value, i.e. return func().
+ * - additive return value, i.e. return x +/- func()
+ * - multiplicative return value, i.e. return x * func() or return -func()
+ *
+ * The current implementation must be run before optimize_funccalls(),
+ * because it expects the memory edges pointing to calls, which might be
+ * removed by optimize_funccalls().
 */
 void opt_tail_recursion(void);

@@ -514,7 +538,63 @@ void optimize_class_casts(void);
 *
 * Does conditional constant propagation, unreachable code elimination and optimistic
 * global value numbering at once.
+ *
+ * @param irg the graph to run on
 */
 void combo(ir_graph *irg);

+/** Inlines all small methods at call sites where the called address comes
+ * from a SymConst node that references the entity representing the called
+ * method.
+ *
+ * The size argument is a rough measure for the code size of the method:
+ * Methods where the obstack containing the firm graph is smaller than
+ * size are inlined. Furthermore, only a limited number of calls are inlined.
+ * If the method contains more than 1024 inlineable calls, none will be
+ * inlined.
+ * Inlining is only performed if the flags `optimize' and `inlining' are set.
+ * The graph may not be in state phase_building.
+ * It is recommended to call local_optimize_graph() after inlining, as this
+ * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+ * combination, as control flow operations.
+ */
+void inline_small_irgs(ir_graph *irg, int size);
+
+
+/** Inlining with a different heuristic than inline_small_irgs().
+ *
+ * Inlines leave functions. If inlining creates new leave
+ * functions, these are inlined too. (If g calls f, and f calls leave h,
+ * h is first inlined in f and then f in g.)
+ *
+ * Then inlines all small functions (this is not recursive).
+ *
+ * As a heuristic, this inlining uses firm node counts. It does
+ * not count auxiliary nodes such as Proj, Tuple, End, Start, Id, Sync.
+ * If the ignore_runtime flag is set, calls to functions marked with the
+ * mtp_property_runtime property are ignored.
+ *
+ * @param maxsize    Do not inline any calls if a method has more than
+ *                   maxsize firm nodes. It may reach this limit through
+ *                   inlining.
+ * @param leavesize  Inline leave functions if they have less than leavesize
+ *                   nodes.
+ * @param size       Inline all functions smaller than size.
+ * @param ignore_runtime  count a function that only calls runtime functions
+ *                        as a leave function
+ */
+void inline_leave_functions(unsigned maxsize, unsigned leavesize,
+                            unsigned size, int ignore_runtime);
+
+/**
+ * Heuristic inliner. Calculates a benefice value for every call and inlines
+ * those calls with a value higher than the threshold.
+ *
+ * @param maxsize           Do not inline any calls if a method has more than
+ *                          maxsize firm nodes. It may reach this limit
+ *                          through inlining.
+ * @param inline_threshold  inlining threshold
+ */
+void inline_functions(unsigned maxsize, int inline_threshold);
+
 #endif
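
A minimal usage sketch, separate from the patch above: it shows how the passes declared in iroptimize.h might be driven from client code once they report their result as int. conv_opt(), optimize_load_store(), optimize_reassociation(), combo() and inline_functions() are the declarations from this header; get_irp_n_irgs(), get_irp_irg() and local_optimize_graph() come from other libfirm headers; the pass order and the numeric limits are illustrative assumptions, not part of the patch.

#include <libfirm/firm.h>

/* Illustrative driver: run a few of the optimizations declared above on
 * every graph of the current program, then inline with a rough budget.
 * Pass order and limits are assumptions for this sketch only. */
static void run_example_passes(void)
{
	int i;
	int n = get_irp_n_irgs();

	for (i = 0; i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);

		/* with this patch the passes report whether they changed the graph */
		if (conv_opt(irg))
			local_optimize_graph(irg);
		if (optimize_load_store(irg))
			local_optimize_graph(irg);
		(void)optimize_reassociation(irg);

		/* conditional constant propagation + unreachable code + GVN */
		combo(irg);
	}

	/* assumed budget values; the header recommends local optimization
	 * after inlining to clean up the Tuple nodes it leaves behind */
	inline_functions(750 /* maxsize */, 0 /* inline_threshold */);
	for (i = 0; i < n; ++i)
		local_optimize_graph(get_irp_irg(i));
}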