X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=include%2Flibfirm%2Firoptimize.h;h=9aa0e1d4ec1ac8b324da9fd22cea4c96fe97a28d;hb=7547cf525ad54f59d2dc3f39d4b257911c94989b;hp=29e0d46be28ce6b3fb0ff8cfed5a2e6fa1165ef0;hpb=0b23345d840519455c1cc8fe64e94786c8a165e2;p=libfirm diff --git a/include/libfirm/iroptimize.h b/include/libfirm/iroptimize.h index 29e0d46be..9aa0e1d4e 100644 --- a/include/libfirm/iroptimize.h +++ b/include/libfirm/iroptimize.h @@ -26,6 +26,8 @@ #define FIRM_IROPTIMIZE_H #include "firm_types.h" +#include "nodeops.h" +#include "begin.h" /** * Control flow optimization. @@ -36,13 +38,13 @@ * and propagates dead control flow by calling equivalent_node(). * Independent of compiler flag it removes Tuples from cf edges, * Bad predecessors from Blocks and Phis, and unnecessary predecessors of End. + * Destroys backedge information. * - * @bug So far destroys backedge information. * @bug Chokes on Id nodes if called in a certain order with other * optimizations. Call local_optimize_graph() before to remove * Ids. */ -void optimize_cf(ir_graph *irg); +FIRM_API void optimize_cf(ir_graph *irg); /** * Creates an ir_graph pass for optimize_cf(). @@ -51,14 +53,14 @@ void optimize_cf(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *optimize_cf_pass(const char *name); +FIRM_API ir_graph_pass_t *optimize_cf_pass(const char *name); /** * Perform path-sensitive jump threading on the given graph. * * @param irg the graph */ -void opt_jumpthreading(ir_graph* irg); +FIRM_API void opt_jumpthreading(ir_graph* irg); /** * Creates an ir_graph pass for opt_jumpthreading(). @@ -67,7 +69,7 @@ void opt_jumpthreading(ir_graph* irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_jumpthreading_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_jumpthreading_pass(const char *name); /** * Creates an ir_graph pass for opt_loopunroll(). @@ -76,7 +78,7 @@ ir_graph_pass_t *opt_jumpthreading_pass(const char *name); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_loopunroll_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_loopunroll_pass(const char *name); /** @@ -85,7 +87,7 @@ ir_graph_pass_t *opt_loopunroll_pass(const char *name); * * @param irg the graph */ -void opt_bool(ir_graph *irg); +FIRM_API void opt_bool(ir_graph *irg); /** * Creates an ir_graph pass for opt_bool(). @@ -94,7 +96,7 @@ void opt_bool(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_bool_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_bool_pass(const char *name); /** * Try to reduce the number of conv nodes in the given ir graph. @@ -103,7 +105,7 @@ ir_graph_pass_t *opt_bool_pass(const char *name); * * @return non-zero if the optimization could be applied, 0 else */ -int conv_opt(ir_graph *irg); +FIRM_API int conv_opt(ir_graph *irg); /** * Creates an ir_graph pass for conv_opt(). @@ -112,7 +114,7 @@ int conv_opt(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *conv_opt_pass(const char *name); +FIRM_API ir_graph_pass_t *conv_opt_pass(const char *name); /** * Do the scalar replacement optimization. 
@@ -121,7 +123,7 @@ ir_graph_pass_t *conv_opt_pass(const char *name); * * @param irg the graph which should be optimized */ -void data_flow_scalar_replacement_opt(ir_graph *irg); +FIRM_API void data_flow_scalar_replacement_opt(ir_graph *irg); /** * A callback that checks whether a entity is an allocation @@ -136,7 +138,8 @@ typedef int (*check_alloc_entity_func)(ir_entity *ent); * @param callback a callback function to check whether a * given entity is a allocation call */ -void escape_enalysis_irg(ir_graph *irg, check_alloc_entity_func callback); +FIRM_API void escape_enalysis_irg(ir_graph *irg, + check_alloc_entity_func callback); /** * Do simple and fast escape analysis for all graphs. @@ -160,7 +163,8 @@ void escape_enalysis_irg(ir_graph *irg, check_alloc_entity_func callback); * * This is most effective on Java where no other stack variables exists. */ -void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback); +FIRM_API void escape_analysis(int run_scalar_replace, + check_alloc_entity_func callback); /** * Optimize function calls by handling const functions. @@ -175,7 +179,7 @@ void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback); * This is a rather strong criteria, so do not expect that a * lot of functions will be found. Moreover, all of them might * already be inlined if inlining is activated. - * Anyway, it might be good for handling builtin's or pseudo-graphs, + * Anyway, it might be good for handling builtin's * even if the later read/write memory (but we know how). * * This optimizations read the irg_const_function property of @@ -197,7 +201,8 @@ void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback); * * @note This optimization destroys the link fields of nodes. */ -void optimize_funccalls(int force_run, check_alloc_entity_func callback); +FIRM_API void optimize_funccalls(int force_run, + check_alloc_entity_func callback); /** * Creates an ir_prog pass for optimize_funccalls(). @@ -212,9 +217,9 @@ void optimize_funccalls(int force_run, check_alloc_entity_func callback); * * @return the newly created ir_prog pass */ -ir_prog_pass_t *optimize_funccalls_pass( - const char *name, - int force_run, check_alloc_entity_func callback); +FIRM_API ir_prog_pass_t *optimize_funccalls_pass(const char *name, + int force_run, + check_alloc_entity_func callback); /** * Does Partial Redundancy Elimination combined with @@ -225,7 +230,7 @@ ir_prog_pass_t *optimize_funccalls_pass( * * @param irg the graph */ -void do_gvn_pre(ir_graph *irg); +FIRM_API void do_gvn_pre(ir_graph *irg); /** * Creates an ir_graph pass for do_gvn_pre(). @@ -234,7 +239,7 @@ void do_gvn_pre(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *do_gvn_pre_pass(const char *name); +FIRM_API ir_graph_pass_t *do_gvn_pre_pass(const char *name); /** * This function is called to evaluate, if a @@ -250,45 +255,31 @@ ir_graph_pass_t *do_gvn_pre_pass(const char *name); typedef int (*arch_allow_ifconv_func)(ir_node *sel, ir_node *mux_false, ir_node *mux_true); -/** - * The parameters structure. - */ -struct ir_settings_if_conv_t { - int max_depth; /**< The maximum depth up to which expressions - are examined when it has to be decided if they - can be placed into another block. */ - arch_allow_ifconv_func allow_ifconv; /**< Evaluator function, if not set all possible Psi - nodes will be created. */ -}; - /** * Perform If conversion on a graph. * * @param irg The graph. - * @param params The parameters for the if conversion. 
* * Cannot handle blocks with Bad control predecessors, so call it after control * flow optimization. */ -void opt_if_conv(ir_graph *irg, const ir_settings_if_conv_t *params); +FIRM_API void opt_if_conv(ir_graph *irg); /** * Creates an ir_graph pass for opt_if_conv(). * * @param name the name of this pass or NULL - * @param params The parameters for the if conversion. * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_if_conv_pass( - const char *name, const ir_settings_if_conv_t *params); +FIRM_API ir_graph_pass_t *opt_if_conv_pass(const char *name); /** * Tries to reduce dependencies for memory nodes where possible by parllelizing * them and synchronising with Sync nodes * @param irg the graph where memory operations should be parallelised */ -void opt_parallelize_mem(ir_graph *irg); +FIRM_API void opt_parallelize_mem(ir_graph *irg); /** * Creates an ir_graph pass for opt_sync(). @@ -297,7 +288,7 @@ void opt_parallelize_mem(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_parallelize_mem_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_parallelize_mem_pass(const char *name); /* * Check if we can replace the load by a given const from @@ -310,7 +301,7 @@ ir_graph_pass_t *opt_parallelize_mem_pass(const char *name); * returns a copy of the constant (possibly Conv'ed) on the * current_ir_graph */ -ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c); +FIRM_API ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c); /** * Load/Store optimization. @@ -339,7 +330,7 @@ ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c); * * @return non-zero if the optimization could be applied, 0 else */ -int optimize_load_store(ir_graph *irg); +FIRM_API int optimize_load_store(ir_graph *irg); /** * Creates an ir_graph pass for optimize_load_store(). @@ -348,14 +339,14 @@ int optimize_load_store(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *optimize_load_store_pass(const char *name); +FIRM_API ir_graph_pass_t *optimize_load_store_pass(const char *name); /** * New experimental alternative to optimize_load_store. * Based on a dataflow analysis, so load/stores are moved out of loops * where possible */ -int opt_ldst(ir_graph *irg); +FIRM_API int opt_ldst(ir_graph *irg); /** * Creates an ir_graph pass for opt_ldst(). @@ -364,7 +355,7 @@ int opt_ldst(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_ldst_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_ldst_pass(const char *name); /** * Optimize loops by peeling or unrolling them if beneficial. @@ -375,7 +366,7 @@ ir_graph_pass_t *opt_ldst_pass(const char *name); * The layout state of the frame type will be set to layout_undefined * if entities were removed. */ -void loop_optimization(ir_graph *irg); +FIRM_API void loop_optimization(ir_graph *irg); /** * Optimize the frame type of an irg by removing @@ -387,7 +378,7 @@ void loop_optimization(ir_graph *irg); * The layout state of the frame type will be set to layout_undefined * if entities were removed. */ -void opt_frame_irg(ir_graph *irg); +FIRM_API void opt_frame_irg(ir_graph *irg); /** * Creates an ir_graph pass for opt_frame_irg(). @@ -396,7 +387,7 @@ void opt_frame_irg(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_frame_irg_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_frame_irg_pass(const char *name); /** Possible flags for the Operator Scalar Replacement. 
*/ typedef enum osr_flags { @@ -471,7 +462,7 @@ typedef enum osr_flags { * * This algorithm destroys the link field of nodes. */ -void opt_osr(ir_graph *irg, unsigned flags); +FIRM_API void opt_osr(ir_graph *irg, unsigned flags); /** * Creates an ir_graph pass for remove_phi_cycles(). @@ -481,7 +472,7 @@ void opt_osr(ir_graph *irg, unsigned flags); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_osr_pass(const char *name, unsigned flags); +FIRM_API ir_graph_pass_t *opt_osr_pass(const char *name, unsigned flags); /** * Removes useless Phi cycles, i.e cycles of Phi nodes with only one @@ -493,7 +484,7 @@ ir_graph_pass_t *opt_osr_pass(const char *name, unsigned flags); * * This algorithm destroys the link field of nodes. */ -void remove_phi_cycles(ir_graph *irg); +FIRM_API void remove_phi_cycles(ir_graph *irg); /** * Creates an ir_graph pass for remove_phi_cycles(). @@ -502,11 +493,11 @@ void remove_phi_cycles(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *remove_phi_cycles_pass(const char *name); +FIRM_API ir_graph_pass_t *remove_phi_cycles_pass(const char *name); /** A default threshold. */ -#define DEFAULT_CLONE_THRESHOLD 300 +#define DEFAULT_CLONE_THRESHOLD 20 /** * Do procedure cloning. Evaluate a heuristic weight for every @@ -519,7 +510,7 @@ ir_graph_pass_t *remove_phi_cycles_pass(const char *name); * when executing a cloned method. If threshold is 0.0, every possible * call is cloned. */ -void proc_cloning(float threshold); +FIRM_API void proc_cloning(float threshold); /** * Creates an ir_prog pass for proc_cloning(). @@ -529,7 +520,7 @@ void proc_cloning(float threshold); * * @return the newly created ir_prog pass */ -ir_prog_pass_t *proc_cloning_pass(const char *name, float threshold); +FIRM_API ir_prog_pass_t *proc_cloning_pass(const char *name, float threshold); /** * Reassociation. @@ -547,7 +538,7 @@ ir_prog_pass_t *proc_cloning_pass(const char *name, float threshold); * * @return non-zero if the optimization could be applied, 0 else */ -int optimize_reassociation(ir_graph *irg); +FIRM_API int optimize_reassociation(ir_graph *irg); /** * Creates an ir_graph pass for optimize_reassociation(). @@ -556,7 +547,7 @@ int optimize_reassociation(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *optimize_reassociation_pass(const char *name); +FIRM_API ir_graph_pass_t *optimize_reassociation_pass(const char *name); /** * Normalize the Returns of a graph by creating a new End block @@ -578,7 +569,7 @@ ir_graph_pass_t *optimize_reassociation_pass(const char *name); * res = c; * return res; */ -void normalize_one_return(ir_graph *irg); +FIRM_API void normalize_one_return(ir_graph *irg); /** * Creates an ir_graph pass for normalize_one_return(). @@ -587,7 +578,7 @@ void normalize_one_return(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *normalize_one_return_pass(const char *name); +FIRM_API ir_graph_pass_t *normalize_one_return_pass(const char *name); /** * Normalize the Returns of a graph by moving @@ -609,7 +600,7 @@ ir_graph_pass_t *normalize_one_return_pass(const char *name); * else * return c; */ -void normalize_n_returns(ir_graph *irg); +FIRM_API void normalize_n_returns(ir_graph *irg); /** * Creates an ir_graph pass for normalize_n_returns(). 
@@ -618,7 +609,7 @@ void normalize_n_returns(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *normalize_n_returns_pass(const char *name); +FIRM_API ir_graph_pass_t *normalize_n_returns_pass(const char *name); /** * Do the scalar replacement optimization. @@ -629,7 +620,7 @@ ir_graph_pass_t *normalize_n_returns_pass(const char *name); * * @return non-zero, if at least one entity was replaced */ -int scalar_replacement_opt(ir_graph *irg); +FIRM_API int scalar_replacement_opt(ir_graph *irg); /** * Creates an ir_graph pass for scalar_replacement_opt(). @@ -638,10 +629,10 @@ int scalar_replacement_opt(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *scalar_replacement_opt_pass(const char *name); +FIRM_API ir_graph_pass_t *scalar_replacement_opt_pass(const char *name); /** Performs strength reduction for the passed graph. */ -void reduce_strength(ir_graph *irg); +FIRM_API void reduce_strength(ir_graph *irg); /** * Optimizes tail-recursion calls by converting them into loops. @@ -659,7 +650,7 @@ void reduce_strength(ir_graph *irg); * * @return non-zero if the optimization could be applied, 0 else */ -int opt_tail_rec_irg(ir_graph *irg); +FIRM_API int opt_tail_rec_irg(ir_graph *irg); /** * Creates an ir_graph pass for opt_tail_rec_irg(). @@ -668,7 +659,7 @@ int opt_tail_rec_irg(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name); +FIRM_API ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name); /** * Optimize tail-recursion calls for all IR-Graphs. @@ -681,7 +672,7 @@ ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name); * because it expects the memory edges pointing to calls, which might be * removed by optimize_funccalls(). */ -void opt_tail_recursion(void); +FIRM_API void opt_tail_recursion(void); /** * Creates an ir_prog pass for opt_tail_recursion(). @@ -690,7 +681,7 @@ void opt_tail_recursion(void); * * @return the newly created ir_prog pass */ -ir_prog_pass_t *opt_tail_recursion_pass(const char *name); +FIRM_API ir_prog_pass_t *opt_tail_recursion_pass(const char *name); /** This is the type for a method, that returns a pointer type to * tp. This is needed in the normalization. */ @@ -721,7 +712,7 @@ typedef ir_type *(*gen_pointer_type_to_func)(ir_type *tp); * search to find an existing pointer type. If it can not find a type, * generates a pointer type with mode_P_mach and suffix "cc_ptr_tp". */ -void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct); +FIRM_API void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct); /** Insert Casts so that class type casts conform exactly with the type hierarchy * in given graph. @@ -730,7 +721,8 @@ void normalize_irp_class_casts(gen_pointer_type_to_func gppt_fct); * * This transformation requires that type information is computed. @see irtypeinfo.h. */ -void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct); +FIRM_API void normalize_irg_class_casts(ir_graph *irg, + gen_pointer_type_to_func gppt_fct); /** Optimize casting between class types. * @@ -752,7 +744,7 @@ void normalize_irg_class_casts(ir_graph *irg, gen_pointer_type_to_func gppt_fct) * Typeinformation is valid after optimization. * Invalidates trout information. 
*/ -void optimize_class_casts(void); +FIRM_API void optimize_class_casts(void); /** * CLiff Click's combo algorithm from @@ -763,7 +755,7 @@ void optimize_class_casts(void); * * @param irg the graph to run on */ -void combo(ir_graph *irg); +FIRM_API void combo(ir_graph *irg); /** * Creates an ir_graph pass for combo. @@ -772,7 +764,7 @@ void combo(ir_graph *irg); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *combo_pass(const char *name); +FIRM_API ir_graph_pass_t *combo_pass(const char *name); /** * Inlines all small methods at call sites where the called address comes @@ -793,7 +785,7 @@ ir_graph_pass_t *combo_pass(const char *name); * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp * combination as control flow operation. */ -void inline_small_irgs(ir_graph *irg, int size); +FIRM_API void inline_small_irgs(ir_graph *irg, int size); /** * Creates an ir_graph pass for inline_small_irgs(). @@ -803,7 +795,7 @@ void inline_small_irgs(ir_graph *irg, int size); * * @return the newly created ir_graph pass */ -ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size); +FIRM_API ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size); /** * Inlineing with a different heuristic than inline_small_irgs(). @@ -828,8 +820,8 @@ ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size); * @param ignore_runtime count a function only calling runtime functions as * leave */ -void inline_leave_functions(unsigned maxsize, unsigned leavesize, - unsigned size, int ignore_runtime); +FIRM_API void inline_leave_functions(unsigned maxsize, unsigned leavesize, + unsigned size, int ignore_runtime); /** * Creates an ir_prog pass for inline_leave_functions(). @@ -846,9 +838,9 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize, * * @return the newly created ir_prog pass */ -ir_prog_pass_t *inline_leave_functions_pass( - const char *name, unsigned maxsize, unsigned leavesize, - unsigned size, int ignore_runtime); +FIRM_API ir_prog_pass_t *inline_leave_functions_pass(const char *name, + unsigned maxsize, unsigned leavesize, unsigned size, + int ignore_runtime); typedef void (*opt_ptr)(ir_graph *irg); @@ -863,8 +855,8 @@ typedef void (*opt_ptr)(ir_graph *irg); * @param after_inline_opt optimizations performed immediately after inlining * some calls */ -void inline_functions(unsigned maxsize, int inline_threshold, - opt_ptr after_inline_opt); +FIRM_API void inline_functions(unsigned maxsize, int inline_threshold, + opt_ptr after_inline_opt); /** * Creates an ir_prog pass for inline_functions(). @@ -872,14 +864,17 @@ void inline_functions(unsigned maxsize, int inline_threshold, * @param name the name of this pass or NULL * @param maxsize Do not inline any calls if a method has more than * maxsize firm nodes. It may reach this limit by - * inlineing. + * inlineing. * @param inline_threshold inlining threshold + * @param after_inline_opt a function that is called after inlining a + * procedure. You should run fast local optimisations + * here which cleanup the graph before further + * inlining * * @return the newly created ir_prog pass */ -ir_prog_pass_t *inline_functions_pass( - const char *name, unsigned maxsize, int inline_threshold, - opt_ptr after_inline_opt); +FIRM_API ir_prog_pass_t *inline_functions_pass(const char *name, + unsigned maxsize, int inline_threshold, opt_ptr after_inline_opt); /** * Combines congruent blocks into one. 
@@ -888,7 +883,7 @@ ir_prog_pass_t *inline_functions_pass(
  *
  * @return non-zero if the graph was transformed
  */
-int shape_blocks(ir_graph *irg);
+FIRM_API int shape_blocks(ir_graph *irg);
 
 /**
  * Creates an ir_graph pass for shape_blocks().
@@ -897,30 +892,66 @@ int shape_blocks(ir_graph *irg);
  *
  * @return the newly created ir_graph pass
  */
-ir_graph_pass_t *shape_blocks_pass(const char *name);
+FIRM_API ir_graph_pass_t *shape_blocks_pass(const char *name);
 
 /**
  * Perform loop inversion on a given graph.
  * Loop inversion transforms a head controlled loop (like while(...) {} and
  * for(...) {}) into a foot controlled loop (do {} while(...)).
  */
-void do_loop_inversion(ir_graph *irg);
+FIRM_API void do_loop_inversion(ir_graph *irg);
 
 /**
  * Perform loop unrolling on a given graph.
  * Loop unrolling multiplies the number loop completely by a number found
  * through a heuristic.
  */
-void do_loop_unrolling(ir_graph *irg);
+FIRM_API void do_loop_unrolling(ir_graph *irg);
 
 /**
  * Perform loop peeling on a given graph.
  */
-void do_loop_peeling(ir_graph *irg);
+FIRM_API void do_loop_peeling(ir_graph *irg);
+
+/**
+ * Creates an ir_graph pass for loop inversion.
+ *
+ * @param name the name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *loop_inversion_pass(const char *name);
+
+/**
+ * Creates an ir_graph pass for loop unrolling.
+ *
+ * @param name the name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *loop_unroll_pass(const char *name);
+
+/**
+ * Creates an ir_graph pass for loop peeling.
+ *
+ * @param name the name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *loop_peeling_pass(const char *name);
 
 typedef ir_type *(*get_Alloc_func)(ir_node *n);
 /** Set a new get_Alloc_func and returns the old one. */
-get_Alloc_func firm_set_Alloc_func(get_Alloc_func newf);
+FIRM_API get_Alloc_func firm_set_Alloc_func(get_Alloc_func newf);
+
+/**
+ * Creates an ir_graph pass for set_vrp_data()
+ *
+ * @param name The name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *set_vrp_pass(const char *name);
 
 /**
  * Removes all entities which are unused.
@@ -930,9 +961,185 @@ get_Alloc_func firm_set_Alloc_func(get_Alloc_func newf);
  *
  * This is usually conservative than gc_irgs, but does not respect properties
  * of object-oriented programs.
  */
-void garbage_collect_entities(void);
+FIRM_API void garbage_collect_entities(void);
 
 /** Pass for garbage_collect_entities */
-ir_prog_pass_t *garbage_collect_entities_pass(const char *name);
+FIRM_API ir_prog_pass_t *garbage_collect_entities_pass(const char *name);
+
+/**
+ * Performs dead node elimination by copying the ir graph to a new obstack.
+ *
+ * The major intention of this pass is to free memory occupied by
+ * dead nodes and outdated analysis information. Further this
+ * function removes Bad predecessors from Blocks and the corresponding
+ * inputs to Phi nodes. This opens optimization potential for other
+ * optimizations. Further this phase reduces dead Block<->Jmp
+ * self-cycles to Bad nodes.
+ *
+ * Dead_node_elimination is only performed if options `optimize' and
+ * `opt_dead_node_elimination' are set. The graph may
+ * not be in state phase_building. The outs data structure is freed,
+ * the outs state set to outs_none. Backedge information is conserved.
+ * Removes old attributes of nodes. Sets link field to NULL.
+ * Callee information must be freed (irg_callee_info_none).
+ *
+ * @param irg The graph to be optimized.
+ */
+FIRM_API void dead_node_elimination(ir_graph *irg);
+
+/**
+ * Creates an ir_graph pass for dead_node_elimination().
+ *
+ * @param name the name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *dead_node_elimination_pass(const char *name);
+
+/**
+ * Inlines a method at the given call site.
+ *
+ * Removes the call node and splits the basic block the call node
+ * belongs to. Inserts a copy of the called graph between these nodes.
+ * Assumes that call is a Call node in current_ir_graph and that
+ * the type in the Call nodes type attribute is the same as the
+ * type of the called graph.
+ * Further it assumes that all Phi nodes in a block of current_ir_graph
+ * are assembled in a "link" list in the link field of the corresponding
+ * block nodes. Further assumes that all Proj nodes are in a "link" list
+ * in the nodes producing the tuple. (This is only an optical feature
+ * for the graph.) Conserves this feature for the old
+ * nodes of the graph. This precondition can be established by a call to
+ * collect_phisprojs(), see irgmod.h.
+ * As dead_node_elimination this function reduces dead Block<->Jmp
+ * self-cycles to Bad nodes.
+ *
+ * Called_graph must be unequal to current_ir_graph. Will not inline
+ * if they are equal.
+ * Sets visited masterflag in current_ir_graph to the max of the flag in
+ * current and called graph.
+ * Assumes that both the called and the calling graph are in state
+ * "op_pin_state_pinned".
+ * It is recommended to call local_optimize_graph() after inlining as this
+ * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
+ * combination as control flow operation.
+ *
+ * @param call the call node that should be inlined
+ * @param called_graph the IR-graph that is called at call
+ *
+ * @return zero if method could not be inlined (recursion for instance),
+ * non-zero if all went ok
+ */
+FIRM_API int inline_method(ir_node *call, ir_graph *called_graph);
+
+/**
+ * Code Placement.
+ *
+ * Pins all floating nodes to a block where they
+ * will be executed only if needed. Depends on the flag opt_global_cse.
+ * Graph may not be in phase_building. Does not schedule control dead
+ * code. Uses dominator information which it computes if the irg is not
+ * in state dom_consistent. Destroys the out information as it moves nodes
+ * to other blocks. Optimizes Tuples in Control edges.
+ *
+ * Call remove_critical_cf_edges() before place_code(). This normalizes
+ * the control flow graph so that for all operations a basic block exists
+ * where they can be optimally placed.
+ */
+FIRM_API void place_code(ir_graph *irg);
+
+/**
+ * Creates an ir_graph pass for place_code().
+ * This pass enables GCSE, runs optimize_graph_df() and finally
+ * place_code().
+ *
+ * @param name the name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *place_code_pass(const char *name);
+
+/**
+ * Determine information about the values of nodes and perform simplifications
+ * using this information. This optimization performs a data-flow analysis to
+ * find the minimal fixpoint.
+ */
+FIRM_API void fixpoint_vrp(ir_graph*);
+
+/**
+ * Creates an ir_graph pass for fixpoint_vrp().
+ * This pass determines information about the values of nodes
+ * and performs simplifications using this information.
+ * This optimization performs a data-flow analysis to
+ * find the minimal fixpoint.
+ *
+ * @param name the name of this pass or NULL
+ *
+ * @return the newly created ir_graph pass
+ */
+FIRM_API ir_graph_pass_t *fixpoint_vrp_irg_pass(const char *name);
+
+/** Needed for MSVC to suppress warnings because it does NOT handle const right. */
+typedef const ir_node *ir_node_cnst_ptr;
+
+/**
+ * Check if the value of a node is != 0.
+ *
+ * This is an often needed case, so we handle Confirm
+ * nodes here too.
+ *
+ * @param n a node representing the value
+ * @param confirm if n is confirmed to be != 0, returns
+ * the Confirm-node, else NULL
+ */
+FIRM_API int value_not_zero(const ir_node *n, ir_node_cnst_ptr *confirm);
+
+/**
+ * Check if the value of a node cannot represent a NULL pointer.
+ *
+ * - If option sel_based_null_check_elim is enabled, all
+ * Sel nodes can be skipped.
+ * - A SymConst(entity) is NEVER a NULL pointer
+ * - A Const != NULL is NEVER a NULL pointer
+ * - Confirms are evaluated
+ *
+ * @param n a node representing the value
+ * @param confirm if n is confirmed to be != NULL, returns
+ * the Confirm-node, else NULL
+ */
+FIRM_API int value_not_null(const ir_node *n, ir_node_cnst_ptr *confirm);
+
+/**
+ * Possible return values of classify_value_sign().
+ */
+typedef enum ir_value_classify_sign {
+	value_classified_unknown = 0,  /**< could not classify */
+	value_classified_positive = 1, /**< value is positive, i.e. >= 0 */
+	value_classified_negative = -1 /**< value is negative, i.e. <= 0 if
+	                                    no signed zero exists, else < 0 */
+} ir_value_classify_sign;
+
+/**
+ * Check if the value of a node can be confirmed to be >= 0 or <= 0
+ * if the mode of the value does not honor signed zeros, else
+ * check for >= 0 or < 0.
+ *
+ * @param n a node representing the value
+ */
+FIRM_API ir_value_classify_sign classify_value_sign(ir_node *n);
+
+/**
+ * Return the value of a Cmp if one or both predecessors
+ * are Confirm nodes.
+ *
+ * @param cmp the compare node that will be evaluated
+ * @param left the left operand of the Cmp
+ * @param right the right operand of the Cmp
+ * @param pnc the compare relation
+ */
+FIRM_API ir_tarval *computed_value_Cmp_Confirm(
+	ir_node *cmp, ir_node *left, ir_node *right, pn_Cmp pnc);
+
+#include "end.h"
 #endif
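The hunks above export the optimization entry points through the FIRM_API macro and drop the ir_settings_if_conv_t parameter from opt_if_conv(), which now takes only the graph. A rough, hedged sketch of how client code might drive a few of the updated entry points follows; the wrapper name optimize_one_graph() is hypothetical, and the umbrella header <libfirm/firm.h> is assumed to pull in these declarations.

#include <libfirm/firm.h>

/* Hypothetical sketch: run a few of the optimizations declared above on one
 * graph obtained elsewhere (e.g. by iterating over the program's graphs). */
static void optimize_one_graph(ir_graph *irg)
{
	/* Remove Id nodes first; per the @bug note on optimize_cf(), it can
	 * choke on them otherwise. */
	local_optimize_graph(irg);

	/* Control flow optimization; destroys backedge information. */
	optimize_cf(irg);

	/* After this change, if-conversion takes no parameter struct. */
	opt_if_conv(irg);

	/* conv_opt() reports whether it changed anything; rerun the cheap
	 * control flow cleanup if it did. */
	if (conv_opt(irg))
		optimize_cf(irg);
}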