/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* @file
* @brief Optimizations for a whole ir graph, i.e., a procedure.
* @author Christian Schaefer, Goetz Lindenmaier, Sebastian Felis
- * @version $Id$
*/
#ifndef FIRM_IR_IRGOPT_H
#define FIRM_IR_IRGOPT_H
#include "firm_types.h"
+#include "begin.h"
-/** Applies local optimizations (see iropt.h) to all nodes reachable from node n.
- *
- * @param n The node to be optimized.
+/**
+ * @ingroup iroptimize
+ * @defgroup irgopt Graph Transformations
+ * @{
*/
-void local_optimize_node(ir_node *n);
-/** Applies local optimizations (see iropt.h) to all nodes in the graph.
+/** Applies local optimizations (see iropt.h) to all nodes reachable from node
+ * @p n.
*
- * @param irg The graph to be optimized.
- *
- * After applying local_optimize_graph() to a IR-graph, Bad nodes
- * only occure as predecessor of Block and Phi nodes.
+ * @param n The node to be optimized.
*/
-void local_optimize_graph (ir_graph *irg);
+FIRM_API void local_optimize_node(ir_node *n);
/** Applies local optimizations (see iropt.h) to all nodes in the graph.
*
* @param irg The graph to be optimized.
*
* After applying local_optimize_graph() to an IR-graph, Bad nodes
- * only occure as predecessor of Block and Phi nodes.
- *
- * This version used a fixpoint iteration.
+ * only occur as predecessor of Block and Phi nodes.
*/
-void optimize_graph_df(ir_graph *irg);
+FIRM_API void local_optimize_graph(ir_graph *irg);
-/** Performs dead node elimination by copying the ir graph to a new obstack.
+/** Applies local optimizations (see iropt.h) to all nodes in the graph.
*
- * The major intention of this pass is to free memory occupied by
- * dead nodes and outdated analyzes information. Further this
- * function removes Bad predecessors from Blocks and the corresponding
- * inputs to Phi nodes. This opens optimization potential for other
- * optimizations. Further this phase reduces dead Block<->Jmp
- * self-cycles to Bad nodes.
+ * After applying optimize_graph_df() to an IR-graph, Bad nodes
+ * only occur as predecessor of Block and Phi nodes.
*
- * Dead_node_elimination is only performed if options `optimize' and
- * `opt_dead_node_elimination' are set. The graph may
- * not be in state phase_building. The outs datasturcture is freed,
- * the outs state set to outs_none. Backedge information is conserved.
- * Removes old attributes of nodes. Sets link field to NULL.
- * Callee information must be freed (irg_callee_info_none).
+ * This version uses fixpoint iteration.
*
* @param irg The graph to be optimized.
+ *
+ * @return non-zero if the optimization could be applied, 0 else
*/
-void dead_node_elimination(ir_graph *irg);
-
-typedef struct _survive_dce_t survive_dce_t;
+FIRM_API void local_opts(ir_graph *irg);
-/**
- * Make a new Survive DCE environment.
+/** Same functionality as local_opts() above, but without the pass framework wrapper.
+ * @deprecated
*/
-survive_dce_t *new_survive_dce(void);
+FIRM_API int optimize_graph_df(ir_graph *irg);
/**
- * Free a Survive DCE environment.
+ * Eliminates (obviously) unreachable code
*/
-void free_survive_dce(survive_dce_t *sd);
+FIRM_API void remove_unreachable_code(ir_graph *irg);
/**
- * Register a node pointer to be patched upon DCE.
- * When DCE occurs, the node pointer specified by @p place will be
- * patched to the new address of the node it is pointing to.
+ * Removes all Bad nodes from a graph.
+ *
+ * @param irg The graph to be optimized.
*
- * @param sd The Survive DCE environment.
- * @param place The address of the node pointer.
+ * @return non-zero if at least one Bad was removed, otherwise 0
*/
-void survive_dce_register_irn(survive_dce_t *sd, ir_node **place);
+FIRM_API int remove_bads(ir_graph *irg);
-/** Cleans the control flow from Bad predecessors.
- *
- * Removes Bad predecessors from Blocks and the corresponding
- * inputs to Phi nodes as in dead_node_elimination but without
- * copying the graph.
- *
- * Conserves loop information.
+/**
+ * Removes all Tuple nodes from a graph.
*
* @param irg The graph to be optimized.
+ *
+ * @return non-zero if at least one Tuple was removed, otherwise 0
*/
-void remove_bad_predecessors(ir_graph *irg);
-
-/** Inlines a method at the given call site.
- *
- * Removes the call node and splits the basic block the call node
- * belongs to. Inserts a copy of the called graph between these nodes.
- * Assumes that call is a Call node in current_ir_graph and that
- * the type in the Call nodes type attribute is the same as the
- * type of the called graph.
- * Further it assumes that all Phi nodes in a block of current_ir_graph
- * are assembled in a "link" list in the link field of the corresponding
- * block nodes. Further assumes that all Proj nodes are in a "link" list
- * in the nodes producing the tuple. (This is only an optical feature
- * for the graph.) Conserves this feature for the old
- * nodes of the graph. This precondition can be established by a call to
- * collect_phisprojs(), see irgmod.h.
- * As dead_node_elimination this function reduces dead Block<->Jmp
- * self-cycles to Bad nodes.
- *
- * Called_graph must be unequal to current_ir_graph. Will not inline
- * if they are equal.
- * Sets visited masterflag in current_ir_graph to the max of the flag in
- * current and called graph.
- * Assumes that both, the called and the calling graph are in state
- * "op_pin_state_pinned".
- * It is recommended to call local_optimize_graph() after inlining as this
- * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
- * combination as control flow operation.
- *
- * @param call the call node that should be inlined
- * @param called_graph the IR-graph that is called at call
- *
- * @return zero if method could not be inlined (recursion for instance),
- * non-zero if all went ok
- */
-int inline_method(ir_node *call, ir_graph *called_graph);
-
-/** Inlines all small methods at call sites where the called address comes
- * from a SymConst node that references the entity representing the called
- * method.
- *
- * The size argument is a rough measure for the code size of the method:
- * Methods where the obstack containing the firm graph is smaller than
- * size are inlined. Further only a limited number of calls are inlined.
- * If the method contains more than 1024 inlineable calls none will be
- * inlined.
- * Inlining is only performed if flags `optimize' and `inlineing' are set.
- * The graph may not be in state phase_building.
- * It is recommended to call local_optimize_graph() after inlining as this
- * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
- * combination as control flow operation.
- */
-void inline_small_irgs(ir_graph *irg, int size);
-
+FIRM_API int remove_tuples(ir_graph *irg);
-/** Inlineing with a different heuristic than inline_small_irgs().
- *
- * Inlines leave functions. If inlinening creates new leave
- * function inlines these, too. (If g calls f, and f calls leave h,
- * h is first inlined in f and then f in g.)
- *
- * Then inlines all small functions (this is not recursive).
+/**
+ * Creates an ir_graph pass for optimize_graph_df().
*
- * For a heuristic this inlineing uses firm node counts. It does
- * not count auxiliary nodes as Proj, Tuple, End, Start, Id, Sync.
- * If the ignore_runtime flag is set, calls to functions marked with the
- * mtp_property_runtime property are ignored.
+ * @param name the name of this pass or NULL
*
- * @param maxsize Do not inline any calls if a method has more than
- * maxsize firm nodes. It may reach this limit by
- * inlineing.
- * @param leavesize Inline leave functions if they have less than leavesize
- * nodes.
- * @param size Inline all function smaller than size.
- * @param ignore_runtime count a function only calling runtime functions as
- * leave
+ * @return the newly created ir_graph pass
*/
-void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime);
+FIRM_API ir_graph_pass_t *optimize_graph_df_pass(const char *name);
-/** Code Placement.
- *
- * Pins all floating nodes to a block where they
- * will be executed only if needed. Depends on the flag opt_global_cse.
- * Graph may not be in phase_building. Does not schedule control dead
- * code. Uses dominator information which it computes if the irg is not
- * in state dom_consistent. Destroys the out information as it moves nodes
- * to other blocks. Optimizes Tuples in Control edges.
- * @todo This is not tested!
+/** Places an empty basic block on critical control flow edges thereby
+ * removing them.
*
- * Call remove_critical_cf_edges() before place_code(). This normalizes
- * the control flow graph so that for all operations a basic block exists
- * where they can be optimally placed.
+ * A critical control flow edge is an edge from a block with several
+ * control exits to a block with several control entries (See Muchnick
+ * p. 407). Exception edges are always ignored.
*
- * @todo A more powerful code placement would move operations past Phi nodes
- * out of loops.
+ * @param irg IR Graph
*/
-void place_code(ir_graph *irg);
+FIRM_API void remove_critical_cf_edges(ir_graph *irg);
/** Places an empty basic block on critical control flow edges thereby
 * removing them.
 *
 * A critical control flow edge is an edge from a block with several
 * control exits to a block with several control entries (See Muchnick
 * p. 407).
*
- * @param irg IR Graph
+ * @param irg IR Graph
+ * @param ignore_exception_edges if non-zero, exception edges will be ignored
*/
-void remove_critical_cf_edges(ir_graph *irg);
+FIRM_API void remove_critical_cf_edges_ex(ir_graph *irg,
+ int ignore_exception_edges);
+
+/** @} */
+
+#include "end.h"
#endif