/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   Optimizations for a whole ir graph, i.e., a procedure.
 * @author  Christian Schaefer, Goetz Lindenmaier, Sebastian Felis
 */
#ifndef FIRM_IR_IRGOPT_H
#define FIRM_IR_IRGOPT_H

#include "firm_types.h"
31 /** Applies local optimizations (see iropt.h) to all nodes reachable from node n.
33 * @param n The node to be optimized.
35 void local_optimize_node(ir_node *n);
37 /** Applies local optimizations (see iropt.h) to all nodes in the graph.
39 * @param irg The graph to be optimized.
41 * After applying local_optimize_graph() to a IR-graph, Bad nodes
42 * only occure as predecessor of Block and Phi nodes.
44 void local_optimize_graph (ir_graph *irg);
46 /** Applies local optimizations (see iropt.h) to all nodes in the graph.
48 * @param irg The graph to be optimized.
50 * After applying local_optimize_graph() to a IR-graph, Bad nodes
51 * only occure as predecessor of Block and Phi nodes.
53 * This version used a fixpoint iteration.
55 void optimize_graph_df(ir_graph *irg);
57 /** Performs dead node elimination by copying the ir graph to a new obstack.
59 * The major intention of this pass is to free memory occupied by
60 * dead nodes and outdated analyzes information. Further this
61 * function removes Bad predecessors from Blocks and the corresponding
62 * inputs to Phi nodes. This opens optimization potential for other
63 * optimizations. Further this phase reduces dead Block<->Jmp
64 * self-cycles to Bad nodes.
66 * Dead_node_elimination is only performed if options `optimize' and
67 * `opt_dead_node_elimination' are set. The graph may
68 * not be in state phase_building. The outs datasturcture is freed,
69 * the outs state set to outs_none. Backedge information is conserved.
70 * Removes old attributes of nodes. Sets link field to NULL.
71 * Callee information must be freed (irg_callee_info_none).
73 * @param irg The graph to be optimized.
75 void dead_node_elimination(ir_graph *irg);
/** Opaque environment that tracks node pointers across dead code elimination. */
typedef struct _survive_dce_t survive_dce_t;

/**
 * Make a new Survive DCE environment.
 */
survive_dce_t *new_survive_dce(void);
85 * Free a Survive DCE environment.
87 void free_survive_dce(survive_dce_t *sd);
90 * Register a node pointer to be patched upon DCE.
91 * When DCE occurs, the node pointer specified by @p place will be
92 * patched to the new address of the node it is pointing to.
94 * @param sd The Survive DCE environment.
95 * @param place The address of the node pointer.
97 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place);
99 /** Cleans the control flow from Bad predecessors.
101 * Removes Bad predecessors from Blocks and the corresponding
102 * inputs to Phi nodes as in dead_node_elimination but without
105 * Conserves loop information.
107 * @param irg The graph to be optimized.
109 void remove_bad_predecessors(ir_graph *irg);
111 /** Inlines a method at the given call site.
113 * Removes the call node and splits the basic block the call node
114 * belongs to. Inserts a copy of the called graph between these nodes.
115 * Assumes that call is a Call node in current_ir_graph and that
116 * the type in the Call nodes type attribute is the same as the
117 * type of the called graph.
118 * Further it assumes that all Phi nodes in a block of current_ir_graph
119 * are assembled in a "link" list in the link field of the corresponding
120 * block nodes. Further assumes that all Proj nodes are in a "link" list
121 * in the nodes producing the tuple. (This is only an optical feature
122 * for the graph.) Conserves this feature for the old
123 * nodes of the graph. This precondition can be established by a call to
124 * collect_phisprojs(), see irgmod.h.
125 * As dead_node_elimination this function reduces dead Block<->Jmp
126 * self-cycles to Bad nodes.
128 * Called_graph must be unequal to current_ir_graph. Will not inline
130 * Sets visited masterflag in current_ir_graph to the max of the flag in
131 * current and called graph.
132 * Assumes that both, the called and the calling graph are in state
133 * "op_pin_state_pinned".
134 * It is recommended to call local_optimize_graph() after inlining as this
135 * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
136 * combination as control flow operation.
138 * @param call the call node that should be inlined
139 * @param called_graph the IR-graph that is called at call
141 * @return zero if method could not be inlined (recursion for instance),
142 * non-zero if all went ok
144 int inline_method(ir_node *call, ir_graph *called_graph);
146 /** Inlines all small methods at call sites where the called address comes
147 * from a SymConst node that references the entity representing the called
150 * The size argument is a rough measure for the code size of the method:
151 * Methods where the obstack containing the firm graph is smaller than
152 * size are inlined. Further only a limited number of calls are inlined.
153 * If the method contains more than 1024 inlineable calls none will be
155 * Inlining is only performed if flags `optimize' and `inlineing' are set.
156 * The graph may not be in state phase_building.
157 * It is recommended to call local_optimize_graph() after inlining as this
158 * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
159 * combination as control flow operation.
161 void inline_small_irgs(ir_graph *irg, int size);
/** Inlining with a different heuristic than inline_small_irgs().
 *
 * Inlines leave (leaf) functions.  If inlining creates new leave
 * functions it inlines these, too.  (If g calls f, and f calls leave h,
 * h is first inlined in f and then f in g.)
 *
 * Then inlines all small functions (this is not recursive).
 *
 * For a heuristic this inlining uses firm node counts.  It does
 * not count auxiliary nodes as Proj, Tuple, End, Start, Id, Sync.
 * If the ignore_runtime flag is set, calls to functions marked with the
 * mtp_property_runtime property are ignored.
 *
 * @param maxsize         Do not inline any calls if a method has more than
 *                        maxsize firm nodes.  It may reach this limit by
 *                        inlining.
 * @param leavesize       Inline leave functions if they have less than
 *                        leavesize nodes.
 * @param size            Inline all functions smaller than size.
 * @param ignore_runtime  count a function only calling runtime functions as
 *                        a leave function.
 */
void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime);
190 * Pins all floating nodes to a block where they
191 * will be executed only if needed. Depends on the flag opt_global_cse.
192 * Graph may not be in phase_building. Does not schedule control dead
193 * code. Uses dominator information which it computes if the irg is not
194 * in state dom_consistent. Destroys the out information as it moves nodes
195 * to other blocks. Optimizes Tuples in Control edges.
196 * @todo This is not tested!
198 * Call remove_critical_cf_edges() before place_code(). This normalizes
199 * the control flow graph so that for all operations a basic block exists
200 * where they can be optimally placed.
202 * @todo A more powerful code placement would move operations past Phi nodes
205 void place_code(ir_graph *irg);
207 /** Places an empty basic block on critical control flow edges thereby
210 * A critical control flow edge is an edge from a block with several
211 * control exits to a block with several control entries (See Muchnic
214 * @param irg IR Graph
216 void remove_critical_cf_edges(ir_graph *irg);