/*
 * File name:   ir/ir/irgopt.h
 * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
 * Author:      Christian Schaefer, Goetz Lindenmaier
 * Modified by: Sebastian Felis
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */

/**
 * @file irgopt.h
 *
 * Optimizations for a whole ir graph, i.e., a procedure.
 *
 * @author Christian Schaefer, Goetz Lindenmaier
 */
26 /** Applies local optimizations (see iropt.h) to all nodes in the graph. */
27 void local_optimize_graph (ir_graph *irg);
29 /** Performs dead node elimination by copying the ir graph to a new obstack.
31 * The major intention of this pass is to free memory occupied by
32 * dead nodes and outdated analyses information. Further this
33 * function removes Bad predecesors from Blocks and the corresponding
34 * inputs to Phi nodes. This opens optmization potential for other
35 * optimizations. Further this phase reduces dead Block<->Jmp
36 * self-cycles to Bad nodes.
38 * Dead_node_elimination is only performed if options `optimize' and
39 * `opt_dead_node_elimination' are set. The graph may
40 * not be in state phase_building. The outs datasturcture is freed,
41 * the outs state set to no_outs. Backedge information is conserved.
42 * Removes old attributes of nodes. Sets link field to NULL.
43 * Callee information must be freed (irg_callee_info_none).
45 * Attention: the numbers assigned to nodes if the library is compiled for
46 * development/debugging are not conserved by copying. */
47 void dead_node_elimination(ir_graph *irg);
49 /** Removes Bad Bad predecesors from Blocks and the corresponding
50 inputs to Phi nodes as in dead_node_elimination but without
53 @todo not implemented! / buggy? */
54 void remove_bad_predecessors(ir_graph *irg);
56 /** Inlines a method at the given call site.
58 * Removes the call node and splits the basic block the call node
59 * belongs to. Inserts a copy of the called graph between these nodes.
60 * Assumes that call is a Call node in current_ir_graph and that
61 * the type in the Call nodes type attribute is the same as the
62 * type of the called graph.
63 * Further it assumes that all Phi nodes in a block of current_ir_graph
64 * are assembled in a "link" list in the link field of the corresponding
65 * block nodes. Further assumes that all Proj nodes are in a "link" list
66 * in the nodes producing the tuple. (This is only an optical feature
67 * for the graph.) Conserves this feature for the old
68 * nodes of the graph. This precondition can be established by a call to
69 * collect_phisprojs(), see irgmod.h.
70 * As dead_node_elimination this function reduces dead Block<->Jmp
71 * self-cycles to Bad nodes.
73 * Called_graph must be unequal to current_ir_graph. Will not inline
75 * Sets visited masterflag in current_ir_graph to the max of the flag in
76 * current and called graph.
77 * Assumes that both, the called and the calling graph are in state
79 * It is recommended to call local_optimize_graph after inlining as this
80 * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
81 * combination as control flow operation.
83 * @param call the call node that should be inlined
84 * @param called_graph the IR-graph that is called at call
86 * @return zero if method could not be inlined (recursion for instance),
87 * non-zero if all went ok
89 int inline_method(ir_node *call, ir_graph *called_graph);
91 /** Inlines all small methods at call sites where the called address comes
92 * from a Const node that references the entity representing the called
94 * The size argument is a rough measure for the code size of the method:
95 * Methods where the obstack containing the firm graph is smaller than
96 * size are inlined. Further only a limited number of calls are inlined.
97 * If the method contains more than 1024 inlineable calls none will be
99 * Inlining is only performed if flags `optimize' and `inlineing' are set.
100 * The graph may not be in state phase_building.
101 * It is recommended to call local_optimize_graph after inlining as this
102 * function leaves a set of obscure Tuple nodes, e.g. a Proj-Tuple-Jmp
103 * combination as control flow operation. */
104 void inline_small_irgs(ir_graph *irg, int size);
/** Inlining with a different heuristic than inline_small_irgs.
 *
 *  Inlines leave functions.  If inlining creates new leave
 *  functions, inlines these, too.  (If g calls f, and f calls leave h,
 *  h is first inlined in f and then f in g.)
 *
 *  Then inlines all small functions (this is not recursive).
 *
 *  For a heuristic this inlining uses firm node counts.  It does
 *  not count auxiliary nodes as Proj, Tuple, End, Start, Id, Sync.
 *
 *  @param maxsize    Do not inline any calls if a method has more than
 *                    maxsize firm nodes.  It may reach this limit by
 *                    inlining.
 *  @param leavesize  Inline leave functions if they have less than leavesize
 *                    firm nodes.
 *  @param size       Inline all functions smaller than size.
 */
void inline_leave_functions(int maxsize, int leavesize, int size);
127 /** Code Placement. Pinns all floating nodes to a block where they
128 will be executed only if needed. Depends on the flag opt_global_cse.
129 Graph may not be in phase_building. Does not schedule control dead
130 code. Uses dominator information which it computes if the irg is not
131 in state dom_consistent. Destroys the out information as it moves nodes
132 to other blocks. Optimizes Tuples in Control edges.
133 @todo This is not tested!
135 Call remove_critical_cf_edges() before place_code(). This normalizes
136 the control flow graph so that for all operations a basic block exists
137 where they can be optimally placed.
139 @todo A more powerful code placement would move operations past Phi nodes
141 void place_code(ir_graph *irg);
143 /** Control flow optimization.
144 * Removes empty blocks doing if simplifications and loop simplifications.
145 * A block is empty if it contains only a Jmp node and Phi nodes.
146 * Merges single entry single exit blocks with their predecessor
147 * and propagates dead control flow by calling equivalent_node.
148 * Independent of compiler flag it removes Tuples from cf edges,
149 * Bad predecessors form blocks and unnecessary predecessors of End.
151 * @bug So far destroys backedge information.
152 * @bug Chokes on Id nodes if called in a certain order with other
153 * optimizations. Call local_optimize_graph before to remove
156 void optimize_cf(ir_graph *irg);
159 /** Places an empty basic block on critical control flow edges thereby
161 A critical control flow edge is an edge from a block with several
162 control exits to a block with several control entries (See Muchnic
164 Is only executed if flag set_opt_critical_edges() is set.
167 void remove_critical_cf_edges(ir_graph *irg);
169 # endif /* _IRGOPT_H_ */