2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Optimizations for a whole ir graph, i.e., a procedure.
23 * @author Christian Schaefer, Goetz Lindenmaier, Sebastian Felis,
31 #include "irgraph_t.h"
33 #include "iroptimize.h"
44 #include "iredges_t.h"
48 * A wrapper around optimize_inplace_2() to be called from a walker.
50 static void optimize_in_place_wrapper(ir_node *n, void *env)
52 ir_node *optimized = optimize_in_place_2(n);
56 exchange(n, optimized);
61 * Do local optimizations for a node.
63 * @param n the IR-node where to start. Typically the End node
66 * @note current_ir_graph must be set
68 static inline void do_local_optimize(ir_node *n)
70 ir_graph *irg = get_irn_irg(n);
72 /* Handle graph state */
73 assert(get_irg_phase_state(irg) != phase_building);
75 if (get_opt_global_cse())
76 set_irg_pinned(irg, op_pin_state_floats);
77 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
79 /* Clean the value_table in irg for the CSE. */
82 /* walk over the graph */
83 irg_walk(n, firm_clear_link, optimize_in_place_wrapper, NULL);
86 void local_optimize_node(ir_node *n)
88 ir_graph *rem = current_ir_graph;
89 current_ir_graph = get_irn_irg(n);
93 current_ir_graph = rem;
96 static void enqueue_node(ir_node *node, pdeq *waitq)
98 if (get_irn_link(node) == waitq)
100 pdeq_putr(waitq, node);
101 set_irn_link(node, waitq);
105 * Enqueue all users of a node to a wait queue.
106 * Handles mode_T nodes.
108 static void enqueue_users(ir_node *n, pdeq *waitq)
110 foreach_out_edge(n, edge) {
111 ir_node *succ = get_edge_src_irn(edge);
113 enqueue_node(succ, waitq);
115 /* Also enqueue Phis to prevent inconsistencies. */
116 if (is_Block(succ)) {
117 foreach_out_edge(succ, edge2) {
118 ir_node *succ2 = get_edge_src_irn(edge2);
121 enqueue_node(succ2, waitq);
124 } else if (get_irn_mode(succ) == mode_T) {
125 /* A mode_T node has Proj's. Because most optimizations
126 run on the Proj's we have to enqueue them also. */
127 enqueue_users(succ, waitq);
133 * Block-Walker: uses dominance depth to mark dead blocks.
135 static void find_unreachable_blocks(ir_node *block, void *env)
137 pdeq *waitq = (pdeq*) env;
139 if (get_Block_dom_depth(block) < 0) {
140 ir_graph *irg = get_irn_irg(block);
141 ir_node *end = get_irg_end(irg);
143 foreach_block_succ(block, edge) {
144 ir_node *succ_block = get_edge_src_irn(edge);
145 enqueue_node(succ_block, waitq);
146 foreach_out_edge(succ_block, edge2) {
147 ir_node *succ = get_edge_src_irn(edge2);
149 enqueue_node(succ, waitq);
152 enqueue_node(end, waitq);
156 void local_optimize_graph(ir_graph *irg)
158 ir_graph *rem = current_ir_graph;
159 current_ir_graph = irg;
161 do_local_optimize(get_irg_end(irg));
163 current_ir_graph = rem;
167 * Data flow optimization walker.
168 * Optimizes all nodes and enqueue its users
171 static void opt_walker(ir_node *n, void *env)
173 pdeq *waitq = (pdeq*)env;
176 optimized = optimize_in_place_2(n);
177 set_irn_link(optimized, NULL);
179 if (optimized != n) {
180 enqueue_users(n, waitq);
181 exchange(n, optimized);
185 int optimize_graph_df(ir_graph *irg)
187 pdeq *waitq = new_pdeq();
188 ir_graph *rem = current_ir_graph;
191 current_ir_graph = irg;
193 if (get_opt_global_cse())
194 set_irg_pinned(irg, op_pin_state_floats);
196 /* enable unreachable code elimination */
197 assert(!irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_OPTIMIZE_UNREACHABLE_CODE));
198 add_irg_constraints(irg, IR_GRAPH_CONSTRAINT_OPTIMIZE_UNREACHABLE_CODE);
205 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
206 irg_walk_graph(irg, NULL, opt_walker, waitq);
208 /* any optimized nodes are stored in the wait queue,
209 * so if it's not empty, the graph has been changed */
210 while (!pdeq_empty(waitq)) {
211 /* finish the wait queue */
212 while (! pdeq_empty(waitq)) {
213 ir_node *n = (ir_node*)pdeq_getl(waitq);
214 opt_walker(n, waitq);
216 /* Calculate dominance so we can kill unreachable code
217 * We want this intertwined with localopts for better optimization (phase coupling) */
219 irg_block_walk_graph(irg, NULL, find_unreachable_blocks, waitq);
222 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
224 /* disable unreachable code elimination */
225 clear_irg_constraints(irg, IR_GRAPH_CONSTRAINT_OPTIMIZE_UNREACHABLE_CODE);
226 add_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
228 /* invalidate infos */
229 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
230 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
231 edges_deactivate(irg);
233 /* Finally kill BAD and doublets from the keep alives.
234 * Doing this AFTER edges where deactivated saves cycles */
235 end = get_irg_end(irg);
236 remove_End_Bads_and_doublets(end);
238 current_ir_graph = rem;
240 /* Note we do not have a reliable way to detect changes, since some
241 * localopt rules change the inputs of a node and do not return a new
242 * node, so we conservatively say true here */
246 void local_opts_const_code(void)
248 ir_graph *irg = get_const_code_irg();
249 /* Clean the value_table in irg for the CSE. */
252 walk_const_code(firm_clear_link, optimize_in_place_wrapper, NULL);
255 ir_graph_pass_t *optimize_graph_df_pass(const char *name)
257 return def_graph_pass_ret(name ? name : "optimize_graph_df", optimize_graph_df);