1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** Optimizations for a whole ir graph, i.e., a procedure.
16 # include "irnode_t.h"
17 # include "irgraph_t.h"
25 pset *new_identities (void);
26 void del_identities (pset *value_table);
28 /********************************************************************/
29 /* apply optimizations of iropt to all nodes. */
30 /********************************************************************/
/* Walker callback: re-optimize every operand of n in place after the
   recursive walk has optimized the operands themselves.
   NOTE(review): only a fragment is visible in this listing — the return
   type, local declarations (i, optimized) and closing braces are elided. */
33 optimize_in_place_wrapper (ir_node *n, void *env) {
37 /* optimize all sons after recursion, i.e., the sons' sons are
/* Index -1 addresses the node's block; 0..arity-1 are the data/control
   predecessors, so the block pointer is optimized along with the ins. */
39 for (i = -1; i < get_irn_arity(n); i++) {
40 optimized = optimize_in_place(get_irn_n(n, i));
41 set_irn_n(n, i, optimized);
/* Apply the local (iropt) optimizations to every node of irg by walking
   the whole graph from the End node.  Saves and restores current_ir_graph
   so the caller's notion of the current graph is untouched. */
46 local_optimize_graph (ir_graph *irg) {
47 ir_graph *rem = current_ir_graph;
48 current_ir_graph = irg;
/* Reset the CSE value table: entries from a previous optimization pass may
   reference nodes that no longer exist or are about to be replaced. */
50 /* Should we clean the value_table in irg for the cse? Better do so... */
51 del_identities(irg->value_table);
52 irg->value_table = new_identities();
54 /* walk over the graph */
/* optimize_in_place_wrapper is run as the post-visit function, so operands
   are already optimized when a node is processed. */
55 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
57 current_ir_graph = rem;
60 /********************************************************************/
61 /* Routines for dead node elimination / copying garbage collection */
63 /********************************************************************/
65 /* Remember the new node in the old node by using a field all nodes have. */
67 set_new_node (ir_node *old, ir_node *new)
72 /* Get this new node, before the old node is forgotten. */
74 get_new_node (ir_node * n)
80 /* We use the block_visited flag to mark that we have computed the
81 number of useful predecessors for this block.
82 Further we encode the new arity in this flag. Remembering the arity is
83 useful, as it saves a lot of pointer accesses. This function is called
84 for all Phi and Block nodes in a Block. */
/* Return the number of non-Bad predecessors of block b, caching the result
   in b's block_visited flag as (irg_block_visited + result).  A cached value
   is recognized by block_v >= irg_v, so the flag doubles as both the
   "already computed" marker and the stored arity.
   NOTE(review): fragment — declarations of i, irg_v, block_v, res and the
   closing brace are elided in this listing. */
86 compute_new_arity(ir_node *b) {
90 irg_v = get_irg_block_visited(current_ir_graph);
91 block_v = get_Block_block_visited(b);
92 if (block_v >= irg_v) {
93 /* we computed the number of preds for this block and saved it in the
/* Cached: decode the arity from the visited flag. */
95 return block_v - irg_v;
97 /* compute the number of good predecessors */
98 res = get_irn_arity(b);
99 for (i = 0; i < get_irn_arity(b); i++)
100 if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
/* Encode the result into the visited flag for subsequent calls
   (e.g. from the Phis of this block). */
101 /* save it in the flag. */
102 set_Block_block_visited(b, irg_v + res);
107 /* Copies the node to the new obstack. The Ins of the new node point to
108 the predecessors on the old obstack. n->link points to the new node.
109 For Phi and Block nodes the function allocates in-arrays with an arity
110 only for useful predecessors. The arity is determined by counting
111 the non-bad predecessors of the block. */
/* Walker pre-visit: allocate a copy of n on the new obstack and remember it
   in n (via the link field, per set_new_node).  The copy's in-array still
   points at the OLD predecessors; copy_preds fixes that up afterwards.
   Blocks and Phis get a smaller arity that omits Bad predecessors, as
   computed by compute_new_arity.
   NOTE(review): fragment — declarations (nn, block, new_arity), the else
   branches' braces, most new_ir_node arguments and the attribute-copy call
   are elided in this listing. */
113 copy_node (ir_node *n, void *env) {
117 if (get_irn_opcode(n) == iro_Block) {
/* Block: arity = number of non-Bad control predecessors. */
119 new_arity = compute_new_arity(n);
121 block = get_nodes_Block(n);
122 if (get_irn_opcode(n) == iro_Phi) {
/* Phi: must mirror its block's (reduced) arity exactly. */
123 new_arity = compute_new_arity(block);
/* All other nodes keep their full arity. */
125 new_arity = get_irn_arity(n);
128 nn = new_ir_node(current_ir_graph,
134 /* Copy the attributes. These might point to additional data. If this
135 was allocated on the old obstack the pointers now are dangling. This
136 frees e.g. the memory of the graph_arr allocated in new_immBlock. */
141 /* Copies new predecessors of old node to new node remembered in link.
142 Spare the Bad predecessors of Phi and Block nodes. */
/* Walker post-visit: wire the in-array of the copy nn (made by copy_node)
   to the copies of n's predecessors.  For Blocks and Phis the Bad
   predecessors are skipped so the compacted arrays of both stay in step
   (a Phi entry is dropped exactly when the block's matching control
   predecessor is Bad).
   NOTE(review): fragment — declarations of i, j, the j++ increments, the
   closing braces and the final else's set of statements are elided. */
144 copy_preds (ir_node *n, void *env) {
145 ir_node *nn, *block, *on;
148 nn = get_new_node(n);
150 if (get_irn_opcode(n) == iro_Block) {
151 /* Don't copy Bad nodes. */
153 for (i = 0; i < get_irn_arity(n); i++)
154 if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
155 set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
158 /* repair the block visited flag from above misuse */
/* compute_new_arity stored the arity in this flag; clear it so the flag
   is usable as a plain visited marker again. */
159 set_Block_block_visited(nn, 0);
160 /* Local optimization could not merge two subsequent blocks if
161 the in array contained Bads.  Now it's possible. */
162 on = optimize_in_place(nn);
163 if (nn != on) exchange(nn, on);
164 } else if (get_irn_opcode(n) == iro_Phi) {
165 /* Don't copy node if corresponding predecessor in block is Bad.
166 The Block itself should not be Bad. */
167 block = get_nodes_Block(n);
168 set_irn_n (nn, -1, get_new_node(block));
/* Note: the skip test uses the BLOCK's predecessor, not the Phi's own —
   this keeps Phi ins aligned with the block's compacted control ins. */
170 for (i = 0; i < get_irn_arity(n); i++)
171 if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
172 set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
175 /* Compacting the Phi's ins might generate Phis with only one
/* Degenerate one-input Phi: replace it by its single operand. */
177 if (get_irn_arity(n) == 1)
178 exchange(n, get_irn_n(n, 0));
/* All other node kinds: copy every predecessor, including the block (-1). */
180 for (i = -1; i < get_irn_arity(n); i++)
181 set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
185 /* Copies the graph reachable from current_ir_graph->end to the obstack
187 Then fixes the fields in current_ir_graph containing nodes of the
/* NOTE(review): the function header is elided in this listing; from the
   surrounding comments this appears to be the body of the graph-copying
   routine (copies everything reachable from End to the new obstack, then
   repairs the node pointers stored in current_ir_graph) — confirm against
   the full source. */
191 /* Not all nodes remembered in current_ir_graph might be reachable
192 from the end node. Assure their link is set to NULL so that
193 we can test whether new nodes have been computed. */
194 set_irn_link(get_irg_frame (current_ir_graph), NULL);
195 set_irn_link(get_irg_globals(current_ir_graph), NULL);
196 set_irn_link(get_irg_args (current_ir_graph), NULL);
198 /* we use the block walk flag for removing Bads from Blocks ins. */
/* Fresh epoch for compute_new_arity's flag-encoding trick. */
199 inc_irg_block_visited(current_ir_graph);
/* Copy everything reachable from End: copy_node allocates on the new
   obstack, copy_preds rewires the in-arrays. */
202 irg_walk(get_irg_end(current_ir_graph), copy_node, copy_preds, NULL);
204 /* fix the fields in current_ir_graph */
205 set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
206 set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
/* Frame/globals/args may be unreachable from End (link still NULL from
   above); copy them explicitly so the graph fields below stay valid. */
207 if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL)
208 irg_walk(get_irg_frame(current_ir_graph), copy_node, copy_preds, NULL);
209 if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL)
210 irg_walk(get_irg_globals(current_ir_graph), copy_node, copy_preds, NULL);
211 if (get_irn_link(get_irg_args(current_ir_graph)) == NULL)
212 irg_walk(get_irg_args(current_ir_graph), copy_node, copy_preds, NULL);
213 set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
214 set_irg_start_block(current_ir_graph,
215 get_new_node(get_irg_start_block(current_ir_graph)));
216 set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
217 set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
218 set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
/* The singleton Bad node is never reachable via ins after compaction;
   copy it by hand if the walk did not. */
219 if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
220 copy_node(get_irg_bad(current_ir_graph), NULL);
221 copy_preds(get_irg_bad(current_ir_graph), NULL);
223 set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
227 /* Amroq calls this emigrate() */
/* Dead node elimination by copying garbage collection: copy all nodes
   reachable from End onto a fresh obstack, then free the old obstack and
   with it every unreachable ("dead") node.  Respects the global optimize
   and dead-node-elimination flags; a no-op when either is off.
   NOTE(review): fragment — the declaration of rem, the copy-graph call
   around listing line 250 and the closing braces are elided. */
229 dead_node_elimination(ir_graph *irg) {
231 struct obstack *graveyard_obst = NULL;
232 struct obstack *rebirth_obst = NULL;
234 /* Remember external state of current_ir_graph. */
235 rem = current_ir_graph;
236 current_ir_graph = irg;
238 if (get_optimize() && get_opt_dead_node_elimination()) {
240 /* A quiet place, where the old obstack can rest in peace,
241 until it will be cremated. */
242 graveyard_obst = irg->obst;
244 /* A new obstack, where the reachable nodes will be copied to. */
245 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
246 current_ir_graph->obst = rebirth_obst;
247 obstack_init (current_ir_graph->obst);
249 /* Copy the graph from the old to the new obstack */
/* Freeing the graveyard obstack releases every node not copied above. */
252 /* Free memory from old unoptimized obstack */
253 obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
254 xfree (graveyard_obst); /* ... then free it. */
/* Restore the caller's current graph. */
257 current_ir_graph = rem;