1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** Optimizations for a whole ir graph, i.e., a procedure.
16 # include "irnode_t.h"
17 # include "irgraph_t.h"
25 pset *new_identities (void);
26 void del_identities (pset *value_table);
28 /********************************************************************/
29 /* apply optimizations of iropt to all nodes. */
30 /********************************************************************/
/* irg_walk visitor: re-optimizes node n in place by replacing each of its
   predecessors (including the block at index -1) with its locally optimized
   version.  env is unused.
   NOTE(review): the return type and the declarations of the locals i and
   optimized are elided in this chunk. */
33 optimize_in_place_wrapper (ir_node *n, void *env) {
37 /* Optimize all sons after recursion, i.e., the sons' sons are already
   optimized when n itself is optimized.  (Original comment truncated.) */
39 for (i = -1; i < get_irn_arity(n); i++) {
   /* i == -1 addresses the node's block predecessor (cf. set_irn_n(nn, -1, ...)
      in copy_preds); 0..arity-1 are the ordinary predecessors. */
40 optimized = optimize_in_place(get_irn_n(n, i));
41 set_irn_n(n, i, optimized);
/* Applies the local optimizer to every node reachable from irg->end.
   Saves and restores current_ir_graph around the walk so callers' global
   state is preserved.
   NOTE(review): the return type line and the closing brace are elided in
   this chunk. */
46 local_optimize_graph (ir_graph *irg) {
47 ir_graph *rem = current_ir_graph;
48 current_ir_graph = irg;
50 /* Reset the value_table used for CSE so stale entries from a previous
   optimization pass cannot be matched against. */
51 del_identities(irg->value_table);
52 irg->value_table = new_identities();
54 /* walk over the graph, running optimize_in_place on every node's preds */
55 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
57 current_ir_graph = rem;
60 /********************************************************************/
61 /* Routines for dead node elimination / copying garbage collection */
63 /********************************************************************/
65 /* Remember the new node in the old node by using a field all nodes have. */
   /* NOTE(review): the body is elided in this chunk — presumably it stores
      `new` via the node's link field, which get_new_node reads back and which
      copy_graph resets with set_irn_link(..., NULL) — confirm against the
      full file. */
67 set_new_node (ir_node *old, ir_node *new)
72 /* Get this new node, before the old node is forgotten. */
   /* NOTE(review): the body is elided in this chunk — presumably it returns
      the copy remembered by set_new_node (the node's link field) — confirm
      against the full file. */
74 get_new_node (ir_node * n)
80 /* We use the block_visited flag to mark that we have computed the
81 number of useful predecessors for this block.
82 Further we encode the new arity in this flag. Remembering the arity is
83 useful, as it saves a lot of pointer accesses. This function is called
84 for all Phi and Block nodes in a Block. */
   /* Returns the number of non-Bad predecessors of block b, caching the
      result in b's block_visited flag as (irg_v + result).
      NOTE(review): the return type and the declarations of irg_v, block_v,
      res and i are elided in this chunk. */
86 compute_new_arity(ir_node *b) {
90 irg_v = get_irg_block_visited(current_ir_graph);
91 block_v = get_Block_block_visited(b);
92 if (block_v >= irg_v) {
93 /* Already computed: the count was stored below as irg_v + res, so
   decode it by subtracting the graph's visited counter. */
95 return block_v - irg_v;
97 /* compute the number of good (non-Bad) predecessors */
98 res = get_irn_arity(b);
99 for (i = 0; i < get_irn_arity(b); i++)
100 if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
101 /* Cache the count in the flag, encoded relative to irg_v so it stays
   distinguishable from ordinary visited marks (see decode above). */
102 set_Block_block_visited(b, irg_v + res);
107 /* Copies the node to the new obstack. The Ins of the new node point to
108 the predecessors on the old obstack. n->link points to the new node.
109 For Phi and Block nodes the function allocates in-arrays with an arity
110 only for useful predecessors. The arity is determined by counting
111 the non-bad predecessors of the block. */
    /* irg_walk pre-visitor paired with copy_preds; env is unused.
       NOTE(review): the locals (nn, block, new_arity), the else-branches,
       the tail of the new_ir_node() argument list, and the attribute-copy
       code are elided in this chunk. */
113 copy_node (ir_node *n, void *env) {
117 if (get_irn_opcode(n) == iro_Block) {
    /* A Block shrinks to its number of non-Bad control-flow preds. */
119 new_arity = compute_new_arity(n);
121 block = get_nodes_Block(n);
122 if (get_irn_opcode(n) == iro_Phi) {
    /* A Phi must shrink in lock-step with its Block's in-array. */
123 new_arity = compute_new_arity(block);
    /* All other nodes keep their arity unchanged. */
125 new_arity = get_irn_arity(n);
    /* Allocate the copy on the new obstack (argument list elided here). */
128 nn = new_ir_node(current_ir_graph,
134 /* Copy the attributes. These might point to additional data. If this
135 was allocated on the old obstack the pointers now are dangling. This
136 frees e.g. the memory of the graph_arr allocated in new_immBlock. */
141 /* Copies new predecessors of old node to new node remembered in link.
142 Spare the Bad predecessors of Phi and Block nodes. */
    /* irg_walk post-visitor paired with copy_node; env is unused.
       NOTE(review): the return type, the declarations of i and j, and
       several closing braces are elided in this chunk. */
144 copy_preds (ir_node *n, void *env) {
145 ir_node *nn, *block, *on;
148 nn = get_new_node(n);
150 if (get_irn_opcode(n) == iro_Block) {
151 /* Don't copy Bad control-flow predecessors; j indexes the compacted
   in-array of the copy. */
153 for (i = 0; i < get_irn_arity(n); i++)
154 if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
155 set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
158 /* repair the block visited flag from above misuse by compute_new_arity */
159 set_Block_block_visited(nn, 0);
160 /* Local optimization could not merge two subsequent blocks while the
161 in-array contained Bads. Now it's possible, but don't do it for the
    end block.  (Original comment truncated.) */
    /* NOTE(review): if n == end_block, `on` looks uninitialized at the
       comparison below — an elided line may initialize it; confirm against
       the full file. */
163 if (n != current_ir_graph->end_block) on = optimize_in_place(nn);
165 if (nn != on) exchange(nn, on);
166 } else if (get_irn_opcode(n) == iro_Phi) {
167 /* Don't copy node if corresponding predecessor in block is Bad.
168 The Block itself should not be Bad. */
169 block = get_nodes_Block(n);
170 set_irn_n (nn, -1, get_new_node(block));
    /* Compact the Phi's ins in step with the Block's compaction above. */
172 for (i = 0; i < get_irn_arity(n); i++)
173 if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
174 set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
177 /* Compacting the Phi's ins might generate Phis with only one
    predecessor; replace such a Phi by that predecessor.
    (Original comment truncated.)
    NOTE(review): this tests/exchanges the OLD node n, not the copy nn —
    verify against the full file that this is intended. */
179 if (get_irn_arity(n) == 1)
180 exchange(n, get_irn_n(n, 0));
    /* Default case: copy all predecessors (including the block at -1)
       one-to-one. */
182 for (i = -1; i < get_irn_arity(n); i++)
183 set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
187 /* Copies the graph reachable from current_ir_graph->end to the obstack
189 Then fixes the fields in current_ir_graph containing nodes of the
    old obstack.  (Original comment truncated.) */
    /* NOTE(review): the function signature line is elided in this chunk;
       this is the body of the graph-copying routine used by
       dead_node_elimination. */
193 /* Not all nodes remembered in current_ir_graph might be reachable
194 from the end node. Assure their link is set to NULL so that
195 we can test whether new nodes have been computed. */
196 set_irn_link(get_irg_frame (current_ir_graph), NULL);
197 set_irn_link(get_irg_globals(current_ir_graph), NULL);
198 set_irn_link(get_irg_args (current_ir_graph), NULL);
200 /* we use the block walk flag for removing Bads from Blocks ins. */
201 inc_irg_block_visited(current_ir_graph);
    /* Copy everything reachable from the end node onto the new obstack. */
204 irg_walk(get_irg_end(current_ir_graph), copy_node, copy_preds, NULL);
206 /* fix the fields in current_ir_graph to point at the copies */
207 set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
208 set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
    /* frame/globals/args may be unreachable from end (link still NULL):
       copy them with an extra walk each before translating the fields. */
209 if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL)
210 irg_walk(get_irg_frame(current_ir_graph), copy_node, copy_preds, NULL);
211 if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL)
212 irg_walk(get_irg_globals(current_ir_graph), copy_node, copy_preds, NULL);
213 if (get_irn_link(get_irg_args(current_ir_graph)) == NULL)
214 irg_walk(get_irg_args(current_ir_graph), copy_node, copy_preds, NULL);
215 set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
216 set_irg_start_block(current_ir_graph,
217 get_new_node(get_irg_start_block(current_ir_graph)));
218 set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
219 set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
220 set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
    /* The Bad node is never reachable through the walk; copy it by hand. */
221 if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
222 copy_node(get_irg_bad(current_ir_graph), NULL);
223 copy_preds(get_irg_bad(current_ir_graph), NULL);
225 set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
229 /* Amroq calls this emigrate(). */
    /* Dead node elimination by copying: clones all reachable nodes of irg
       onto a fresh obstack, then frees the old obstack together with every
       unreachable (dead) node on it.  Guarded by the global optimize and
       dead-node-elimination flags.
       NOTE(review): the return type, the declaration of rem, the call to
       the graph-copying routine, and the closing brace are elided in this
       chunk. */
231 dead_node_elimination(ir_graph *irg) {
233 struct obstack *graveyard_obst = NULL;
234 struct obstack *rebirth_obst = NULL;
236 /* Remember external state of current_ir_graph. */
237 rem = current_ir_graph;
238 current_ir_graph = irg;
240 if (get_optimize() && get_opt_dead_node_elimination()) {
242 /* A quiet place, where the old obstack can rest in peace,
243 until it will be cremated. */
244 graveyard_obst = irg->obst;
246 /* A new obstack, where the reachable nodes will be copied to. */
247 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
248 current_ir_graph->obst = rebirth_obst;
249 obstack_init (current_ir_graph->obst);
251 /* Copy the graph from the old to the new obstack (call elided here). */
254 /* Free memory from old unoptimized obstack */
255 obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
256 xfree (graveyard_obst); /* ... then free it. */
259 current_ir_graph = rem;