1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** dead node elimination
7 ** walks one time through the whole graph and copies it into another graph,
8 ** so unreachable nodes will be lost.
18 /********************************************************************/
19 /* apply optimizations of iropt to all nodes. */
21 optimize_in_place_wrapper (ir_node *n, void *env) {
25 /* optimize all sons after recursion, i.e., the sons' sons are
27 for (i = -1; i < get_irn_arity(n); i++) {
28 optimized = optimize_in_place(get_irn_n(n, i));
29 set_irn_n(n, i, optimized);
35 local_optimize_graph (ir_graph *irg) {
36 ir_graph *rem = current_ir_graph;
37 current_ir_graph = irg;
39 /* walk over the graph */
40 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
42 current_ir_graph = rem;
45 /********************************************************************/
46 /* Routines for dead node elimination / copying garbage collection */
49 /* Remeber the new node in the old node,
50 by using a field that all nodes have. */
52 set_new_node (ir_node *old, ir_node *new)
54 old->in[0] = new; /* Hi Chris: Benutze old->link, ich hab mich vergewissert dass
55 das hier ueberschrieben werden kann, das erspaart eine
56 indirektion --> schneller. */
/* Get this new node, before the old node is forgotten. */
/* Inverse of set_new_node(): returns the copy recorded for n.
   (Body not shown here -- presumably reads it back from n->in[0];
   verify against set_new_node above.) */
get_new_node (ir_node * n)
/* Create this node on a new obstack. */
/* Walker callback for dead node elimination: rebuild n with the
   r-constructors so that the copy is allocated on the new obstack of
   current_ir_graph, then record the copy in the old node via
   set_new_node().  Predecessors are mapped through get_new_node(),
   so they must have been copied already (post-order walk).  env is
   unused. */
copy_node (ir_node *n, void *env) {
  /* For binop/unop nodes the operands are fetched once up front; the
     per-opcode cases below only map them through get_new_node(). */
    a = get_binop_left(n);
    b = get_binop_right(n);
  } else if (is_unop(n)) {
  /* Dispatch on the opcode and rebuild the node with the matching
     new_r_* constructor. */
  switch (get_irn_opcode(n)) {
    /* Variadic nodes (Block, Return, Sel, Call, Phi, Sync, Tuple):
       the OLD node's predecessor array is rewritten in place to the
       copied predecessors and then handed to the constructor, which
       is assumed to copy the array onto the new obstack -- verify. */
      ir_node **in = get_Block_cfgpred_arr(n);
      for (i = 0; i < get_Block_n_cfgpreds(n); i++)
        set_Block_cfgpred(n, i, get_new_node(get_Block_cfgpred(n, i)));
      res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
    res = new_r_Start (current_ir_graph, get_new_node(get_nodes_Block(n)));
    res = new_r_End (current_ir_graph, get_new_node(get_nodes_Block(n)));
    /* End anchors the graph: redirect the graph's end and end_block
       fields to the freshly built copies. */
    current_ir_graph -> end = res;
    current_ir_graph -> end_block = get_nodes_Block(res);
    res = new_r_Jmp (current_ir_graph, get_new_node(get_nodes_Block(n)));
    res = new_r_Cond (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Cond_selector(n)));
      in = get_Return_res_arr(n);
      for (i = 0; i < get_Return_n_res(n); i++)
        set_Return_res(n, i, get_new_node(get_Return_res(n, i)));
      res = new_r_Return (current_ir_graph, get_new_node(get_nodes_Block(n)),
                          get_new_node(get_Return_mem(n)),
                          get_Return_n_res(n), in);
    res = new_r_Raise (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_new_node(get_Raise_mem(n)),
                       get_new_node(get_Raise_exo_ptr(n)));
    res = new_r_Const (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_irn_mode(n), get_Const_tarval(n));
      /* SymConst: value carries a type for the type_tag/size kinds,
         or the pointer info for linkage_ptr_info. */
      type_or_id *value = NULL;
      if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
        value = (type_or_id *) get_SymConst_type(n);
      if (get_SymConst_kind(n)==linkage_ptr_info)
        value = (type_or_id *) get_SymConst_ptrinfo(n);
      res = new_r_SymConst (current_ir_graph, get_new_node(get_nodes_Block(n)),
                            value, get_SymConst_kind (n));
      ir_node **in = get_Sel_index_arr(n);
      for (i = 0; i < get_Sel_n_index(n); i++)
        set_Sel_index(n, i, get_new_node(get_Sel_index(n, i)));
      res = new_r_Sel (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_new_node(get_Sel_mem(n)),
                       get_new_node(get_Sel_ptr(n)), get_Sel_n_index(n),
                       in, get_Sel_entity(n));
      ir_node **in = get_Call_param_arr(n);
      for (i = 0; i < get_Call_arity(n); i++)
        set_Call_param(n, i, get_new_node(get_Call_param(n, i)));
      res = new_r_Call (current_ir_graph, get_new_node(get_nodes_Block(n)),
                        get_new_node(get_Call_mem(n)),
                        get_new_node(get_Call_ptr(n)), get_Call_arity(n),
                        in, get_Call_type (n));
    /* Arithmetic / logic nodes: operands a and b were fetched above
       via get_binop_left/right resp. the unop accessors. */
    res = new_r_Add (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(a), get_new_node(b), get_irn_mode(n));
    res = new_r_Sub (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(a), get_new_node(b), get_irn_mode(n));
    res = new_r_Minus (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_new_node(a), get_irn_mode(n));
    res = new_r_Mul (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(a), get_new_node(b), get_irn_mode(n));
    res = new_r_Quot (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Quot_mem(n)), get_new_node(a),
    res = new_r_DivMod (current_ir_graph, get_new_node(get_nodes_Block(n)),
                        get_new_node(get_DivMod_mem(n)), get_new_node(a),
    res = new_r_Div (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Div_mem(n)), get_new_node(a),
    res = new_r_Mod (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Mod_mem(n)), get_new_node(a),
    res = new_r_Abs (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Abs_op(n)), get_irn_mode(n));
    res = new_r_And (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(a), get_new_node(b), get_irn_mode(n));
    res = new_r_Or (current_ir_graph, get_new_node(get_nodes_Block(n)),
                    get_new_node(a), get_new_node(b), get_irn_mode(n));
    res = new_r_Eor (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(a), get_new_node(b), get_irn_mode(n));
    res = new_r_Not (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Not_op(n)), get_irn_mode(n));
    res = new_r_Cmp (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Cmp_left(n)),
                     get_new_node(get_Cmp_right(n)));
    res = new_r_Shl (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Shl_left(n)),
                     get_new_node(get_Shl_right(n)), get_irn_mode(n));
    res = new_r_Shr (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Shr_left(n)),
                     get_new_node(get_Shr_right(n)), get_irn_mode(n));
    res = new_r_Shrs (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Shrs_left(n)),
                      get_new_node(get_Shrs_right(n)), get_irn_mode(n));
    res = new_r_Rot (current_ir_graph, get_new_node(get_nodes_Block(n)),
                     get_new_node(get_Rot_left(n)),
                     get_new_node(get_Rot_right(n)), get_irn_mode(n));
    res = new_r_Conv (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Conv_op(n)),
      ir_node **in = get_Phi_preds_arr(n);
      for (i = 0; i < get_Phi_n_preds(n); i++)
        set_Phi_pred(n, i, get_new_node(get_Phi_pred(n, i)));
      res = new_r_Phi (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_Phi_n_preds(n), in, get_irn_mode(n));
    res = new_r_Load (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Load_mem(n)),
                      get_new_node(get_Load_ptr(n)));
    res = new_r_Store (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_new_node(get_Store_mem(n)),
                       get_new_node(get_Store_ptr(n)),
                       get_new_node(get_Store_value(n)));
    res = new_r_Alloc (current_ir_graph, get_new_node(get_nodes_Block(n)),
                       get_new_node(get_Alloc_mem(n)),
                       get_new_node(get_Alloc_size(n)),
                       get_Alloc_type(n), get_Alloc_where(n));
    res = new_r_Free (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Free_mem(n)),
                      get_new_node(get_Free_ptr(n)),
                      get_new_node(get_Free_size(n)), get_Free_type(n));
      ir_node **in = get_Sync_preds_arr(n);
      for (i = 0; i < get_Sync_n_preds(n); i++)
        set_Sync_pred(n, i, get_new_node(get_Sync_pred(n, i)));
      res = new_r_Sync (current_ir_graph, get_new_node(get_nodes_Block(n)),
                        get_Sync_n_preds(n), in);
    res = new_r_Proj (current_ir_graph, get_new_node(get_nodes_Block(n)),
                      get_new_node(get_Proj_pred(n)), get_irn_mode(n),
      ir_node **in = get_Tuple_preds_arr(n);
      for (i = 0; i < get_Tuple_n_preds(n); i++)
        set_Tuple_pred(n, i, get_new_node(get_Tuple_pred(n, i)));
      res = new_r_Tuple (current_ir_graph, get_new_node(get_nodes_Block(n)),
                         get_Tuple_n_preds(n), in);
    res = new_r_Id (current_ir_graph, get_new_node(get_nodes_Block(n)),
                    get_new_node(get_Id_pred(n)), get_irn_mode(n));
  /* @@@ Here we could call optimize()!! */
  set_new_node(n, res); /* remember the copy in the old node */
/* Dead node elimination: copy every node reachable from irg's End
   onto a fresh obstack and discard the old obstack, so nodes that
   became unreachable during optimization are reclaimed.
   current_ir_graph is restored before returning. */
dead_node_elimination(ir_graph *irg) {
  struct obstack *graveyard_obst=NULL; /* the obstack to be abandoned */
  struct obstack *rebirth_obst;        /* the obstack receiving the copies */
  ir_node *old_node, *new_node;
  ir_graph *rem = current_ir_graph;    /* save the caller's graph */
  current_ir_graph = irg;
  /* Only copy when the optimization is enabled. */
  if (get_opt_dead_node_elimination()) {
    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;
    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);
    printf("Before starting the DEAD NODE ELIMINATION !\n");
    /* Copy nodes remembered in irg fields first.
       The optimization contains tests against these fields, e.g., not
       to optimize the start block away. Therefore these fields have to
       Further setting these fields in copy_node would impose additional
       tests for all nodes of a kind.
       Predict the visited flag the walker will use! */
    /* Copy the start Block node */
    old_node = irg->start_block;
    new_node = new_r_Block (current_ir_graph, 0, NULL); /* new_r_Block calls
                                                           no optimization --> safe */
    irg->start_block = new_node;
    set_new_node (old_node, new_node);
    /* Mark with the visited value the coming walk will use, so
       copy_node skips these pre-copied anchor nodes. */
    set_irn_visited (new_node, get_irg_visited(current_ir_graph)+1);
    /* Copy the Start node */
    old_node = irg->start;
    new_node = new_r_Start (current_ir_graph, irg->start_block);
    irg->start = new_node;
    set_new_node (old_node, new_node);
    set_irn_visited (new_node, get_irg_visited(current_ir_graph)+1);
    /* Copy the Bad node */
    /* NOTE(review): Bad is built with mode_T here -- confirm this is
       the intended mode for Bad, and that old_node is set to the
       graph's Bad node before set_new_node below. */
    new_node = new_ir_node (irg, irg->start_block, op_Bad, mode_T, 0, NULL);
    set_new_node (old_node, new_node);
    set_irn_visited (new_node, get_irg_visited(current_ir_graph)+1);
    /* Copy the Projs for the Start's results. */
    old_node = irg->frame;
    new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_frame_base);
    irg->frame = new_node;
    set_new_node (old_node, new_node);
    set_irn_visited (new_node, get_irg_visited(current_ir_graph)+1);
    old_node = irg->globals;
    new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_globals);
    irg->globals = new_node;
    set_new_node (old_node, new_node);
    set_irn_visited (new_node, get_irg_visited(current_ir_graph)+1);
    old_node = irg->args;
    new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_T, pns_args);
    irg->args = new_node;
    set_new_node (old_node, new_node);
    set_irn_visited (new_node, get_irg_visited(current_ir_graph)+1);
    /* Walks the graph once; the copying is done on the recursive way
       back, so all reachable nodes end up on the new obstack. */
    irg_walk(irg->end, NULL, copy_node, NULL);
    printf("After the DEAD NODE ELIMINATION !\n");
    /* Free memory from old unoptimized obstack */
    /* NOTE(review): only the obstack struct itself is released here;
       confirm the obstack's chunks are freed as well (obstack_free),
       otherwise the old graph's memory leaks. */
    xfree (graveyard_obst);
  current_ir_graph = rem; /* restore the caller's graph */