1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** dead node elimination
7 ** walks once through the whole graph and copies it into another graph,
8 ** so that unreachable nodes are lost.
18 /********************************************************************/
19 /* apply optimizations of iropt to all nodes. */
21 optimize_in_place_wrapper (ir_node *n, void *env) {
25 /* optimize all sons after recursion, i.e., the sons' sons are
27 for (i = -1; i < get_irn_arity(n); i++) {
28 optimized = optimize_in_place(get_irn_n(n, i));
29 set_irn_n(n, i, optimized);
35 local_optimize_graph (ir_graph *irg) {
36 ir_graph *rem = current_ir_graph;
37 current_ir_graph = irg;
39 /* walk over the graph */
40 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
42 current_ir_graph = rem;
45 /********************************************************************/
46 /* Routines for dead node elimination / copying garbage collection */
49 /* Remeber the new node in the old node,
50 by using a field that all nodes have. */
52 set_new_node (ir_node *old, ir_node *new)
54 old->in[0] = new; /* Hi Chris: Benutze old->link, ich hab mich vergewissert dass
55 das hier ueberschrieben werden kann, das erspaart eine
56 indirektion --> schneller. */
60 /* Get this new node, before the old node is forgotton.*/
62 get_new_node (ir_node * n)
71 /* Create this node on a new obstack. */
73 copy_node (ir_node *n, void *env) {
83 a = get_binop_left(n);
84 b = get_binop_right(n);
85 } else if (is_unop(n)) {
89 switch (get_irn_opcode(n)) {
92 ir_node **in = get_Block_cfgpred_arr(n);
93 for (i = 0; i < get_Block_n_cfgpreds(n); i++)
94 set_Block_cfgpred(n, i, get_new_node(get_Block_cfgpred(n, i)));
95 res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
99 res = new_r_Start (current_ir_graph, get_new_node(get_nodes_Block(n)));
102 res = new_r_End (current_ir_graph, get_new_node(get_nodes_Block(n)));
103 current_ir_graph -> end = res;
104 current_ir_graph -> end_block = get_nodes_Block(res);
107 res = new_r_Jmp (current_ir_graph, get_new_node(get_nodes_Block(n)));
111 res = new_r_Cond (current_ir_graph, get_new_node(get_nodes_Block(n)),
112 get_new_node(get_Cond_selector(n)));
117 in = get_Return_res_arr(n);
118 for (i = 0; i < get_Return_n_res(n); i++) {
119 set_Return_res(n, i, get_new_node(get_Return_res(n, i)));
121 res = new_r_Return (current_ir_graph, get_new_node(get_nodes_Block(n)),
122 get_new_node(get_Return_mem(n)),
123 get_Return_n_res(n), in);
127 res = new_r_Raise (current_ir_graph, get_new_node(get_nodes_Block(n)),
128 get_new_node(get_Raise_mem(n)),
129 get_new_node(get_Raise_exo_ptr(n)));
132 res = new_r_Const (current_ir_graph, get_new_node(get_nodes_Block(n)),
133 get_irn_mode(n), get_Const_tarval(n));
137 type_or_id *value = NULL;
139 if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
142 value = (type_or_id *) get_SymConst_type(n);
146 if (get_SymConst_kind(n)==linkage_ptr_info)
148 value = (type_or_id *) get_SymConst_ptrinfo(n);
151 res = new_r_SymConst (current_ir_graph, get_new_node(get_nodes_Block(n)),
152 value, get_SymConst_kind (n));
157 ir_node **in = get_Sel_index_arr(n);
158 for (i = 0; i < get_Sel_n_index(n); i++)
159 set_Sel_index(n, i, get_new_node(get_Sel_index(n, i)));
160 res = new_r_Sel (current_ir_graph, get_new_node(get_nodes_Block(n)),
161 get_new_node(get_Sel_mem(n)),
162 get_new_node(get_Sel_ptr(n)), get_Sel_n_index(n),
163 in, get_Sel_entity(n));
168 ir_node **in = get_Call_param_arr(n);
169 for (i = 0; i < get_Call_arity(n); i++)
170 set_Call_param(n, i, get_new_node(get_Call_param(n, i)));
171 res = new_r_Call (current_ir_graph, get_new_node(get_nodes_Block(n)),
172 get_new_node(get_Call_mem(n)),
173 get_new_node(get_Call_ptr(n)), get_Call_arity(n),
174 in, get_Call_type (n));
178 res = new_r_Add (current_ir_graph, get_new_node(get_nodes_Block(n)),
179 get_new_node(a), get_new_node(b), get_irn_mode(n));
183 res = new_r_Sub (current_ir_graph, get_new_node(get_nodes_Block(n)),
184 get_new_node(a), get_new_node(b), get_irn_mode(n));
188 res = new_r_Minus (current_ir_graph, get_new_node(get_nodes_Block(n)),
189 get_new_node(a), get_irn_mode(n));
192 res = new_r_Mul (current_ir_graph, get_new_node(get_nodes_Block(n)),
193 get_new_node(a), get_new_node(b), get_irn_mode(n));
196 res = new_r_Quot (current_ir_graph, get_new_node(get_nodes_Block(n)),
197 get_new_node(get_Quot_mem(n)), get_new_node(a),
201 res = new_r_DivMod (current_ir_graph, get_new_node(get_nodes_Block(n)),
202 get_new_node(get_DivMod_mem(n)), get_new_node(a),
206 res = new_r_Div (current_ir_graph, get_new_node(get_nodes_Block(n)),
207 get_new_node(get_Div_mem(n)), get_new_node(a),
211 res = new_r_Mod (current_ir_graph, get_new_node(get_nodes_Block(n)),
212 get_new_node(get_Mod_mem(n)), get_new_node(a),
216 res = new_r_Abs (current_ir_graph, get_new_node(get_nodes_Block(n)),
217 get_new_node(get_Abs_op(n)), get_irn_mode(n));
220 res = new_r_And (current_ir_graph, get_new_node(get_nodes_Block(n)),
221 get_new_node(a), get_new_node(b), get_irn_mode(n));
224 res = new_r_Or (current_ir_graph, get_new_node(get_nodes_Block(n)),
225 get_new_node(a), get_new_node(b), get_irn_mode(n));
228 res = new_r_Eor (current_ir_graph, get_new_node(get_nodes_Block(n)),
229 get_new_node(a), get_new_node(b), get_irn_mode(n));
232 res = new_r_Not (current_ir_graph, get_new_node(get_nodes_Block(n)),
233 get_new_node(get_Not_op(n)), get_irn_mode(n));
236 res = new_r_Cmp (current_ir_graph, get_new_node(get_nodes_Block(n)),
237 get_new_node(get_Cmp_left(n)),
238 get_new_node(get_Cmp_right(n)));
241 res = new_r_Shl (current_ir_graph, get_new_node(get_nodes_Block(n)),
242 get_new_node(get_Shl_left(n)),
243 get_new_node(get_Shl_right(n)), get_irn_mode(n));
246 res = new_r_Shr (current_ir_graph, get_new_node(get_nodes_Block(n)),
247 get_new_node(get_Shr_left(n)),
248 get_new_node(get_Shr_right(n)), get_irn_mode(n));
251 res = new_r_Shrs (current_ir_graph, get_new_node(get_nodes_Block(n)),
252 get_new_node(get_Shrs_left(n)),
253 get_new_node(get_Shrs_right(n)), get_irn_mode(n));
256 res = new_r_Rot (current_ir_graph, get_new_node(get_nodes_Block(n)),
257 get_new_node(get_Rot_left(n)),
258 get_new_node(get_Rot_right(n)), get_irn_mode(n));
261 res = new_r_Conv (current_ir_graph, get_new_node(get_nodes_Block(n)),
262 get_new_node(get_Conv_op(n)),
267 ir_node **in = get_Phi_preds_arr(n);
268 for (i = 0; i < get_Phi_n_preds(n); i++)
269 set_Phi_pred(n, i, get_new_node(get_Phi_pred(n, i)));
270 res = new_r_Phi (current_ir_graph, get_new_node(get_nodes_Block(n)),
271 get_Phi_n_preds(n), in, get_irn_mode(n));
275 res = new_r_Load (current_ir_graph, get_new_node(get_nodes_Block(n)),
276 get_new_node(get_Load_mem(n)),
277 get_new_node(get_Load_ptr(n)));
280 res = new_r_Store (current_ir_graph, get_new_node(get_nodes_Block(n)),
281 get_new_node(get_Store_mem(n)),
282 get_new_node(get_Store_ptr(n)),
283 get_new_node(get_Store_value(n)));
286 res = new_r_Alloc (current_ir_graph, get_new_node(get_nodes_Block(n)),
287 get_new_node(get_Alloc_mem(n)),
288 get_new_node(get_Alloc_size(n)),
289 get_Alloc_type(n), get_Alloc_where(n));
293 res = new_r_Free (current_ir_graph, get_new_node(get_nodes_Block(n)),
294 get_new_node(get_Free_mem(n)),
295 get_new_node(get_Free_ptr(n)),
296 get_new_node(get_Free_size(n)), get_Free_type(n));
300 ir_node **in = get_Sync_preds_arr(n);
301 for (i = 0; i < get_Sync_n_preds(n); i++)
302 set_Sync_pred(n, i, get_new_node(get_Sync_pred(n, i)));
303 res = new_r_Sync (current_ir_graph, get_new_node(get_nodes_Block(n)),
304 get_Sync_n_preds(n), in);
308 res = new_r_Proj (current_ir_graph, get_new_node(get_nodes_Block(n)),
309 get_new_node(get_Proj_pred(n)), get_irn_mode(n),
314 ir_node **in = get_Tuple_preds_arr(n);
315 for (i = 0; i < get_Tuple_n_preds(n); i++)
316 set_Tuple_pred(n, i, get_new_node(get_Tuple_pred(n, i)));
317 res = new_r_Tuple (current_ir_graph, get_new_node(get_nodes_Block(n)),
318 get_Tuple_n_preds(n), in);
322 res = new_r_Id (current_ir_graph, get_new_node(get_nodes_Block(n)),
323 get_new_node(get_Id_pred(n)), get_irn_mode(n));
329 /* @@@ Here we could call optimize()!! */
330 set_new_node(n, res);
332 printf(" "); DDMSG2(res);
335 /* Amroq call this emigrate() */
337 dead_node_elimination(ir_graph *irg) {
338 struct obstack *graveyard_obst=NULL;
339 struct obstack *rebirth_obst;
341 ir_node *old_node, *new_node;
342 ir_graph *rem = current_ir_graph;
343 current_ir_graph = irg;
345 if (get_opt_dead_node_elimination()) {
347 /* A quiet place, where the old obstack can rest in peace,
348 until it will be cremated. */
349 graveyard_obst = irg->obst;
351 /* A new obstack, where the reachable nodes will be copied to. */
352 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
353 current_ir_graph->obst = rebirth_obst;
354 obstack_init (current_ir_graph->obst);
357 printf("Before starting the DEAD NODE ELIMINATION !\n");
359 /* Copy nodes remembered in irg fields first.
360 The optimization contains tests against these fields, e.g., not
361 to optimize the start block away. Therefore these fields have to
363 Further setting these fields in copy_node would impose additional
364 tests for all nodes of a kind.
365 Predict the visited flag the walker will use! */
366 /* Copy the start Block node */
367 old_node = irg->start_block;
368 new_node = new_r_Block (current_ir_graph, 0, NULL); /* new_r_Block calls
369 no optimization --> save */
370 irg->start_block = new_node;
371 set_new_node (old_node, new_node);
372 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
373 /* Copy the Start node */
374 old_node = irg->start;
375 new_node = new_r_Start (current_ir_graph, irg->start_block);
376 irg->start = new_node;
378 set_new_node (old_node, new_node);
379 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
380 /* Copy the Bad node */
382 new_node = new_ir_node (irg, irg->start_block, op_Bad, mode_T, 0, NULL);
384 set_new_node (old_node, new_node);
385 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
386 /* Copy the Projs for the Start's results. */
387 old_node = irg->frame;
388 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_frame_base);
389 irg->frame = new_node;
390 set_new_node (old_node, new_node);
391 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
393 old_node = irg->globals;
394 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_globals);
395 irg->globals = new_node;
396 set_new_node (old_node, new_node);
397 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
399 old_node = irg->args;
400 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_T, pns_args);
401 irg->args = new_node;
402 set_new_node (old_node, new_node);
403 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
405 /* Walks the graph once, and at the recursive way do the copy thing.
406 all reachable nodes will be copied to a new obstack. */
407 irg_walk(irg->end, NULL, copy_node, NULL);
410 printf("After the DEAD NODE ELIMINATION !\n");
412 /* Free memory from old unoptimized obstack */
413 xfree (graveyard_obst);
416 current_ir_graph = rem;