1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** dead node elimination
7 ** walks once through the whole graph and copies it into another graph,
8 ** so that unreachable nodes are lost.
18 /********************************************************************/
19 /* apply optimizations of iropt to all nodes. */
/* Walker callback: applies the local optimizations of iropt to each
   predecessor of n and rewires the edge to the (possibly new) optimized node.
   Called post-order by irg_walk, so a node's predecessors' predecessors are
   already optimized when this runs.
   NOTE(review): the loop starts at i = -1; presumably index -1 addresses the
   node's block predecessor in this IR — confirm against get_irn_n's
   convention.
   NOTE(review): this excerpt is missing lines (declarations of i and
   optimized, closing braces); code kept byte-identical. */
21 optimize_in_place_wrapper (ir_node *n, void *env) {
25 /* optimize all sons after recursion, i.e., the sons' sons are
27 for (i = -1; i < get_irn_arity(n); i++) {
28 optimized = optimize_in_place(get_irn_n(n, i));
29 set_irn_n(n, i, optimized);
/* Runs local (iropt) optimizations on every node of irg by walking the
   graph from its End node with optimize_in_place_wrapper.
   Temporarily makes irg the current_ir_graph and restores the previous
   one before returning.
   NOTE(review): closing brace not visible in this excerpt; code kept
   byte-identical. */
35 local_optimize_graph (ir_graph *irg) {
36 ir_graph *rem = current_ir_graph;
37 current_ir_graph = irg;
39 /* walk over the graph */
40 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
42 current_ir_graph = rem;
45 /********************************************************************/
46 /* Routines for dead node elimination / copying garbage collection */
49 /* Remember the new node in the old node,
50 by using a field that all nodes have. */
/* Stores the copy (new) inside the old node by overwriting old->in[0],
   so get_new_node can retrieve it later during copying. */
52 set_new_node (ir_node *old, ir_node *new)
54 old->in[0] = new; /* Hi Chris: use old->link instead -- I have verified
55 that this field may be overwritten here; that saves one
56 indirection --> faster. */
60 /* Get this new node, before the old node is forgotten.*/
/* Retrieves the copy previously stored in n by set_new_node.
   NOTE(review): the function body is not visible in this excerpt;
   presumably it returns n->in[0] (the field set_new_node writes) —
   confirm against the full source. */
62 get_new_node (ir_node * n)
71 /* Create this node on a new obstack. */
/* Walker callback for dead node elimination: re-creates n on the new
   obstack via the matching new_r_* constructor, routing every predecessor
   through get_new_node (the already-copied version), then records the copy
   with set_new_node so later nodes can find it.
   For array-valued predecessors (Block cfg preds, Return results, Sel
   indices, Call params, Phi/Sync/Tuple preds) the old node's in-array is
   first rewritten in place to the new nodes and then passed to the
   constructor.
   For End, the graph's end/end_block fields are updated to the copy as well.
   NOTE(review): this excerpt is missing many lines (variable declarations,
   `case`/`break` labels of the switch, closing braces); code kept
   byte-identical. */
73 copy_node (ir_node *n, void *env) {
/* For binary ops a/b are cached here and reused by the arithmetic cases
   below; the corresponding setup for unary ops is on lines missing from
   this excerpt. */
83 a = get_binop_left(n);
84 b = get_binop_right(n);
85 } else if (is_unop(n)) {
89 switch (get_irn_opcode(n)) {
92 ir_node **in = get_Block_cfgpred_arr(n);
93 for (i = 0; i < get_Block_n_cfgpreds(n); i++)
94 set_Block_cfgpred(n, i, get_new_node(get_Block_cfgpred(n, i)));
95 res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
99 res = new_r_Start (current_ir_graph, get_new_node(get_nodes_Block(n)));
102 res = new_r_End (current_ir_graph, get_new_node(get_nodes_Block(n)));
/* The graph keeps direct pointers to End and its block; repoint them at
   the copies. */
103 current_ir_graph -> end = res;
104 current_ir_graph -> end_block = get_nodes_Block(res);
107 res = new_r_Jmp (current_ir_graph, get_new_node(get_nodes_Block(n)));
110 res = new_r_Cond (current_ir_graph, get_new_node(get_nodes_Block(n)),
111 get_new_node(get_Cond_selector(n)));
116 in = get_Return_res_arr(n);
117 for (i = 0; i < get_Return_n_res(n); i++) {
118 set_Return_res(n, i, get_new_node(get_Return_res(n, i)));
120 res = new_r_Return (current_ir_graph, get_new_node(get_nodes_Block(n)),
121 get_new_node(get_Return_mem(n)),
122 get_Return_n_res(n), in);
126 res = new_r_Raise (current_ir_graph, get_new_node(get_nodes_Block(n)),
127 get_new_node(get_Raise_mem(n)),
128 get_new_node(get_Raise_exo_ptr(n)));
131 res = new_r_Const (current_ir_graph, get_new_node(get_nodes_Block(n)),
132 get_irn_mode(n), get_Const_tarval(n));
/* SymConst payload is either a type (type_tag/size kinds) or a pointer
   info (linkage_ptr_info kind); both are funneled through type_or_id*. */
136 type_or_id *value = NULL;
138 if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
141 value = (type_or_id *) get_SymConst_type(n);
145 if (get_SymConst_kind(n)==linkage_ptr_info)
147 value = (type_or_id *) get_SymConst_ptrinfo(n);
150 res = new_r_SymConst (current_ir_graph, get_new_node(get_nodes_Block(n)),
151 value, get_SymConst_kind (n));
156 ir_node **in = get_Sel_index_arr(n);
157 for (i = 0; i < get_Sel_n_index(n); i++)
158 set_Sel_index(n, i, get_new_node(get_Sel_index(n, i)));
159 res = new_r_Sel (current_ir_graph, get_new_node(get_nodes_Block(n)),
160 get_new_node(get_Sel_mem(n)),
161 get_new_node(get_Sel_ptr(n)), get_Sel_n_index(n),
162 in, get_Sel_entity(n));
167 ir_node **in = get_Call_param_arr(n);
168 for (i = 0; i < get_Call_arity(n); i++)
169 set_Call_param(n, i, get_new_node(get_Call_param(n, i)));
170 res = new_r_Call (current_ir_graph, get_new_node(get_nodes_Block(n)),
171 get_new_node(get_Call_mem(n)),
172 get_new_node(get_Call_ptr(n)), get_Call_arity(n),
173 in, get_Call_type (n));
177 res = new_r_Add (current_ir_graph, get_new_node(get_nodes_Block(n)),
178 get_new_node(a), get_new_node(b), get_irn_mode(n));
182 res = new_r_Sub (current_ir_graph, get_new_node(get_nodes_Block(n)),
183 get_new_node(a), get_new_node(b), get_irn_mode(n));
187 res = new_r_Minus (current_ir_graph, get_new_node(get_nodes_Block(n)),
188 get_new_node(a), get_irn_mode(n));
191 res = new_r_Mul (current_ir_graph, get_new_node(get_nodes_Block(n)),
192 get_new_node(a), get_new_node(b), get_irn_mode(n));
195 res = new_r_Quot (current_ir_graph, get_new_node(get_nodes_Block(n)),
196 get_new_node(get_Quot_mem(n)), get_new_node(a),
200 res = new_r_DivMod (current_ir_graph, get_new_node(get_nodes_Block(n)),
201 get_new_node(get_DivMod_mem(n)), get_new_node(a),
205 res = new_r_Div (current_ir_graph, get_new_node(get_nodes_Block(n)),
206 get_new_node(get_Div_mem(n)), get_new_node(a),
210 res = new_r_Mod (current_ir_graph, get_new_node(get_nodes_Block(n)),
211 get_new_node(get_Mod_mem(n)), get_new_node(a),
215 res = new_r_Abs (current_ir_graph, get_new_node(get_nodes_Block(n)),
216 get_new_node(get_Abs_op(n)), get_irn_mode(n));
219 res = new_r_And (current_ir_graph, get_new_node(get_nodes_Block(n)),
220 get_new_node(a), get_new_node(b), get_irn_mode(n));
223 res = new_r_Or (current_ir_graph, get_new_node(get_nodes_Block(n)),
224 get_new_node(a), get_new_node(b), get_irn_mode(n));
227 res = new_r_Eor (current_ir_graph, get_new_node(get_nodes_Block(n)),
228 get_new_node(a), get_new_node(b), get_irn_mode(n));
231 res = new_r_Not (current_ir_graph, get_new_node(get_nodes_Block(n)),
232 get_new_node(get_Not_op(n)), get_irn_mode(n));
235 res = new_r_Cmp (current_ir_graph, get_new_node(get_nodes_Block(n)),
236 get_new_node(get_Cmp_left(n)),
237 get_new_node(get_Cmp_right(n)));
240 res = new_r_Shl (current_ir_graph, get_new_node(get_nodes_Block(n)),
241 get_new_node(get_Shl_left(n)),
242 get_new_node(get_Shl_right(n)), get_irn_mode(n));
245 res = new_r_Shr (current_ir_graph, get_new_node(get_nodes_Block(n)),
246 get_new_node(get_Shr_left(n)),
247 get_new_node(get_Shr_right(n)), get_irn_mode(n));
250 res = new_r_Shrs (current_ir_graph, get_new_node(get_nodes_Block(n)),
251 get_new_node(get_Shrs_left(n)),
252 get_new_node(get_Shrs_right(n)), get_irn_mode(n));
255 res = new_r_Rot (current_ir_graph, get_new_node(get_nodes_Block(n)),
256 get_new_node(get_Rot_left(n)),
257 get_new_node(get_Rot_right(n)), get_irn_mode(n));
260 res = new_r_Conv (current_ir_graph, get_new_node(get_nodes_Block(n)),
261 get_new_node(get_Conv_op(n)),
266 ir_node **in = get_Phi_preds_arr(n);
267 for (i = 0; i < get_Phi_n_preds(n); i++)
268 set_Phi_pred(n, i, get_new_node(get_Phi_pred(n, i)));
269 res = new_r_Phi (current_ir_graph, get_new_node(get_nodes_Block(n)),
270 get_Phi_n_preds(n), in, get_irn_mode(n));
274 res = new_r_Load (current_ir_graph, get_new_node(get_nodes_Block(n)),
275 get_new_node(get_Load_mem(n)),
276 get_new_node(get_Load_ptr(n)));
279 res = new_r_Store (current_ir_graph, get_new_node(get_nodes_Block(n)),
280 get_new_node(get_Store_mem(n)),
281 get_new_node(get_Store_ptr(n)),
282 get_new_node(get_Store_value(n)));
285 res = new_r_Alloc (current_ir_graph, get_new_node(get_nodes_Block(n)),
286 get_new_node(get_Alloc_mem(n)),
287 get_new_node(get_Alloc_size(n)),
288 get_Alloc_type(n), get_Alloc_where(n));
292 res = new_r_Free (current_ir_graph, get_new_node(get_nodes_Block(n)),
293 get_new_node(get_Free_mem(n)),
294 get_new_node(get_Free_ptr(n)),
295 get_new_node(get_Free_size(n)), get_Free_type(n));
299 ir_node **in = get_Sync_preds_arr(n);
300 for (i = 0; i < get_Sync_n_preds(n); i++)
301 set_Sync_pred(n, i, get_new_node(get_Sync_pred(n, i)));
302 res = new_r_Sync (current_ir_graph, get_new_node(get_nodes_Block(n)),
303 get_Sync_n_preds(n), in);
307 res = new_r_Proj (current_ir_graph, get_new_node(get_nodes_Block(n)),
308 get_new_node(get_Proj_pred(n)), get_irn_mode(n),
313 ir_node **in = get_Tuple_preds_arr(n);
314 for (i = 0; i < get_Tuple_n_preds(n); i++)
315 set_Tuple_pred(n, i, get_new_node(get_Tuple_pred(n, i)));
316 res = new_r_Tuple (current_ir_graph, get_new_node(get_nodes_Block(n)),
317 get_Tuple_n_preds(n), in);
321 res = new_r_Id (current_ir_graph, get_new_node(get_nodes_Block(n)),
322 get_new_node(get_Id_pred(n)), get_irn_mode(n));
328 /* @@@ Here we could call optimize()!! */
/* Remember the copy in the old node so subsequent get_new_node calls
   resolve to it; then debug-print the copy. */
329 set_new_node(n, res);
331 printf(" "); DDMSG2(res);
/* Dead node elimination as a copying garbage collection: if the option is
   enabled, swap the graph's obstack for a fresh one, pre-copy the nodes the
   graph stores in its own fields (start block, Start, Bad, and the Start
   result Projs for frame/globals/args), then walk from End so copy_node
   re-creates exactly the reachable nodes on the new obstack. The old
   ("graveyard") obstack is then freed, discarding unreachable nodes.
   Temporarily makes irg the current_ir_graph and restores it at the end.
   NOTE(review): lines are missing from this excerpt (e.g. the assignment of
   old_node before the Bad-node copy, closing braces); code kept
   byte-identical.
   NOTE(review): xfree releases only the obstack struct; an
   obstack_free(graveyard_obst, 0) for its chunks is presumably on a line
   missing here -- confirm, otherwise the old nodes' memory leaks. */
336 dead_node_elimination(ir_graph *irg) {
337 struct obstack *graveyard_obst=NULL;
338 struct obstack *rebirth_obst;
340 ir_node *old_node, *new_node;
341 ir_graph *rem = current_ir_graph;
342 current_ir_graph = irg;
344 if (get_opt_dead_node_elimination()) {
346 /* A quiet place, where the old obstack can rest in peace,
347 until it will be cremated. */
348 graveyard_obst = irg->obst;
350 /* A new obstack, where the reachable nodes will be copied to. */
351 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
352 current_ir_graph->obst = rebirth_obst;
353 obstack_init (current_ir_graph->obst);
356 printf("Before starting the DEAD NODE ELIMINATION !\n");
358 /* Copy nodes remembered in irg fields first.
359 The optimization contains tests against these fields, e.g., not
360 to optimize the start block away. Therefore these fields have to
362 Further setting these fields in copy_node would impose additional
363 tests for all nodes of a kind.
364 Predict the visited flag the walker will use! */
365 /* Copy the start Block node */
366 old_node = irg->start_block;
367 new_node = new_r_Block (current_ir_graph, 0, NULL); /* new_r_Block calls
368 no optimization --> save */
369 irg->start_block = new_node;
370 set_new_node (old_node, new_node);
/* Marking the old node visited (at the walker's predicted counter) stops
   copy_node from copying it a second time. */
371 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
372 /* Copy the Start node */
373 old_node = irg->start;
374 new_node = new_r_Start (current_ir_graph, irg->start_block);
375 irg->start = new_node;
377 set_new_node (old_node, new_node);
378 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
379 /* Copy the Bad node */
381 new_node = new_ir_node (irg, irg->start_block, op_Bad, mode_T, 0, NULL);
383 set_new_node (old_node, new_node);
384 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
385 /* Copy the Projs for the Start's results. */
386 old_node = irg->frame;
387 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_frame_base);
388 irg->frame = new_node;
389 set_new_node (old_node, new_node);
390 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
392 old_node = irg->globals;
393 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_globals);
394 irg->globals = new_node;
395 set_new_node (old_node, new_node);
396 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
398 old_node = irg->args;
399 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_T, pns_args);
400 irg->args = new_node;
401 set_new_node (old_node, new_node);
402 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
404 /* Walks the graph once, and at the recursive way do the copy thing.
405 all reachable nodes will be copied to a new obstack. */
406 irg_walk(irg->end, NULL, copy_node, NULL);
409 printf("After the DEAD NODE ELIMINATION !\n");
411 /* Free memory from old unoptimized obstack */
412 xfree (graveyard_obst);
415 current_ir_graph = rem;