1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** Optimizations for a whole ir graph, i.e., a procedure.
16 /********************************************************************/
17 /* apply optimizations of iropt to all nodes. */
/* Walker callback (matches irg_walk's (ir_node *, void *) signature):
   runs the local optimizer on every predecessor of n and rewires the
   corresponding in-edge to the possibly replaced node.
   NOTE(review): the loop starts at index -1 — presumably index -1 is
   the node's block predecessor in this IR; confirm against the
   get_irn_n contract.  env is unused. */
20 optimize_in_place_wrapper (ir_node *n, void *env) {
24   /* optimize all sons after recursion, i.e., the sons' sons are
26   for (i = -1; i < get_irn_arity(n); i++) {
27     optimized = optimize_in_place(get_irn_n(n, i));
28     set_irn_n(n, i, optimized);
/* Applies the local optimizations of iropt to every node reachable
   from irg's End node (via optimize_in_place_wrapper).  Saves and
   restores the global current_ir_graph so the caller's state is
   unchanged on return. */
33 local_optimize_graph (ir_graph *irg) {
34   ir_graph *rem = current_ir_graph;
35   current_ir_graph = irg;
37   /* walk over the graph */
38   irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
40   current_ir_graph = rem;
43 /********************************************************************/
44 /* Routines for dead node elimination / copying garbage collection */
47 /* Remember the new node in the old node,
48    by using a field that all nodes have. */
/* Stores the copy `new` in the old node so that a later
   get_new_node(old) can retrieve it during graph copying. */
50 set_new_node (ir_node *old, ir_node *new)
53   /* old->in[0] = new; Hi Chris: use old->link instead — I made sure
54      that it is safe to overwrite it here; this saves one
55      indirection --> faster. */
60 /* Get this new node, before the old node is forgotten.
      Presumably returns the copy previously recorded by
      set_new_node — body not visible here; confirm. */
62 get_new_node (ir_node * n)
72 /* Create this node on a new obstack. */
/* Walker callback for dead node elimination: re-creates node n on the
   new obstack through the matching new_r_* constructor, with every
   operand replaced by its already-copied version (get_new_node), then
   records the copy via set_new_node(n, res).
   NOTE(review): for nodes with variadic inputs (Block, Return, Sel,
   Call, Phi, Sync, Tuple) the OLD node's predecessor array is rewritten
   in place before being handed to the constructor — this relies on the
   walk having visited (and copied) all predecessors first.  env is
   unused. */
74 copy_node (ir_node *n, void *env) {
84     a = get_binop_left(n);
85     b = get_binop_right(n);
86   } else if (is_unop(n)) {
90   switch (get_irn_opcode(n)) {
    /* Block: rewire all control-flow predecessors, then rebuild. */
92     { ir_node **in = get_Block_cfgpred_arr(n);
93     for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
94       set_Block_cfgpred(n, i, get_new_node(get_Block_cfgpred(n, i)));
96     res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
100     res = new_r_Start (current_ir_graph, get_new_node(get_nodes_Block(n)));
    /* End: besides copying, update the graph's end / end_block fields
       so they point into the new obstack. */
103     res = new_r_End (current_ir_graph, get_new_node(get_nodes_Block(n)));
104     current_ir_graph -> end = res;
105     current_ir_graph -> end_block = get_nodes_Block(res);
108     res = new_r_Jmp (current_ir_graph, get_new_node(get_nodes_Block(n)));
111     res = new_r_Cond (current_ir_graph, get_new_node(get_nodes_Block(n)),
112 get_new_node(get_Cond_selector(n)));
117     in = get_Return_res_arr(n);
120     /* printf("1. n: %p, in: %p, in[0]: %p, in[1]: %p, in[2]: %p in[3] %p \n", */
121     /*     n, in, in[0], in[1], in[2], in[3]); */
123     for (i = 0; i < get_Return_n_res(n); i++) {
124       /* printf(" old: %p, new: %p \n", get_Return_res(n, i), get_new_node(get_Return_res(n, i))); */
125       set_Return_res(n, i, get_new_node(get_Return_res(n, i)));
127     res = new_r_Return (current_ir_graph,
128 get_new_node(get_nodes_Block(n)),
129 get_new_node(get_Return_mem(n)),
130 get_Return_n_res(n), in);
134     res = new_r_Raise (current_ir_graph,
135 get_new_node(get_nodes_Block(n)),
136 get_new_node(get_Raise_mem(n)),
137 get_new_node(get_Raise_exo_ptr(n)));
140     res = new_r_Const (current_ir_graph, get_new_node(get_nodes_Block(n)),
141 get_irn_mode(n), get_Const_tarval(n));
    /* SymConst: the attribute is either a type (type_tag/size kinds)
       or a pointer info (linkage_ptr_info kind). */
145     type_or_id *value = NULL;
147     if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
150       value = (type_or_id *) get_SymConst_type(n);
154     if (get_SymConst_kind(n)==linkage_ptr_info)
156       value = (type_or_id *) get_SymConst_ptrinfo(n);
159     res = new_r_SymConst (current_ir_graph, get_new_node(get_nodes_Block(n)),
160 value, get_SymConst_kind (n));
165     ir_node **in = get_Sel_index_arr(n);
166     for (i = 0; i < get_Sel_n_index(n); i++)
167       set_Sel_index(n, i, get_new_node(get_Sel_index(n, i)));
168     res = new_r_Sel (current_ir_graph, get_new_node(get_nodes_Block(n)),
169 get_new_node(get_Sel_mem(n)),
170 get_new_node(get_Sel_ptr(n)), get_Sel_n_index(n),
171 in, get_Sel_entity(n));
177     in = get_Call_param_arr(n);
179     for (i = 0; i < get_Call_arity(n); i++)
180       set_Call_param(n, i, get_new_node(get_Call_param(n, i)));
181     res = new_r_Call (current_ir_graph,
182 get_new_node(get_nodes_Block(n)),
183 get_new_node(get_Call_mem(n)),
184 get_new_node(get_Call_ptr(n)),
185 get_Call_arity(n), in,
    /* Binary/unary arithmetic: a and b were fetched above via
       get_binop_left/right (resp. the unop operand). */
190     res = new_r_Add (current_ir_graph, get_new_node(get_nodes_Block(n)),
191 get_new_node(a), get_new_node(b), get_irn_mode(n));
195     res = new_r_Sub (current_ir_graph, get_new_node(get_nodes_Block(n)),
196 get_new_node(a), get_new_node(b), get_irn_mode(n));
200     res = new_r_Minus (current_ir_graph, get_new_node(get_nodes_Block(n)),
201 get_new_node(a), get_irn_mode(n));
204     res = new_r_Mul (current_ir_graph, get_new_node(get_nodes_Block(n)),
205 get_new_node(a), get_new_node(b), get_irn_mode(n));
208     res = new_r_Quot (current_ir_graph, get_new_node(get_nodes_Block(n)),
209 get_new_node(get_Quot_mem(n)), get_new_node(a),
213     res = new_r_DivMod (current_ir_graph, get_new_node(get_nodes_Block(n)),
214 get_new_node(get_DivMod_mem(n)), get_new_node(a),
218     res = new_r_Div (current_ir_graph, get_new_node(get_nodes_Block(n)),
219 get_new_node(get_Div_mem(n)), get_new_node(a),
223     res = new_r_Mod (current_ir_graph, get_new_node(get_nodes_Block(n)),
224 get_new_node(get_Mod_mem(n)), get_new_node(a),
228     res = new_r_Abs (current_ir_graph, get_new_node(get_nodes_Block(n)),
229 get_new_node(get_Abs_op(n)), get_irn_mode(n));
232     res = new_r_And (current_ir_graph, get_new_node(get_nodes_Block(n)),
233 get_new_node(a), get_new_node(b), get_irn_mode(n));
236     res = new_r_Or (current_ir_graph, get_new_node(get_nodes_Block(n)),
237 get_new_node(a), get_new_node(b), get_irn_mode(n));
240     res = new_r_Eor (current_ir_graph, get_new_node(get_nodes_Block(n)),
241 get_new_node(a), get_new_node(b), get_irn_mode(n));
244     res = new_r_Not (current_ir_graph, get_new_node(get_nodes_Block(n)),
245 get_new_node(get_Not_op(n)), get_irn_mode(n));
    /* Debug output (DDMSG2) left in for the Cmp case. */
248     DDMSG2(get_new_node(get_Cmp_left(n)));
249     DDMSG2(get_new_node(get_Cmp_right(n)));
250     DDMSG2(get_new_node(get_nodes_Block(n)));
252     res = new_r_Cmp (current_ir_graph,
253 get_new_node(get_nodes_Block(n)),
254 get_new_node(get_Cmp_left(n)),
255 get_new_node(get_Cmp_right(n)));
259     res = new_r_Shl (current_ir_graph, get_new_node(get_nodes_Block(n)),
260 get_new_node(get_Shl_left(n)),
261 get_new_node(get_Shl_right(n)), get_irn_mode(n));
264     res = new_r_Shr (current_ir_graph, get_new_node(get_nodes_Block(n)),
265 get_new_node(get_Shr_left(n)),
266 get_new_node(get_Shr_right(n)), get_irn_mode(n));
269     res = new_r_Shrs (current_ir_graph, get_new_node(get_nodes_Block(n)),
270 get_new_node(get_Shrs_left(n)),
271 get_new_node(get_Shrs_right(n)), get_irn_mode(n));
274     res = new_r_Rot (current_ir_graph, get_new_node(get_nodes_Block(n)),
275 get_new_node(get_Rot_left(n)),
276 get_new_node(get_Rot_right(n)), get_irn_mode(n));
279     res = new_r_Conv (current_ir_graph, get_new_node(get_nodes_Block(n)),
280 get_new_node(get_Conv_op(n)),
285     ir_node **in = get_Phi_preds_arr(n);
286     for (i = 0; i < get_Phi_n_preds(n); i++)
287       set_Phi_pred(n, i, get_new_node(get_Phi_pred(n, i)));
288     res = new_r_Phi (current_ir_graph, get_new_node(get_nodes_Block(n)),
289 get_Phi_n_preds(n), in, get_irn_mode(n));
293     res = new_r_Load (current_ir_graph, get_new_node(get_nodes_Block(n)),
294 get_new_node(get_Load_mem(n)),
295 get_new_node(get_Load_ptr(n)));
298     res = new_r_Store (current_ir_graph, get_new_node(get_nodes_Block(n)),
299 get_new_node(get_Store_mem(n)),
300 get_new_node(get_Store_ptr(n)),
301 get_new_node(get_Store_value(n)));
304     res = new_r_Alloc (current_ir_graph, get_new_node(get_nodes_Block(n)),
305 get_new_node(get_Alloc_mem(n)),
306 get_new_node(get_Alloc_size(n)),
307 get_Alloc_type(n), get_Alloc_where(n));
311     res = new_r_Free (current_ir_graph, get_new_node(get_nodes_Block(n)),
312 get_new_node(get_Free_mem(n)),
313 get_new_node(get_Free_ptr(n)),
314 get_new_node(get_Free_size(n)), get_Free_type(n));
318     ir_node **in = get_Sync_preds_arr(n);
319     for (i = 0; i < get_Sync_n_preds(n); i++)
320       set_Sync_pred(n, i, get_new_node(get_Sync_pred(n, i)));
321     res = new_r_Sync (current_ir_graph, get_new_node(get_nodes_Block(n)),
322 get_Sync_n_preds(n), in);
326     res = new_r_Proj (current_ir_graph, get_new_node(get_nodes_Block(n)),
327 get_new_node(get_Proj_pred(n)), get_irn_mode(n),
332     ir_node **in = get_Tuple_preds_arr(n);
333     for (i = 0; i < get_Tuple_n_preds(n); i++)
334       set_Tuple_pred(n, i, get_new_node(get_Tuple_pred(n, i)));
335     res = new_r_Tuple (current_ir_graph, get_new_node(get_nodes_Block(n)),
336 get_Tuple_n_preds(n), in);
340     res = new_r_Id (current_ir_graph, get_new_node(get_nodes_Block(n)),
341 get_new_node(get_Id_pred(n)), get_irn_mode(n));
347   /* @@@ Here we could call optimize()!! Not necessary, called in constructor anyways. */
  /* Remember the copy so later visitors can rewire edges to it. */
348   set_new_node(n, res);
350   printf(" "); DDMSG2(res);
353 /* Amroq calls this emigrate(). */
/* Copying garbage collection for one ir graph: copies all nodes
   reachable from irg->end onto a fresh obstack ("rebirth_obst"),
   updates the graph's anchor fields (start_block, start, bad, frame,
   globals, args) to the copies, and frees the old obstack
   ("graveyard_obst") together with every unreachable (dead) node on
   it.  No-op unless both the global optimize flag and the dead node
   elimination option are enabled. */
355 dead_node_elimination(ir_graph *irg) {
356   struct obstack *graveyard_obst=NULL;
357   struct obstack *rebirth_obst;
359   ir_node *old_node, *new_node;
360   ir_graph *rem = current_ir_graph;
361   current_ir_graph = irg;
363   if (get_optimize() && get_opt_dead_node_elimination()) {
365     /* A quiet place, where the old obstack can rest in peace,
366        until it will be cremated. */
367     graveyard_obst = irg->obst;
369     /* A new obstack, where the reachable nodes will be copied to. */
370     rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
371     current_ir_graph->obst = rebirth_obst;
372     obstack_init (current_ir_graph->obst);
374     /* @@@@@ Do we need to do something about cse? */
378     printf("Before starting the DEAD NODE ELIMINATION !\n");
380     /* Copy nodes remembered in irg fields first.
381        The optimization contains tests against these fields, e.g., not
382        to optimize the start block away. Therefore these fields have to
384        Further setting these fields in copy_node would impose additional
385        tests for all nodes of a kind.
386        Predict the visited flag the walker will use! */
387     /* Copy the start Block node */
388     old_node = irg->start_block;
389     new_node = new_r_Block (current_ir_graph, 0, NULL); /* new_r_Block calls
390                                                            no optimization --> save */
391     irg->start_block = new_node;
    /* Record the copy and pre-mark the old node as visited so the
       upcoming walk does not copy it a second time. */
393     set_new_node (old_node, new_node);
394     set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
395     /* Copy the Start node */
396     old_node = irg->start;
397     new_node = new_r_Start (current_ir_graph, irg->start_block);
398     irg->start = new_node;
400     set_new_node (old_node, new_node);
401     set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
402     /* Copy the Bad node */
404     new_node = new_ir_node (irg, irg->start_block, op_Bad, mode_T, 0, NULL);
407     set_new_node (old_node, new_node);
408     set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
409     /* Copy the Projs for the Start's results. */
410     old_node = irg->frame;
411     new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_frame_base);
412     irg->frame = new_node;
414     set_new_node (old_node, new_node);
415     set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
417     old_node = irg->globals;
418     new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_globals);
419     irg->globals = new_node;
421     set_new_node (old_node, new_node);
422     set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
424     old_node = irg->args;
425     new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_T, pns_args);
426     irg->args = new_node;
428     set_new_node (old_node, new_node);
429     set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
431     /* Walks the graph once, and at the recursive way do the copy thing.
432        all reachable nodes will be copied to a new obstack. */
433     irg_walk(irg->end, NULL, copy_node, NULL);
436     printf("After the DEAD NODE ELIMINATION !\n");
438     /* Free memory from old unoptimized obstack */
439     obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
440     xfree (graveyard_obst);           /* ... then free it. */
443   current_ir_graph = rem;