1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** Optimizations for a whole ir graph, i.e., a procedure.
12 # include "irnode_t.h"
13 # include "irgraph_t.h"
19 /********************************************************************/
20 /* apply optimizations of iropt to all nodes. */
/* Walker callback: re-optimizes every predecessor of n in place and
   re-wires n to the (possibly replaced) result.
   The loop starts at index -1 so that the block predecessor (in-position
   -1 in this IR representation) is handled as well as the data
   predecessors 0..arity-1.
   NOTE(review): this listing is truncated — the local declarations of
   `i`/`optimized` and the closing braces are not visible here. */
23 optimize_in_place_wrapper (ir_node *n, void *env) {
27   /* optimize all sons after recursion, i.e., the sons' sons are
29   for (i = -1; i < get_irn_arity(n); i++) {
30     optimized = optimize_in_place(get_irn_n(n, i));
31     set_irn_n(n, i, optimized);
/* Applies the local optimizations (iropt) to all nodes reachable from
   the end node of irg, via optimize_in_place_wrapper.
   Saves and restores the global current_ir_graph around the walk so the
   caller's notion of the "current" graph is preserved. */
36 local_optimize_graph (ir_graph *irg) {
37   ir_graph *rem = current_ir_graph;
38   current_ir_graph = irg;
40   /* walk over the graph */
41   irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
43   current_ir_graph = rem;
46 /********************************************************************/
47 /* Routines for dead node elimination / copying garbage collection */
50 /* Remember the new node in the old node by using a field all nodes have. */
/* NOTE(review): body not visible in this listing; presumably stores `new`
   in old->link — confirm against the full file. */
52 set_new_node (ir_node *old, ir_node *new)
57 /* Get this new node, before the old node is forgotten.*/
/* NOTE(review): body not visible in this listing; presumably reads the
   copy back out of n->link (the counterpart of set_new_node). */
59 get_new_node (ir_node * n)
64 /* Copies the node to the new obstack. The copy's ins still point to the
65 predecessors on the old obstack. n->link points to the new node. */
/* Walker pre-callback for the copy pass: allocates the new node (here
   visible only for the Block case) and records it via the link field.
   NOTE(review): the listing is truncated — the non-Block path and the
   full new_ir_node argument list are not visible here. */
67 copy_node (ir_node *n, void *env) {
70 if (get_irn_opcode(n) == iro_Block) {
73 block = get_nodes_Block(n);
75 nn = new_ir_node(current_ir_graph,
85 /* Copies new predecessors of old node to new node remembered in link. */
/* Walker post-callback for the copy pass: rewires each in-edge of the
   copy (nn) to the copies of the old predecessors.
   Blocks start at in-index 0; all other nodes start at -1 so their
   block predecessor is rewired too. */
87 copy_preds (ir_node *n, void *env) {
92 if (get_irn_opcode(n) == iro_Block) start = 0; else start = -1;
93 for (i = start; i < get_irn_arity(n); i++)
94 set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
97 /* To break the recursion of the graph walk if there are loops in
98 the graph we have to allocate new nodes for Phis and blocks
99 before descending. Here we use the old predecessors for the
100 new nodes. These are replaced by the proper predecessors in
102 It turned out that it is not sufficient to just break loops
103 for Phi and Block nodes, as the walker can hit visited but
104 not copied nodes at any point in the graph.
105 A simple fix would be allocating Id's for every node and then
106 exchanging them, but this will cause new dead nodes on the new
108 So now there is a different implementation more based on the
109 view on the graph as a graph than as a represented program. */
/* Pre-callback of the second copy implementation: pre-allocates
   placeholder copies for Block and Phi nodes before the walk descends,
   temporarily reusing the OLD in-array so cycles don't recurse forever.
   NOTE(review): the `case` labels of the switch were dropped from this
   listing — the first set_new_node is evidently the Block case, the
   second the Phi case. */
111 create_dummy (ir_node *n, void *env) {
114 /* Assure link is set to NULL so we can test whether there is a
115 new node by checking link.
116 set_irn_link(n, NULL); */
118 switch (get_irn_opcode(n)) {
120 set_new_node(n, new_ir_node(current_ir_graph, NULL, op_Block, mode_R,
121 get_irn_arity(n), get_irn_in(n)));
124 set_new_node(n, new_ir_node(current_ir_graph, NULL, op_Phi,
126 get_irn_arity(n), get_irn_in(n)));
129 } /* end switch (get_irn_opcode(n)) */
132 /* Create a copy of this node on a new obstack. */
/* Post-callback of the second copy implementation: dispatches on the
   opcode of n and rebuilds the node on the new obstack with the
   already-copied block and predecessors, then records the copy in
   n->link via set_new_node at the bottom.
   NOTE(review): all `case` labels and the closing braces of this switch
   were dropped from this listing; the opcode each fragment belongs to is
   inferred from the constructor names.
   NOTE(review): the array-carrying cases (Return, Sel, Call, Sync,
   Tuple) first overwrite the OLD node's in-array entries with the new
   nodes and then pass that aliased array (`in`) to the constructor —
   i.e. they deliberately use the old node's storage as scratch space. */
134 copy_node2 (ir_node *n, void *env) {
/* For binary/unary ops, fetch the operands once up front. */
144 a = get_binop_left(n);
145 b = get_binop_right(n);
146 } else if (is_unop(n)) {
150 switch (get_irn_opcode(n)) {
/* Block: placeholder was pre-allocated by create_dummy; fix its
   control-flow predecessors and mark it mature. */
153 res = get_new_node(n);
155 for (i = 0; i < get_Block_n_cfgpreds(n); i++)
156 set_Block_cfgpred(res, i, get_new_node(get_Block_cfgpred(n, i)));
157 set_Block_matured(res, 1);
161 res = new_r_Start (current_ir_graph, get_new_node(get_nodes_Block(n)));
/* End: also update the graph's end/end_block fields immediately. */
164 res = new_r_End (current_ir_graph, get_new_node(get_nodes_Block(n)));
165 current_ir_graph -> end = res;
166 current_ir_graph -> end_block = get_nodes_Block(res);
169 res = new_r_Jmp (current_ir_graph, get_new_node(get_nodes_Block(n)));
172 res = new_r_Cond (current_ir_graph, get_new_node(get_nodes_Block(n)),
173 get_new_node(get_Cond_selector(n)));
178 in = get_Return_res_arr(n);
179 for (i = 0; i < get_Return_n_res(n); i++)
180 set_Return_res(n, i, get_new_node(get_Return_res(n, i)));
181 res = new_r_Return (current_ir_graph,
182 get_new_node(get_nodes_Block(n)),
183 get_new_node(get_Return_mem(n)),
184 get_Return_n_res(n), in);
188 res = new_r_Raise (current_ir_graph,
189 get_new_node(get_nodes_Block(n)),
190 get_new_node(get_Raise_mem(n)),
191 get_new_node(get_Raise_exo_ptr(n)));
194 res = new_r_Const (current_ir_graph, get_new_node(get_nodes_Block(n)),
195 get_irn_mode(n), get_Const_tarval(n));
/* SymConst: the attached value is a type for type_tag/size kinds and
   an ident for linkage_ptr_info. */
199 type_or_id_p value = NULL;
201 if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
204 value = (type_or_id_p) get_SymConst_type(n);
208 if (get_SymConst_kind(n)==linkage_ptr_info)
210 value = (type_or_id_p) get_SymConst_ptrinfo(n);
213 res = new_r_SymConst (current_ir_graph, get_new_node(get_nodes_Block(n)),
214 value, get_SymConst_kind (n));
219 ir_node **in = get_Sel_index_arr(n);
220 for (i = 0; i < get_Sel_n_index(n); i++)
221 set_Sel_index(n, i, get_new_node(get_Sel_index(n, i)));
222 res = new_r_Sel (current_ir_graph, get_new_node(get_nodes_Block(n)),
223 get_new_node(get_Sel_mem(n)),
224 get_new_node(get_Sel_ptr(n)), get_Sel_n_index(n),
225 in, get_Sel_entity(n));
231 in = get_Call_param_arr(n);
233 for (i = 0; i < get_Call_arity(n); i++)
234 set_Call_param(n, i, get_new_node(get_Call_param(n, i)));
235 res = new_r_Call (current_ir_graph,
236 get_new_node(get_nodes_Block(n)),
237 get_new_node(get_Call_mem(n)),
238 get_new_node(get_Call_ptr(n)),
239 get_Call_arity(n), in,
/* Arithmetic/logic ops: rebuilt from the a/b operands fetched above. */
244 res = new_r_Add (current_ir_graph, get_new_node(get_nodes_Block(n)),
245 get_new_node(a), get_new_node(b), get_irn_mode(n));
249 res = new_r_Sub (current_ir_graph, get_new_node(get_nodes_Block(n)),
250 get_new_node(a), get_new_node(b), get_irn_mode(n));
254 res = new_r_Minus (current_ir_graph, get_new_node(get_nodes_Block(n)),
255 get_new_node(a), get_irn_mode(n));
258 res = new_r_Mul (current_ir_graph, get_new_node(get_nodes_Block(n)),
259 get_new_node(a), get_new_node(b), get_irn_mode(n));
262 res = new_r_Quot (current_ir_graph, get_new_node(get_nodes_Block(n)),
263 get_new_node(get_Quot_mem(n)), get_new_node(a),
267 res = new_r_DivMod (current_ir_graph, get_new_node(get_nodes_Block(n)),
268 get_new_node(get_DivMod_mem(n)), get_new_node(a),
272 res = new_r_Div (current_ir_graph, get_new_node(get_nodes_Block(n)),
273 get_new_node(get_Div_mem(n)), get_new_node(a),
277 res = new_r_Mod (current_ir_graph, get_new_node(get_nodes_Block(n)),
278 get_new_node(get_Mod_mem(n)), get_new_node(a),
282 res = new_r_Abs (current_ir_graph, get_new_node(get_nodes_Block(n)),
283 get_new_node(get_Abs_op(n)), get_irn_mode(n));
286 res = new_r_And (current_ir_graph, get_new_node(get_nodes_Block(n)),
287 get_new_node(a), get_new_node(b), get_irn_mode(n));
290 res = new_r_Or (current_ir_graph, get_new_node(get_nodes_Block(n)),
291 get_new_node(a), get_new_node(b), get_irn_mode(n));
294 res = new_r_Eor (current_ir_graph, get_new_node(get_nodes_Block(n)),
295 get_new_node(a), get_new_node(b), get_irn_mode(n));
298 res = new_r_Not (current_ir_graph, get_new_node(get_nodes_Block(n)),
299 get_new_node(get_Not_op(n)), get_irn_mode(n));
302 res = new_r_Cmp (current_ir_graph,
303 get_new_node(get_nodes_Block(n)),
304 get_new_node(get_Cmp_left(n)),
305 get_new_node(get_Cmp_right(n)));
308 res = new_r_Shl (current_ir_graph, get_new_node(get_nodes_Block(n)),
309 get_new_node(get_Shl_left(n)),
310 get_new_node(get_Shl_right(n)), get_irn_mode(n));
313 res = new_r_Shr (current_ir_graph, get_new_node(get_nodes_Block(n)),
314 get_new_node(get_Shr_left(n)),
315 get_new_node(get_Shr_right(n)), get_irn_mode(n));
318 res = new_r_Shrs (current_ir_graph, get_new_node(get_nodes_Block(n)),
319 get_new_node(get_Shrs_left(n)),
320 get_new_node(get_Shrs_right(n)), get_irn_mode(n));
323 res = new_r_Rot (current_ir_graph, get_new_node(get_nodes_Block(n)),
324 get_new_node(get_Rot_left(n)),
325 get_new_node(get_Rot_right(n)), get_irn_mode(n));
328 res = new_r_Conv (current_ir_graph, get_new_node(get_nodes_Block(n)),
329 get_new_node(get_Conv_op(n)),
/* Phi: placeholder was pre-allocated by create_dummy; fix preds/block. */
334 res = get_new_node(n);
335 for (i = 0; i < get_Phi_n_preds(n); i++)
336 set_Phi_pred(res, i, get_new_node(get_Phi_pred(n, i)));
337 set_nodes_Block(res, get_new_node(get_nodes_Block(n)));
341 res = new_r_Load (current_ir_graph, get_new_node(get_nodes_Block(n)),
342 get_new_node(get_Load_mem(n)),
343 get_new_node(get_Load_ptr(n)));
346 res = new_r_Store (current_ir_graph, get_new_node(get_nodes_Block(n)),
347 get_new_node(get_Store_mem(n)),
348 get_new_node(get_Store_ptr(n)),
349 get_new_node(get_Store_value(n)));
352 res = new_r_Alloc (current_ir_graph, get_new_node(get_nodes_Block(n)),
353 get_new_node(get_Alloc_mem(n)),
354 get_new_node(get_Alloc_size(n)),
355 get_Alloc_type(n), get_Alloc_where(n));
359 res = new_r_Free (current_ir_graph, get_new_node(get_nodes_Block(n)),
360 get_new_node(get_Free_mem(n)),
361 get_new_node(get_Free_ptr(n)),
362 get_new_node(get_Free_size(n)), get_Free_type(n));
366 ir_node **in = get_Sync_preds_arr(n);
367 for (i = 0; i < get_Sync_n_preds(n); i++)
368 set_Sync_pred(n, i, get_new_node(get_Sync_pred(n, i)));
369 res = new_r_Sync (current_ir_graph, get_new_node(get_nodes_Block(n)),
370 get_Sync_n_preds(n), in);
374 res = new_r_Proj (current_ir_graph, get_new_node(get_nodes_Block(n)),
375 get_new_node(get_Proj_pred(n)), get_irn_mode(n),
381 ir_node **in = get_Tuple_preds_arr(n);
382 for (i = 0; i < get_Tuple_n_preds(n); i++)
383 set_Tuple_pred(n, i, get_new_node(get_Tuple_pred(n, i)));
384 res = new_r_Tuple (current_ir_graph, get_new_node(get_nodes_Block(n)),
385 get_Tuple_n_preds(n), in);
/* Id: forward to the copy of the referenced node (the Id collapses). */
389 res = get_new_node(get_Id_pred(n));
395 /* @@@ Here we could call optimize()!! Not necessary, called in constructor anyways. */
/* Record the copy and emit a debug trace of the new node. */
396 set_new_node(n, res);
397 printf(" "); DDMSG2(res);
400 /* Copies the graph reachable from current_ir_graph->end to the obstack
402 Then fixes the fields in current_ir_graph containing nodes of the
/* NOTE(review): the function header for this body was dropped from the
   listing (original lines 403-405); this is the copy-graph driver that
   walks from the end node and then repairs the irg anchor fields. */
406 /* Not all nodes remembered in current_ir_graph might be reachable
407 from the end node. Assure their link is set to NULL so that
408 we can test whether new nodes have been computed. */
409 set_irn_link(get_irg_frame (current_ir_graph), NULL);
410 set_irn_link(get_irg_globals(current_ir_graph), NULL);
411 set_irn_link(get_irg_args (current_ir_graph), NULL);
/* Copy everything reachable from the end node. */
414 irg_walk(get_irg_end(current_ir_graph), copy_node, copy_preds, NULL);
416 /* fix the fields in current_ir_graph */
417 set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
418 set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
/* Anchors not reached from the end node (link still NULL) get their own
   copy walks so every anchor has a counterpart on the new obstack. */
419 if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL)
420 irg_walk(get_irg_frame(current_ir_graph), copy_node, copy_preds, NULL);
421 if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL)
422 irg_walk(get_irg_globals(current_ir_graph), copy_node, copy_preds, NULL);
423 if (get_irn_link(get_irg_args(current_ir_graph)) == NULL)
424 irg_walk(get_irg_args(current_ir_graph), copy_node, copy_preds, NULL);
425 set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
426 set_irg_start_block(current_ir_graph,
427 get_new_node(get_irg_start_block(current_ir_graph)));
428 set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
429 set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
430 set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
/* The Bad node is never reachable via in-edges; copy it by hand. */
431 if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
432 copy_node(get_irg_bad(current_ir_graph), NULL);
433 copy_preds(get_irg_bad(current_ir_graph), NULL);
435 set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
/* NOTE(review): the function header for this body was dropped from the
   listing (original lines 436-439); this routine pre-copies the anchor
   nodes of the graph by hand before the create_dummy/copy_node2 walk. */
440 ir_node *old_node, *new_node, *projX;
441 ir_graph *irg = current_ir_graph;
444 printf("Before starting the DEAD NODE ELIMINATION !\n");
446 /* Copy nodes remembered in irg fields first.
447 The optimization contains tests against these fields, e.g., not
448 to optimize the start block away. Therefore these fields have to
450 Further setting these fields in copy_node would impose additional
451 tests for all nodes of a kind.
452 Predict the visited flag the walker will use! */
453 /* Copy the start Block node. Get the ProjX of the Start node, that is
454 predecessor of the start Block. We have to break the cycle and fix it
455 later. We use the old in array as placeholder. */
456 old_node = irg->start_block;
457 new_node = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(old_node),
458 get_Block_cfgpred_arr(old_node));
459 /* new_r_Block calls no optimization --> save */
460 projX = get_Block_cfgpred(old_node, 0);
461 irg->start_block = new_node;
/* Record the copy and mark the old node as already visited for the
   upcoming walk (visited counter + 1 is what the walker will use). */
462 set_new_node (old_node, new_node);
463 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
464 /* Copy the Start node */
465 old_node = irg->start;
466 new_node = new_r_Start (current_ir_graph, irg->start_block);
467 irg->start = new_node;
468 set_new_node (old_node, new_node);
469 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
470 /* Copy the Bad node */
472 new_node = new_ir_node (irg, irg->start_block, op_Bad, mode_T, 0, NULL);
474 set_new_node (old_node, new_node);
475 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
476 /* Copy the Projs for the Start's results. */
/* The X-Proj closes the cycle broken above: it becomes control-flow
   predecessor 0 of the new start block. */
478 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_X, pns_initial_exec);
479 set_Block_cfgpred(irg->start_block, 0, new_node);
480 set_new_node (old_node, new_node);
481 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
483 old_node = irg->frame;
484 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_frame_base);
485 irg->frame = new_node;
486 set_new_node (old_node, new_node);
487 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
489 old_node = irg->globals;
490 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_p, pns_globals);
491 irg->globals = new_node;
492 set_new_node (old_node, new_node);
493 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
495 old_node = irg->args;
496 new_node = new_r_Proj (irg, irg->start_block, irg->start, mode_T, pns_args);
497 irg->args = new_node;
498 set_new_node (old_node, new_node);
499 set_irn_visited (old_node, get_irg_visited(current_ir_graph)+1);
501 /* Walks the graph once, and at the recursive way do the copy thing.
502 all reachable nodes will be copied to a new obstack. */
503 irg_walk(irg->end, create_dummy, copy_node2, NULL);
506 printf("After DEAD NODE ELIMINATION !\n");
509 /* Amroq calls this emigrate() */
/* Performs garbage collection on the graph: copies all reachable nodes
   onto a fresh obstack and abandons the old one, if the optimization is
   enabled. Saves/restores the global current_ir_graph around the work.
   NOTE(review): the old ("graveyard") obstack is intentionally NOT freed
   here — the frees below are commented out, so the old memory is
   retained; confirm against the full file whether a later revision
   re-enabled them. */
511 dead_node_elimination(ir_graph *irg) {
513 struct obstack *graveyard_obst = NULL;
514 struct obstack *rebirth_obst = NULL;
516 /* Remember external state of current_ir_graph. */
517 rem = current_ir_graph;
518 current_ir_graph = irg;
520 if (get_optimize() && get_opt_dead_node_elimination()) {
522 /* A quiet place, where the old obstack can rest in peace,
523 until it will be cremated. */
524 graveyard_obst = irg->obst;
526 /* A new obstack, where the reachable nodes will be copied to. */
527 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
528 current_ir_graph->obst = rebirth_obst;
529 obstack_init (current_ir_graph->obst);
531 /* Copy the graph from the old to the new obstack */
534 /* Free memory from old unoptimized obstack */
535 // obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
536 //xfree (graveyard_obst); /* ... then free it. */
539 current_ir_graph = rem;