1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** dead node elimination
7 ** walks one time through the whole graph and copies it into another graph,
8 ** so unreachable nodes will be lost.
18 /********************************************************************/
19 /* apply optimizations of iropt to all nodes. */
21 optimize_in_place_wrapper (ir_node *n, void *env) {
25 /* optimize all sons after recursion, i.e., the sons' sons are
27 for (i = -1; i < get_irn_arity(n); i++) {
28 optimized = optimize_in_place(get_irn_n(n, i));
29 set_irn_n(n, i, optimized);
35 local_optimize_graph (ir_graph *irg) {
36 ir_graph *rem = current_ir_graph;
37 current_ir_graph = irg;
39 /* walk over the graph */
40 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
42 current_ir_graph = rem;
45 /********************************************************************/
46 /* Routines for dead node elimination / copying garbage collection */
49 /* Remeber the new node in the old node,
50 by using a field that all nodes have. */
52 set_new_node (ir_node *old, ir_node *new)
58 /* Get this new node, before the old node is forgotton.*/
60 get_new_node (ir_node * n)
65 /* Create this node on a new obstack. */
67 copy_node (ir_node *n, void *env) {
70 res = (ir_node *) malloc (sizeof (ir_node));
71 a = (ir_node *) malloc (sizeof (ir_node));
72 b = (ir_node *) malloc (sizeof (ir_node));
75 a = get_binop_left(n);
76 b = get_binop_right(n);
77 } else if (is_unop(n)) {
81 switch (get_irn_opcode(n)) {
85 ir_node *in [get_Block_n_cfgpreds(n)];
87 for (i=0; i <(get_Block_n_cfgpreds(n)); i++) {
88 in[i] = get_Block_cfgpred (n, i);
90 res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
95 res = new_r_Start (current_ir_graph, get_new_node(n));
99 res = new_r_End (current_ir_graph, get_new_node(n));
100 set_new_node(n, res);
103 res = new_r_Jmp (current_ir_graph, get_new_node(n));
104 set_new_node(n, res);
107 res = new_r_Cond (current_ir_graph, get_new_node(n),
108 get_Cond_selector(n));
109 set_new_node(n, res);
114 ir_node *in [get_Return_n_res(n)];
115 for (i=0; i <(get_Return_n_res(n)); i++) {
116 in[i] = get_Return_res (n, i);
118 res = new_r_Return (current_ir_graph, get_new_node(n),
119 get_Return_mem (n), get_Return_n_res(n), in);
120 set_new_node(n, res);
124 res = new_r_Raise (current_ir_graph, get_new_node(n),
125 get_Raise_mem(n), get_Raise_exoptr(n));
126 set_new_node(n, res);
129 res = new_r_Const (current_ir_graph, get_new_node(n),
130 get_irn_mode(n), get_Const_tarval(n));
131 set_new_node(n, res);
136 value = (type_or_id *) malloc (sizeof (type_or_id));
137 if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
139 value = (type_or_id *) get_SymConst_type(n);
143 if (get_SymConst_kind(n)==linkage_ptr_info)
145 value = (type_or_id *) get_SymConst_ptrinfo(n);
148 res = new_r_SymConst (current_ir_graph, get_new_node(n), value,
149 get_SymConst_kind (n));
150 set_new_node(n, res);
156 ir_node *in [get_Sel_n_index(n)];
157 for (i=0; i <(get_Sel_n_index(n)); i++) {
158 in[i] = get_Sel_index (n, i);
160 res = new_r_Sel (current_ir_graph, get_new_node(n),
161 get_Sel_mem(n), get_Sel_ptr(n), get_Sel_n_index(n),
162 in, get_Sel_entity(n));
163 set_new_node(n, res);
169 ir_node *in [get_Call_arity(n)];
170 for (i=0; i <(get_Call_arity(n)); i++) {
171 in[i] = get_Call_param (n, i);
173 res = new_r_Call (current_ir_graph, get_new_node(n), get_Call_mem(n),
174 get_Call_ptr(n), get_Call_arity(n),
175 in, get_Call_type (n));
176 set_new_node(n, res);
180 res = new_r_Add (current_ir_graph, get_new_node(n),
181 get_new_node(a), get_new_node(b), get_irn_mode(n));
182 set_new_node(n, res);
187 temp_node = get_nodes_Block(n);
188 res = new_r_Sub (current_ir_graph, get_new_node(temp_node),
189 get_new_node(a), get_new_node(b), get_irn_mode(n));
190 set_new_node(n, res);
194 res = new_r_Minus (current_ir_graph, get_new_node(n), get_new_node(a),
196 set_new_node(n, res);
199 res = new_r_Mul (current_ir_graph, get_new_node(n), get_new_node(a),
200 get_new_node(b), get_irn_mode(n));
203 res = new_r_Quot (current_ir_graph, get_new_node(n), get_Quot_mem (n),
204 get_new_node(a), get_new_node(b));
207 res = new_r_DivMod (current_ir_graph, get_new_node(n), get_DivMod_mem(n),
208 get_new_node(a), get_new_node(b));
211 res = new_r_Div (current_ir_graph, get_new_node(n), get_Div_mem(n),
212 get_new_node(a), get_new_node(b));
215 res = new_r_Mod (current_ir_graph, get_new_node(n), get_Mod_mem(n),
216 get_new_node(a), get_new_node(b));
219 res = new_r_Abs (current_ir_graph, get_new_node(n), get_Abs_op(n),
223 res = new_r_And (current_ir_graph, get_new_node(n), get_new_node(a),
224 get_new_node(b), get_irn_mode(n));
227 res = new_r_Or (current_ir_graph, get_new_node(n), get_new_node(a),
228 get_new_node(b), get_irn_mode(n));
231 res = new_r_Eor (current_ir_graph, get_new_node(n), get_new_node(a),
232 get_new_node(b), get_irn_mode(n));
235 res = new_r_Not (current_ir_graph, get_new_node(n), get_Not_op(n),
239 res = new_r_Cmp (current_ir_graph, get_new_node(n), get_Cmp_left(n),
243 res = new_r_Shl (current_ir_graph, get_new_node(n), get_Shl_left(n),
244 get_Shl_right(n), get_irn_mode(n));
247 res = new_r_Shr (current_ir_graph, get_new_node(n), get_Shr_left(n),
248 get_Shr_right(n), get_irn_mode(n));
251 res = new_r_Shrs (current_ir_graph, get_new_node(n), get_Shrs_left(n),
252 get_Shrs_right(n), get_irn_mode(n));
255 res = new_r_Rot (current_ir_graph, get_new_node(n), get_Rot_left(n),
256 get_Rot_right(n), get_irn_mode(n));
259 res = new_r_Conv (current_ir_graph, get_new_node(n), get_Conv_op(n),
265 ir_node *in [get_Phi_n_preds(n)];
266 for (i=0; i <(get_Phi_n_preds(n)); i++) {
267 in[i] = get_Phi_pred (n, i);
269 res = new_r_Phi (current_ir_graph, get_new_node(n),
270 get_Phi_n_preds(n), in, get_irn_mode(n));
271 set_new_node(n, res);
275 res = new_r_Load (current_ir_graph, get_new_node(n), get_Load_mem(n),
279 res = new_r_Store (current_ir_graph, get_new_node(n), get_Store_mem(n),
280 get_Store_ptr(n), get_Store_value(n));
283 res = new_r_Alloc (current_ir_graph, get_new_node(n),
284 get_Alloc_mem(n), get_Alloc_size(n),
285 get_Alloc_type(n), get_Alloc_where(n));
289 res = new_r_Free (current_ir_graph, get_new_node(n),
290 get_Free_mem(n), get_Free_ptr(n),
291 get_Free_size(n), get_Free_type(n));
296 ir_node *in [get_Sync_n_preds(n)];
297 for (i=0; i <(get_Sync_n_preds(n)); i++) {
298 in[i] = get_Sync_pred (n, i);
300 res = new_r_Sync (current_ir_graph, get_new_node(n),
301 get_Sync_n_preds(n), in);
302 set_new_node(n, res);
306 res = new_r_Proj (current_ir_graph, get_new_node(n),
307 get_Proj_pred(n), get_irn_mode(n),
313 ir_node *in [get_Tuple_n_preds(n)];
314 for (i=0; i <(get_Tuple_n_preds(n)); i++) {
315 in[i] = get_Tuple_pred (n, i);
317 res = new_r_Tuple (current_ir_graph, get_new_node(n),
318 get_Tuple_n_preds(n), in);
319 set_new_node(n, res);
323 res = new_r_Id (current_ir_graph, get_new_node(n),
324 get_Id_pred(n), get_irn_mode(n));
327 res = new_r_Bad (get_new_node(n));
334 dead_node_elemination(ir_graph *irg) {
335 struct obstack *graveyard_obst;
336 struct obstack *rebirth_obst;
337 ir_graph *rem = current_ir_graph;
338 current_ir_graph = irg;
340 /* A quiet place, where the old obstack can rest in peace,
341 until it will be cremated. */
342 graveyard_obst = irg->obst;
344 /* A new obstack, where the reachable nodes will be copied to. */
345 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
346 current_ir_graph->obst = rebirth_obst;
347 obstack_init (current_ir_graph->obst);
349 /* Walks the graph once, and at the recursive way do the copy thing.
350 all reachable nodes will be copied to a new obstack. */
351 irg_walk(irg->end, NULL, copy_node, NULL);
353 /* Free memory from old unoptimized obstack */
354 xfree (graveyard_obst);
356 current_ir_graph = rem;