1 /* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
2 ** All rights reserved.
4 ** Author: Christian Schaefer
6 ** dead node elimination
7 ** walks one time through the whole graph and copies it into another graph,
8 ** so unreachable nodes will be lost.
/* Walker callback for local_optimize_graph: after recursing into the
   predecessors, replace each predecessor of n by its locally optimized
   equivalent.
   NOTE(review): fragment -- the return type and the declarations of
   i and optimized are not visible in this excerpt. */
19 optimize_in_place_wrapper (ir_node *n, void *env) {
23 /* optimize all sons after recursion, i.e., the sons' sons are
   already optimized when n itself is rewritten. */
   /* NOTE(review): index -1 presumably addresses the node's block
      (firm convention for get_irn_n/set_irn_n), so this loop covers the
      block pointer plus every data predecessor -- confirm against the
      ir_node accessor API. */
25 for (i = -1; i < get_irn_arity(n); i++) {
26 optimized = optimize_in_place(get_irn_n(n, i));
27 set_irn_n(n, i, optimized);
/* Apply local (peephole) optimization to every node of irg by walking
   the graph once from the End node.  Saves and restores the global
   current_ir_graph so the caller's context survives the call.
   NOTE(review): fragment -- the closing brace is not visible here. */
33 local_optimize_graph (ir_graph *irg) {
34 ir_graph *rem = current_ir_graph;
35 current_ir_graph = irg;
37 /* walk over the graph */
38 irg_walk(irg->end, NULL, optimize_in_place_wrapper, NULL);
40 current_ir_graph = rem;
43 /* Remember the new node in the old node,
44 by using a field that all nodes have. */
   /* NOTE(review): body not visible in this excerpt.  Also note the
      parameter name `new` -- valid C, but it prevents this header/file
      from ever being compiled as C++. */
46 set_new_node (ir_node *old, ir_node *new)
52 /* Get the new node remembered in n, before the old node is forgotten. */
   /* NOTE(review): body and return type not visible in this excerpt;
      presumably returns the copy stored by set_new_node. */
54 get_new_node (ir_node * n)
61 /* Create this node on a new obstack: visitor for dead_node_elemination.
   Rebuilds n with the raw new_r_* constructors (which allocate on the
   obstack of current_ir_graph) and remembers the copy in the old node
   via set_new_node.
   NOTE(review): this view of the function is a fragment -- the case
   labels, break statements, local declarations (res, a, b, i, temp_node,
   value) and the closing brace are missing from the excerpt. */
63 copy_node (ir_node *n, void *env) {
   /* NOTE(review): these three mallocs are dead -- res, a and b are all
      reassigned below without the allocations ever being read or freed,
      so every call leaks 3 * sizeof(ir_node).  The new_r_* constructors
      allocate the result themselves; these mallocs should be removed. */
66 res = (ir_node *) malloc (sizeof (ir_node));
67 a = (ir_node *) malloc (sizeof (ir_node));
68 b = (ir_node *) malloc (sizeof (ir_node));
   /* Binary op: fetch both operands from the OLD node; the opcode cases
      below map them into the new graph with get_new_node(a/b). */
71 a = get_binop_left(n);
72 b = get_binop_right(n);
73 } else if (is_unop(n)) {
   /* Dispatch on the opcode; each case rebuilds the node in the new
      graph and records the mapping old -> new.
      NOTE(review): most cases pass get_new_node(n) where the constructors
      expect the *block* of the copy -- only the Sub case uses
      get_nodes_block(n) (see below); and several cases feed old-graph
      operands (e.g. get_Cond_selector(n), get_*_mem(n), the in[] arrays)
      to the new constructors without mapping them through get_new_node.
      Both look inconsistent/buggy -- verify against the intended
      copy scheme. */
77 switch (get_irn_opcode(n)) {
81 ir_node *in [get_Block_n_cfgpreds(n)];
83 for (i=0; i <(get_Block_n_cfgpreds(n)); i++) {
84 in[i] = get_Block_cfgpred (n, i);
86 res = new_r_Block (current_ir_graph, get_Block_n_cfgpreds(n), in);
91 res = new_r_Start (current_ir_graph, get_new_node(n));
95 res = new_r_End (current_ir_graph, get_new_node(n));
99 res = new_r_Jmp (current_ir_graph, get_new_node(n));
100 set_new_node(n, res);
103 res = new_r_Cond (current_ir_graph, get_new_node(n),
104 get_Cond_selector(n));
105 set_new_node(n, res);
110 ir_node *in [get_Return_n_res(n)];
111 for (i=0; i <(get_Return_n_res(n)); i++) {
112 in[i] = get_Return_res (n, i);
114 res = new_r_Return (current_ir_graph, get_new_node(n),
115 get_Return_mem (n), get_Return_n_res(n), in);
116 set_new_node(n, res);
120 res = new_r_Raise (current_ir_graph, get_new_node(n),
121 get_Raise_mem(n), get_Raise_exoptr(n));
122 set_new_node(n, res);
125 res = new_r_Const (current_ir_graph, get_new_node(n),
126 get_irn_mode(n), get_Const_tarval(n));
127 set_new_node(n, res);
   /* NOTE(review): dead malloc again -- value is overwritten by one of
      the branches below before it is ever used; leaks per SymConst. */
132 value = (type_or_id *) malloc (sizeof (type_or_id));
133 if ((get_SymConst_kind(n)==type_tag) || (get_SymConst_kind(n)==size))
135 value = get_SymConst_type(n);
139 if (get_SymConst_kind(n)==linkage_ptr_info)
141 value = get_SymConst_ptrinfo(n);
144 res = new_r_SymConst (current_ir_graph, get_new_node(n), value,
145 get_SymConst_kind (n));
146 set_new_node(n, res);
152 ir_node *in [get_Sel_n_index(n)];
153 for (i=0; i <(get_Sel_n_index(n)); i++) {
154 in[i] = get_Sel_index (n, i);
156 res = new_r_Sel (current_ir_graph, get_new_node(n),
157 get_Sel_mem(n), get_Sel_ptr(n), get_Sel_n_index(n),
158 in, get_Sel_entity(n));
159 set_new_node(n, res);
165 ir_node *in [get_Call_arity(n)];
166 for (i=0; i <(get_Call_arity(n)); i++) {
167 in[i] = get_Call_param (n, i);
169 res = new_r_Call (current_ir_graph, get_new_node(n), get_Call_mem(n),
170 get_Call_ptr(n), get_Call_arity(n),
171 in, get_Call_type (n));
172 set_new_node(n, res);
176 res = new_r_Add (current_ir_graph, get_new_node(n),
177 get_new_node(a), get_new_node(b), get_irn_mode(n));
178 set_new_node(n, res);
   /* Sub is the only case that passes the copy of n's *block* as the
      block argument -- presumably the correct pattern the other cases
      should follow; verify. */
183 temp_node = get_nodes_block(n);
184 res = new_r_Sub (current_ir_graph, get_new_node(temp_node),
185 get_new_node(a), get_new_node(b), get_irn_mode(n));
186 set_new_node(n, res);
190 res = new_r_Minus (current_ir_graph, get_new_node(n), get_new_node(a),
192 set_new_node(n, res);
195 res = new_r_Mul (current_ir_graph, get_new_node(n), get_new_node(a),
196 get_new_node(b), get_irn_mode(n));
199 res = new_r_Quot (current_ir_graph, get_new_node(n), get_Quot_mem (n),
200 get_new_node(a), get_new_node(b));
203 res = new_r_DivMod (current_ir_graph, get_new_node(n), get_DivMod_mem(n),
204 get_new_node(a), get_new_node(b));
207 res = new_r_Div (current_ir_graph, get_new_node(n), get_Div_mem(n),
208 get_new_node(a), get_new_node(b));
211 res = new_r_Mod (current_ir_graph, get_new_node(n), get_Mod_mem(n),
212 get_new_node(a), get_new_node(b));
   /* NOTE(review): this looks like the Abs case, yet it constructs
      new_r_Mod -- almost certainly a copy/paste bug for new_r_Abs.
      Confirm against the constructor list and fix. */
215 res = new_r_Mod (current_ir_graph, get_new_node(n), get_Abs_op(n),
219 res = new_r_And (current_ir_graph, get_new_node(n), get_new_node(a),
220 get_new_node(b), get_irn_mode(n));
223 res = new_r_Or (current_ir_graph, get_new_node(n), get_new_node(a),
224 get_new_node(b), get_irn_mode(n));
227 res = new_r_Eor (current_ir_graph, get_new_node(n), get_new_node(a),
228 get_new_node(b), get_irn_mode(n));
231 res = new_r_Not (current_ir_graph, get_new_node(n), get_Not_op(n),
235 res = new_r_Cmp (current_ir_graph, get_new_node(n), get_Cmp_left(n),
239 res = new_r_Shl (current_ir_graph, get_new_node(n), get_Shl_left(n),
240 get_Shl_right(n), get_irn_mode(n));
243 res = new_r_Shr (current_ir_graph, get_new_node(n), get_Shr_left(n),
244 get_Shr_right(n), get_irn_mode(n));
247 res = new_r_Shrs (current_ir_graph, get_new_node(n), get_Shrs_left(n),
248 get_Shrs_right(n), get_irn_mode(n));
251 res = new_r_Rot (current_ir_graph, get_new_node(n), get_Rot_left(n),
252 get_Rot_right(n), get_irn_mode(n));
255 res = new_r_Conv (current_ir_graph, get_new_node(n), get_Conv_op(n),
261 ir_node *in [get_Phi_n_preds(n)];
262 for (i=0; i <(get_Phi_n_preds(n)); i++) {
263 in[i] = get_Phi_pred (n, i);
265 res = new_r_Phi (current_ir_graph, get_new_node(n),
266 get_Phi_n_preds(n), in, get_irn_mode(n));
267 set_new_node(n, res);
271 res = new_r_Load (current_ir_graph, get_new_node(n), get_Load_mem(n),
275 res = new_r_Store (current_ir_graph, get_new_node(n), get_Store_mem(n),
276 get_Store_ptr(n), get_Store_value(n));
279 res = new_r_Alloc (current_ir_graph, get_new_node(n),
280 get_Alloc_mem(n), get_Alloc_size(n),
281 get_Alloc_type(n), get_Alloc_where(n));
285 res = new_r_Free (current_ir_graph, get_new_node(n),
286 get_Free_mem(n), get_Free_ptr(n),
287 get_Free_size(n), get_Free_type(n));
292 ir_node *in [get_Sync_n_preds(n)];
293 for (i=0; i <(get_Sync_n_preds(n)); i++) {
294 in[i] = get_Sync_pred (n, i);
296 res = new_r_Sync (current_ir_graph, get_new_node(n),
297 get_Sync_n_preds(n), in);
298 set_new_node(n, res);
302 res = new_r_Proj (current_ir_graph, get_new_node(n),
303 get_Proj_pred(n), get_irn_mode(n),
309 ir_node *in [get_Tuple_n_preds(n)];
310 for (i=0; i <(get_Tuple_n_preds(n)); i++) {
311 in[i] = get_Tuple_pred (n, i);
313 res = new_r_Tuple (current_ir_graph, get_new_node(n),
314 get_Tuple_n_preds(n), in);
315 set_new_node(n, res);
319 res = new_r_Id (current_ir_graph, get_new_node(n),
320 get_Id_pred(n), get_irn_mode(n));
323 res = new_r_Bad (get_new_node(n));
/* Dead node elimination: copy every node reachable from the End node
   onto a fresh obstack; unreachable nodes stay on the old obstack, which
   is then discarded.
   NOTE(review): "elemination" is a typo, but it is the public name --
   renaming would break callers.  Fragment: the function continues past
   the end of this excerpt. */
331 dead_node_elemination(ir_graph *irg) {
332 struct obstack *graveyard_obst;
333 struct obstack *rebirth_obst;
335 /* A quiet place, where the old obstack can rest in peace,
336 until it will be cremated. */
337 graveyard_obst = irg->obst;
   /* NOTE(review): the code below writes current_ir_graph->obst although
      the parameter is irg -- this silently assumes irg == current_ir_graph.
      Unlike local_optimize_graph there is no save/set/restore of
      current_ir_graph visible here; confirm callers guarantee this. */
339 /* A new obstack, where the reachable nodes will be copied to. */
340 rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
341 current_ir_graph->obst = rebirth_obst;
342 obstack_init (current_ir_graph->obst);
344 /* Walk the graph once from End; copy_node copies every reachable node
345 onto the new obstack on the way back up the recursion. */
346 irg_walk(irg->end, NULL, copy_node, NULL);
348 /* Free memory from old unoptimized obstack */
   /* NOTE(review): xfree releases only the struct obstack header itself.
      Without a preceding obstack_free(graveyard_obst, NULL) every chunk
      of the old obstack (i.e. all the old nodes) is leaked -- TODO
      confirm against the lines missing from this excerpt. */
349 xfree (graveyard_obst);