/* Copyright (C) 1998 - 2002 by Universitaet Karlsruhe
** All rights reserved.
**
** Author: Christian Schaefer, Goetz Lindenmaier, Sebastian Felis
**
** Optimizations for a whole ir graph, i.e., a procedure.
*/
# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h"       /* For code placement */
# include "irbackedge_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
void  del_identities (pset *value_table);
void  add_identities (pset *value_table, ir_node *node);
/********************************************************************/
/* Apply optimizations of iropt to all nodes.                       */
/********************************************************************/
static void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);
}
static void
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  ir_node *optimized;

  for (i = 0; i < get_irn_arity(n); i++) {
    optimized = optimize_in_place_2(get_irn_n(n, i));
    set_irn_n(n, i, optimized);
  }
  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
  }
}
void
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the cse. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* walk over the graph */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
}
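
/* Usage sketch (not from the original file): running the local
   optimizations over every graph of the program.  Assumes the irprog
   interface (get_irp_n_irgs()/get_irp_irg() from irprog.h). */
#if 0
static void local_optimize_all_irgs (void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    local_optimize_graph(get_irp_irg(i));
}
#endif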
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection */
/********************************************************************/
/* Remember the new node in the old node by using a field all nodes have. */
static INLINE void
set_new_node (ir_node *old, ir_node *new)
/* Get this new node, before the old node is forgotten. */
static INLINE ir_node *
get_new_node (ir_node *n)
/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses.  This function is called for all Phi and Block nodes
   in a Block. */
static INLINE int
compute_new_arity(ir_node *b) {
  int i, res;
  int irg_v, block_v;

  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* we computed the number of preds for this block and saved it in the
       block_visited flag */
    return block_v - irg_v;
  } else {
    /* compute the number of good predecessors */
    res = get_irn_arity(b);
    for (i = 0; i < get_irn_arity(b); i++)
      if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
    /* save it in the flag. */
    set_Block_block_visited(b, irg_v + res);
    return res;
  }
}
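
/* Worked example of the encoding above (illustration only): assume
   get_irg_block_visited() is 10 and block b has three control flow
   predecessors, one of them Bad.  The first call counts res = 2 and
   stores block_visited = 12; every later call sees block_v (12) >=
   irg_v (10) and returns 12 - 10 = 2 without rescanning the in array. */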
static INLINE void new_backedge_info(ir_node *n) {
  switch(get_irn_opcode(n)) {
  case iro_Block:
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Phi:
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n)); break;
  case iro_Filter:
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n)); break;
  }
}
/* Copies the node to the new obstack.  The Ins of the new node point to
   the predecessors on the old obstack.  For block/phi nodes not all
   predecessors might be copied.  n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors.  The arity is determined by counting
   the non-bad predecessors of the block. */
static void
copy_node (ir_node *n, void *env) {
  ir_node *nn, *block;
  int new_arity;

  if (get_irn_opcode(n) == iro_Block) {
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
  } else {
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
    } else {
      new_arity = get_irn_arity(n);
    }
  }
  nn = new_ir_node(get_irn_dbg_info(n),
  /* Copy the attributes.  These might point to additional data.  If this
     was allocated on the old obstack the pointers now are dangling.  This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  new_backedge_info(nn);
  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn); */
/* Copies new predecessors of old node to new node remembered in link.
   Spare the Bad predecessors of Phi and Block nodes. */
static void
copy_preds (ir_node *n, void *env) {
  ir_node *nn, *block;
  int i, j;

  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */

  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /* if (is_backedge(n, i)) set_backedge(nn, j); */
        j++;
      }
    /* repair the block visited flag from above misuse.  Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if the
       in array contained Bads.  Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy node if corresponding predecessor in block is Bad.
       The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /* if (is_backedge(n, i)) set_backedge(nn, j); */
        j++;
      }
    /* If the pre walker reached this Phi after the post walker visited the
       block, block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
  } else {
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
  }
  /* Now the new node is complete.  We can add it to the hash table for cse.
     @@@ inlining aborts if we identify End.  Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
}
/* Copies the graph recursively, compacts the keepalives of the end node. */
static void
copy_graph (void) {
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */
  int i;

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),
  /* Copy the attributes.  Well, there might be some in the future... */
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));

  /** ... and now the keep alives. **/
  /* First pick the not marked block nodes and walk them.  We must pick these
     first, as otherwise we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
    }
  }

  /* Now pick the Phis.  Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      }
      add_End_keepalive(ne, get_new_node(ka));
    }
  }
}
/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   graph. */
static void
copy_graph_env (void) {
  ir_node *old_end;

  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node.  Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* we use the block walk flag for removing Bads from Blocks ins. */
  inc_irg_block_visited(current_ir_graph);

  copy_graph();

  /* fix the fields in current_ir_graph */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));
  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  }
  set_irg_start      (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame  (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args   (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  }
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  }
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
}
/* Copies all reachable nodes to a new obstack.  Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   Phi nodes with only one predecessor.
   Adds all new nodes to a new hash table for cse.  Does not
   perform cse, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate() */
void
dead_node_elimination(ir_graph *irg) {
  ir_graph *rem;
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ so far we lose loops when copying */
  set_irg_loop(current_ir_graph, NULL);

  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for cse */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env();

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */
  }

  current_ir_graph = rem;
}
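
/* Sketch (not from the original file): dead node elimination is typically
   run after other passes have turned nodes unreachable; copying the live
   nodes then also compacts the obstack.  The wrapper name is
   hypothetical. */
#if 0
static void cleanup_irg (ir_graph *irg) {
  local_optimize_graph(irg);   /* may leave unreachable ("dead") nodes */
  dead_node_elimination(irg);  /* copies live nodes to a fresh obstack */
}
#endif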
/* Relinks bad predecessors of a block and stores the old in array in the
   link field.  This function is called by relink_bad_predecessors().
   The array in the link field starts with the block operand at position 0.
   If the block has bad predecessors, create a new in array without bad preds.
   Otherwise leave the in array untouched. */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
  ir_node **new_in, *irn;
  int i, new_irn_n, old_irn_arity, new_irn_arity = 0;

  /* if the link field of the block is NULL, look for bad predecessors;
     otherwise this is already done */
  if (get_irn_op(n) == op_Block &&
      get_irn_link(n) == NULL) {

    /* save old predecessors in link field (position 0 is the block operand)*/
    set_irn_link(n, (void *)get_irn_in(n));

    /* count predecessors without bad nodes */
    old_irn_arity = get_irn_arity(n);
    for (i = 0; i < old_irn_arity; i++)
      if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;

    /* arity changing: set new predecessors without bad nodes */
    if (new_irn_arity < old_irn_arity) {
      /* get new predecessor array without Block predecessor */
      new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));

      /* set new predecessors in array */
      new_in[0] = NULL;
      new_irn_n = 1;
      for (i = 1; i < old_irn_arity; i++) {
        irn = get_irn_n(n, i);
        if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
      }
      n->in = new_in;
    } /* ir node has bad predecessors */
  } /* Block is not relinked */
}
/* Relinks Bad predecessors from Blocks and Phis, called by the walker in
   remove_bad_predecessors().  If n is a Block, call
   relink_bad_block_predecessors().  If n is a Phi node, call also the
   relinking function of the Phi's Block.  If this block has bad
   predecessors, relink the Phi's preds, too. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of the Phi's block */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if the number of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array
         n->in[0] remains the same block */
      new_irn_arity = 1;
      for (i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
    }
  } /* n is a Phi node */
}
/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination, but without
   copying the graph.
   On the way down the walker sets the link field to NULL; on the way up
   it calls relink_bad_predecessors() (this function stores the old in
   array in the link field and sets a new in array if the arity of the
   predecessors changes). */
void remove_bad_predecessors(ir_graph *irg) {
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
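
/* Sketch (not from the original file): unlike dead_node_elimination(),
   this pass repairs the in arrays in place and allocates no new obstack,
   so it is the cheaper choice when only Bad predecessors must go.
   `irg' is a hypothetical graph variable. */
#if 0
  remove_bad_predecessors(irg);
#endif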
/**********************************************************************/
/* Functionality for inlining                                         */
/**********************************************************************/
/* Copy node for inlining.  Copies the node by calling copy_node and
   then updates the entity if it's a local one.  env must be a pointer
   to the frame type of the procedure.  The new entities must be in
   the link field of the entities. */
static void
copy_node_inline (ir_node *n, void *env) {
  ir_node *new;
  type *frame_tp = (type *)env;

  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
    }
  }
}
void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *pre_call;
  ir_node *post_call, *post_bl;
  ir_node *in[5];
  ir_node *end, *end_bl;
  ir_node **res_pred, **cf_pred;
  ir_node *ret, *phi;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;
  type *called_frame;
  if (!get_optimize() || !get_opt_inline()) return;
  /** Turn off optimizations, this can cause problems when allocating new nodes. **/
  rem_opt = get_optimize();
  set_optimize(0);
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /** Check preconditions **/
  assert(get_irn_op(call) == op_Call);
  /* @@@ TODO does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
     get_Call_type(call)));
  */
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) return;
  /** Part the Call node into two nodes.  Pre_call collects the parameters of
      the procedure and later replaces the Start node of the called graph.
      Post_call is the old Call node and collects the results of the called
      graph.  Both will end up being a tuple. **/
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[0] = new_Jmp();
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);
  post_call = call;

  /** Part the block of the Call node into two blocks.
      The new block gets the ins of the old block, pre_call and all its
      predecessors and all Phi nodes. **/
  part_block(pre_call);
  /** Prepare state for dead node elimination **/
  /* Visited flags in calling irg must be >= flag in called irg.
     Else walker and arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as new Start node in link field of the start node of the
     called graph and pre_call's block as new block for the start block
     of the called graph.
     Further mark these nodes so that they are not visited by the
     copy. */
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));

  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);
  /*** Replicate local entities of the called_graph ***/
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);
  }

  /* visited is greater than that of the called graph.  With this trick
     visited will remain unchanged so that an outer walker, e.g., searching
     the call nodes to inline, calling this inline will not visit the
     inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
  /** Performing dead node elimination inlines the graph **/
  /* Copies the nodes to the obstack of current_ir_graph.  Updates links to
     new nodes. */
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);
  /*** Merge the end of the inlined procedure with the call site ***/
  /* We will turn the old Call node into a Tuple with the following
     predecessors:
     0: Phi of all Memories of Return statements.
     1: Jmp from new Block that merges the control flow from all exception
        predecessors of the old end block.
     2: Tuple of all arguments.
     3: Phi of Exception memories.
  */
  /** Precompute some values **/
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end = get_new_node(get_irg_end(called_graph));
  arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */

  /** archive keepalives **/
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die, but the in array is not on the obstack ... */
  /** Collect control flow from Return blocks to post_call's block.  Replace
      Return nodes by Jmp nodes. **/
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
      n_ret++;
    }
  }
  set_irn_in(post_bl, n_ret, cf_pred);
  /** Collect results from Return nodes to post_call.  Post_call is
      turned into a tuple. **/
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
      n_ret++;
    }
  }
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  }
  /* Now the real results */
  if (n_res > 0) {
    for (j = 0; j < n_res; j++) {
      n_ret = 0;
      for (i = 0; i < arity; i++) {
        ret = get_irn_n(end_bl, i);
        if (get_irn_op(ret) == op_Return) {
          cf_pred[n_ret] = get_Return_res(ret, j);
          n_ret++;
        }
      }
      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
      res_pred[j] = phi;
      /* Conserve Phi-list for further inlinings -- but might be optimized */
      if (get_nodes_Block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
      }
    }
    set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
  } else {
    set_Tuple_pred(call, 2, new_Bad());
  }
  /* Finally the exception control flow.  We need to add a Phi node to
     collect the memory containing the exception objects.  Further we need
     to add another block to get a correct representation of this Phi.  To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  n_exc = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
      n_exc++;
    }
  }
  if (n_exc > 0) {
    new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
    set_Tuple_pred(call, 1, new_Jmp());
    /* The Phi for the memories with the exception objects */
    n_exc = 0;
    for (i = 0; i < arity; i++) {
      ret = skip_Proj(get_irn_n(end_bl, i));
      if (get_irn_op(ret) == op_Call) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
        n_exc++;
      } else if (is_fragile_op(ret)) {
        /* We rely on the fact that all cfops have the memory output at the
           same position. */
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
        n_exc++;
      } else if (get_irn_op(ret) == op_Raise) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
        n_exc++;
      }
    }
    set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
  } else {
    set_Tuple_pred(call, 1, new_Bad());
    set_Tuple_pred(call, 3, new_Bad());
  }
  free(res_pred);
  free(cf_pred);

  /*** Correct the control flow to the end node.
       If the exception control flow from the Call directly branched to the
       end block we now have the following control flow predecessor pattern:
       ProjX -> Tuple -> Jmp.
       We must remove the Jmp along with its empty block and add Jmp's
       predecessors as predecessors of this end block. ***/
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if (get_irn_op(cf_op) == op_Tuple) {
        cf_op = get_Tuple_pred(cf_op, 1);
        assert(get_irn_op(cf_op) == op_Jmp);
        break;
      }
    }
  }
  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j-i);
    for (; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
    set_irn_in(end_bl, arity, cf_pred);
    free(cf_pred);
  }
  /** Turn the optimizations (cse) back on. **/
  set_optimize(rem_opt);
}
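
/* Sketch (not from the original file): inlining one known call site by
   hand.  As inline_small_irgs() below does, collect_phiprojs() must be
   run on the calling graph first, because part_block() relies on the
   collected Phi/Proj lists.  The function name and the `call'/`callee'
   parameters are hypothetical. */
#if 0
static void inline_one_call (ir_graph *irg, ir_node *call, ir_graph *callee) {
  current_ir_graph = irg;
  collect_phiprojs(irg);        /* prerequisite of part_block() */
  inline_method(call, callee);  /* returns without inlining if optimization is off */
}
#endif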
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/
/* It makes no sense to inline too many calls in one procedure.  Anyway,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024

static int pos;
static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_node *addr;
  tarval *tv;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    called_irg = get_entity_irg(tv->u.P.ent);
    if (called_irg && pos < MAX_INLINE) {
      /* The Call node calls a locally defined method.  Remember to inline. */
      calls[pos++] = call;
    }
  }
}
/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   method.
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
   size are inlined. */
void inline_small_irgs(ir_graph *irg, int size) {
  int i;
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We cannot inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining visited of the callee equals visited of
     the caller.  With the next inlining both are increased.) */
  pos = 0;
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tarval *tv;
      ir_graph *callee;
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tv->u.P.ent);
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);
      }
    }
  }

  current_ir_graph = rem;
}
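
/* Usage sketch (not from the original file): the size threshold is
   compared against the bytes used on the callee's obstack, so it is a
   rough code-size measure, not a node count.  The value 5000 and the
   `irg' variable are just illustrations. */
#if 0
  inline_small_irgs(irg, 5000);
#endif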
/********************************************************************/
/* Code Placement.  Pins all floating nodes to a block where they   */
/* will be executed only if needed.                                 */
/********************************************************************/

static pdeq *worklist;      /* worklist of ir_node*s */
/* Find the earliest correct block for N.  --- Place N into the
   same Block as its dominance-deepest Input. */
static INLINE void
place_floats_early (ir_node *n)
{
  int i, start;

  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));
  mark_irn_visited(n);

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    int depth = 0;
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const) ||
        (get_irn_op(n) == op_SymConst) ||
        (is_Bad(n))) {
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);
    }

    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      ir_node *dep_block;
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      }
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         finished on them.  We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        b = dep_block;
        depth = get_Block_dom_depth(dep_block);
      }
      /* Avoid that the node is placed in the Start block */
      if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
        b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
        assert(b != get_irg_start_block(current_ir_graph));
      }
    }
    set_nodes_Block(n, b);
  }

  /* Add predecessors of non-floating nodes to the worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
    }
  }
}
/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
   Start, Call and end at pinned nodes such as Store, Call.  Place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
static INLINE void place_early () {
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);
  }

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
}
/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
static ir_node *
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
{
  ir_node *block = NULL;

  /* Compute the latest block into which we can place a node so that it is
     before consumer. */
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    int i;
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
      }
    }
  } else {
    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  }

  /* Compute the deepest common ancestor of block and dca. */
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
  while (block != dca)
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }

  return dca;
}
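
/* Worked example for consumer_dom_dca() (illustration only): if block has
   dominator depth 5 and dca depth 3, the first while loop lifts block to
   depth 3; the last loop then moves both up in lock step until they meet
   in the deepest common dominator, which is returned. */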
static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));
}
/* Move n to a block with less loop depth than its current block.  The
   new block must be dominated by early. */
static void
move_out_of_loops (ir_node *n, ir_node *early)
{
  ir_node *best, *dca;

  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  best = dca;
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
      best = dca;
    }
  }
  if (best != get_nodes_Block(n)) {
    /*
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));
    */
    set_nodes_Block(n, best);
  }
}
/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth.  This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
static INLINE void
place_floats_late (ir_node *n)
{
  int i;
  ir_node *early;

  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (!is_cfop(n)) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this node to move it
       out of loop no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node.  We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);
    }

    /* We have to determine the final block of this node... except for
       constants. */
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;  /* deepest common ancestor in the
                               dominator tree of all nodes'
                               blocks depending on us; our final
                               placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      }
      set_nodes_Block(n, dca);

      move_out_of_loops (n, early);
    }
  }

  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes on the list.  (Those of
     floating nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
    }
  }
}
static INLINE void place_late() {
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
  }
}
void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  if (!(get_optimize() && get_opt_global_cse())) return;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)
    compute_doms(irg);

  construct_backedges(irg);

  /* Place all floating nodes as early as possible.  This guarantees
     a legal code placement. */
  worklist = new_pdeq ();
  place_early();

  /* place_early invalidates the outs, place_late needs them. */
  compute_outs(irg);
  /* Now move the nodes down in the dominator tree.  This reduces the
     number of unnecessary executions of the node. */
  place_late();

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
}
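
/* Usage sketch (not from the original file): place_code() returns
   immediately unless global CSE is enabled; it computes dominator and
   backedge information itself if needed.  Assumes the irflag.h setter
   set_opt_global_cse(); `irg' is a hypothetical graph variable. */
#if 0
  set_opt_global_cse(1);
  place_code(irg);
#endif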
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks.  A block */
/* is empty if it contains only a Jmp node.                         */
/* Blocks can only be removed if they are not needed for the        */
/* semantics of Phi nodes.                                          */
/********************************************************************/
/* Removes Tuples from Block control flow predecessors.
   Optimizes blocks with equivalent_node(). */
static void merge_blocks(ir_node *n, void *env) {
  int i;

  set_irn_link(n, NULL);

  if (get_irn_op(n) == op_Block) {
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_irn_mode(n) == mode_X) {
    /* We will soon visit a block.  Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad. */
      if (get_optimize() && get_opt_control_flow()) exchange (b, new);
      b = new;
      new = equivalent_node(b);
    }
    if (is_Bad(new)) exchange (n, new_Bad());
  }
}
/* Collects all Phi nodes in the link list of the Block.
   Marks all blocks "block_visited" if they contain a node other
   than Jmp. */
static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if (get_irn_op(n) == op_Phi) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
      set_irn_link(b, n);
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non-empty block. */
      mark_Block_block_visited(b);
    }
  }
}
/* Returns true if pred is a predecessor of block b. */
static int is_pred_of(ir_node *pred, ir_node *b) {
  int i;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;
  }
  return 0;
}
static int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow()) {
      /* Mark block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      return 1;
    }
    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
    } else {
      /* b's pred blocks and pred's pred blocks must be pairwise disjoint.
         Work preds < pos as if they were already removed. */
      for (i = 0; i < pos; i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (get_Block_block_visited(b_pred) + 1
            < get_irg_block_visited(current_ir_graph)) {
          for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
            ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
            if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
          }
        } else {
          if (is_pred_of(b_pred, pred)) dispensable = 0;
        }
      }
      for (i = pos + 1; i < get_Block_n_cfgpreds(b); i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_pred_of(b_pred, pred)) dispensable = 0;
      }
      if (!dispensable) {
        set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      } else {
        n_preds = get_Block_n_cfgpreds(pred);
      }
    }
  }

  return n_preds;
}
static void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;
  ir_node **in;

  /* Count the number of predecessors if this block is merged with
     dispensable pred blocks. */
  max_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  }
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

  /** Debug output:
  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf(" removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      printf(" removing pred %i ", i); DDMN(pred);
    } else { printf(" Nothing to do for "); DDMN(pred); }
  }
  ** end Debug output **/
  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
  while (phi) {
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    n_preds = 0;
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
        /* Do nothing */
      } else if (get_Block_block_visited(pred) + 1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
          } else {
            in[n_preds] = phi_pred;
          }
          n_preds++;
        }
        /* The phi_pred node is replaced now if it is a Phi.  Remove it so
           that it is removed from keep_alives. */
        if (get_nodes_Block(phi_pred) == pred)
          exchange (phi_pred, new_Bad());
        /* @@@ Here I need loop information!  If there is no backedge,
           there must be no use either. */
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive.  Further there
             might be other users. */
          exchange(phi_pred, phi);  /* works, but is semantically wrong! */
        }
      } else {
        in[n_preds] = get_Phi_pred(phi, i);
        n_preds++;
      }
    }
    set_irn_in(phi, n_preds, in);
    /* clear_backedges (phi); */
    phi = get_irn_link(phi);
  }
  /** Move Phi nodes from removed blocks to this one.
      This happens only when merging a loop backedge with the single
      loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) + 1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
      while (phi) {
        if (get_irn_op(phi) == op_Phi) {
          set_nodes_Block(phi, b);

          n_preds = 0;
          for (i = 0; i < k; i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                /* @@@ Here I need loop information!  The control flow
                   edge must be a backedge!  (At all four
                   in[n_preds] = phi statements.)  Still, it has worked
                   so far!! */
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          for (i = 0; i < get_Phi_n_preds(phi); i++) {
            in[n_preds] = get_Phi_pred(phi, i);
            n_preds++;
          }
          for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          set_irn_in(phi, n_preds, in);
        }
        phi = get_irn_link(phi);
      }
    }
  }
  /** Fix the block **/
  n_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      /* Do nothing */
    } else if (get_Block_block_visited(pred) + 1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
        n_preds++;
      }
      /* Remove block as it might be kept alive. */
      exchange(pred, b/*new_Bad()*/);
    } else {
      in[n_preds] = get_Block_cfgpred(b, i);
      n_preds++;
    }
  }
  set_irn_in(b, n_preds, in);
  free(in);
}
void optimize_cf(ir_graph *irg) {
  int i;
  ir_node **in;
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keepalives; optimize them if they are blocks, and add them to
     the new in-array of End if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);
  inc_irg_visited(current_ir_graph);
  for (i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      }
    }
  }
  /* DEL_ARR_F(end->in);   GL @@@ does not work! */
  end->in = in;

  current_ir_graph = rem;
}
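
/* Sketch (not from the original file): one plausible pass order combining
   the optimizations of this file for a single graph.  The function name is
   hypothetical; the ordering reflects that optimize_cf() benefits from
   previous local optimization and that dead_node_elimination() finally
   reclaims the memory of what became unreachable. */
#if 0
static void optimize_one_irg (ir_graph *irg) {
  local_optimize_graph(irg);
  optimize_cf(irg);
  dead_node_elimination(irg);
}
#endif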