 * File name:   ir/ir/irgopt.c
 * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
 * Author:      Christian Schaefer, Goetz Lindenmaier
 * Modified by: Sebastian Felis
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h"       /* For code placement */
# include "irbackedge_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
void  del_identities (pset *value_table);
void  add_identities (pset *value_table, ir_node *node);

/********************************************************************/
/* apply optimizations of iropt to all nodes.                       */
/********************************************************************/
static void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);
}
static void
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  ir_node *optimized, *old;

  for (i = 0; i < get_irn_arity(n); i++) {
    /* get_irn_n skips Id nodes, so the comparison old != optimized does not
       show all optimizations. Therefore always set the new predecessor. */
    old = get_irn_n(n, i);
    optimized = optimize_in_place_2(old);
    set_irn_n(n, i, optimized);
  }

  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
  }
}
void
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the cse. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* walk over the graph */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
}
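
/* Usage sketch (hypothetical client code, not part of this file): a typical
   driver runs the local optimizations and then the copying garbage
   collection below over every graph of the program.  get_irp_n_irgs() and
   get_irp_irg() are the usual irprog accessors. */
#if 0
static void optimize_all_irgs (void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++) {
    ir_graph *irg = get_irp_irg(i);
    local_optimize_graph(irg);    /* fold and simplify all nodes in place */
    dead_node_elimination(irg);   /* copy live nodes to a fresh obstack */
  }
}
#endif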
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection */
/********************************************************************/
/* Remember the new node in the old node by using a field all nodes have. */
static INLINE void
set_new_node (ir_node *old, ir_node *new)
{
  set_irn_link(old, new);
}

/* Get this new node, before the old node is forgotten. */
static INLINE ir_node *
get_new_node (ir_node * n)
{
  return (ir_node *)get_irn_link(n);
}
/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses. This function is called for all Phi and Block nodes
   in a Block. */
static INLINE int
compute_new_arity(ir_node *b) {
  int i, res;
  int irg_v, block_v;

  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* we computed the number of preds for this block and saved it in the
       block_visited flag */
    return block_v - irg_v;
  } else {
    /* compute the number of good predecessors */
    res = get_irn_arity(b);
    for (i = 0; i < get_irn_arity(b); i++)
      if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
    /* save it in the flag. */
    set_Block_block_visited(b, irg_v + res);
    return res;
  }
}
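
/* Worked example of the encoding above (illustrative numbers): assume the
   graph's block-visited counter irg_v is 100 and block b has the three
   predecessors (Jmp, Bad, Jmp).  The first call scans the in array, finds
   res = 2 and stores block_visited(b) = 102.  Every later call sees
   102 >= 100 and returns 102 - 100 = 2 without rescanning the in array. */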
static INLINE void new_backedge_info(ir_node *n) {
  switch(get_irn_opcode(n)) {
  case iro_Block:
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Phi:
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Filter:
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  default: ;
  }
}
/* Copies the node to the new obstack. The Ins of the new node point to
   the predecessors on the old obstack. For block/phi nodes not all
   predecessors might be copied. n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors. The arity is determined by counting
   the non-bad predecessors of the block. */
static void
copy_node (ir_node *n, void *env) {
  ir_node *nn, *block;
  int new_arity;

  if (get_irn_opcode(n) == iro_Block) {
    block = NULL;
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
  } else {
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
    } else {
      new_arity = get_irn_arity(n);
    }
  }
  nn = new_ir_node(get_irn_dbg_info(n),
                   current_ir_graph,
                   block,
                   get_irn_op(n),
                   get_irn_mode(n),
                   new_arity,
                   get_irn_in(n));
  /* Copy the attributes. These might point to additional data. If this
     was allocated on the old obstack the pointers now are dangling. This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  copy_attrs(n, nn);
  new_backedge_info(nn);
  set_new_node(n, nn);

  /*  printf("\n old node: "); DDMSG2(n);
      printf(" new node: "); DDMSG2(nn); */
}
/* Copies new predecessors of old node to new node remembered in link.
   Spare the Bad predecessors of Phi and Block nodes. */
static void
copy_preds (ir_node *n, void *env) {
  ir_node *nn, *block;
  int i, j;

  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */

  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* repair the block visited flag from above misuse. Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if
       the in array contained Bads. Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow_straightening()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy node if corresponding predecessor in block is Bad.
       The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* If the pre walker reached this Phi after the post walker visited the
       block, block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
  } else {
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
  }
  /* Now the new node is complete. We can add it to the hash table for cse.
     @@@ inlining aborts if we identify End. Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
}
/* Copies the graph recursively, compacts the keepalive of the end node. */
static void
copy_graph (void) {
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */
  int i;

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),
                   current_ir_graph,
                   NULL,
                   op_End,
                   mode_X,
                   -1,
                   NULL);
  /* Copy the attributes. Well, there might be some in the future... */
  copy_attrs(oe, ne);
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));

  /** ... and now the keep alives. **/
  /* First pick the not marked block nodes and walk them. We must pick these
     first as else we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
    }
  }

  /* Now pick the Phis. Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      }
      add_End_keepalive(ne, get_new_node(ka));
    }
  }
}
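
/* Note on the visited trick above (added explanation): irg_walk increments
   the graph-visited counter and marks every node it reaches with the new
   value.  Decrementing the counter by one before each extra walk makes the
   nodes of the previous walks appear visited again, while the keep-alive
   subgraph, which was never reached, still compares as unvisited and so
   gets copied. */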
/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   graph. */
static void
copy_graph_env (void) {
  ir_node *old_end;

  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node. Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* we use the block walk flag for removing Bads from Blocks ins. */
  inc_irg_block_visited(current_ir_graph);

  /* copy the graph */
  copy_graph();

  /* fix the fields in current_ir_graph */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));
  free_End(old_end);
  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  }
  set_irg_start      (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame      (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals    (current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args       (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  }
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  }
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
}
/* Copies all reachable nodes to a new obstack. Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   1-input Phis.
   Adds all new nodes to a new hash table for cse. Does not
   perform cse, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate() */
void
dead_node_elimination(ir_graph *irg) {
  ir_graph *rem;
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ so far we lose loops when copying */
  set_irg_loop(current_ir_graph, NULL);

  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for cse */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env();

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */
  }

  current_ir_graph = rem;
}
/* Relink bad predecessors of a block and store the old in array to the
   link field. This function is called by relink_bad_predecessors().
   The array of the link field starts with the block operand at position 0.
   If the block has bad predecessors, create a new in array without bad preds.
   Otherwise leave the in array untouched. */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
  ir_node **new_in, *irn;
  int i, new_irn_n, old_irn_arity, new_irn_arity = 0;

  /* if link field of block is NULL, look for bad predecessors otherwise
     this is already done */
  if (get_irn_op(n) == op_Block &&
      get_irn_link(n) == NULL) {

    /* save old predecessors in link field (position 0 is the block operand)*/
    set_irn_link(n, (void *)get_irn_in(n));

    /* count predecessors without bad nodes */
    old_irn_arity = get_irn_arity(n);
    for (i = 0; i < old_irn_arity; i++)
      if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;

    /* arity changing: set new predecessors without bad nodes */
    if (new_irn_arity < old_irn_arity) {
      /* get new predecessor array without Block predecessor */
      new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));

      /* set new predecessors in array */
      new_in[0] = NULL;
      new_irn_n = 1;
      for (i = 1; i < old_irn_arity; i++) {
        irn = get_irn_n(n, i);
        if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
      }
      n->in = new_in;
    } /* ir node has bad predecessors */

  } /* Block is not relinked */
}
/* Relinks Bad predecessors of Blocks and Phis; called by walker
   remove_bad_predecessors(). If n is a Block, call
   relink_bad_block_predecessors(). If n is a Phi node, also call the
   relinking function of the Phi's Block. If this block has bad predecessors,
   relink the preds of the Phi node. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of phi's block */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if count of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array
         n->in[0] remains the same block */
      new_irn_arity = 1;
      for (i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
    } /* arity changed */
  } /* n is a Phi node */
}
/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination but without
   copying the graph.
   On walking down set the link field to NULL, on walking up call
   relink_bad_predecessors() (This function stores the old in array
   to the link field and sets a new in array if the arity of the
   predecessors changes). */
void remove_bad_predecessors(ir_graph *irg) {
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/
/* Copy node for inlining. Copies the node by calling copy_node and
   then updates the entity if it's a local one. env must be a pointer
   to the frame type of the procedure. The new entities must be in
   the link field of the entities. */
static void
copy_node_inline (ir_node *n, void *env) {
  ir_node *new;
  type *frame_tp = (type *)env;

  copy_node(n, NULL);
  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
    }
  }
}
void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *pre_call;
  ir_node *post_call, *post_bl;
  ir_node *in[5];
  ir_node *end, *end_bl;
  ir_node **res_pred;
  ir_node **cf_pred;
  ir_node *ret, *phi;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;
  type *called_frame;

  if (!get_optimize() || !get_opt_inline()) return;
  /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
  rem_opt = get_optimize();
  set_optimize(0);

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* -- Check preconditions -- */
  assert(get_irn_op(call) == op_Call);
  /* @@@ does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
     get_Call_type(call)));
  */
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) {
    set_optimize(rem_opt);
    return;
  }
  /* -- Pre_call collects the control, memory, frame and globals state plus
     the arguments of the procedure and later replaces the Start node of the
     called graph. Post_call is the old Call node and collects the results
     of the called graph. Both will end up being a tuple. -- */
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[0] = new_Jmp();
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);
  post_call = call;

  /* -- The new block gets the ins of the old block, pre_call and all its
     predecessors and all Phi nodes. -- */
  part_block(pre_call);
  /* -- Prepare state for dead node elimination -- */
  /* Visited flags in calling irg must be >= flag in called irg.
     Else walker and arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as new Start node in link field of the start node of
     the called graph and pre_call's block as new block for the start block
     of the called graph.
     Further mark these nodes so that they are not visited by the
     copying. */
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));
  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);

  /* -- Replicate local entities of the called_graph -- */
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);
  }

  /* visited is greater than that of the called graph. With this trick
     visited will remain unchanged so that an outer walker, e.g., searching
     the call nodes to inline, calling this inline will not visit the
     inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
  /* -- Performing dead node elimination inlines the graph -- */
  /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
     nodes. */
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);
  /* -- Merge the end of the inlined procedure with the call site -- */
  /* We will turn the old Call node into a Tuple with the following
     predecessors:
     -1: Block of Tuple.
      0: Phi of all Memories of Return statements.
      1: Jmp from new Block that merges the control flow from all exception
         predecessors of the old end block.
      2: Tuple of all results (the return values).
      3: Phi of Exception memories.
  */
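
  /* Added sketch of what this layout means for users of the old Call
     (illustrative; Proj numbers follow the predecessor list above): after
     turn_into_tuple below, a memory user Proj(call, 0) resolves through the
     Tuple to the memory Phi, and a result user Proj(Proj(call, 2), 0)
     resolves to the Phi merging the first result of all Returns.
     skip_Tuple() performs exactly this resolution. */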
  /* -- Precompute some values -- */
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end    = get_new_node(get_irg_end(called_graph));
  arity  = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res  = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */

  /* -- archive keepalives -- */
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die, but the in array is not on the obstack ... */
  /* -- Replace Return nodes by Jump nodes. -- */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
      n_ret++;
    }
  }
  set_irn_in(post_bl, n_ret, cf_pred);
  /* -- Build a Tuple for all results of the method.
     Add Phi nodes if there was more than one Return. The old Call node is
     turned into a tuple. -- */
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
      n_ret++;
    }
  }
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  }
  /* Now the real results */
  if (n_res > 0) {
    for (j = 0; j < n_res; j++) {
      n_ret = 0;
      for (i = 0; i < arity; i++) {
        ret = get_irn_n(end_bl, i);
        if (get_irn_op(ret) == op_Return) {
          cf_pred[n_ret] = get_Return_res(ret, j);
          n_ret++;
        }
      }
      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
      res_pred[j] = phi;
      /* Conserve Phi-list for further inlinings -- but might be optimized */
      if (get_nodes_Block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
      }
    }
    set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
  } else {
    set_Tuple_pred(call, 2, new_Bad());
  }
  /* Finally the exception control flow. We need to add a Phi node to
     collect the memory containing the exception objects. Further we need
     to add another block to get a correct representation of this Phi. To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  n_exc = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
      n_exc++;
    }
  }
  if (n_exc > 0) {
    new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
    set_Tuple_pred(call, 1, new_Jmp());
    /* The Phi for the memories with the exception objects */
    n_exc = 0;
    for (i = 0; i < arity; i++) {
      ret = skip_Proj(get_irn_n(end_bl, i));
      if (get_irn_op(ret) == op_Call) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
        n_exc++;
      } else if (is_fragile_op(ret)) {
        /* We rely that all cfops have the memory output at the same position. */
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
        n_exc++;
      } else if (get_irn_op(ret) == op_Raise) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
        n_exc++;
      }
    }
    set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
  } else {
    set_Tuple_pred(call, 1, new_Bad());
    set_Tuple_pred(call, 3, new_Bad());
  }
  free(res_pred);
  free(cf_pred);

  /* -- If the exception control flow from the inlined Call directly
     branched to the end block we now have the following control
     flow predecessor pattern: ProjX -> Tuple -> Jmp. We must
     remove the Jmp along with its empty block and add Jmp's
     predecessors as predecessors of this end block. No problem if
     there is no exception, because then a Bad branches to End,
     which is eliminated. -- */
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if (get_irn_op(cf_op) == op_Tuple) {
        cf_op = get_Tuple_pred(cf_op, 1);
        assert(get_irn_op(cf_op) == op_Jmp);
        break;
      }
    }
  }
  /* repair the end block if we found such a predecessor */
  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j-i);
    for (j = j; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
    set_irn_in(end_bl, arity, cf_pred);
    free(cf_pred);
  }
  /* -- Turn cse back on. -- */
  set_optimize(rem_opt);
}
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/

static int pos;

/* It makes no sense to inline too many calls in one procedure. Anyways,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024
static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_node *addr;
  tarval *tv;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    if (tarval_to_entity(tv)) {
      called_irg = get_entity_irg(tarval_to_entity(tv));
      if (called_irg && pos < MAX_INLINE) {
        /* The Call node calls a locally defined method. Remember to inline. */
        calls[pos] = call;
        pos++;
      }
    }
  }
}
/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   method.
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
   size are inlined. */
void inline_small_irgs(ir_graph *irg, int size) {
  int i;
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We cannot inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining visited of the callee equals visited of
     the caller. With the next inlining both are increased.) */
  pos = 0;
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tarval *tv;
      ir_graph *callee;
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tarval_to_entity(tv));
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);
      }
    }
  }

  current_ir_graph = rem;
}
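
/* Hypothetical usage (client code, not part of this file): inline every
   callee whose graph occupies less than roughly 300 obstack bytes, for all
   graphs of the program. */
#if 0
void inline_program (void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    inline_small_irgs(get_irp_irg(i), 300);
}
#endif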
/********************************************************************/
/* Code Placement. Pins all floating nodes to a block where they   */
/* will be executed only if needed.                                 */
/********************************************************************/

static pdeq *worklist;   /* worklist of ir_node*s */
/* Find the earliest correct block for N. --- Place N into the
   same Block as its dominance-deepest Input. */
static void
place_floats_early (ir_node *n)
{
  int i, start;

  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));
  mark_irn_visited(n);

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    int depth = 0;
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const) ||
        (get_irn_op(n) == op_SymConst) ||
        (is_Bad(n)) ||
        (get_irn_op(n) == op_Unknown)) {
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);
      depth = 1;
    }

    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      ir_node *dep_block;
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      }
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         been finished on them. We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        b = dep_block;
        depth = get_Block_dom_depth(dep_block);
      }
      /* Avoid that the node is placed in the Start block */
      if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
        b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
        assert(b != get_irg_start_block(current_ir_graph));
        depth = 2;
      }
    }
    set_nodes_Block(n, b);
  }

  /* Add predecessors of non floating nodes on worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
    }
  }
}
/* Floating nodes form subgraphs that begin at nodes as Const, Load,
   Start, Call and end at pinned nodes as Store, Call. Place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
static INLINE void place_early (void) {
  assert(worklist);
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);
  }

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
}
/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
static ir_node *
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
{
  ir_node *block = NULL;

  /* Compute the latest block into which we can place a node so that it is
     before consumer. */
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    int i;
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
      }
    }
  } else {
    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  }

  /* Compute the deepest common ancestor of block and dca. */
  assert(block);
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
  while (block != dca)
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }

  return dca;
}
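
/* Worked example (added; illustrative dominator tree with dom depths in
   parentheses): Start(0) -> A(1) -> B(2) -> C(3) and A(1) -> D(2).
   For dca = C and block = D, the first loop lifts C to its idom B so both
   have depth 2; then both walk up in lockstep (B -> A, D -> A) until they
   meet in A, the deepest block dominating both uses. */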
static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));
}
/* Move n to a block with less loop depth than its current block. The
   new block must be dominated by early. */
static void
move_out_of_loops (ir_node *n, ir_node *early)
{
  ir_node *best, *dca;

  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  best = dca;
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
      best = dca;
    }
  }
  if (best != get_nodes_Block(n)) {
    /* debug output
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));
    */
    set_nodes_Block(n, best);
  }
}
/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth. This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
static void
place_floats_late (ir_node *n)
{
  int i;
  ir_node *early;

  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (!is_cfop(n)) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this node to move it
       out of loop no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node. We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);
    }

    /* We have to determine the final block of this node... except for
       constants. */
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;  /* deepest common ancestor in the
                               dominator tree of all nodes'
                               blocks depending on us; our final
                               placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      }
      set_nodes_Block(n, dca);

      move_out_of_loops (n, early);
    }
  }

  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes on list. (Those of floating
     nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
    }
  }
}
static INLINE void place_late(void) {
  assert(worklist);
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
  }
}
void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  if (!(get_optimize() && get_opt_global_cse())) return;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)
    compute_doms(irg);

  construct_backedges(irg);

  /* Place all floating nodes as early as possible. This guarantees
     a legal code placement. */
  worklist = new_pdeq ();
  place_early();

  /* place_early invalidates the outs, place_late needs them. */
  compute_outs(irg);
  /* Now move the nodes down in the dominator tree. This reduces the
     unnecessary executions of the node. */
  place_late();

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
}
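
/* Sketch of the intended use (hypothetical driver; set_opt_global_cse() is
   the flag accessor checked above): global CSE lets nodes float out of
   their blocks, place_code pins them to their final blocks afterwards. */
#if 0
void gcse_and_place (ir_graph *irg) {
  set_opt_global_cse(1);      /* equal nodes are merged regardless of block */
  local_optimize_graph(irg);  /* performs the global cse */
  place_code(irg);            /* computes a new, legal placement */
}
#endif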
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks. A block  */
/* is empty if it contains only a Jmp node.                         */
/* Blocks can only be removed if they are not needed for the        */
/* semantics of Phi nodes.                                          */
/********************************************************************/
/* Removes Tuples from Block control flow predecessors.
   Optimizes blocks with equivalent_node().
   Replaces n by Bad if n is unreachable control flow. */
static void merge_blocks(ir_node *n, void *env) {
  int i;
  set_irn_link(n, NULL);

  if (get_irn_op(n) == op_Block) {
    /* Remove Tuples */
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
         A different order of optimizations might cause problems. */
      if (get_opt_normalize())
        set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
    /* We will soon visit a block. Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad, so we
         promote it directly below. */
      assert(((b == new) || get_opt_control_flow_straightening() || get_opt_control_flow_weak_simplification()) &&
             ("strange flag setting"));
      exchange (b, new);
      b = new;
      new = equivalent_node(b);
    }
    /* GL @@@ added get_opt_normalize, 5.5.2003 */
    if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
  }
}
/* Collects all Phi nodes in link list of Block.
   Marks all blocks "block_visited" if they contain a node other
   than Jmp. */
static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if ((get_irn_op(n) == op_Phi)) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
      set_irn_link(b, n);
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non empty block. */
      mark_Block_block_visited(b);
    }
  }
}
/* Returns true if pred is pred of block */
static int is_pred_of(ir_node *pred, ir_node *b) {
  int i;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;
  }
  return 0;
}
static int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
      /* Mark block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      return 1;
    }
    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
    } else {
      /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
         Work preds < pos as if they were already removed. */
      for (i = 0; i < pos; i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (get_Block_block_visited(b_pred) + 1
            < get_irg_block_visited(current_ir_graph)) {
          for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
            ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
            if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
          }
        } else {
          if (is_pred_of(b_pred, pred)) dispensable = 0;
        }
      }
      for (i = pos + 1; i < get_Block_n_cfgpreds(b); i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_pred_of(b_pred, pred)) dispensable = 0;
      }
      if (!dispensable) {
        set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
        n_preds = 1;
      } else {
        n_preds = get_Block_n_cfgpreds(pred);
      }
    }
  }

  return n_preds;
}
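
/* Worked example for the counting (added; illustrative CFG): let b have the
   two control flow predecessors A (non-empty) and E, where E is an empty
   block with two predecessors of its own and no conflicting Phi paths.
   test_whether_dispensable returns 1 for A and 2 for E, so optimize_blocks
   below computes max_preds = 3: merging E into b replaces one in-edge of b
   by E's two in-edges. */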
static void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;
  ir_node **in;

  /* Count the number of predecessors if this block is merged with pred blocks
     that are dispensable. */
  max_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  }
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

/** Debug output
  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf("  removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      printf("  removing pred %i ", i); DDMN(pred);
    } else { printf("  Nothing to do for "); DDMN(pred); }
  }
  * end Debug output **/
  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
  while (phi) {
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    n_preds = 0;
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
        /* Do nothing */
      } else if (get_Block_block_visited(pred) + 1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
          } else {
            in[n_preds] = phi_pred;
          }
          n_preds++;
        }
        /* The Phi_pred node is replaced now if it is a Phi.
           In loops the removed Phi node can apparently still be used
           legally, so the Phi node must be replaced by the new one.
           Further the old Phi node must be removed (by exchanging it or by
           a Bad) so that it can disappear from the keep_alives.
           So, if no loop is present, one should rather do the exchange
           with a new_Bad. */
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive. Further there
             might be other users. */
          exchange(phi_pred, phi); /* works, but is semantically wrong! Why?? */
        }
      } else {
        in[n_preds] = get_Phi_pred(phi, i);
        n_preds++;
      }
    }
    /* Fix the node */
    set_irn_in(phi, n_preds, in);

    phi = get_irn_link(phi);
  }
  /** Move Phi nodes from removed blocks to this one.
      This happens only if merge between loop backedge and single loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) + 1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
      while (phi) {
        if (get_irn_op(phi) == op_Phi) {
          set_nodes_Block(phi, b);

          n_preds = 0;
          for (i = 0; i < k; i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                /* @@@ Here I need loop information!!! The control flow edge
                   must be a backedge! (At all four in[n_preds] = phi
                   assignments.) Still, it has worked so far!! */
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          for (i = 0; i < get_Phi_n_preds(phi); i++) {
            in[n_preds] = get_Phi_pred(phi, i);
            n_preds++;
          }
          for (i = k + 1; i < get_Block_n_cfgpreds(b); i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          set_irn_in(phi, n_preds, in);
        }
        phi = get_irn_link(phi);
      }
    }
  }
  /** Fix the block **/
  n_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      /* Do nothing */
    } else if (get_Block_block_visited(pred) + 1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
        n_preds++;
      }
      /* Remove block as it might be kept alive. */
      exchange(pred, b/*new_Bad()*/);
    } else {
      in[n_preds] = get_Block_cfgpred(b, i);
      n_preds++;
    }
  }
  set_irn_in(b, n_preds, in);

  free(in);
}
void optimize_cf(ir_graph *irg) {
  int i;
  ir_node **in;
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keep alives, optimize them if block, add to new in-array
     for end if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);
  inc_irg_visited(current_ir_graph);
  for (i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      }
    }
  }
  /* DEL_ARR_F(end->in);   GL @@@ does not work! */
  end->in = in;

  current_ir_graph = rem;
}
/**
 * Called by walker of remove_critical_cf_edges.
 *
 * Places an empty block on an edge between a block with multiple
 * successors and a block with multiple predecessors.
 *
 * @param n   IR node
 * @param env Environment of walker. This field is unused and has
 *            the value NULL.
 */
static void walk_critical_cf_edges(ir_node *n, void *env) {
  int arity, i;
  ir_node *pre, *block, **in, *jmp;

  /* Block has multiple predecessors */
  if ((op_Block == get_irn_op(n)) &&
      (get_irn_arity(n) > 1)) {
    arity = get_irn_arity(n);

    for (i = 0; i < arity; i++) {
      pre = get_irn_n(n, i);
      /* Predecessor has multiple successors. Insert new flow edge */
      if ((NULL != pre) && (op_Proj == get_irn_op(pre)) &&
          op_Raise != get_irn_op(skip_Proj(pre))) {

        /* set predecessor array for new block */
        in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
        /* set predecessor of new block */
        in[0] = pre;
        block = new_Block(1, in);
        /* insert new jmp node to new block */
        switch_block(block);
        jmp = new_Jmp();
        switch_block(n);
        /* set successor of new block */
        set_irn_n(n, i, jmp);

      } /* predecessor has multiple successors */
    } /* for all predecessors */
  } /* n is a block */
}
void remove_critical_cf_edges(ir_graph *irg) {
  if (get_opt_critical_edges())
    irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL);
}
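
/* Illustration (added; not from the original): if block A ends in a Cond
   with successors B and C, and C also has a second predecessor D, the edge
   A -> C is critical: A has several successors and C several predecessors.
   After this pass the path reads A -> N -> C, where N is a fresh block
   containing only a Jmp, so code that must execute "on the edge" (e.g. by
   a later phase moving code onto it) has a block of its own. */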