 * File name:   ir/ir/irgopt.c
 * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
 * Author:      Christian Schaefer, Goetz Lindenmaier
 * Modified by: Sebastian Felis
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h"       /* for code placement */
# include "irbackedge_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
void  del_identities (pset *value_table);
void  add_identities (pset *value_table, ir_node *node);
/********************************************************************/
/* apply optimizations of iropt to all nodes.                       */
/********************************************************************/
static void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);
}

static void
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  ir_node *optimized, *old;

  for (i = 0; i < get_irn_arity(n); i++) {
    /* get_irn_n skips Id nodes, so the comparison old != optimized does not
       show all optimizations.  Therefore always set the new predecessor. */
    old = get_irn_n(n, i);
    optimized = optimize_in_place_2(old);
    set_irn_n(n, i, optimized);
  }

  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
  }
}
void
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the cse. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* walk over the graph */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
}
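
/* Usage sketch (illustration, not part of this file): a driver would
   typically run the local optimizations on every graph of the program.
   get_irp_n_irgs()/get_irp_irg() are assumed to be the usual program
   accessors:

     int i;
     for (i = 0; i < get_irp_n_irgs(); i++)
       local_optimize_graph(get_irp_irg(i));
*/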
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection  */
/********************************************************************/
/* Remember the new node in the old node by using a field all nodes have. */
static INLINE void
set_new_node (ir_node *old, ir_node *new)
{
  old->link = new;
}

/* Get this new node, before the old node is forgotten. */
static INLINE ir_node *
get_new_node (ir_node * n)
{
  return n->link;
}
/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses.  This function is called for all Phi and Block nodes
   in a Block. */
static INLINE int
compute_new_arity(ir_node *b) {
  int i, res;
  int irg_v, block_v;

  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* we computed the number of preds for this block and saved it in the
       block_v flag */
    return block_v - irg_v;
  } else {
    /* compute the number of good predecessors */
    res = get_irn_arity(b);
    for (i = 0; i < get_irn_arity(b); i++)
      if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
    /* save it in the flag. */
    set_Block_block_visited(b, irg_v + res);
    return res;
  }
}
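
/* Worked example (illustration only): assume
   get_irg_block_visited(current_ir_graph) == 100 and a block b with the
   predecessors (Jmp, Bad, Jmp).  The first call counts res == 2 good
   predecessors and stores 100 + 2 in b's block_visited flag; every later
   call for b, e.g. one per Phi in b, just returns 102 - 100 == 2 without
   touching the in-array again. */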
static INLINE void new_backedge_info(ir_node *n) {
  switch(get_irn_opcode(n)) {
  case iro_Block:
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Phi:
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Filter:
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  default: ;
  }
}
/* Copies the node to the new obstack. The Ins of the new node point to
   the predecessors on the old obstack.  For block/phi nodes not all
   predecessors might be copied.  n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors.  The arity is determined by counting
   the non-bad predecessors of the block. */
static void
copy_node (ir_node *n, void *env) {
  ir_node *nn, *block;
  int new_arity;

  if (get_irn_opcode(n) == iro_Block) {
    block = NULL;
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
  } else {
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
    } else {
      new_arity = get_irn_arity(n);
    }
  }
  nn = new_ir_node(get_irn_dbg_info(n),
                   current_ir_graph,
                   block,
                   get_irn_op(n),
                   get_irn_mode(n),
                   new_arity,
                   get_irn_in(n));
  /* Copy the attributes.  These might point to additional data.  If this
     was allocated on the old obstack the pointers now are dangling.  This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  copy_attrs(n, nn);
  new_backedge_info(nn);
  set_new_node(n, nn);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn); */
}
/* Copies new predecessors of old node to new node remembered in link.
   Spare the Bad predecessors of Phi and Block nodes. */
static void
copy_preds (ir_node *n, void *env) {
  ir_node *nn, *block;
  int i, j;

  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */

  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* repair the block visited flag from above misuse. Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if
       in array contained Bads.  Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow_straightening()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy node if corresponding predecessor in block is Bad.
       The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* If the pre walker reached this Phi after the post walker visited the
       block, block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
  } else {
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
  }
  /* Now the new node is complete.  We can add it to the hash table for cse.
     @@@ inlining aborts if we identify End. Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
}
/* Copies the graph recursively, compacts the keepalive of the end node. */
static void
copy_graph (void) {
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */
  int i;

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),
                   current_ir_graph,
                   NULL,
                   op_End,
                   mode_X,
                   -1,
                   NULL);
  /* Copy the attributes.  Well, there might be some in the future... */
  copy_attrs(oe, ne);
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));

  /** ... and now the keep alives. **/
  /* First pick the not marked block nodes and walk them.  We must pick these
     first as else we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
    }
  }

  /* Now pick the Phis.  Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      }
      add_End_keepalive(ne, get_new_node(ka));
    }
  }
}
/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   graph. */
static void
copy_graph_env (void) {
  ir_node *old_end;

  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node.  Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* we use the block walk flag for removing Bads from Blocks ins. */
  inc_irg_block_visited(current_ir_graph);

  /* copy the graph */
  copy_graph();

  /* fix the fields in current_ir_graph */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));
  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  }
  set_irg_start      (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame      (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals    (current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args       (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  }
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  }
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
}
/* Copies all reachable nodes to a new obstack.  Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   1-input Phis.
   Adds all new nodes to a new hash table for cse.  Does not
   perform cse, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate() */
void
dead_node_elimination(ir_graph *irg) {
  ir_graph *rem;
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ so far we lose loops when copying */
  set_irg_loop(current_ir_graph, NULL);

  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for cse */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env();

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */
  }

  current_ir_graph = rem;
}
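
/* Usage sketch (illustration only): dead node elimination is typically
   run after a round of local optimizations, to reclaim the obstack
   memory of nodes that became unreachable:

     local_optimize_graph(irg);
     dead_node_elimination(irg);
*/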
/* Relink bad predecessors of a block and store the old in array in the
   link field.  This function is called by relink_bad_predecessors().
   The array in the link field starts with the block operand at position 0.
   If the block has bad predecessors, create a new in array without bad preds.
   Otherwise leave the in array untouched. */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
  ir_node **new_in, *irn;
  int i, new_irn_n, old_irn_arity, new_irn_arity = 0;

  /* if link field of block is NULL, look for bad predecessors otherwise
     this is already done */
  if (get_irn_op(n) == op_Block &&
      get_irn_link(n) == NULL) {

    /* save old predecessors in link field (position 0 is the block operand)*/
    set_irn_link(n, (void *)get_irn_in(n));

    /* count predecessors without bad nodes */
    old_irn_arity = get_irn_arity(n);
    for (i = 0; i < old_irn_arity; i++)
      if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;

    /* arity changing: set new predecessors without bad nodes */
    if (new_irn_arity < old_irn_arity) {
      /* get new predecessor array without Block predecessor */
      new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));

      /* set new predecessors in array */
      new_in[0] = NULL;
      new_irn_n = 1;
      for (i = 1; i < old_irn_arity; i++) {
        irn = get_irn_n(n, i);
        if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
      }
      n->in = new_in;
    } /* ir node has bad predecessors */
  } /* Block is not relinked */
}
/* Relinks Bad predecessors of Blocks and Phis; called by the walker of
   remove_bad_predecessors().  If n is a Block, call
   relink_bad_block_predecessors().  If n is a Phi node, also call the
   relinking function of the Phi's Block.  If this block has bad
   predecessors, relink the preds of the Phi node. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of the Phi's block */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if count of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array
         n->in[0] remains the same block */
      new_irn_arity = 1;
      for(i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
    }
  } /* n is a Phi node */
}
/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination but without
   copying the graph.
   On walking up set the link field to NULL, on walking down call
   relink_bad_predecessors() (This function stores the old in array
   in the link field and sets a new in array if the arity of the
   predecessors changes). */
void remove_bad_predecessors(ir_graph *irg) {
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/
/* Copy node for inlining.  Copies the node by calling copy_node and
   then updates the entity if it's a local one.  env must be a pointer
   to the frame type of the procedure. The new entities must be in
   the link field of the entities. */
static INLINE void
copy_node_inline (ir_node *n, void *env) {
  ir_node *new;
  type *frame_tp = (type *)env;

  copy_node(n, NULL);
  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
    }
  }
}
void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *pre_call;
  ir_node *post_call, *post_bl;
  ir_node *in[5];
  ir_node *end, *end_bl;
  ir_node **res_pred;
  ir_node **cf_pred;
  ir_node *ret, *phi;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;
  type *called_frame;

  if (!get_optimize() || !get_opt_inline()) return;
  /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
  rem_opt = get_optimize();
  set_optimize(0);

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* -- Check preconditions -- */
  assert(get_irn_op(call) == op_Call);
  /* @@@ does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
     get_Call_type(call)));
  */
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) {
    set_optimize(rem_opt);
    return;
  }
  /* -- Pre_call collects the entries to
     the procedure and later replaces the Start node of the called graph.
     Post_call is the old Call node and collects the results of the called
     graph.  Both will end up being a tuple. -- */
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[0] = new_Jmp();
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);
  post_call = call;

  /* -- The new block gets the ins of the old block, pre_call and all its
     predecessors and all Phi nodes. -- */
  part_block(pre_call);
  /* -- Prepare state for dead node elimination -- */
  /* Visited flags in calling irg must be >= flag in called irg.
     Else walker and arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as new Start node in link field of the start node of
     the called graph and pre_call's block as new block for the start block
     of the called graph.
     Further mark these nodes so that they are not visited by the
     copy. */
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));

  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);
  /* -- Replicate local entities of the called_graph -- */
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);
  }

  /* visited is > than that of called graph.  With this trick visited will
     remain unchanged so that an outer walker, e.g., searching the call nodes
     to inline, calling this inline will not visit the inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);

  /* -- Performing dead node elimination inlines the graph -- */
  /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
     entities. */
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);
  /* -- Merge the end of the inlined procedure with the call site -- */
  /* We will turn the old Call node into a Tuple with the following
     predecessors:
     0: Phi of all Memories of Return statements.
     1: Jmp from new Block that merges the control flow from all exception
        predecessors of the old end block.
     2: Tuple of all arguments.
     3: Phi of Exception memories.
  -- */
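
  /* Illustration (not part of the transformation): a consumer such as
     Proj(call, 2) keeps working after turn_into_tuple below, since the
     Proj then resolves through the Tuple to the result tuple built
     here; likewise Proj(call, 0) yields the memory Phi. */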
  /* -- Precompute some values -- */
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end = get_new_node(get_irg_end(called_graph));
  arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
  /* -- archive keepalives -- */
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die, but the in array is not on the obstack ... */

  /* -- Replace Return nodes by Jump nodes. -- */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
      n_ret++;
    }
  }
  set_irn_in(post_bl, n_ret, cf_pred);

  /* -- The old Call node is now turned into a tuple. -- */
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
      n_ret++;
    }
  }
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  }
  /* Now the real results */
  if (n_res > 0) {
    for (j = 0; j < n_res; j++) {
      n_ret = 0;
      for (i = 0; i < arity; i++) {
        ret = get_irn_n(end_bl, i);
        if (get_irn_op(ret) == op_Return) {
          cf_pred[n_ret] = get_Return_res(ret, j);
          n_ret++;
        }
      }
      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
      res_pred[j] = phi;
      /* Conserve Phi-list for further inlinings -- but might be optimized */
      if (get_nodes_Block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
      }
    }
    set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
  } else {
    set_Tuple_pred(call, 2, new_Bad());
  }
  /* Finally the exception control flow.  We need to add a Phi node to
     collect the memory containing the exception objects.  Further we need
     to add another block to get a correct representation of this Phi.  To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  n_exc = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
      n_exc++;
    }
  }
  if (n_exc > 0) {
    new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
    set_Tuple_pred(call, 1, new_Jmp());
    /* The Phi for the memories with the exception objects */
    n_exc = 0;
    for (i = 0; i < arity; i++) {
      ret = skip_Proj(get_irn_n(end_bl, i));
      if (get_irn_op(ret) == op_Call) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
        n_exc++;
      } else if (is_fragile_op(ret)) {
        /* We rely on all cfops having the memory output at the same position. */
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
        n_exc++;
      } else if (get_irn_op(ret) == op_Raise) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
        n_exc++;
      }
    }
    set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
  } else {
    set_Tuple_pred(call, 1, new_Bad());
    set_Tuple_pred(call, 3, new_Bad());
  }

  free(res_pred);
  free(cf_pred);
  /* -- If the exception control flow from the inlined Call directly
     branched to the end block we now have the following control
     flow predecessor pattern: ProjX -> Tuple -> Jmp.  We must
     remove the Jmp along with its empty block and add Jmp's
     predecessors as predecessors of this end block.  No problem if
     there is no exception, because then branches Bad to End which
     is fine. -- */
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if (get_irn_op(cf_op) == op_Tuple) {
        cf_op = get_Tuple_pred(cf_op, 1);
        assert(get_irn_op(cf_op) == op_Jmp);
        break;
      }
    }
  }
  /* repair the end block if we found such a predecessor */
  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j-i);
    for (; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
    set_irn_in(end_bl, arity, cf_pred);
    free(cf_pred);
  }

  /* -- Turn cse back on. -- */
  set_optimize(rem_opt);
}
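
/* Usage sketch (illustration only): inline one specific call site if the
   body of the callee is available.  Here get_call_callee() is a
   hypothetical helper standing in for however the caller resolves the
   called graph; see collect_calls() below for the tarval based
   resolution this file actually uses.

     ir_graph *callee = get_call_callee(call);
     if (callee) inline_method(call, callee);
*/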
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/
/* It makes no sense to inline too many calls in one procedure. Anyway,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024

static int pos;

static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_node *addr;
  tarval *tv;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    if (tarval_to_entity(tv)) {
      called_irg = get_entity_irg(tarval_to_entity(tv));
      if (called_irg && pos < MAX_INLINE) {
        /* The Call node calls a locally defined method.  Remember to inline. */
        calls[pos] = call;
        pos++;
      }
    }
  }
}
/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   method.
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
   size are inlined. */
void inline_small_irgs(ir_graph *irg, int size) {
  int i;
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We can not inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining visited of the callee equals visited of
     the caller.  With the next inlining both are increased.) */
  pos = 0;
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tarval *tv;
      ir_graph *callee;
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tarval_to_entity(tv));
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);
      }
    }
  }

  current_ir_graph = rem;
}
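
/* Usage sketch (illustration only): inline all callees whose graph
   obstack occupies less than about one kilobyte:

     inline_small_irgs(irg, 1024);
*/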
/********************************************************************/
/* Code Placement.  Pins all floating nodes to a block where they   */
/* will be executed only if needed.                                 */
/********************************************************************/

static pdeq *worklist;        /* worklist of ir_node*s */
/* Find the earliest correct block for N.  --- Place N into the
   same Block as its dominance-deepest Input.  */
static INLINE void
place_floats_early (ir_node *n)
{
  int i, start;

  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));
  mark_irn_visited(n);

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    int depth = 0;
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const) ||
        (get_irn_op(n) == op_SymConst) ||
        (is_Bad(n))) {
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);
      depth = 1;
    }

    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      ir_node *dep_block;
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      }
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         been finished on them.  We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        b = dep_block;
        depth = get_Block_dom_depth(dep_block);
      }
      /* Avoid that the node is placed in the Start block */
      if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
        b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
        assert(b != get_irg_start_block(current_ir_graph));
        depth = 2;
      }
    }
    set_nodes_Block(n, b);
  }

  /* Add predecessors of non floating nodes on worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
    }
  }
}
/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
   Start, Call and that end at pinned nodes such as Store, Call.  Place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
static INLINE void place_early (void) {
  assert(worklist);
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);
  }

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
}
/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
static ir_node *
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
{
  ir_node *block = NULL;

  /* Compute the latest block into which we can place a node so that it is
     before consumer. */
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    int i;
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
      }
    }
  } else {
    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  }

  /* Compute the deepest common ancestor of block and dca. */
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
  while (block != dca)
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }

  return dca;
}
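
/* Worked example (illustration only): let dom depth(block) == 5 and
   dom depth(dca) == 3.  The first while loop lifts block to depth 3;
   the last loop then walks both pointers up in lock-step until they
   coincide in the deepest block that dominates both arguments. */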
static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));
}
/* Move n to a block with less loop depth than its current block.  The
   new block must be dominated by early. */
static void
move_out_of_loops (ir_node *n, ir_node *early)
{
  ir_node *best, *dca;

  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  best = dca;
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
      best = dca;
    }
  }
  if (best != get_nodes_Block(n)) {
    /* debug output
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));
    */
    set_nodes_Block(n, best);
  }
}
/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth.  This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
static INLINE void
place_floats_late (ir_node *n)
{
  int i;
  ir_node *early;

  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this block to move it
       out of loop no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node.  We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);
    }

    /* We have to determine the final block of this node... except for
       constants. */
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;      /* deepest common ancestor in the
                                   dominator tree of all nodes'
                                   blocks depending on us; our final
                                   placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      }
      set_nodes_Block(n, dca);

      move_out_of_loops (n, early);
    }
  }

  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes on list. (Those of floating
     nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
    }
  }
}
static INLINE void place_late(void) {
  assert(worklist);
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
  }
}
void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  if (!(get_optimize() && get_opt_global_cse())) return;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)
    compute_doms(irg);

  construct_backedges(irg);

  /* Place all floating nodes as early as possible. This guarantees
     a legal code placement. */
  worklist = new_pdeq ();
  place_early();

  /* place_early invalidates the outs, place_late needs them. */
  compute_outs(irg);
  /* Now move the nodes down in the dominator tree. This reduces the
     unnecessary executions of the node. */
  place_late();

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
}
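
/* Usage sketch (illustration only): code placement is only meaningful
   with global CSE enabled.  With global CSE local_optimize_graph()
   leaves the graph's data nodes floating; place_code() then pins them
   again:

     set_opt_global_cse(1);
     local_optimize_graph(irg);
     place_code(irg);
*/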
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks.  A block */
/* is empty if it contains only a Jmp node.                         */
/* Blocks can only be removed if they are not needed for the        */
/* semantics of Phi nodes.                                          */
/********************************************************************/
/* Removes Tuples from Block control flow predecessors.
   Optimizes blocks with equivalent_node().
   Replaces n by Bad if n is unreachable control flow. */
static void merge_blocks(ir_node *n, void *env) {
  int i;
  set_irn_link(n, NULL);

  if (get_irn_op(n) == op_Block) {
    /* Remove Tuples */
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
         A different order of optimizations might cause problems. */
      if (get_opt_normalize())
        set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
    /* We will soon visit a block.  Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad, so we
         promote it directly below. */
      assert(((b == new) || get_opt_control_flow_straightening() || get_opt_control_flow_weak_simplification()) &&
             ("strange flag setting"));
      exchange (b, new);
      b = new;
      new = equivalent_node(b);
    }
    /* GL @@@ get_opt_normalize added, 5.5.2003 */
    if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
  }
}
/* Collects all Phi nodes in link list of Block.
   Marks all blocks "block_visited" if they contain a node other
   than Jmp. */
static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if ((get_irn_op(n) == op_Phi)) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
      set_irn_link(b, n);
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non empty block. */
      mark_Block_block_visited(b);
    }
  }
}
/* Returns true if pred is predecessor of block. */
static int is_pred_of(ir_node *pred, ir_node *b) {
  int i;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;
  }
  return 0;
}
static int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
      /* Mark block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      return 1;
    }
    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
    } else {
      /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
         Work preds < pos as if they were already removed. */
      for (i = 0; i < pos; i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (get_Block_block_visited(b_pred) + 1
            < get_irg_block_visited(current_ir_graph)) {
          for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
            ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
            if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
          }
        } else {
          if (is_pred_of(b_pred, pred)) dispensable = 0;
        }
      }
      for (i = pos + 1; i < get_Block_n_cfgpreds(b); i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_pred_of(b_pred, pred)) dispensable = 0;
      }
      if (!dispensable) {
        set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
        n_preds = 1;
      } else {
        n_preds = get_Block_n_cfgpreds(pred);
      }
    }
  }

  return n_preds;
}
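
/* Example (illustration only): let b have two control flow predecessors,
   a Jmp out of an empty block E with three predecessors and a Jmp out of
   a non-empty block O.  Then test_whether_dispensable(b, 0) returns 3
   and test_whether_dispensable(b, 1) returns 1, so optimize_blocks()
   below allocates an in-array for four predecessors. */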
static void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;
  ir_node **in;

  /* Count the number of predecessors if this block is merged with pred blocks
     that are dispensable. */
  max_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  }
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

  /** Debug output **
  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf("  removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      printf("  removing pred %i ", i); DDMN(pred);
    } else { printf("  Nothing to do for "); DDMN(pred); }
  }
  ** end Debug output **/
  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
  while (phi) {
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    n_preds = 0;
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
        /* Do nothing */
      } else if (get_Block_block_visited(pred) +1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
          } else {
            in[n_preds] = phi_pred;
          }
          n_preds++;
        }
        /* The Phi_pred node is replaced now if it is a Phi.
           In loops the removed Phi node can apparently still be used
           legally, so the Phi node has to be replaced by the new one.
           Further the old Phi node has to be removed (by exchanging it
           or replacing it by a Bad) so that it disappears from the
           keep_alives.  So, if no loop is present, one should exchange
           with new_Bad. */
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive. Further there
             might be other users. */
          exchange(phi_pred, phi);  /* works, but is semantically wrong! Why?? */
        }
      } else {
        in[n_preds] = get_Phi_pred(phi, i);
        n_preds++;
      }
    }
    /* Fix the node */
    set_irn_in(phi, n_preds, in);

    phi = get_irn_link(phi);
  }
  /** Move the Phi nodes of completely removed pred blocks to this one.
      This happens only if merge between loop backedge and single loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) +1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
      while (phi) {
        if (get_irn_op(phi) == op_Phi) {
          set_nodes_Block(phi, b);

          n_preds = 0;
          for (i = 0; i < k; i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) +1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                /* @@@ Here I need loop information!  The control flow
                   edge must be a backedge!  (At all four
                   in[n_preds] = phi assignments.)  Still, it has worked
                   so far!! */
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          for (i = 0; i < get_Phi_n_preds(phi); i++) {
            in[n_preds] = get_Phi_pred(phi, i);
            n_preds++;
          }
          for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) +1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          set_irn_in(phi, n_preds, in);
        }
        phi = get_irn_link(phi);
      }
    }
  }
  /** Fix the block **/
  n_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      /* Do nothing */
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
        n_preds++;
      }
      /* Remove block as it might be kept alive. */
      exchange(pred, b/*new_Bad()*/);
    } else {
      in[n_preds] = get_Block_cfgpred(b, i);
      n_preds++;
    }
  }
  set_irn_in(b, n_preds, in);
  free(in);
}
void optimize_cf(ir_graph *irg) {
  int i;
  ir_node **in;
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keep alives, optimize them if block, add to new in-array
     for end if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);
  inc_irg_visited(current_ir_graph);
  for (i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      }
    }
  }
  /* DEL_ARR_F(end->in);   GL @@@ does not work! */
  end->in = in;

  current_ir_graph = rem;
}
/**
 * Called by the walker of remove_critical_cf_edges().
 *
 * Place an empty block on an edge between a block with multiple
 * predecessors and a block with multiple successors.
 *
 * @param env Environment of the walker.  This field is unused and has
 *            the value NULL.
 */
static void walk_critical_cf_edges(ir_node *n, void *env) {
  int arity, i;
  ir_node *pre, *block, **in, *jmp;

  /* Block has multiple predecessors */
  if ((op_Block == get_irn_op(n)) &&
      (get_irn_arity(n) > 1)) {
    arity = get_irn_arity(n);

    for (i = 0; i < arity; i++) {
      pre = get_irn_n(n, i);
      /* Predecessor has multiple successors.  Insert new control flow edge. */
      if ((NULL != pre) && (op_Proj == get_irn_op(pre))) {
        /* set predecessor array for new block */
        in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
        /* set predecessor of new block */
        in[0] = pre;
        block = new_Block(1, in);
        /* insert new jmp node to new block */
        switch_block(block);
        jmp = new_Jmp();
        switch_block(n);
        /* set successor of new block */
        set_irn_n(n, i, jmp);
      } /* predecessor has multiple successors */
    } /* for all predecessors */
  } /* n is a block */
}
void remove_critical_cf_edges(ir_graph *irg) {
  if (get_opt_critical_edges())
    irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL);
}
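
/* Illustration (not from this file): a critical edge

     Cond --ProjX--> B        (B has further predecessors)

   is split into

     Cond --ProjX--> [new empty block] --Jmp--> B

   so that later phases can insert code on the edge without affecting
   the other paths into B. */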