 * File name:   ir/ir/irgopt.c
 * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
 * Author:      Christian Schaefer, Goetz Lindenmaier
 * Modified by: Sebastian Felis
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.

# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h"       /* For code placement */
# include "irbackedge_t.h"

/* Defined in iropt.c */
pset *new_identities (void);
void  del_identities (pset *value_table);
void  add_identities (pset *value_table, ir_node *node);

/********************************************************************/
/* Apply the optimizations of iropt to all nodes.                   */
/********************************************************************/

static void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);
}

static void
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  ir_node *optimized, *old;

  for (i = 0; i < get_irn_arity(n); i++) {
    /* get_irn_n skips Id nodes, so the comparison old != optimized does not
       show all optimizations. Therefore always set the new predecessor. */
    old = get_irn_n(n, i);
    optimized = optimize_in_place_2(old);
    set_irn_n(n, i, optimized);
  }

  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
  }
}

void
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the CSE. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* Walk over the graph. */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
}
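
/* A hedged usage sketch, not from the original file: a driver would apply
   the local optimization to every graph of the program.  get_irp_n_irgs()
   and get_irp_irg() are assumed to be the usual irprog accessors. */
#if 0
static void local_optimize_all (void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    local_optimize_graph(get_irp_irg(i));
}
#endif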
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection */
/********************************************************************/

/* Remember the new node in the old node by using a field all nodes have. */
static INLINE void
set_new_node (ir_node *old, ir_node *new)
{
  set_irn_link(old, new);
}

/* Get this new node, before the old node is forgotten. */
static INLINE ir_node *
get_new_node (ir_node * n)
{
  return (ir_node *) get_irn_link(n);
}

/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses. This function is called for all Phi and Block nodes
   in a Block. */
static INLINE int
compute_new_arity(ir_node *b) {
  int i, res;
  unsigned long irg_v, block_v;

  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* We computed the number of preds for this block and saved it in the
       block_visited flag. */
    return block_v - irg_v;
  } else {
    /* compute the number of good predecessors */
    res = get_irn_arity(b);
    for (i = 0; i < get_irn_arity(b); i++)
      if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
    /* save it in the flag. */
    set_Block_block_visited(b, irg_v + res);
    return res;
  }
}
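
/* A hedged illustration, not from the original file: the trick above stores
   irg_v + res in the block-visited flag, so block_v >= irg_v doubles as the
   "already computed" test and block_v - irg_v recovers the cached arity.
   get_saved_arity is a hypothetical helper showing the decode step alone. */
#if 0
static int get_saved_arity (ir_node *b) {
  unsigned long irg_v   = get_irg_block_visited(current_ir_graph);
  unsigned long block_v = get_Block_block_visited(b);
  return (block_v >= irg_v) ? (int)(block_v - irg_v) : -1;  /* -1: not cached */
}
#endif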
static INLINE void new_backedge_info(ir_node *n) {
  switch (get_irn_opcode(n)) {
  case iro_Block:
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Phi:
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Filter:
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  default: ;
  }
}

/* Copies the node to the new obstack. The ins of the new node point to
   the predecessors on the old obstack. For block/phi nodes not all
   predecessors might be copied. n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors. The arity is determined by counting
   the non-bad predecessors of the block. */
static void
copy_node (ir_node *n, void *env) {
  ir_node *nn, *block;
  int new_arity;

  if (get_irn_opcode(n) == iro_Block) {
    block = NULL;
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
  } else {
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
    } else {
      new_arity = get_irn_arity(n);
    }
  }
  nn = new_ir_node(get_irn_dbg_info(n),
                   current_ir_graph,
                   block,
                   get_irn_op(n),
                   get_irn_mode(n),
                   new_arity,
                   get_irn_in(n));
  /* Copy the attributes. These might point to additional data. If this
     was allocated on the old obstack the pointers are now dangling. This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  copy_attrs(n, nn);
  new_backedge_info(nn);
  set_new_node(n, nn);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn); */
}

/* Copies new predecessors of old node to new node remembered in link.
   Spares the Bad predecessors of Phi and Block nodes. */
static void
copy_preds (ir_node *n, void *env) {
  ir_node *nn, *block;
  int i, j;

  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */

  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* Repair the block visited flag from above misuse. Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if the
       in array contained Bads. Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow_straightening()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy the node if the corresponding predecessor in the block is
       Bad. The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* If the pre walker reached this Phi after the post walker visited the
       block, block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
  } else {
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
  }
  /* Now the new node is complete. We can add it to the hash table for CSE.
     @@@ inlining aborts if we identify End. Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
}

/* Copies the graph recursively, compacts the keepalives of the end node. */
static void
copy_graph (void) {
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */
  int i;

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),
                   current_ir_graph,
                   NULL,
                   op_End,
                   mode_X,
                   -1,
                   NULL);
  /* Copy the attributes. Well, there might be some in the future... */
  copy_attrs(oe, ne);
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));

  /** ... and now the keep alives. **/
  /* First pick the not marked block nodes and walk them. We must pick these
     first, as otherwise we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
    }
  }

  /* Now pick the Phis. Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      }
      add_End_keepalive(ne, get_new_node(ka));
    }
  }
}

/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   graph. */
static void
copy_graph_env (void) {
  ir_node *old_end;

  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node. Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* we use the block walk flag for removing Bads from Blocks ins. */
  inc_irg_block_visited(current_ir_graph);

  /* copy the graph */
  copy_graph();

  /* fix the fields in current_ir_graph */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));

  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  }
  set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));

  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame  (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args   (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  }
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  }
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
}

/* Copies all reachable nodes to a new obstack. Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   Bad predecessors.
   Adds all new nodes to a new hash table for CSE. Does not
   perform CSE, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate() */
void
dead_node_elimination(ir_graph *irg) {
  ir_graph *rem;
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ so far we lose loops when copying */
  set_irg_loop(current_ir_graph, NULL);

  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for CSE */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env();

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */
  }

  current_ir_graph = rem;
}
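
/* A hedged usage sketch, not from the original file: dead node elimination
   is the copying garbage collection pass, so it is typically run after
   local optimizations have disconnected nodes.  get_irp_n_irgs() and
   get_irp_irg() are assumed to enumerate the graphs of the program as in
   the usual irprog interface. */
#if 0
static void collect_garbage_everywhere (void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++) {
    local_optimize_graph(get_irp_irg(i));   /* disconnect useless nodes */
    dead_node_elimination(get_irp_irg(i));  /* copy survivors, free the rest */
  }
}
#endif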
/* Relink bad predecessors of a block and store the old in array in the
   link field. This function is called by relink_bad_predecessors().
   The array in the link field starts with the block operand at position 0.
   If the block has bad predecessors, create a new in array without bad preds.
   Otherwise leave the in array untouched. */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
  ir_node **new_in, *irn;
  int i, new_irn_n, old_irn_arity, new_irn_arity = 0;

  /* if the link field of the block is NULL, look for bad predecessors,
     otherwise this is already done */
  if (get_irn_op(n) == op_Block &&
      get_irn_link(n) == NULL) {

    /* save old predecessors in link field (position 0 is the block operand)*/
    set_irn_link(n, (void *)get_irn_in(n));

    /* count predecessors without bad nodes */
    old_irn_arity = get_irn_arity(n);
    for (i = 0; i < old_irn_arity; i++)
      if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;

    /* arity changing: set new predecessors without bad nodes */
    if (new_irn_arity < old_irn_arity) {
      /* get new predecessor array without Block predecessor */
      new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));

      /* set new predecessors in array */
      new_in[0] = NULL;
      new_irn_n = 1;
      for (i = 1; i < old_irn_arity; i++) {
        irn = get_irn_n(n, i);
        if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
      }
      n->in = new_in;
    } /* ir node had bad predecessors */

  } /* Block is not relinked */
}

/* Relinks Bad predecessors of Blocks and Phis called by the walker
   remove_bad_predecessors(). If n is a Block, call
   relink_bad_block_predecessors(). If n is a Phi node, also call the
   relinking function of the Phi's Block. If this block has bad predecessors,
   relink the preds of the Phi node as well. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of the Phi's block */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if the count of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array
         n->in[0] remains the same block */
      new_irn_arity = 1;
      for (i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
    }

  } /* n is a Phi node */
}

/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination but without
   copying the graph.
   On walking up set the link field to NULL, on walking down call
   relink_bad_predecessors() (This function stores the old in array
   in the link field and sets a new in array if the arity of the
   predecessors changes). */
void remove_bad_predecessors(ir_graph *irg) {
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
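
/* A hedged usage sketch, not from the original file: the pairing below is
   an assumed typical use -- local optimization first turns unreachable code
   into Bad nodes, then the walker above compacts the in arrays in place
   without copying the graph. */
#if 0
static void compact_graph_in_place (ir_graph *irg) {
  local_optimize_graph(irg);      /* creates the Bad predecessors */
  remove_bad_predecessors(irg);   /* strips them from Blocks and Phis */
}
#endif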
/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/

/* Copy node for inlining. Copies the node by calling copy_node and
   then updates the entity if it's a local one. env must be a pointer
   to the frame type of the procedure. The new entities must be in
   the link field of the entities. */
static void
copy_node_inline (ir_node *n, void *env) {
  ir_node *new;
  type *frame_tp = (type *)env;

  copy_node(n, NULL);
  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
    }
  }
}

void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *pre_call;
  ir_node *post_call, *post_bl;
  ir_node *in[5];
  ir_node *end, *end_bl;
  ir_node **res_pred;
  ir_node **cf_pred;
  ir_node *ret, *phi;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;
  type *called_frame;

  if (!get_optimize() || !get_opt_inline()) return;
  /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
  rem_opt = get_optimize();
  set_optimize(0);

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* -- Check preconditions -- */
  assert(get_irn_op(call) == op_Call);
  /* @@@ does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
                         get_Call_type(call)));
  */
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) {
    set_optimize(rem_opt);
    return;
  }

  /* -- Build pre_call, the Tuple that represents the state entering
     the procedure and later replaces the Start node of the called graph.
     Post_call is the old Call node and collects the results of the called
     graph. Both will end up being a tuple. -- */
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[0] = new_Jmp();
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);
  post_call = call;

  /* -- Split the block of the Call node with part_block.
     The new block gets the ins of the old block, pre_call and all its
     predecessors and all Phi nodes. -- */
  part_block(pre_call);

  /* -- Prepare state for dead node elimination -- */
  /* Visited flags in calling irg must be >= flag in called irg.
     Else walker and arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as new Start node in link field of the start node of
     the calling graph and pre_call's block as new block for the start
     block of the calling graph.
     Further mark these nodes so that they are not visited by the
     copying. */
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));

  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);

  /* -- Replicate local entities of the called_graph -- */
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);
  }

  /* visited is greater than that of the called graph. With this trick
     visited will remain unchanged so that an outer walker, e.g., searching
     the call nodes to inline, calling this inline will not visit the
     inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);

  /* -- Performing dead node elimination inlines the graph -- */
  /* Copies the nodes to the obstack of current_ir_graph. Updates links to
     new nodes. */
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);

  /* -- Merge the end of the inlined procedure with the call site -- */
  /* We will turn the old Call node into a Tuple with the following
     predecessors:
     -1: Block of Tuple.
      0: Phi of all Memories of Return statements.
      1: Jmp from new Block that merges the control flow from all exception
         predecessors of the old end block.
      2: Tuple of all arguments.
      3: Phi of Exception memories.
  */

  /* -- Precompute some values -- */
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end = get_new_node(get_irg_end(called_graph));
  arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */

  /* -- archive keepalives -- */
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die, but the in array is not on the obstack ... */

  /* -- Replace Return nodes by Jump nodes. -- */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
      n_ret++;
    }
  }
  set_irn_in(post_bl, n_ret, cf_pred);

  /* -- Build a Tuple for all results of the method.
     The Call node itself is turned into a tuple. -- */
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
      n_ret++;
    }
  }
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  }
  /* Now the real results */
  if (n_res > 0) {
    for (j = 0; j < n_res; j++) {
      n_ret = 0;
      for (i = 0; i < arity; i++) {
        ret = get_irn_n(end_bl, i);
        if (get_irn_op(ret) == op_Return) {
          cf_pred[n_ret] = get_Return_res(ret, j);
          n_ret++;
        }
      }
      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
      res_pred[j] = phi;
      /* Conserve Phi-list for further inlinings -- but might be optimized */
      if (get_nodes_Block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
      }
    }
    set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
  } else {
    set_Tuple_pred(call, 2, new_Bad());
  }
  /* Finally the exception control flow. We need to add a Phi node to
     collect the memory containing the exception objects. Further we need
     to add another block to get a correct representation of this Phi. To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  n_exc = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
      n_exc++;
    }
  }
  if (n_exc > 0) {
    new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
    set_Tuple_pred(call, 1, new_Jmp());
    /* The Phi for the memories with the exception objects */
    n_exc = 0;
    for (i = 0; i < arity; i++) {
      ret = skip_Proj(get_irn_n(end_bl, i));
      if (get_irn_op(ret) == op_Call) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
        n_exc++;
      } else if (is_fragile_op(ret)) {
        /* We rely on all cfops having the memory output at the same position. */
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
        n_exc++;
      } else if (get_irn_op(ret) == op_Raise) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
        n_exc++;
      }
    }
    set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
  } else {
    set_Tuple_pred(call, 1, new_Bad());
    set_Tuple_pred(call, 3, new_Bad());
  }
  free(res_pred);
  free(cf_pred);

  /* -- If the exception control flow from the inlined Call directly
     branched to the end block we now have the following control
     flow predecessor pattern: ProjX -> Tuple -> Jmp. We must
     remove the Jmp along with its empty block and add Jmp's
     predecessors as predecessors of this end block. No problem if
     there is no exception, because then Bad branches to End, which
     is fine. -- */
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
        /* There are unoptimized tuples from earlier inlinings when there
           was no exception. */
        assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
        cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
        assert(get_irn_op(cf_op) == op_Jmp);
        break;
      }
    }
  }
  /* repair the end block if we found such a Jmp */
  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j-i);
    for (; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
    set_irn_in(end_bl, arity, cf_pred);
    free(cf_pred);
  }

  /* -- Turn CSE back on. -- */
  set_optimize(rem_opt);
}
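
/* A hedged usage sketch, not from the original file: inline_method relies
   on part_block, which needs current Phi/Proj lists, so a caller is
   expected to run collect_phiprojs first (as inline_small_irgs below does).
   The names some_call and callee stand for a Call node and its known
   callee graph and are purely illustrative. */
#if 0
static void inline_one_call (ir_node *some_call, ir_graph *callee) {
  collect_phiprojs(current_ir_graph);  /* prerequisite of part_block */
  inline_method(some_call, callee);    /* replaces some_call by the callee body */
}
#endif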
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/

static int pos;

/* It makes no sense to inline too many calls in one procedure. Anyway,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024

static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_node *addr;
  tarval *tv;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    if (tarval_to_entity(tv)) {
      called_irg = get_entity_irg(tarval_to_entity(tv));
      if (called_irg && pos < MAX_INLINE) {
        /* The Call node calls a locally defined method. Remember to inline. */
        calls[pos] = call;
        pos++;
      }
    }
  }
}

/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   method.
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
   size are inlined. */
void inline_small_irgs(ir_graph *irg, int size) {
  int i;
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We cannot inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining visited of the callee equals visited of
     the caller. With the next inlining both are increased.) */
  pos = 0;
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tarval *tv;
      ir_graph *callee;
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tarval_to_entity(tv));
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);
      }
    }
  }

  current_ir_graph = rem;
}
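
/* A hedged usage sketch, not from the original file: the size threshold is
   measured in obstack bytes and has to be tuned empirically; 500 is only
   an illustration. */
#if 0
static void inline_all_small (int threshold) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)   /* assumed irprog iteration */
    inline_small_irgs(get_irp_irg(i), threshold);
}
#endif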
/********************************************************************/
/* Code Placement. Pins all floating nodes to a block where they    */
/* will be executed only if needed.                                 */
/********************************************************************/

static pdeq *worklist;        /* worklist of ir_node*s */

/* Find the earliest correct block for N. --- Place N into the
   same Block as its dominance-deepest Input. */
static void
place_floats_early (ir_node *n)
{
  int i, start;

  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));
  mark_irn_visited(n);

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    int depth = 0;
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const)    ||
        (get_irn_op(n) == op_SymConst) ||
        (is_Bad(n))                    ||
        (get_irn_op(n) == op_Unknown)) {
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);
      depth = 1;
    }

    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      ir_node *dep_block;
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      }
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         been finished on them. We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        b = dep_block;
        depth = get_Block_dom_depth(dep_block);
      }
      /* Avoid that the node is placed in the Start block. */
      if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
        b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
        assert(b != get_irg_start_block(current_ir_graph));
        depth = 2;
      }
    }
    set_nodes_Block(n, b);
  }

  /* Add predecessors of non floating nodes on worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
    }
  }
}

/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
   Start, Call and end at pinned nodes such as Store, Call. Place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
static INLINE void place_early (void) {
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);
  }

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
}

/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
static ir_node *
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
{
  ir_node *block = NULL;
  int i;

  /* Compute the latest block into which we can place a node so that it is
     before consumer. */
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
      }
    }
  } else {
    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  }

  /* Compute the deepest common ancestor of block and dca. */
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
  while (block != dca)
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }

  return dca;
}

static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));
}

/* Move n to a block with less loop depth than its current block. The
   new block must be dominated by early. */
static void
move_out_of_loops (ir_node *n, ir_node *early)
{
  ir_node *best, *dca;

  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  best = dca;
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
      best = dca;
    }
  }
  if (best != get_nodes_Block(n)) {
    /* debug output
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));
    */
    set_nodes_Block(n, best);
  }
}

/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth. This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
static void
place_floats_late (ir_node *n)
{
  int i;
  ir_node *early;

  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (!is_cfop(n)) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this node to move it
       out of loops no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node. We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);
    }

    /* We have to determine the final block of this node... except for
       constants. */
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;      /* deepest common ancestor in the
                                   dominator tree of all nodes'
                                   blocks depending on us; our final
                                   placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      }
      set_nodes_Block(n, dca);

      move_out_of_loops (n, early);
    }
  }

  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes on list. (Those of floating
     nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
    }
  }
}

static INLINE void place_late(void) {
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
  }
}

void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  if (!(get_optimize() && get_opt_global_cse())) {
    current_ir_graph = rem;
    return;
  }

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)
    compute_doms(irg);

  construct_backedges(irg);

  /* Place all floating nodes as early as possible. This guarantees
     a legal code placement. */
  worklist = new_pdeq ();
  place_early();

  /* place_early invalidates the outs, place_late needs them. */
  compute_outs(irg);
  /* Now move the nodes down in the dominator tree. This reduces the
     unnecessary executions of the nodes. */
  place_late();

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
}
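
/* A hedged usage sketch, not from the original file: place_code is the
   second half of a GCSE pipeline.  Global CSE first unpins and merges
   nodes, then code placement pins them again; set_opt_global_cse is
   assumed to be the usual irflag-style option setter. */
#if 0
static void global_cse_and_place (ir_graph *irg) {
  set_opt_global_cse(1);       /* assumption: irflag option setter */
  local_optimize_graph(irg);   /* with GCSE on, this sets the graph floating */
  place_code(irg);             /* pin every node to its optimal legal block */
  set_opt_global_cse(0);
}
#endif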
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks. A block  */
/* is empty if it contains only a Jmp node.                         */
/* Blocks can only be removed if they are not needed for the        */
/* semantics of Phi nodes.                                          */
/********************************************************************/

/* Removes Tuples from Block control flow predecessors.
   Optimizes blocks with equivalent_node().
   Replaces n by Bad if n is unreachable control flow. */
static void merge_blocks(ir_node *n, void *env) {
  int i;
  set_irn_link(n, NULL);

  if (get_irn_op(n) == op_Block) {
    /* Remove Tuples */
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
         A different order of optimizations might cause problems. */
      if (get_opt_normalize())
        set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
    /* We will soon visit a block. Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad, so we
         promote it directly below. */
      assert(((b == new) ||
              get_opt_control_flow_straightening() ||
              get_opt_control_flow_weak_simplification()) &&
             ("strange flag setting"));
      exchange (b, new);
      b = new;
      new = equivalent_node(b);
    }
    /* GL @@@ get_opt_normalize added, 5.5.2003 */
    if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
  }
}

/* Collects all Phi nodes in the link list of the Block.
   Marks all blocks "block_visited" if they contain a node other
   than Jmp. */
static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if ((get_irn_op(n) == op_Phi)) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
      set_irn_link(b, n);
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non empty block. */
      mark_Block_block_visited(b);
    }
  }
}

/* Returns true if pred is a predecessor of block b. */
static int is_pred_of(ir_node *pred, ir_node *b) {
  int i;

  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;
  }
  return 0;
}

static int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
      /* Mark block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      return 1;
    }
    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
    } else {
      /* b's pred blocks and pred's pred blocks must be pairwise disjoint.
         Work preds < pos as if they were already removed. */
      for (i = 0; i < pos; i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (get_Block_block_visited(b_pred) + 1
            < get_irg_block_visited(current_ir_graph)) {
          for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
            ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
            if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
          }
        } else {
          if (is_pred_of(b_pred, pred)) dispensable = 0;
        }
      }
      for (i = pos + 1; i < get_Block_n_cfgpreds(b); i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_pred_of(b_pred, pred)) dispensable = 0;
      }
      if (!dispensable) {
        set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
        n_preds = 1;
      } else {
        n_preds = get_Block_n_cfgpreds(pred);
      }
    }
  }

  return n_preds;
}

static void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;
  ir_node **in;

  /* Count the number of predecessors if this block is merged with pred
     blocks that are empty. */
  max_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  }
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

/**
  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf("  removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      printf("  removing pred %i ", i); DDMN(pred);
    } else { printf("  Nothing to do for "); DDMN(pred); }
  }
  * end Debug output **/

  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
  while (phi) {
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    n_preds = 0;
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
        /* Do nothing */
      } else if (get_Block_block_visited(pred) +1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
          } else {
            in[n_preds] = phi_pred;
          }
          n_preds++;
        }
        /* The Phi_pred node is replaced now if it is a Phi.
           In loops the removed Phi node can apparently still be used
           legally, so the Phi node must be replaced by the new one.
           Further, the old Phi node must be removed (by exchanging it
           with the new one or with a Bad) so that it can disappear from
           the keep_alives. So if there is no loop, one should do the
           exchange with new_Bad. */
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive. Further there
             might be other users. */
          exchange(phi_pred, phi);  /* works, but is semantically wrong! Why?? */
        }
      } else {
        in[n_preds] = get_Phi_pred(phi, i);
        n_preds++;
      }
    }
    set_irn_in(phi, n_preds, in);

    phi = get_irn_link(phi);
  }

  /** Move Phi nodes from removed blocks to b.
      This happens only if merge between loop backedge and single loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) +1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
      while (phi) {
        if (get_irn_op(phi) == op_Phi) {
          set_nodes_Block(phi, b);

          n_preds = 0;
          for (i = 0; i < k; i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) +1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                /* @@@ Here I need loop information! The control flow edge
                   must be a backedge! (At all four in[n_preds] = phi
                   assignments.) Still, it has worked so far!! */
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          for (i = 0; i < get_Phi_n_preds(phi); i++) {
            in[n_preds] = get_Phi_pred(phi, i);
            n_preds++;
          }
          for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) +1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          set_irn_in(phi, n_preds, in);
        }
        phi = get_irn_link(phi);
      }
    }
  }

  /** Fix the block **/
  n_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      /* Do nothing */
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
        n_preds++;
      }
      /* Remove block as it might be kept alive. */
      exchange(pred, b/*new_Bad()*/);
    } else {
      in[n_preds] = get_Block_cfgpred(b, i);
      n_preds++;
    }
  }
  set_irn_in(b, n_preds, in);
  free(in);
}

void optimize_cf(ir_graph *irg) {
  int i;
  ir_node **in;
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keep alives, optimize them if block, add to new in-array
     for end if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);
  inc_irg_visited(current_ir_graph);
  for(i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      }
    }
  }
  /* DEL_ARR_F(end->in);   GL @@@ doesn't work! */
  end->in = in;

  current_ir_graph = rem;
}
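
/* A hedged usage sketch, not from the original file: optimize_cf pays off
   after local optimizations have turned control flow into Bad predecessors
   and empty Jmp blocks. */
#if 0
static void cleanup_control_flow (ir_graph *irg) {
  local_optimize_graph(irg);   /* produces Bad preds and trivial Jmp blocks */
  optimize_cf(irg);            /* merges blocks and drops the dead edges */
}
#endif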
/**
 * Called by the walker of remove_critical_cf_edges.
 *
 * Places an empty block on each edge between a block with multiple
 * successors and a block with multiple predecessors.
 *
 * @param n   IR node
 * @param env Environment of the walker. This field is unused and has
 *            the value NULL.
 */
static void walk_critical_cf_edges(ir_node *n, void *env) {
  int arity, i;
  ir_node *pre, *block, **in, *jmp;

  /* Block has multiple predecessors */
  if ((op_Block == get_irn_op(n)) &&
      (get_irn_arity(n) > 1)) {
    arity = get_irn_arity(n);

    if (n == get_irg_end_block(current_ir_graph))
      return;  /* No use to add a block here. */

    for (i = 0; i < arity; i++) {
      pre = get_irn_n(n, i);
      /* Predecessor has multiple successors. Insert new control flow edge. */
      if ((NULL != pre) &&
          (op_Proj == get_irn_op(pre)) &&
          op_Raise != get_irn_op(skip_Proj(pre))) {

        /* set predecessor array for new block */
        in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
        /* set predecessor of new block */
        in[0] = pre;
        block = new_Block(1, in);
        /* insert new jmp node to new block */
        switch_block(block);
        jmp = new_Jmp();
        switch_block(n);
        /* set successor of new block */
        set_irn_n(n, i, jmp);

      } /* predecessor has multiple successors */
    } /* for all predecessors */
  } /* n is a block */
}

void remove_critical_cf_edges(ir_graph *irg) {
  if (get_opt_critical_edges())
    irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL);
}
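
/* A hedged usage sketch, not from the original file: passes that must not
   see critical edges (e.g. placing code or copies on edges) would run this
   first.  Note the walker above only fires if the critical-edges option is
   set; set_opt_critical_edges is assumed to be the matching irflag setter. */
#if 0
static void prepare_edge_splitting (ir_graph *irg) {
  set_opt_critical_edges(1);      /* assumption: irflag option setter */
  remove_critical_cf_edges(irg);  /* insert a Jmp block on each critical edge */
}
#endif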