/*
 * File name:   ir/ir/irgopt.c
 * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
 * Author:      Christian Schaefer, Goetz Lindenmaier
 * Modified by: Sebastian Felis
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
 */
# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h"       /* for code placement */
# include "irbackedge_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
void  del_identities (pset *value_table);
void  add_identities (pset *value_table, ir_node *node);
/********************************************************************/
/* apply optimizations of iropt to all nodes.                       */
/********************************************************************/
static void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);
}
static void
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  ir_node *optimized, *old;

  for (i = 0; i < get_irn_arity(n); i++) {
    /* get_irn_n skips Id nodes, so the comparison old != optimized does not
       show all optimizations. Therefore always set the new predecessor. */
    old = get_irn_n(n, i);
    optimized = optimize_in_place_2(old);
    set_irn_n(n, i, optimized);
  }

  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
  }
}
void
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the cse. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* walk over the graph */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
}
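/* Usage sketch (illustrative, not part of this file): running the local
   optimizer over every graph of the program.  Assumes the standard irprog
   accessors get_irp_n_irgs()/get_irp_irg() from irprog.h. */
#if 0
void example_local_optimize_all(void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    local_optimize_graph(get_irp_irg(i));
}
#endif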
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection */
/********************************************************************/
/* Remember the new node in the old node by using a field all nodes have. */
static INLINE void
set_new_node (ir_node *old, ir_node *new)
{
  set_irn_link(old, new);
}

/* Get this new node, before the old node is forgotten. */
static INLINE ir_node *
get_new_node (ir_node * n)
{
  return (ir_node *)get_irn_link(n);
}
/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses. This function is called for all Phi and Block nodes
   in a Block. */
static INLINE int
compute_new_arity(ir_node *b) {
  int i, res, irg_v, block_v;

  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* we computed the number of preds for this block and saved it in the
       block_v flag */
    return block_v - irg_v;
  } else {
    /* compute the number of good predecessors */
    res = get_irn_arity(b);
    for (i = 0; i < get_irn_arity(b); i++)
      if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
    /* save it in the flag. */
    set_Block_block_visited(b, irg_v + res);
    return res;
  }
}
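/* Worked example of the encoding above (illustrative): assume
   get_irg_block_visited() is 10 and block b has 3 predecessors, one of them
   Bad.  The first call computes res = 2 and stores
   block_visited(b) = 10 + 2 = 12.  Every later call sees 12 >= 10 and
   recovers the arity as 12 - 10 = 2 without touching the predecessors
   again. */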
static INLINE void new_backedge_info(ir_node *n) {
  switch(get_irn_opcode(n)) {
  case iro_Block:
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Phi:
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Filter:
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  default: ;
  }
}
/* Copies the node to the new obstack. The Ins of the new node point to
   the predecessors on the old obstack. For block/phi nodes not all
   predecessors might be copied. n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors. The arity is determined by counting
   the non-bad predecessors of the block. */
static void
copy_node (ir_node *n, void *env) {
  ir_node *nn, *block;
  int new_arity;

  /* The end node loses its flexible in array. This doesn't matter,
     as dead node elimination builds End by hand, inlining doesn't use
     the End node. */
  /* assert(n->op == op_End || ((_ARR_DESCR(n->in))->cookie != ARR_F_MAGIC)); */

  if (get_irn_opcode(n) == iro_Block) {
    block = NULL;
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
  } else {
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
    } else {
      new_arity = get_irn_arity(n);
    }
  }
  nn = new_ir_node(get_irn_dbg_info(n),
                   current_ir_graph,
                   block,
                   get_irn_op(n),
                   get_irn_mode(n),
                   new_arity,
                   get_irn_in(n));
  /* Copy the attributes. These might point to additional data. If this
     was allocated on the old obstack the pointers now are dangling. This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  copy_attrs(n, nn);
  new_backedge_info(nn);
  set_new_node(n, nn);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn); */
}
/* Copies new predecessors of old node to new node remembered in link.
   Spare the Bad predecessors of Phi and Block nodes. */
static void
copy_preds (ir_node *n, void *env) {
  ir_node *nn, *block;
  int i, j;

  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */

  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* repair the block visited flag from above misuse. Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if
       in array contained Bads. Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow_straightening()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy node if corresponding predecessor in block is Bad.
       The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    j = 0;
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
        j++;
      }
    /* If the pre walker reached this Phi after the post walker visited the
       block block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
  } else {
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
  }
  /* Now the new node is complete. We can add it to the hash table for cse.
     @@@ inlining aborts if we identify End. Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
}
/* Copies the graph recursively, compacts the keepalive of the end node. */
static void
copy_graph (void) {
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */
  int i;

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),
                   current_ir_graph,
                   NULL,
                   op_End,
                   mode_X,
                   -1,
                   NULL);
  /* Copy the attributes. Well, there might be some in the future... */
  copy_attrs(oe, ne);
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));

  /** ... and now the keep alives. **/
  /* First pick the not marked block nodes and walk them. We must pick these
     first as else we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
    }
  }

  /* Now pick the Phis. Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      }
      add_End_keepalive(ne, get_new_node(ka));
    }
  }
}
/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   copied graph. */
static void
copy_graph_env (void) {
  ir_node *old_end;

  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node. Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* we use the block walk flag for removing Bads from Blocks ins. */
  inc_irg_block_visited(current_ir_graph);

  /* copy the graph */
  copy_graph();

  /* fix the fields in current_ir_graph */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));
  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  }
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  }
  set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));

  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame  (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args   (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  }
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  /* GL removed: we need unknown with mode for analyses.
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  }
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
  */
}
/* Copies all reachable nodes to a new obstack. Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   Phi nodes with only one predecessor.
   Adds all new nodes to a new hash table for cse. Does not
   perform cse, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate() */
void
dead_node_elimination(ir_graph *irg) {
  ir_graph *rem;
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ so far we lose loops when copying */
  set_irg_loop(current_ir_graph, NULL);

  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for cse */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env();

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */
  }

  current_ir_graph = rem;
}
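/* Usage sketch (illustrative, not part of this file): dead node elimination
   is typically run after local optimization has replaced nodes, so that the
   replaced nodes really vanish from memory.  The graph pointer stays valid,
   only its obstack is exchanged. */
#if 0
void example_cleanup(ir_graph *irg) {
  local_optimize_graph(irg);   /* fold and replace nodes in place */
  dead_node_elimination(irg);  /* copy the survivors to a fresh obstack */
}
#endif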
/* Relink bad predecessors of a block and store the old in array in the
   link field. This function is called by relink_bad_predecessors().
   The array in the link field starts with the block operand at position 0.
   If the block has bad predecessors, create a new in array without bad preds.
   Otherwise leave the in array untouched. */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
  ir_node **new_in, *irn;
  int i, new_irn_n, old_irn_arity, new_irn_arity = 0;

  /* if the link field of the block is NULL, look for bad predecessors,
     otherwise this is already done */
  if (get_irn_op(n) == op_Block &&
      get_irn_link(n) == NULL) {

    /* save old predecessors in link field (position 0 is the block operand)*/
    set_irn_link(n, (void *)get_irn_in(n));

    /* count predecessors without bad nodes */
    old_irn_arity = get_irn_arity(n);
    for (i = 0; i < old_irn_arity; i++)
      if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;

    /* arity changing: set new predecessors without bad nodes */
    if (new_irn_arity < old_irn_arity) {
      /* get new predecessor array without Block predecessor */
      new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));

      /* set new predecessors in array */
      new_in[0] = NULL;
      new_irn_n = 1;
      for (i = 1; i < old_irn_arity; i++) {
        irn = get_irn_n(n, i);
        if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
      }
      n->in = new_in;
    } /* ir node has bad predecessors */

  } /* Block is not relinked */
}
/* Relinks Bad predecessors of Blocks and Phis, called by the walker in
   remove_bad_predecessors(). If n is a Block, call
   relink_bad_block_predecessors(). If n is a Phi node, also call the
   relinking function on the Phi's Block. If this block has bad predecessors,
   relink the preds of the Phi node. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of the Phi's block */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if the count of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array
         n->in[0] remains the same block */
      new_irn_arity = 1;
      for (i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
    } /* arity of Phi's block changed */

  } /* n is a Phi node */
}
/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination but without
   copying the graph.
   On walking up set the link field to NULL, on walking down call
   relink_bad_predecessors() (This function stores the old in array
   in the link field and sets a new in array if the arity of the
   predecessors changes). */
void remove_bad_predecessors(ir_graph *irg) {
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
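/* Usage sketch (illustrative): the cheap alternative to a full
   dead_node_elimination() when only the Bad inputs of Blocks and Phis are
   in the way, e.g. before an analysis that cannot handle Bads. */
#if 0
void example_strip_bads(ir_graph *irg) {
  remove_bad_predecessors(irg);  /* relinks the in arrays in place, no copying */
}
#endif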
/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/
/* Copy node for inlining. Updates attributes that change when
 * inlining but not for dead node elimination.
 *
 * Copies the node by calling copy_node and then updates the entity if
 * it's a local one. env must be a pointer to the frame type of the
 * inlined procedure. The new entities must be in the link field of
 * the entities. */
static INLINE void
copy_node_inline (ir_node *n, void *env) {
  ir_node *new;
  type *frame_tp = (type *)env;

  copy_node(n, NULL);
  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
    }
  } else if (get_irn_op(n) == op_Block) {
    new = get_new_node (n);
    new->attr.block.irg = current_ir_graph;
  }
}
void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *pre_call;
  ir_node *post_call, *post_bl;
  ir_node *in[5];
  ir_node *end, *end_bl;
  ir_node **res_pred;
  ir_node **cf_pred;
  ir_node *ret, *phi;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;
  type *called_frame;

  if (!get_optimize() || !get_opt_inline()) return;
  /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
  rem_opt = get_optimize();
  set_optimize(0);
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  /* -- Check preconditions -- */
  assert(get_irn_op(call) == op_Call);
  /* @@@ does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
            get_Call_type(call)));
  */
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) {
    set_optimize(rem_opt);
    return;
  }
  /* -- Pre_call collects the arguments of
     the procedure and later replaces the Start node of the called graph.
     Post_call is the old Call node and collects the results of the called
     graph. Both will end up being a tuple. -- */
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[0] = new_Jmp();
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);
  post_call = call;
  /* -- Split the Call block: pre_call is moved into a new block.
     The new block gets the ins of the old block, pre_call and all its
     predecessors and all Phi nodes. -- */
  part_block(pre_call);
  /* -- Prepare state for dead node elimination -- */
  /* Visited flags in calling irg must be >= flag in called irg.
     Else walker and arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as new Start node in link field of the start node of
     the calling graph and pre_call's block as new block for the start block
     of the calling graph.
     Further mark these nodes so that they are not visited by the
     copying. */
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));
  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);

  /* -- Replicate local entities of the called_graph -- */
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);
  }

  /* visited is greater than that of the called graph. With this trick
     visited will remain unchanged, so that an outer walker, e.g., one
     searching the call nodes to inline, calling this inline will not visit
     the inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
  /* -- Performing dead node elimination inlines the graph -- */
  /* Copies the nodes to the obstack of current_ir_graph. Updates links to
     new nodes. */
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);
  /* -- Merge the end of the inlined procedure with the call site -- */
  /* We will turn the old Call node into a Tuple with the following
     predecessors:
       -1: Block of Tuple.
        0: Phi of all Memories of Return statements.
        1: Jmp from new Block that merges the control flow from all exception
           predecessors of the old end block.
        2: Tuple of all arguments.
        3: Phi of Exception memories.
  */

  /* -- Precompute some values -- */
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end = get_new_node(get_irg_end(called_graph));
  arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
  /* -- archive keepalives -- */
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));

  /* The new end node will die. We need not free it as the in array is on the
     obstack: copy_node only generated 'D' arrays. */

  /* -- Replace Return nodes by Jump nodes. -- */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
      n_ret++;
    }
  }
  set_irn_in(post_bl, n_ret, cf_pred);

  /* -- The old Call node is now
     turned into a tuple. -- */
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  n_ret = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
      n_ret++;
    }
  }
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  }
  /* Now the real results */
  if (n_res > 0) {
    for (j = 0; j < n_res; j++) {
      n_ret = 0;
      for (i = 0; i < arity; i++) {
        ret = get_irn_n(end_bl, i);
        if (get_irn_op(ret) == op_Return) {
          cf_pred[n_ret] = get_Return_res(ret, j);
          n_ret++;
        }
      }
      phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
      res_pred[j] = phi;
      /* Conserve Phi-list for further inlinings -- but might be optimized */
      if (get_nodes_Block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
      }
    }
    set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
  } else {
    set_Tuple_pred(call, 2, new_Bad());
  }
  /* Finally the exception control flow. We need to add a Phi node to
     collect the memory containing the exception objects. Further we need
     to add another block to get a correct representation of this Phi. To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  n_exc = 0;
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
      n_exc++;
    }
  }
  if (n_exc > 0) {
    new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
    set_Tuple_pred(call, 1, new_Jmp());
    /* The Phi for the memories with the exception objects */
    n_exc = 0;
    for (i = 0; i < arity; i++) {
      ret = skip_Proj(get_irn_n(end_bl, i));
      if (get_irn_op(ret) == op_Call) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
        n_exc++;
      } else if (is_fragile_op(ret)) {
        /* We rely on the fact that all cfops have the memory output at the same position. */
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
        n_exc++;
      } else if (get_irn_op(ret) == op_Raise) {
        cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
        n_exc++;
      }
    }
    set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
  } else {
    set_Tuple_pred(call, 1, new_Bad());
    set_Tuple_pred(call, 3, new_Bad());
  }
  free(res_pred);
  free(cf_pred);
  /* -- If the exception control flow from the inlined Call directly
     branched to the end block we now have the following control
     flow predecessor pattern: ProjX -> Tuple -> Jmp. We must
     remove the Jmp along with its empty block and add Jmp's
     predecessors as predecessors of this end block. No problem if
     there is no exception, because then Bad branches to End, which
     is harmless. -- */
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
        /* There are unoptimized Tuples from earlier inlinings without exceptions. */
        assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
        cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
        assert(get_irn_op(cf_op) == op_Jmp);
        break;
      }
    }
  }
  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j-i);
    for (j = j; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
    set_irn_in(end_bl, arity, cf_pred);
    free(cf_pred);
    /* Remove the exception pred from post-call Tuple. */
    set_Tuple_pred(call, pn_Call_X_except, new_Bad());
  }

  /* -- Turn cse back on. -- */
  set_optimize(rem_opt);
}
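/* Usage sketch (illustrative, not part of this file): inlining one specific
   call site.  inline_method() uses part_block(), which relies on the
   Phi/Proj lists built by collect_phiprojs() from irgmod.h -- the same
   precondition inline_small_irgs() below establishes. */
#if 0
void example_inline_one(ir_graph *caller, ir_node *call, ir_graph *callee) {
  collect_phiprojs(caller);    /* precondition for part_block() */
  inline_method(call, callee);
}
#endif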
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/
static int pos;

/* It makes no sense to inline too many calls in one procedure. Anyway,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024

static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_node *addr;
  tarval *tv;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    if (tarval_to_entity(tv)) {
      called_irg = get_entity_irg(tarval_to_entity(tv));
      if (called_irg && pos < MAX_INLINE) {
        /* The Call node calls a locally defined method. Remember to inline. */
        calls[pos] = call;
        pos++;
      }
    }
  }
}
/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   method.
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
   size are inlined. */
void inline_small_irgs(ir_graph *irg, int size) {
  int i;
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We can not inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining visited of the callee equals visited of
     the caller. With the next inlining both are increased.) */
  pos = 0;
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tarval *tv;
      ir_graph *callee;
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tarval_to_entity(tv));
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);
      }
    }
  }

  current_ir_graph = rem;
}
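/* Usage sketch (illustrative): inline every callee whose graph obstack is
   smaller than a threshold.  The threshold is in bytes of obstack memory,
   not in nodes, so it is only a rough proxy for code size; 2048 below is a
   hypothetical value. */
#if 0
void example_inline_small(ir_graph *irg) {
  inline_small_irgs(irg, 2048);
}
#endif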
/********************************************************************/
/* Code Placement.  Pins all floating nodes to a block where they   */
/* will be executed only if needed.                                 */
/********************************************************************/
static pdeq *worklist;    /* worklist of ir_node*s */

/* Find the earliest correct block for N.  --- Place N into the
   same Block as its dominance-deepest Input. */
static void
place_floats_early (ir_node *n)
{
  int i, start;

  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));
  mark_irn_visited(n);

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    int depth = 0;
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const) ||
        (get_irn_op(n) == op_SymConst) ||
        (is_Bad(n)) ||
        (get_irn_op(n) == op_Unknown)) {
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);
      depth = 1;
    }

    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      ir_node *dep_block;
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      }
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         been finished on them. We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        b = dep_block;
        depth = get_Block_dom_depth(dep_block);
      }
      /* Avoid that the node is placed in the Start block */
      if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
        b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
        assert(b != get_irg_start_block(current_ir_graph));
        depth = 2;
      }
    }
    set_nodes_Block(n, b);
  }

  /* Add predecessors of non floating nodes on worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
    }
  }
}
/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
   Start, Call and end at pinned nodes such as Store, Call. place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
static INLINE void place_early (void) {
  assert(worklist);
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);
  }

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
}
/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
static ir_node *
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
{
  ir_node *block = NULL;

  /* Compute the latest block into which we can place a node so that it is
     before consumer. */
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    int i;
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
      }
    }
  } else {
    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  }

  /* Compute the deepest common ancestor of block and dca. */
  assert(block);
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
  while (block != dca)
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }

  return dca;
}
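/* Worked example (illustrative): with dca at dominator depth 5 and block at
   depth 7, block is first lifted twice to depth 5; then both are lifted in
   lockstep until they meet, at the latest at the start block, the root of
   the dominator tree. */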
static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));
}
/* Move n to a block with less loop depth than its current block. The
   new block must be dominated by early. */
static void
move_out_of_loops (ir_node *n, ir_node *early)
{
  ir_node *best, *dca;

  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  best = dca;
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
      best = dca;
    }
  }
  if (best != get_nodes_Block(n)) {
    /* debug output
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));
    */
    set_nodes_Block(n, best);
  }
}
/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth. This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
static void
place_floats_late (ir_node *n)
{
  int i;
  ir_node *early;

  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (!is_cfop(n)) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this block to move it
       out of loop no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node. We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);
    }

    /* We have to determine the final block of this node... except for
       constants. */
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;  /* deepest common ancestor in the
                               dominator tree of all nodes'
                               blocks depending on us; our final
                               placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      }
      set_nodes_Block(n, dca);

      move_out_of_loops (n, early);
    }
  }

  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes on list. (Those of floating
     nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
    }
  }
}
static INLINE void place_late(void) {
  assert(worklist);
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
  }
}
void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  if (!(get_optimize() && get_opt_global_cse())) return;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)
    compute_doms(irg);

  construct_backedges(irg);

  /* Place all floating nodes as early as possible. This guarantees
     a legal code placement. */
  worklist = new_pdeq ();
  place_early();

  /* place_early invalidates the outs, place_late needs them. */
  compute_outs(irg);
  /* Now move the nodes down in the dominator tree. This reduces the
     unnecessary executions of the node. */
  place_late();

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
}
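/* Usage sketch (illustrative): code placement only runs when global cse is
   enabled, since only then do floating nodes exist that must be pinned to a
   sensible block.  set_opt_global_cse() is assumed to be the corresponding
   flag setter in irflag.h. */
#if 0
void example_place(ir_graph *irg) {
  set_opt_global_cse(1);
  local_optimize_graph(irg);  /* performs the actual global cse */
  place_code(irg);            /* computes dominators and outs as needed */
}
#endif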
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks.  A block */
/* is empty if it contains only a Jmp node.                         */
/* Blocks can only be removed if they are not needed for the        */
/* semantics of Phi nodes.                                          */
/********************************************************************/
/* Removes Tuples from Block control flow predecessors.
   Optimizes blocks with equivalent_node().
   Replaces n by Bad if n is unreachable control flow. */
static void merge_blocks(ir_node *n, void *env) {
  int i;
  set_irn_link(n, NULL);

  if (get_irn_op(n) == op_Block) {
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      /* GL @@@ : is this possible? if (get_opt_normalize()) -- added, all tests go through.
         A different order of optimizations might cause problems. */
      if (get_opt_normalize())
        set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
    /* We will soon visit a block. Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad, so we
         promote it directly below. */
      assert(((b == new) ||
              get_opt_control_flow_straightening() ||
              get_opt_control_flow_weak_simplification()) &&
             ("strange flag setting"));
      exchange (b, new);
      b = new;
      new = equivalent_node(b);
    }
    /* GL @@@ get_opt_normalize added, 5.5.2003 */
    if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
  }
}
/* Collects all Phi nodes in the link list of the Block.
   Marks all blocks "block_visited" if they contain a node other
   than Jmp. */
static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if ((get_irn_op(n) == op_Phi)) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
      set_irn_link(b, n);
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non-empty block. */
      mark_Block_block_visited(b);
    }
  }
}
/* Returns true if pred is a predecessor block of b. */
static int is_pred_of(ir_node *pred, ir_node *b) {
  int i;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;
  }
  return 0;
}
static int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
      /* Mark block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      return 1;
    }
    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
    } else {
      /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
         Work preds < pos as if they were already removed. */
      for (i = 0; i < pos; i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (get_Block_block_visited(b_pred) + 1
            < get_irg_block_visited(current_ir_graph)) {
          for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
            ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
            if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
          }
        } else {
          if (is_pred_of(b_pred, pred)) dispensable = 0;
        }
      }
      for (i = pos + 1; i < get_Block_n_cfgpreds(b); i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_pred_of(b_pred, pred)) dispensable = 0;
      }
      if (!dispensable) {
        set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
        n_preds = 1;
      } else {
        n_preds = get_Block_n_cfgpreds(pred);
      }
    }
  }

  return n_preds;
}
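/* Worked example (illustrative): let pred be the empty block at position
   pos, and let another predecessor block of b share a control flow
   predecessor Q with pred.  Merging pred into b would give b two edges
   whose Phi paths can no longer be distinguished, so the test above sets
   dispensable = 0, marks pred as non-removable via the block_visited flag,
   and returns 1 (keep the single edge). */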
static void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;
  ir_node **in;

  /* Count the number of predecessors if this block is merged with pred
     blocks that are empty. */
  max_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  }
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

/** Debug output
  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf("  removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      printf("  removing pred %i ", i); DDMN(pred);
    } else { printf("  Nothing to do for "); DDMN(pred); }
  }
 * end Debug output **/
  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
  while (phi) {
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    n_preds = 0;
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
        /* Do nothing */
      } else if (get_Block_block_visited(pred) + 1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
          } else {
            in[n_preds] = phi_pred;
          }
          n_preds++;
        }
        /* The Phi_pred node is replaced now if it is a Phi.
           Apparently the removed Phi node can legally be used in loops.
           Therefore the old Phi node must be replaced by the new one.
           Further, the old Phi node must be removed (by exchanging it with
           the new one or with a Bad) so that it can disappear from the
           keep_alives.
           So, if there is no loop, one should do an exchange with new_Bad
           here. */
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive. Further there
             might be other users. */
          exchange(phi_pred, phi);  /* works, but is semantically wrong! Why?? */
        }
      } else {
        in[n_preds] = get_Phi_pred(phi, i);
        n_preds++;
      }
    }
    /* Fix the node */
    set_irn_in(phi, n_preds, in);

    phi = get_irn_link(phi);
  }
  /** Move the Phi nodes of removed empty predecessor blocks into b.
      This happens only if merge between loop backedge and single loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) + 1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
      while (phi) {
        if (get_irn_op(phi) == op_Phi) {
          set_nodes_Block(phi, b);

          n_preds = 0;
          for (i = 0; i < k; i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                /* @@@ Here I need loop information!!! The control flow edge
                   must be a backedge! (At all four in[n_preds] = phi
                   assignments.) Nevertheless it has worked so far!! */
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          for (i = 0; i < get_Phi_n_preds(phi); i++) {
            in[n_preds] = get_Phi_pred(phi, i);
            n_preds++;
          }
          for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
              /* Do nothing */
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                in[n_preds] = phi;
                n_preds++;
              }
            } else {
              in[n_preds] = phi;
              n_preds++;
            }
          }
          set_irn_in(phi, n_preds, in);
        }
        phi = get_irn_link(phi);
      }
    }
  }
  /** Fix the block **/
  n_preds = 0;
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      /* Do nothing */
    } else if (get_Block_block_visited(pred) + 1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
        n_preds++;
      }
      /* Remove block as it might be kept alive. */
      exchange(pred, b/*new_Bad()*/);
    } else {
      in[n_preds] = get_Block_cfgpred(b, i);
      n_preds++;
    }
  }
  set_irn_in(b, n_preds, in);
  free(in);
}
void optimize_cf(ir_graph *irg) {
  int i;
  ir_node **in;
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keep alives, optimize them if block, add to new in-array
     for end if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);
  inc_irg_visited(current_ir_graph);
  for (i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      }
    }
  }
  /* DEL_ARR_F(end->in);   GL @@@ doesn't work! */
  end->in = in;

  current_ir_graph = rem;
}
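/* Usage sketch (illustrative): control flow optimization pairs well with a
   following dead_node_elimination(), which actually reclaims the blocks
   that were merged away and now only survive as exchanged nodes on the
   obstack. */
#if 0
void example_optimize_cf(ir_graph *irg) {
  optimize_cf(irg);
  dead_node_elimination(irg);
}
#endif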
/**
 * Called by walker of remove_critical_cf_edges.
 *
 * Place an empty block on an edge between a block with multiple
 * successors and a block with multiple predecessors.
 *
 * @param n   IR node
 * @param env Environment of walker. This field is unused and has
 *            the value NULL.
 */
static void walk_critical_cf_edges(ir_node *n, void *env) {
  int arity, i;
  ir_node *pre, *block, **in, *jmp;

  /* Block has multiple predecessors */
  if ((op_Block == get_irn_op(n)) &&
      (get_irn_arity(n) > 1)) {
    arity = get_irn_arity(n);

    if (n == get_irg_end_block(current_ir_graph))
      return;  /* No use to add a block here. */

    for (i = 0; i < arity; i++) {
      pre = get_irn_n(n, i);
      /* Predecessor has multiple successors. Insert new control flow edge. */
      if ((NULL != pre) &&
          (op_Proj == get_irn_op(pre)) &&
          op_Raise != get_irn_op(skip_Proj(pre))) {

        /* set predecessor array for new block */
        in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
        /* set predecessor of new block */
        in[0] = pre;
        block = new_Block(1, in);
        /* insert new jmp node to new block */
        switch_block(block);
        jmp = new_Jmp();
        switch_block(n);
        /* set successor of new block */
        set_irn_n(n, i, jmp);

      } /* predecessor has multiple successors */
    } /* for all predecessors */
  } /* n is a block */
}
void remove_critical_cf_edges(ir_graph *irg) {
  if (get_opt_critical_edges())
    irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL);
}
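/* Usage sketch (illustrative): passes that append code on control flow
   edges (e.g. spill or copy insertion) typically break critical edges
   first.  The walk above is gated by get_opt_critical_edges(); the matching
   setter is assumed to be set_opt_critical_edges() in irflag.h. */
#if 0
void example_break_critical_edges(ir_graph *irg) {
  set_opt_critical_edges(1);
  remove_critical_cf_edges(irg);
}
#endif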