 * File name:   ir/ir/irgopt.c
 * Purpose:     Optimizations for a whole ir graph, i.e., a procedure.
 * Author:      Christian Schaefer, Goetz Lindenmaier
 * Modified by: Sebastian Felis
 * Copyright:   (c) 1998-2003 Universität Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h"       /* for code placement */
# include "irbackedge_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
void  del_identities (pset *value_table);
void  add_identities (pset *value_table, ir_node *node);

/********************************************************************/
/* Apply optimizations of iropt to all nodes.                       */
/********************************************************************/

static void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);
optimize_in_place_wrapper (ir_node *n, void *env) {
  int i;
  ir_node *optimized, *old;

  for (i = 0; i < get_irn_arity(n); i++) {
    /* get_irn_n skips Id nodes, so the comparison old != optimized does not
       show all optimizations. Therefore always set the new predecessor. */
    old = get_irn_n(n, i);
    optimized = optimize_in_place_2(old);
    set_irn_n(n, i, optimized);

  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the CSE. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* Walk over the graph. */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
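
/* Usage sketch (illustration only, not part of this file): a pass driver
   might run the local optimizer over every graph of the program, using the
   irprog interface (get_irp_n_irgs()/get_irp_irg()). */
#if 0
static void local_optimize_all_graphs_example(void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    local_optimize_graph(get_irp_irg(i));
}
#endif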
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection */
/********************************************************************/
/* Remember the new node in the old node by using a field all nodes have. */
set_new_node (ir_node *old, ir_node *new)

/* Get this new node, before the old node is forgotten. */
static INLINE ir_node *
get_new_node (ir_node * n)
/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses. This function is called for all Phi and Block nodes
compute_new_arity(ir_node *b) {
  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* We computed the number of preds for this block and saved it in the
       block_v flag. */
    return block_v - irg_v;
  /* compute the number of good predecessors */
  res = get_irn_arity(b);
  for (i = 0; i < get_irn_arity(b); i++)
    if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
  /* save it in the flag. */
  set_Block_block_visited(b, irg_v + res);
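
/* Worked example (assumed numbers): with irg_v == 100, a block with preds
   (BB1, Bad, BB2) has two good preds, so block_visited is set to 102. A
   second call then sees 102 >= 100 and returns 102 - 100 = 2 without
   rescanning the predecessors. */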
static INLINE void new_backedge_info(ir_node *n) {
  switch(get_irn_opcode(n)) {
  case iro_Block:
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Phi:
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
  case iro_Filter:
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    break;
/* Copies the node to the new obstack. The Ins of the new node point to
   the predecessors on the old obstack. For block/phi nodes not all
   predecessors might be copied. n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors. The arity is determined by counting
   the non-bad predecessors of the block. */
copy_node (ir_node *n, void *env) {
  if (get_irn_opcode(n) == iro_Block) {
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
  } else {
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
    } else {
      new_arity = get_irn_arity(n);
    }
  }

  nn = new_ir_node(get_irn_dbg_info(n),
  /* Copy the attributes. These might point to additional data. If this
     was allocated on the old obstack the pointers now are dangling. This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  new_backedge_info(nn);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn); */
/* Copies the new predecessors of the old node to the new node remembered
   in the link. Spares the Bad predecessors of Phi and Block nodes. */
copy_preds (ir_node *n, void *env) {

  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */
  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
    /* Repair the block visited flag from the above misuse. Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if
       the in array contained Bads. Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow_straightening()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy the node if the corresponding predecessor in the block is
       Bad. The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /*if (is_backedge(n, i)) set_backedge(nn, j);*/
    /* If the pre walker reached this Phi after the post walker visited the
       block, block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));

  /* Now the new node is complete. We can add it to the hash table for CSE.
     @@@ inlining aborts if we identify End. Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
/* Copies the graph recursively, compacts the keepalives of the end node. */
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),

  /* Copy the attributes. Well, there might be some in the future... */
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));
  /** ... and now the keep alives. **/
  /* First pick the not marked block nodes and walk them. We must pick these
     first, as otherwise we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
  /* Now pick the Phis. Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));
/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   graph. */
copy_graph_env (void) {

  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node. Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* We use the block walk flag for removing Bads from the ins of Blocks. */
  inc_irg_block_visited(current_ir_graph);
  /* Fix the fields in current_ir_graph. */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));

  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  set_irg_start      (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));

  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame  (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args   (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
/* Copies all reachable nodes to a new obstack. Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   Adds all new nodes to a new hash table for CSE. Does not
   perform CSE, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate(). */
dead_node_elimination(ir_graph *irg) {
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ So far we lose loops when copying. */
  set_irg_loop(current_ir_graph, NULL);
  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for CSE. */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack. */
    copy_graph_env();

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */

  current_ir_graph = rem;
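
/* Usage sketch (illustration only): dead node elimination is typically run
   after local optimizations, which may leave unreachable nodes behind on
   the obstack. */
#if 0
static void compact_graph_example(ir_graph *irg) {
  local_optimize_graph(irg);   /* may leave dead nodes on the obstack */
  dead_node_elimination(irg);  /* copies the live nodes, frees the rest */
}
#endif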
/* Relink Bad predecessors of a block and store the old in array in the
   link field. This function is called by relink_bad_predecessors().
   The array in the link field starts with the block operand at position 0.
   If the block has Bad predecessors, create a new in array without Bad preds.
   Otherwise leave the in array untouched. */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
  ir_node **new_in, *irn;
  int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
  /* If the link field of the block is NULL, look for Bad predecessors;
     otherwise this has already been done. */
  if (get_irn_op(n) == op_Block &&
      get_irn_link(n) == NULL) {

    /* save old predecessors in link field (position 0 is the block operand)*/
    set_irn_link(n, (void *)get_irn_in(n));

    /* count predecessors without bad nodes */
    old_irn_arity = get_irn_arity(n);
    for (i = 0; i < old_irn_arity; i++)
      if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;

    /* arity changing: set new predecessors without bad nodes */
    if (new_irn_arity < old_irn_arity) {
      /* get new predecessor array without Block predecessor */
      new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));

      /* set new predecessors in array */
      for (i = 1; i < old_irn_arity; i++) {
        irn = get_irn_n(n, i);
        if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
    } /* ir node has bad predecessors */
  } /* Block is not relinked */
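
/* Worked example (assumed blocks): a block with in array [block, BB1, Bad,
   BB2] gets the old array stored in its link field and a fresh in array
   [block, BB1, BB2]; a block without Bad preds only gets its link set. */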
/* Relinks Bad predecessors of Blocks and Phis, called by the walker in
   remove_bad_predecessors(). If n is a Block, call
   relink_bad_block_predecessors(). If n is a Phi node, also call the
   relinking function of the Phi's Block. If this block has Bad predecessors,
   relink the preds of the Phi node, too. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If n is a Phi node, relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of the Phi's block */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if the number of predecessors changed */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array;
         n->in[0] remains the same block */
      for (i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
  } /* n is a Phi node */
/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination but without
   On walking up set the link field to NULL, on walking down call
   relink_bad_predecessors(). (This function stores the old in array
   in the link field and sets a new in array if the arity of the
void remove_bad_predecessors(ir_graph *irg) {
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/
/* Copy node for inlining. Updates attributes that change when
 * inlining but not for dead node elimination.
 *
 * Copies the node by calling copy_node and then updates the entity if
 * it's a local one. env must be a pointer to the frame type of the
 * inlined procedure. The new entities must be in the link field of
 * the entities. */
copy_node_inline (ir_node *n, void *env) {
  type *frame_tp = (type *)env;

  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
  } else if (get_irn_op(n) == op_Block) {
    new = get_new_node (n);
    new->attr.block.irg = current_ir_graph;
void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *post_call, *post_bl;
  ir_node *end, *end_bl;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;

  if (!get_optimize() || !get_opt_inline()) return;

  /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
  rem_opt = get_optimize();
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /* -- Check preconditions -- */
  assert(get_irn_op(call) == op_Call);
  /* @@@ does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
     get_Call_type(call)));
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) {
    set_optimize(rem_opt);
     the procedure and later replaces the Start node of the called graph.
     Post_call is the old Call node and collects the results of the called
     graph. Both will end up being a tuple. -- */
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);

     The new block gets the ins of the old block, pre_call and all its
     predecessors and all Phi nodes. -- */
  part_block(pre_call);
  /* -- Prepare state for dead node elimination -- */
  /* Visited flags in the calling irg must be >= the flag in the called irg.
     Else walker and arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as new Start node in the link field of the start node of
     the calling graph and pre_call's block as new block for the start block
     Further mark these nodes so that they are not visited by the
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));

  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);
  /* -- Replicate local entities of the called_graph -- */
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);

  /* visited is > than that of the called graph. With this trick visited will
     remain unchanged, so that an outer walker calling this inline function,
     e.g., one searching for the call nodes to inline, will not visit the
     inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
  /* -- Performing dead node elimination inlines the graph -- */
  /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);

  /* -- Merge the end of the inlined procedure with the call site -- */
  /* We will turn the old Call node into a Tuple with the following
     0: Phi of all Memories of Return statements.
     1: Jmp from new Block that merges the control flow from all exception
        predecessors of the old end block.
     2: Tuple of all arguments.
     3: Phi of Exception memories.
  /* -- Precompute some values -- */
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end = get_new_node(get_irg_end(called_graph));
  arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */

  /* -- archive keepalives -- */
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die, but the in array is not on the obstack ... */

     Return nodes by Jump nodes. -- */
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
  set_irn_in(post_bl, n_ret, cf_pred);

     turned into a tuple. -- */
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  /* Now the real results */
  for (j = 0; j < n_res; j++) {
    for (i = 0; i < arity; i++) {
      ret = get_irn_n(end_bl, i);
      if (get_irn_op(ret) == op_Return) {
        cf_pred[n_ret] = get_Return_res(ret, j);
    phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));

    /* Conserve Phi-list for further inlinings -- but might be optimized */
    if (get_nodes_Block(phi) == post_bl) {
      set_irn_link(phi, get_irn_link(post_bl));
      set_irn_link(post_bl, phi);
    set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));

    set_Tuple_pred(call, 2, new_Bad());
  /* Finally the exception control flow. We need to add a Phi node to
     collect the memory containing the exception objects. Further we need
     to add another block to get a correct representation of this Phi. To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
  new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
  set_Tuple_pred(call, 1, new_Jmp());
  /* The Phi for the memories with the exception objects */
  for (i = 0; i < arity; i++) {
    ret = skip_Proj(get_irn_n(end_bl, i));
    if (get_irn_op(ret) == op_Call) {
      cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
    } else if (is_fragile_op(ret)) {
      /* We rely on all cfops having the memory output at the same position. */
      cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
    } else if (get_irn_op(ret) == op_Raise) {
      cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
    set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));

    set_Tuple_pred(call, 1, new_Bad());
    set_Tuple_pred(call, 3, new_Bad());
  /* -- If the exception control flow from the inlined Call directly
     branched to the end block we now have the following control
     flow predecessor pattern: ProjX -> Tuple -> Jmp. We must
     remove the Jmp along with its empty block and add Jmp's
     predecessors as predecessors of this end block. No problem if
     there is no exception, because then Bad branches to End, which
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
        /* There are unoptimized Tuples from earlier inlinings when there was no exc. */
        assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
        cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
        assert(get_irn_op(cf_op) == op_Jmp);
  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j-i);
    for (j = j; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
    set_irn_in(end_bl, arity, cf_pred);

    /* Remove the exception pred from the post-call Tuple. */
    set_Tuple_pred(call, pn_Call_X_except, new_Bad());

  /* -- Turn CSE back on. -- */
  set_optimize(rem_opt);
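
/* Usage sketch (illustration only): inline_method() relies on part_block(),
   which needs the collected Phi and Proj lists, so a caller runs
   collect_phiprojs() first -- exactly as inline_small_irgs() below does. */
#if 0
static void inline_one_call_example(ir_graph *caller, ir_node *call,
                                    ir_graph *callee) {
  current_ir_graph = caller;
  collect_phiprojs(caller);    /* required by part_block() */
  inline_method(call, callee);
}
#endif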
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/

/* It makes no sense to inline too many calls in one procedure. Anyway,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024
static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    if (tarval_to_entity(tv)) {
      called_irg = get_entity_irg(tarval_to_entity(tv));
      if (called_irg && pos < MAX_INLINE) {
        /* The Call node calls a locally defined method. Remember to inline. */
/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
void inline_small_irgs(ir_graph *irg, int size) {
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We cannot inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining, visited of the callee equals visited of
     the caller. With the next inlining both are increased.) */
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tarval_to_entity(tv));
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);

  current_ir_graph = rem;
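
/* Usage sketch (illustration only; the 2000-byte threshold is an arbitrary
   assumption): inline every callee whose graph obstack is small. */
#if 0
static void inline_all_small_example(void) {
  int i;
  for (i = 0; i < get_irp_n_irgs(); i++)
    inline_small_irgs(get_irp_irg(i), 2000);
}
#endif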
/********************************************************************/
/* Code Placement.  Pins all floating nodes to a block where they   */
/* will be executed only if needed.                                 */
/********************************************************************/

static pdeq *worklist;      /* worklist of ir_node*s */
/* Find the earliest correct block for N. --- Place N into the
   same Block as its dominance-deepest Input. */
place_floats_early (ir_node *n)

  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const) ||
        (get_irn_op(n) == op_SymConst) ||
        (get_irn_op(n) == op_Unknown)) {
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);
    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         been finished on them. We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        depth = get_Block_dom_depth(dep_block);
    /* Avoid placing the node in the Start block. */
    if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
      b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
      assert(b != get_irg_start_block(current_ir_graph));
    set_nodes_Block(n, b);
  /* Add predecessors of non-floating nodes to the worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
   Start and Call, and end at pinned nodes such as Store and Call. place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
static INLINE void place_early (void) {
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
  ir_node *block = NULL;

  /* Compute the latest block into which we can place a node so that it is
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));
    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  /* Compute the deepest common ancestor of block and dca. */
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
  while (block != dca)
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }
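
/* Sketch (illustration only, libFirm-independent types): the loop above is
   the standard least-common-ancestor computation in a dominator tree --
   lift the deeper node until the depths match, then walk up in lockstep. */
#if 0
typedef struct toy_block { struct toy_block *idom; int depth; } toy_block;

static toy_block *toy_common_idom(toy_block *a, toy_block *b) {
  while (a->depth > b->depth) a = a->idom;      /* lift a to b's depth */
  while (b->depth > a->depth) b = b->idom;      /* lift b to a's depth */
  while (a != b) { a = a->idom; b = b->idom; }  /* climb together */
  return a;
}
#endif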
static INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));

/* Move n to a block with less loop depth than its current block. The
   new block must be dominated by early. */
move_out_of_loops (ir_node *n, ir_node *early)
  ir_node *best, *dca;

  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {

  if (best != get_nodes_Block(n)) {
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));

    set_nodes_Block(n, best);
/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth. This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
place_floats_late (ir_node *n)
  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this block to move it
       out of loop no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node. We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);
    /* We have to determine the final block of this node... except for
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;  /* deepest common ancestor in the
                               dominator tree of all nodes'
                               blocks depending on us; our final
                               placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      set_nodes_Block(n, dca);

      move_out_of_loops (n, early);

  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes to the list. (Those of
     floating nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
static INLINE void place_late(void) {
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  if (!(get_optimize() && get_opt_global_cse())) return;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)

  construct_backedges(irg);

  /* Place all floating nodes as early as possible. This guarantees
     a legal code placement. */
  worklist = new_pdeq ();
  place_early();

  /* place_early invalidates the outs, place_late needs them. */

  /* Now move the nodes down in the dominator tree. This reduces the
     unnecessary executions of the node. */
  place_late();

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
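
/* Usage sketch (illustration only): code placement presupposes global CSE,
   which unpins movable nodes; a typical sequence might be: */
#if 0
static void gcse_and_place_example(ir_graph *irg) {
  set_opt_global_cse(1);       /* let local_optimize_graph unpin nodes */
  local_optimize_graph(irg);   /* performs the global CSE */
  place_code(irg);             /* pin every floating node to a block again */
}
#endif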
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks.  A block */
/* is empty if it contains only a Jmp node.                         */
/* Blocks can only be removed if they are not needed for the        */
/* semantics of Phi nodes.                                          */
/********************************************************************/
/* Removes Tuples from Block control flow predecessors.
   Optimizes blocks with equivalent_node().
   Replaces n by Bad if n is unreachable control flow. */
static void merge_blocks(ir_node *n, void *env) {
  set_irn_link(n, NULL);
  if (get_irn_op(n) == op_Block) {
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      /* GL @@@ : Is this possible? if (get_opt_normalize()) -- added, all tests go through.
         A different order of optimizations might cause problems. */
      if (get_opt_normalize())
        set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_optimize() && (get_irn_mode(n) == mode_X)) {
    /* We will soon visit a block. Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad, so we
         promote it directly below. */
      assert(((b == new) ||
              get_opt_control_flow_straightening() ||
              get_opt_control_flow_weak_simplification()) &&
             ("strange flag setting"));
      new = equivalent_node(b);
    /* GL @@@ get_opt_normalize added, 5.5.2003 */
    if (is_Bad(new) && get_opt_normalize()) exchange (n, new_Bad());
/* Collects all Phi nodes in the link list of the Block.
   Marks all blocks "block_visited" if they contain a node other
   than Jmp. */
static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if ((get_irn_op(n) == op_Phi)) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non-empty block. */
      mark_Block_block_visited(b);
/* Returns true if pred is a predecessor of block b. */
static int is_pred_of(ir_node *pred, ir_node *b) {
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;
static int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow_strong_simplification()) {
      /* Mark the block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
      /* b's pred blocks and pred's pred blocks must be pairwise disjunct.
         Handle preds < pos as if they were already removed. */
      for (i = 0; i < pos; i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (get_Block_block_visited(b_pred) + 1
            < get_irg_block_visited(current_ir_graph)) {
          for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
            ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
            if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
          if (is_pred_of(b_pred, pred)) dispensable = 0;
      for (i = pos +1; i < get_Block_n_cfgpreds(b); i++) {
        ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_pred_of(b_pred, pred)) dispensable = 0;
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
      n_preds = get_Block_n_cfgpreds(pred);
static void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;

  /* Count the number of predecessors if this block is merged with pred blocks
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf("  removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      printf("  removing pred %i ", i); DDMN(pred);
    } else { printf(" Nothing to do for "); DDMN(pred); }
  * end Debug output **/
  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
      } else if (get_Block_block_visited(pred) +1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
            in[n_preds] = phi_pred;
        /* The Phi_pred node is replaced now if it is a Phi.
           In loops the removed Phi node can apparently be used legally.
           Therefore the Phi node must be replaced by the new one.
           Further, the old Phi node must be removed (by exchanging it or
           replacing it by a Bad) so that it can vanish from the keep_alives.
           So, if there is no loop, one should rather exchange with new_Bad
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive. Further there
             might be other users. */
          exchange(phi_pred, phi);  /* works, but is semantically wrong after all! Why?? */
        in[n_preds] = get_Phi_pred(phi, i);
    set_irn_in(phi, n_preds, in);
    phi = get_irn_link(phi);
     This happens only when merging a loop backedge with a single loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) +1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
      if (get_irn_op(phi) == op_Phi) {
        set_nodes_Block(phi, b);
      for (i = 0; i < k; i++) {
        pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_Bad(get_Block_cfgpred(b, i))) {
        } else if (get_Block_block_visited(pred) +1
                   < get_irg_block_visited(current_ir_graph)) {
          /* It's an empty block and not yet visited. */
          for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
            /* @@@ Here I need loop information! The control flow edge
               must be a backedge! (At all four in[n_preds] = phi
               assignments.) Still, it has worked so far!! */
      for (i = 0; i < get_Phi_n_preds(phi); i++) {
        in[n_preds] = get_Phi_pred(phi, i);
      for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
        pred = get_nodes_Block(get_Block_cfgpred(b, i));
        if (is_Bad(get_Block_cfgpred(b, i))) {
        } else if (get_Block_block_visited(pred) +1
                   < get_irg_block_visited(current_ir_graph)) {
          /* It's an empty block and not yet visited. */
          for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
      set_irn_in(phi, n_preds, in);
      phi = get_irn_link(phi);
  /** Fix the block **/
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
    } else if (get_Block_block_visited(pred) +1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
      /* Remove the block as it might be kept alive. */
      exchange(pred, b/*new_Bad()*/);
      in[n_preds] = get_Block_cfgpred(b, i);
  set_irn_in(b, n_preds, in);
void optimize_cf(ir_graph *irg) {
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use the block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keep-alives; optimize them if they are blocks, add them to the
     new in array for End if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);
  inc_irg_visited(current_ir_graph);
  for (i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);

  /* DEL_ARR_F(end->in);   GL @@@ doesn't work! */

  current_ir_graph = rem;
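
/* Usage sketch (illustration only): control flow optimization pairs well
   with a preceding local pass that folds constant conditions into Bad
   control flow predecessors and lone Jmps. */
#if 0
static void simplify_cfg_example(ir_graph *irg) {
  local_optimize_graph(irg);  /* may create Bad cf preds and empty blocks */
  optimize_cf(irg);           /* remove them and merge blocks */
}
#endif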
/**
 * Called by the walker of remove_critical_cf_edges().
 *
 * Places an empty block on an edge between a block with multiple
 * predecessors and a block with multiple successors.
 *
 * @param env Environment of the walker. This field is unused and has
static void walk_critical_cf_edges(ir_node *n, void *env) {
  ir_node *pre, *block, **in, *jmp;

  /* Block has multiple predecessors */
  if ((op_Block == get_irn_op(n)) &&
      (get_irn_arity(n) > 1)) {
    arity = get_irn_arity(n);

    if (n == get_irg_end_block(current_ir_graph))
      return;  /* No use to add a block here. */

    for (i = 0; i < arity; i++) {
      pre = get_irn_n(n, i);
      /* Predecessor has multiple successors. Insert new control flow edge. */
      if ((NULL != pre) &&
          (op_Proj == get_irn_op(pre)) &&
          op_Raise != get_irn_op(skip_Proj(pre))) {

        /* set predecessor array for new block */
        in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
        /* set predecessor of new block */
        block = new_Block(1, in);
        /* insert new jmp node to new block */
        switch_block(block);

        /* set successor of new block */
        set_irn_n(n, i, jmp);
      } /* predecessor has multiple successors */
    } /* for all predecessors */
  } /* n is a block */
void remove_critical_cf_edges(ir_graph *irg) {
  if (get_opt_critical_edges())
    irg_walk_graph(irg, NULL, walk_critical_cf_edges, NULL);
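
/* Usage sketch (illustration only): transformations that move code along
   control flow edges usually want critical edges split first. Note the
   walk is a no-op unless the critical-edges option (queried via
   get_opt_critical_edges() above) is enabled. */
#if 0
static void split_critical_edges_example(ir_graph *irg) {
  remove_critical_cf_edges(irg);  /* inserts a Block+Jmp on critical edges */
}
#endif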