/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
** All rights reserved.
** Author: Christian Schaefer
** Optimizations for a whole ir graph, i.e., a procedure.
*/

# include "irnode_t.h"
# include "irgraph_t.h"
# include "pdeq.h" /* For code placement */
# include "irbackedge_t.h"

/* Defined in iropt.c */
pset *new_identities (void);
void del_identities (pset *value_table);
void add_identities (pset *value_table, ir_node *node);
/********************************************************************/
/* apply optimizations of iropt to all nodes.                       */
/********************************************************************/

void init_link (ir_node *n, void *env) {
  set_irn_link(n, NULL);

optimize_in_place_wrapper (ir_node *n, void *env) {
  for (i = 0; i < get_irn_arity(n); i++) {
    optimized = optimize_in_place_2(get_irn_n(n, i));
    set_irn_n(n, i, optimized);

  if (get_irn_op(n) == op_Block) {
    optimized = optimize_in_place_2(n);
    if (optimized != n) exchange (n, optimized);
local_optimize_graph (ir_graph *irg) {
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_opt_global_cse())
    set_irg_pinned(current_ir_graph, floats);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Clean the value_table in irg for the cse. */
  del_identities(irg->value_table);
  irg->value_table = new_identities();

  /* walk over the graph */
  irg_walk(irg->end, init_link, optimize_in_place_wrapper, NULL);

  current_ir_graph = rem;
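
/* A minimal usage sketch (hypothetical driver; assumes the usual irprog
   accessors get_irp_n_irgs()/get_irp_irg()):

     int i;
     for (i = 0; i < get_irp_n_irgs(); i++)
       local_optimize_graph(get_irp_irg(i));
*/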
/********************************************************************/
/* Routines for dead node elimination / copying garbage collection */
/********************************************************************/

/* Remember the new node in the old node by using a field all nodes have. */
set_new_node (ir_node *old, ir_node *new)

/* Get this new node, before the old node is forgotten. */
get_new_node (ir_node * n)
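
/* Sketch of the mapping scheme (grounded in the copy_node comment below:
   "n->link points to the new node"): set_new_node(old, nn) is expected to
   store the copy in the old node's link field, e.g. set_irn_link(old, nn),
   and get_new_node(old) to read it back, e.g. return get_irn_link(old). */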
/* We use the block_visited flag to mark that we have computed the
   number of useful predecessors for this block.
   Further we encode the new arity in this flag in the old blocks.
   Remembering the arity is useful, as it saves a lot of pointer
   accesses. This function is called for all Phi and Block nodes
   in a Block. */
compute_new_arity(ir_node *b) {
  irg_v = get_irg_block_visited(current_ir_graph);
  block_v = get_Block_block_visited(b);
  if (block_v >= irg_v) {
    /* we computed the number of preds for this block and saved it in the
       block's visited flag. */
    return block_v - irg_v;
  /* compute the number of good predecessors */
  res = get_irn_arity(b);
  for (i = 0; i < get_irn_arity(b); i++)
    if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
  /* save it in the flag. */
  set_Block_block_visited(b, irg_v + res);

static INLINE void new_backedge_info(ir_node *n) {
  switch (get_irn_opcode(n)) {
    n->attr.block.cg_backedge = NULL;
    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));

/* Copies the node to the new obstack. The Ins of the new node point to
   the predecessors on the old obstack. For block/phi nodes not all
   predecessors might be copied. n->link points to the new node.
   For Phi and Block nodes the function allocates in-arrays with an arity
   only for useful predecessors. The arity is determined by counting
   the non-bad predecessors of the block. */
copy_node (ir_node *n, void *env) {
  if (get_irn_opcode(n) == iro_Block) {
    new_arity = compute_new_arity(n);
    n->attr.block.graph_arr = NULL;
    block = get_nodes_Block(n);
    if (get_irn_opcode(n) == iro_Phi) {
      new_arity = compute_new_arity(block);
      new_arity = get_irn_arity(n);

  nn = new_ir_node(get_irn_dbg_info(n),

  /* Copy the attributes. These might point to additional data. If this
     was allocated on the old obstack the pointers now are dangling. This
     frees e.g. the memory of the graph_arr allocated in new_immBlock. */
  new_backedge_info(nn);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn); */
/* Copies new predecessors of old node to new node remembered in link.
   Spares the Bad predecessors of Phi and Block nodes. */
copy_preds (ir_node *n, void *env) {
  nn = get_new_node(n);

  /* printf("\n old node: "); DDMSG2(n);
     printf(" new node: "); DDMSG2(nn);
     printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */

  if (get_irn_opcode(n) == iro_Block) {
    /* Don't copy Bad nodes. */
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /* if (is_backedge(n, i)) set_backedge(nn, j); */

    /* Repair the block visited flag from the above misuse. Repair it in both
       graphs so that the old one can still be used. */
    set_Block_block_visited(nn, 0);
    set_Block_block_visited(n, 0);
    /* Local optimization could not merge two subsequent blocks if
       the in array contained Bads. Now it's possible.
       We don't call optimize_in_place as it requires
       that the fields in ir_graph are set properly. */
    if ((get_opt_control_flow()) &&
        (get_Block_n_cfgpreds(nn) == 1) &&
        (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
      exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
  } else if (get_irn_opcode(n) == iro_Phi) {
    /* Don't copy the node if the corresponding predecessor in the block is
       Bad. The Block itself should not be Bad. */
    block = get_nodes_Block(n);
    set_irn_n (nn, -1, get_new_node(block));
    for (i = 0; i < get_irn_arity(n); i++)
      if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
        set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
        /* if (is_backedge(n, i)) set_backedge(nn, j); */

    /* If the pre walker reached this Phi after the post walker visited the
       block, block_visited is > 0. */
    set_Block_block_visited(get_nodes_Block(n), 0);
    /* Compacting the Phi's ins might generate Phis with only one
       predecessor. */
    if (get_irn_arity(n) == 1)
      exchange(n, get_irn_n(n, 0));
    for (i = -1; i < get_irn_arity(n); i++)
      set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));

  /* Now the new node is complete. We can add it to the hash table for cse.
     @@@ inlining aborts if we identify End. Why? */
  if (get_irn_op(nn) != op_End)
    add_identities (current_ir_graph->value_table, nn);
/* Copies the graph recursively, compacts the keepalives of the end node. */
  ir_node *oe, *ne; /* old end, new end */
  ir_node *ka;      /* keep alive */

  oe = get_irg_end(current_ir_graph);
  /* copy the end node by hand, allocate dynamic in array! */
  ne = new_ir_node(get_irn_dbg_info(oe),

  /* Copy the attributes. Well, there might be some in the future... */
  set_new_node(oe, ne);

  /* copy the live nodes */
  irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
  /* copy_preds for the end node ... */
  set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));

  /** ... and now the keep alives. **/
  /* First pick the unmarked block nodes and walk them. We must pick these
     first, as otherwise we will overlook blocks reachable from Phis. */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Block) &&
        (get_irn_visited(ka) < get_irg_visited(current_ir_graph))) {
      /* We must keep the block alive and copy everything reachable */
      set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
      irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));

  /* Now pick the Phis. Here we will keep all! */
  for (i = 0; i < get_irn_arity(oe); i++) {
    ka = get_irn_n(oe, i);
    if ((get_irn_op(ka) == op_Phi)) {
      if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
        /* We didn't copy the Phi yet. */
        set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
        irg_walk(ka, copy_node, copy_preds, NULL);
      add_End_keepalive(ne, get_new_node(ka));

/* Copies the graph reachable from current_ir_graph->end to the obstack
   in current_ir_graph and fixes the environment.
   Then fixes the fields in current_ir_graph containing nodes of the
   graph. */
  /* Not all nodes remembered in current_ir_graph might be reachable
     from the end node. Assure their link is set to NULL, so that
     we can test whether new nodes have been computed. */
  set_irn_link(get_irg_frame  (current_ir_graph), NULL);
  set_irn_link(get_irg_globals(current_ir_graph), NULL);
  set_irn_link(get_irg_args   (current_ir_graph), NULL);

  /* we use the block walk flag for removing Bads from the ins of Blocks. */
  inc_irg_block_visited(current_ir_graph);

  /* fix the fields in current_ir_graph */
  old_end = get_irg_end(current_ir_graph);
  set_irg_end (current_ir_graph, get_new_node(old_end));
  set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
  if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
    copy_node (get_irg_frame(current_ir_graph), NULL);
    copy_preds(get_irg_frame(current_ir_graph), NULL);
  if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
    copy_node (get_irg_globals(current_ir_graph), NULL);
    copy_preds(get_irg_globals(current_ir_graph), NULL);
  if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
    copy_node (get_irg_args(current_ir_graph), NULL);
    copy_preds(get_irg_args(current_ir_graph), NULL);
  set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
  set_irg_start_block(current_ir_graph,
                      get_new_node(get_irg_start_block(current_ir_graph)));
  set_irg_frame  (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
  set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
  set_irg_args   (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
  if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
    copy_node(get_irg_bad(current_ir_graph), NULL);
    copy_preds(get_irg_bad(current_ir_graph), NULL);
  set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
  if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
    copy_node(get_irg_unknown(current_ir_graph), NULL);
    copy_preds(get_irg_unknown(current_ir_graph), NULL);
  set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));

/* Copies all reachable nodes to a new obstack. Removes bad inputs
   from block nodes and the corresponding inputs from Phi nodes.
   Merges single exit blocks with single entry blocks and removes
   Adds all new nodes to a new hash table for cse. Does not
   perform cse, so the hash table might contain common subexpressions. */
/* Amroq calls this emigrate() */
dead_node_elimination(ir_graph *irg) {
  struct obstack *graveyard_obst = NULL;
  struct obstack *rebirth_obst   = NULL;

  /* Remember external state of current_ir_graph. */
  rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  free_outs(current_ir_graph);

  /* @@@ so far we lose loops when copying */
  set_irg_loop(current_ir_graph, NULL);

  if (get_optimize() && get_opt_dead_node_elimination()) {

    /* A quiet place, where the old obstack can rest in peace,
       until it is cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
    current_ir_graph->obst = rebirth_obst;
    obstack_init (current_ir_graph->obst);

    /* We also need a new hash table for cse */
    del_identities (irg->value_table);
    irg->value_table = new_identities ();

    /* Copy the graph from the old to the new obstack */

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree (graveyard_obst);           /* ... then free it.           */

  current_ir_graph = rem;
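
/* A minimal pipeline sketch (hypothetical driver): dead node elimination
   is typically run after local optimization so that only surviving nodes
   are copied to the new obstack:

     local_optimize_graph(irg);
     dead_node_elimination(irg);
*/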
void remove_bad_predecessors(ir_graph *irg) {
  printf("remove_bad_predecessors not implemented!!!\n");

/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/

/* Copy node for inlining. Copies the node by calling copy_node and
   then updates the entity if it is a local one. env must be a pointer
   to the frame type of the procedure. The new entities must be in
   the link field of the entities. */
copy_node_inline (ir_node *n, void *env) {
  type *frame_tp = (type *)env;

  if (get_irn_op(n) == op_Sel) {
    new = get_new_node (n);
    assert(get_irn_op(new) == op_Sel);
    if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
      set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));

void inline_method(ir_node *call, ir_graph *called_graph) {
  ir_node *post_call, *post_bl;
  ir_node *end, *end_bl;
  ir_node *cf_op = NULL, *bl;
  int arity, n_ret, n_exc, n_res, i, j, rem_opt;
  if (!get_optimize() || !get_opt_inline()) return;
  /** Turn off optimizations; they can cause problems when allocating new nodes. **/
  rem_opt = get_optimize();

  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);
  assert(get_irg_pinned(current_ir_graph) == pinned);
  assert(get_irg_pinned(called_graph) == pinned);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);

  /** Check preconditions **/
  assert(get_irn_op(call) == op_Call);
  /* @@@ TODO does not work for InterfaceIII.java after cgana
     assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
     assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
            get_Call_type(call)));
  */
  assert(get_type_tpop(get_Call_type(call)) == type_method);
  if (called_graph == current_ir_graph) return;
  /** Part the Call node into two nodes. Pre_call collects the parameters of
      the procedure and later replaces the Start node of the called graph.
      Post_call is the old Call node and collects the results of the called
      graph. Both will end up being a tuple. **/
  post_bl = get_nodes_Block(call);
  set_irg_current_block(current_ir_graph, post_bl);
  /* XxMxPxP of Start + parameter of Call */
  in[1] = get_Call_mem(call);
  in[2] = get_irg_frame(current_ir_graph);
  in[3] = get_irg_globals(current_ir_graph);
  in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
  pre_call = new_Tuple(5, in);

  /** Part the block of the Call node into two blocks.
      The new block gets the ins of the old block, pre_call and all its
      predecessors and all Phi nodes. **/
  part_block(pre_call);

  /** Prepare state for dead node elimination **/
  /* The visited flags in the calling irg must be >= those in the called irg.
     Otherwise the walker and the arity computation will not work. */
  if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
    set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
  if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
    set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
  /* Set pre_call as the new Start node in the link field of the start node of
     the called graph, and pre_call's block as the new block for the start block
     of the called graph.
     Further mark these nodes so that they are not visited by the
     copy. */
  set_irn_link(get_irg_start(called_graph), pre_call);
  set_irn_visited(get_irg_start(called_graph),
                  get_irg_visited(current_ir_graph));
  set_irn_link(get_irg_start_block(called_graph),
               get_nodes_Block(pre_call));
  set_irn_visited(get_irg_start_block(called_graph),
                  get_irg_visited(current_ir_graph));

  /* Initialize for compaction of in arrays */
  inc_irg_block_visited(current_ir_graph);

  /*** Replicate local entities of the called_graph ***/
  /* copy the entities. */
  called_frame = get_irg_frame_type(called_graph);
  for (i = 0; i < get_class_n_members(called_frame); i++) {
    entity *new_ent, *old_ent;
    old_ent = get_class_member(called_frame, i);
    new_ent = copy_entity_own(old_ent, get_cur_frame_type());
    set_entity_link(old_ent, new_ent);

  /* visited is greater than that of the called graph. With this trick, visited
     will remain unchanged, so that an outer walker, e.g., one searching for the
     call nodes to inline, will not visit the inlined nodes. */
  set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
  /** Performing dead node elimination inlines the graph **/
  /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
     nodes. */
  /* @@@ endless loops are not copied!! -- they should be, I think... */
  irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
           get_irg_frame_type(called_graph));

  /* Repair called_graph */
  set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
  set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
  set_Block_block_visited(get_irg_start_block(called_graph), 0);

  /*** Merge the end of the inlined procedure with the call site ***/
  /* We will turn the old Call node into a Tuple with the following
     predecessors:
     0: Phi of all Memories of Return statements.
     1: Jmp from new Block that merges the control flow from all exception
        predecessors of the old end block.
     2: Tuple of all arguments.
     3: Phi of Exception memories.
  */
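
  /* Illustration (a sketch, relying on the Proj/Tuple folding the local
     optimizer performs): a user such as Proj(call, 0) that selected the
     Call's memory result now resolves through the new Tuple to the Phi at
     position 0, i.e. Proj(Tuple, n) folds to get_Tuple_pred(Tuple, n). */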
  /** Precompute some values **/
  end_bl = get_new_node(get_irg_end_block(called_graph));
  end = get_new_node(get_irg_end(called_graph));
  arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
  n_res = get_method_n_ress(get_Call_type(call));

  res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
  cf_pred  = (ir_node **) malloc (arity * sizeof (ir_node *));

  set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */

  /** archive keepalives **/
  for (i = 0; i < get_irn_arity(end); i++)
    add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
  /* The new end node will die, but the in array is not on the obstack ... */

  /** Collect control flow from Return blocks to post_call's block. Replace
      Return nodes by Jmp nodes. **/
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
  set_irn_in(post_bl, n_ret, cf_pred);
  /** Collect results from Return nodes to post_call. Post_call is
      turned into a tuple. **/
  turn_into_tuple(post_call, 4);
  /* First the Memory-Phi */
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (get_irn_op(ret) == op_Return) {
      cf_pred[n_ret] = get_Return_mem(ret);
  phi = new_Phi(n_ret, cf_pred, mode_M);
  set_Tuple_pred(call, 0, phi);
  /* Conserve Phi-list for further inlinings -- but might be optimized */
  if (get_nodes_Block(phi) == post_bl) {
    set_irn_link(phi, get_irn_link(post_bl));
    set_irn_link(post_bl, phi);
  /* Now the real results */
  for (j = 0; j < n_res; j++) {
    for (i = 0; i < arity; i++) {
      ret = get_irn_n(end_bl, i);
      if (get_irn_op(ret) == op_Return) {
        cf_pred[n_ret] = get_Return_res(ret, j);
    phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
    /* Conserve Phi-list for further inlinings -- but might be optimized */
    if (get_nodes_Block(phi) == post_bl) {
      set_irn_link(phi, get_irn_link(post_bl));
      set_irn_link(post_bl, phi);
  set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
    set_Tuple_pred(call, 2, new_Bad());

  /* Finally the exception control flow. We need to add a Phi node to
     collect the memory containing the exception objects. Further we need
     to add another block to get a correct representation of this Phi. To
     this block we add a Jmp that resolves into the X output of the Call
     when the Call is turned into a tuple. */
  for (i = 0; i < arity; i++) {
    ret = get_irn_n(end_bl, i);
    if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
      cf_pred[n_exc] = ret;
  new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
  set_Tuple_pred(call, 1, new_Jmp());
  /* The Phi for the memories with the exception objects */
  for (i = 0; i < arity; i++) {
    ret = skip_Proj(get_irn_n(end_bl, i));
    if (get_irn_op(ret) == op_Call) {
      cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
    } else if (is_fragile_op(ret)) {
      /* We rely on the fact that all cfops have the memory output at the same position. */
      cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
    } else if (get_irn_op(ret) == op_Raise) {
      cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
  set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
  set_Tuple_pred(call, 1, new_Bad());
  set_Tuple_pred(call, 3, new_Bad());
  /*** Correct the control flow to the end node.
       If the exception control flow from the Call directly branched to the
       end block we now have the following control flow predecessor pattern:
       ProjX -> Tuple -> Jmp.
       We must remove the Jmp along with its empty block and add Jmp's
       predecessors as predecessors of this end block. ***/
  /* find the problematic predecessor of the end block. */
  end_bl = get_irg_end_block(current_ir_graph);
  for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
    cf_op = get_Block_cfgpred(end_bl, i);
    if (get_irn_op(cf_op) == op_Proj) {
      cf_op = get_Proj_pred(cf_op);
      if (get_irn_op(cf_op) == op_Tuple) {
        cf_op = get_Tuple_pred(cf_op, 1);
        assert(get_irn_op(cf_op) == op_Jmp);

  if (i < get_Block_n_cfgpreds(end_bl)) {
    bl = get_nodes_Block(cf_op);
    arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
    cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
    for (j = 0; j < i; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j);
    for (; j < i + get_Block_n_cfgpreds(bl); j++)
      cf_pred[j] = get_Block_cfgpred(bl, j - i);
    for (; j < arity; j++)
      cf_pred[j] = get_Block_cfgpred(end_bl, j - get_Block_n_cfgpreds(bl) + 1);
    set_irn_in(end_bl, arity, cf_pred);

  /** Turn optimizations back on. **/
  set_optimize(rem_opt);
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/

/* It makes no sense to inline too many calls in one procedure. Anyway,
   I didn't get a version with NEW_ARR_F to run. */
#define MAX_INLINE 1024
static void collect_calls(ir_node *call, void *env) {
  ir_node **calls = (ir_node **)env;
  ir_graph *called_irg;

  if (get_irn_op(call) != op_Call) return;

  addr = get_Call_ptr(call);
  if (get_irn_op(addr) == op_Const) {
    /* Check whether the constant is the pointer to a compiled entity. */
    tv = get_Const_tarval(addr);
    called_irg = get_entity_irg(tv->u.P.ent);
    if (called_irg && pos < MAX_INLINE) {
      /* The Call node calls a locally defined method. Remember to inline. */

/* Inlines all small methods at call sites where the called address comes
   from a Const node that references the entity representing the called
   method.
   The size argument is a rough measure for the code size of the method:
   Methods where the obstack containing the firm graph is smaller than
   size are inlined. */
void inline_small_irgs(ir_graph *irg, int size) {
  ir_node *calls[MAX_INLINE];
  ir_graph *rem = current_ir_graph;

  if (!(get_optimize() && get_opt_inline())) return;

  current_ir_graph = irg;
  /* Handle graph state */
  assert(get_irg_phase_state(current_ir_graph) != phase_building);

  /* Find Call nodes to inline.
     (We cannot inline during a walk of the graph, as inlining the same
     method several times changes the visited flag of the walked graph:
     after the first inlining visited of the callee equals visited of
     the caller. With the next inlining both are increased.) */
  irg_walk(get_irg_end(irg), NULL, collect_calls, (void *) calls);

  if ((pos > 0) && (pos < MAX_INLINE)) {
    /* There are calls to inline */
    collect_phiprojs(irg);
    for (i = 0; i < pos; i++) {
      tv = get_Const_tarval(get_Call_ptr(calls[i]));
      callee = get_entity_irg(tv->u.P.ent);
      if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
        inline_method(calls[i], callee);

  current_ir_graph = rem;
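
/* Usage sketch (the threshold is hypothetical): inline every call whose
   callee's graph occupies fewer than 5000 obstack bytes:

     inline_small_irgs(irg, 5000);
*/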
/********************************************************************/
/* Code Placement. Pins all floating nodes to a block where they   */
/* will be executed only if needed.                                 */
/********************************************************************/

static pdeq *worklist;    /* worklist of ir_node*s */

/* Find the earliest correct block for N. --- Place N into the
   same Block as its dominance-deepest Input. */
place_floats_early (ir_node *n)
  /* we must not run into an infinite loop */
  assert (irn_not_visited(n));

  /* Place floating nodes. */
  if (get_op_pinned(get_irn_op(n)) == floats) {
    ir_node *b = new_Bad();   /* The block to place this node in */

    assert(get_irn_op(n) != op_Block);

    if ((get_irn_op(n) == op_Const) ||
        (get_irn_op(n) == op_SymConst) ||
      /* These nodes will not be placed by the loop below. */
      b = get_irg_start_block(current_ir_graph);

    /* find the block for this node. */
    for (i = 0; i < get_irn_arity(n); i++) {
      ir_node *dep = get_irn_n(n, i);
      if ((irn_not_visited(dep)) &&
          (get_op_pinned(get_irn_op(dep)) == floats)) {
        place_floats_early (dep);
      /* Because all loops contain at least one pinned node, now all
         our inputs are either pinned or place_early has already
         been finished on them. We do not have any unfinished inputs! */
      dep_block = get_nodes_Block(dep);
      if ((!is_Bad(dep_block)) &&
          (get_Block_dom_depth(dep_block) > depth)) {
        depth = get_Block_dom_depth(dep_block);

    /* Avoid that the node is placed in the Start block */
    if ((depth == 1) && (get_Block_dom_depth(get_nodes_Block(n)) > 1)) {
      b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
      assert(b != get_irg_start_block(current_ir_graph));

    set_nodes_Block(n, b);
  /* Add predecessors of non-floating nodes to the worklist. */
  start = (get_irn_op(n) == op_Block) ? 0 : -1;
  for (i = start; i < get_irn_arity(n); i++) {
    ir_node *pred = get_irn_n(n, i);
    if (irn_not_visited(pred)) {
      pdeq_putr (worklist, pred);
/* Floating nodes form subgraphs that begin at nodes such as Const, Load,
   Start, Call and end at pinned nodes such as Store, Call. Place_early
   places all floating nodes reachable from its argument through floating
   nodes and adds all beginnings at pinned nodes to the worklist. */
INLINE void place_early () {
  inc_irg_visited(current_ir_graph);

  /* this inits the worklist */
  place_floats_early (get_irg_end(current_ir_graph));

  /* Work the content of the worklist. */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_early (n);

  set_irg_outs_inconsistent(current_ir_graph);
  current_ir_graph->pinned = pinned;
/* deepest common dominance ancestor of DCA and CONSUMER of PRODUCER */
consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
  ir_node *block = NULL;

  /* Compute the latest block into which we can place a node so that it is
     before consumer. */
  if (get_irn_op(consumer) == op_Phi) {
    /* our consumer is a Phi-node, the effective use is in all those
       blocks through which the Phi-node reaches producer */
    ir_node *phi_block = get_nodes_Block(consumer);
    for (i = 0; i < get_irn_arity(consumer); i++) {
      if (get_irn_n(consumer, i) == producer) {
        block = get_nodes_Block(get_Block_cfgpred(phi_block, i));

    assert(is_no_Block(consumer));
    block = get_nodes_Block(consumer);
  /* Compute the deepest common ancestor of block and dca. */
  if (!dca) return block;
  while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
    block = get_Block_idom(block);
  while (get_Block_dom_depth(dca) > get_Block_dom_depth(block))
    dca = get_Block_idom(dca);
    { block = get_Block_idom(block); dca = get_Block_idom(dca); }
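
  /* Worked example (illustration only): if block has dom depth 5 and dca
     has dom depth 3, the first loop lifts block to depth 3; the lockstep
     walk then lifts both until they meet in the deepest block dominating
     both, which is the resulting dca. */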
INLINE int get_irn_loop_depth(ir_node *n) {
  return get_loop_depth(get_irn_loop(n));

/* Move n to a block with less loop depth than its current block. The
   new block must be dominated by early. */
move_out_of_loops (ir_node *n, ir_node *early)
  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
  dca = get_nodes_Block(n);
  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break;   /* should we put assert(dca)? */
    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {

  if (best != get_nodes_Block(n)) {
    printf("Moving out of loop: "); DDMN(n);
    printf(" Outermost block: "); DDMN(early);
    printf(" Best block: "); DDMN(best);
    printf(" Innermost block: "); DDMN(get_nodes_Block(n));

    set_nodes_Block(n, best);
/* Find the latest legal block for N and place N into the
   `optimal' Block between the latest and earliest legal block.
   The `optimal' block is the dominance-deepest block of those
   with the least loop-nesting-depth. This places N out of as many
   loops as possible and then makes it as control dependent as
   possible. */
place_floats_late (ir_node *n)
  assert (irn_not_visited(n)); /* no multiple placement */

  /* no need to place block nodes, control nodes are already placed. */
  if ((get_irn_op(n) != op_Block) &&
      (get_irn_mode(n) != mode_X)) {
    /* Remember the early placement of this block to move it
       out of loops no further than the early placement. */
    early = get_nodes_Block(n);
    /* Assure that our users are all placed, except the Phi-nodes.
       --- Each dataflow cycle contains at least one Phi-node. We
       have to break the `user has to be placed before the
       producer' dependence cycle and the Phi-nodes are the
       place to do so, because we need to base our placement on the
       final region of our users, which is OK with Phi-nodes, as they
       are pinned, and they never have to be placed after a
       producer of one of their inputs in the same block anyway. */
    for (i = 0; i < get_irn_n_outs(n); i++) {
      ir_node *succ = get_irn_out(n, i);
      if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
        place_floats_late (succ);

    /* We have to determine the final block of this node... except for
       constants. */
    if ((get_op_pinned(get_irn_op(n)) == floats) &&
        (get_irn_op(n) != op_Const) &&
        (get_irn_op(n) != op_SymConst)) {
      ir_node *dca = NULL;  /* deepest common ancestor in the
                               dominator tree of all nodes'
                               blocks depending on us; our final
                               placement has to dominate DCA. */
      for (i = 0; i < get_irn_n_outs(n); i++) {
        dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
      set_nodes_Block(n, dca);
      move_out_of_loops (n, early);
  mark_irn_visited(n);

  /* Add predecessors of all non-floating nodes on the list. (Those of floating
     nodes are placed already and therefore are marked.) */
  for (i = 0; i < get_irn_n_outs(n); i++) {
    if (irn_not_visited(get_irn_out(n, i))) {
      pdeq_putr (worklist, get_irn_out(n, i));
INLINE void place_late() {
  inc_irg_visited(current_ir_graph);

  /* This fills the worklist initially. */
  place_floats_late(get_irg_start_block(current_ir_graph));
  /* And now empty the worklist again... */
  while (!pdeq_empty (worklist)) {
    ir_node *n = pdeq_getl (worklist);
    if (irn_not_visited(n)) place_floats_late(n);
void place_code(ir_graph *irg) {
  ir_graph *rem = current_ir_graph;

  /* Check the flags before touching current_ir_graph, so that an early
     return leaves the global state unchanged. */
  if (!(get_optimize() && get_opt_global_cse())) return;

  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_dom_state(irg) != dom_consistent)

  construct_backedges(irg);

  /* Place all floating nodes as early as possible. This guarantees
     a legal code placement. */
  worklist = new_pdeq ();

  /* place_early invalidates the outs, place_late needs them. */

  /* Now move the nodes down in the dominator tree. This reduces the
     number of unnecessary executions of the node. */

  set_irg_outs_inconsistent(current_ir_graph);
  del_pdeq (worklist);
  current_ir_graph = rem;
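
/* Pipeline sketch (hypothetical; assumes the optimization flag setter
   set_opt_global_cse() from the flag interface). place_code only runs
   when global cse is enabled:

     set_opt_global_cse(1);
     local_optimize_graph(irg);  (gcse merges equivalent nodes)
     place_code(irg);            (then pins them at good blocks)
*/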
/********************************************************************/
/* Control flow optimization.                                       */
/* Removes Bad control flow predecessors and empty blocks. A block  */
/* is empty if it contains only a Jmp node.                         */
/********************************************************************/

static void merge_blocks(ir_node *n, void *env) {
  set_irn_link(n, NULL);

  if (get_irn_op(n) == op_Block) {
    for (i = 0; i < get_Block_n_cfgpreds(n); i++)
      set_Block_cfgpred(n, i, skip_Tuple(get_Block_cfgpred(n, i)));
  } else if (get_irn_mode(n) == mode_X) {
    /* We will soon visit a block. Optimize it before visiting! */
    ir_node *b = get_nodes_Block(n);
    ir_node *new = equivalent_node(b);
    while (irn_not_visited(b) && (!is_Bad(new)) && (new != b)) {
      /* We would have to run gigo if new is bad. */
      if (get_optimize() && get_opt_control_flow()) exchange (b, new);
      new = equivalent_node(b);
    if (is_Bad(new)) exchange (n, new_Bad());

static void collect_nodes(ir_node *n, void *env) {
  if (is_no_Block(n)) {
    ir_node *b = get_nodes_Block(n);

    if ((get_irn_op(n) == op_Phi)) {
      /* Collect Phi nodes to compact ins along with block's ins. */
      set_irn_link(n, get_irn_link(b));
    } else if (get_irn_op(n) != op_Jmp) {  /* Check for non-empty block. */
      mark_Block_block_visited(b);
/* Returns true if pred is a predecessor of block b. */
int is_pred_of(ir_node *pred, ir_node *b) {
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (b_pred == pred) return 1;

int test_whether_dispensable(ir_node *b, int pos) {
  int i, j, n_preds = 1;
  int dispensable = 1;
  ir_node *cfop = get_Block_cfgpred(b, pos);
  ir_node *pred = get_nodes_Block(cfop);

  if (get_Block_block_visited(pred) + 1
      < get_irg_block_visited(current_ir_graph)) {
    if (!get_optimize() || !get_opt_control_flow()) {
      /* Mark the block so that it will not be removed. */
      set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);

    /* Seems to be empty. */
    if (!get_irn_link(b)) {
      /* There are no Phi nodes ==> dispensable. */
      n_preds = get_Block_n_cfgpreds(pred);
    /* b's pred blocks and pred's pred blocks must be pairwise disjoint.
       Work preds < pos as if they were already removed. */
    for (i = 0; i < pos; i++) {
      ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (get_Block_block_visited(b_pred) + 1
          < get_irg_block_visited(current_ir_graph)) {
        for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
          ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
          if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
        if (is_pred_of(b_pred, pred)) dispensable = 0;
    for (i = pos + 1; i < get_Block_n_cfgpreds(b); i++) {
      ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_pred_of(b_pred, pred)) dispensable = 0;

    set_Block_block_visited(pred, get_irg_block_visited(current_ir_graph)-1);
    n_preds = get_Block_n_cfgpreds(pred);
void optimize_blocks(ir_node *b, void *env) {
  int i, j, k, max_preds, n_preds;
  ir_node *pred, *phi;

  /* Count the number of predecessors this block has when it is merged with
     pred blocks that are removed. */
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    max_preds += test_whether_dispensable(b, i);
  in = (ir_node **) malloc(max_preds * sizeof(ir_node *));

  /** Debug output:
  printf(" working on "); DDMN(b);
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
      printf("  removing Bad %i\n ", i);
    } else if (get_Block_block_visited(pred) + 1
               < get_irg_block_visited(current_ir_graph)) {
      printf("  removing pred %i ", i); DDMN(pred);
    } else { printf("  Nothing to do for "); DDMN(pred); }
  ** end Debug output **/
  /** Fix the Phi nodes **/
  phi = get_irn_link(b);
    assert(get_irn_op(phi) == op_Phi);
    /* Find the new predecessors for the Phi */
    for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
      pred = get_nodes_Block(get_Block_cfgpred(b, i));
      if (is_Bad(get_Block_cfgpred(b, i))) {
      } else if (get_Block_block_visited(pred) + 1
                 < get_irg_block_visited(current_ir_graph)) {
        /* It's an empty block and not yet visited. */
        ir_node *phi_pred = get_Phi_pred(phi, i);
        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          if (get_nodes_Block(phi_pred) == pred) {
            assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
            in[n_preds] = get_Phi_pred(phi_pred, j);
            in[n_preds] = phi_pred;
        /* @@@ Here I need loop information!!! If there is no backedge,
           there must be no use either. */
        if (get_nodes_Block(phi_pred) == pred) {
          /* remove the Phi as it might be kept alive. Further there
             might be other users. */
          exchange(phi_pred, phi);  /* works, but is semantically wrong! */
        in[n_preds] = get_Phi_pred(phi, i);
    set_irn_in(phi, n_preds, in);
    phi = get_irn_link(phi);
  /** Move Phi nodes from removed blocks to this one.
      This happens only when a loop backedge is merged with a single
      loop entry. **/
  for (k = 0; k < get_Block_n_cfgpreds(b); k++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, k));
    if (get_Block_block_visited(pred) + 1
        < get_irg_block_visited(current_ir_graph)) {
      phi = get_irn_link(pred);
        if (get_irn_op(phi) == op_Phi) {
          set_nodes_Block(phi, b);
          for (i = 0; i < k; i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                /* @@@ Here I need loop information!!! The control flow edge
                   must be a backedge! (At all four in[n_preds] = phi
                   assignments.) Still, it has worked so far!! */
          for (i = 0; i < get_Phi_n_preds(phi); i++) {
            in[n_preds] = get_Phi_pred(phi, i);
          for (i = k + 1; i < get_Block_n_cfgpreds(b); i++) {
            pred = get_nodes_Block(get_Block_cfgpred(b, i));
            if (is_Bad(get_Block_cfgpred(b, i))) {
            } else if (get_Block_block_visited(pred) + 1
                       < get_irg_block_visited(current_ir_graph)) {
              /* It's an empty block and not yet visited. */
              for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
          set_irn_in(phi, n_preds, in);
        phi = get_irn_link(phi);

  /** Fix the block **/
  for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
    pred = get_nodes_Block(get_Block_cfgpred(b, i));
    if (is_Bad(get_Block_cfgpred(b, i))) {
    } else if (get_Block_block_visited(pred) + 1
               < get_irg_block_visited(current_ir_graph)) {
      /* It's an empty block and not yet visited. */
      assert(get_Block_n_cfgpreds(b) > 1);
      /* Else it should be optimized by equivalent_node. */
      for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
        in[n_preds] = get_Block_cfgpred(pred, j);
      /* Remove block as it might be kept alive. */
      exchange(pred, b /*new_Bad()*/);
      in[n_preds] = get_Block_cfgpred(b, i);
  set_irn_in(b, n_preds, in);
void optimize_cf(ir_graph *irg) {
  ir_node *end = get_irg_end(irg);
  ir_graph *rem = current_ir_graph;
  current_ir_graph = irg;

  /* Handle graph state */
  assert(get_irg_phase_state(irg) != phase_building);
  if (get_irg_outs_state(current_ir_graph) == outs_consistent)
    set_irg_outs_inconsistent(current_ir_graph);
  if (get_irg_dom_state(current_ir_graph) == dom_consistent)
    set_irg_dom_inconsistent(current_ir_graph);

  /* Use block visited flag to mark non-empty blocks. */
  inc_irg_block_visited(irg);
  irg_walk(end, merge_blocks, collect_nodes, NULL);

  /* Optimize the standard code. */
  irg_block_walk(get_irg_end_block(irg), optimize_blocks, NULL, NULL);

  /* Walk all keepalives, optimize them if they are blocks, and add them to
     the new in-array for the end node if useful. */
  in = NEW_ARR_F (ir_node *, 1);
  in[0] = get_nodes_Block(end);

  inc_irg_visited(current_ir_graph);
  for (i = 0; i < get_End_n_keepalives(end); i++) {
    ir_node *ka = get_End_keepalive(end, i);
    if (irn_not_visited(ka)) {
      if ((get_irn_op(ka) == op_Block) && Block_not_block_visited(ka)) {
        set_irg_block_visited(current_ir_graph,  /* Don't walk all the way to Start. */
                              get_irg_block_visited(current_ir_graph)-1);
        irg_block_walk(ka, optimize_blocks, NULL, NULL);
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);
      } else if (get_irn_op(ka) == op_Phi) {
        mark_irn_visited(ka);
        ARR_APP1 (ir_node *, in, ka);

  /* DEL_ARR_F(end->in);   GL @@@ does not work! */

  current_ir_graph = rem;