/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Dead node elimination and Procedure Inlining.
 * @author  Michael Beck, Goetz Lindenmaier
 */
#include "irgraph_t.h"
#include "iroptimize.h"
#include "irbackedge_t.h"
#include "analyze_irg_args.h"
#include "iredges_t.h"
#include "iropt_dbg.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/*------------------------------------------------------------------*/
/* Routines for dead node elimination / copying garbage collection  */
/*------------------------------------------------------------------*/
/**
 * Remember the new node in the old node by using a field all nodes have.
 */
#define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
/**
 * Get this new node, before the old node is forgotten.
 */
#define get_new_node(oldn)  get_irn_link(oldn)
/**
 * Check if a new node was set.
 */
#define has_new_node(n)  (get_new_node(n) != NULL)
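
/*
 * A minimal sketch of how these helpers cooperate during copying
 * (illustration only, mirroring copy_node()/copy_preds() below):
 *
 *     set_new_node(oldn, nn);          // remember the copy in the old node
 *     if (has_new_node(oldn))
 *         nn = get_new_node(oldn);     // retrieve it before oldn is freed
 */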
/**
 * We use the block_visited flag to mark that we have computed the
 * number of useful predecessors for this block.
 * Further we encode the new arity in this flag in the old blocks.
 * Remembering the arity is useful, as it saves a lot of pointer
 * accesses.  This function is called for all Phi and Block nodes
 * in a Block.
 */
static int compute_new_arity(ir_node *b) {
    int i, res, irn_arity;
    int irg_v, block_v;

    irg_v   = get_irg_block_visited(current_ir_graph);
    block_v = get_Block_block_visited(b);
    if (block_v >= irg_v) {
        /* we computed the number of preds for this block and saved it in the
           block_v flag */
        return block_v - irg_v;
    } else {
        /* compute the number of good predecessors */
        res = irn_arity = get_irn_arity(b);
        for (i = 0; i < irn_arity; i++)
            if (is_Bad(get_irn_n(b, i))) res--;
        /* save it in the flag. */
        set_Block_block_visited(b, irg_v + res);
        return res;
    }
}
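
/*
 * A worked example of the arity encoding above (a sketch, not called
 * anywhere): for a block b with 3 predecessors of which one is Bad, the
 * first call counts and stores the result, later calls only decode it:
 *
 *     int a1 = compute_new_arity(b);  // counts: 2, sets flag to irg_v + 2
 *     int a2 = compute_new_arity(b);  // decodes: block_v - irg_v == 2
 */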
/**
 * Copies the node to the new obstack.  The Ins of the new node point to
 * the predecessors on the old obstack.  For block/phi nodes not all
 * predecessors might be copied.  n->link points to the new node.
 * For Phi and Block nodes the function allocates in-arrays with an arity
 * only for useful predecessors.  The arity is determined by counting
 * the non-bad predecessors of the block.
 *
 * @param n    The node to be copied
 * @param env  if non-NULL, the node number attribute will be copied to the new node
 *
 * Note: Also used for loop unrolling.
 */
static void copy_node(ir_node *n, void *env) {
    ir_node *nn, *block;
    int new_arity;
    ir_op *op = get_irn_op(n);

    if (op == op_Bad) {
        /* node copied already */
        return;
    } else if (op == op_Block) {
        block = NULL;
        new_arity = compute_new_arity(n);
        n->attr.block.graph_arr = NULL;
    } else {
        block = get_nodes_block(n);
        if (op == op_Phi) {
            new_arity = compute_new_arity(block);
        } else {
            new_arity = get_irn_arity(n);
        }
    }
    nn = new_ir_node(get_irn_dbg_info(n),
                     current_ir_graph,
                     block,
                     op,
                     get_irn_mode(n),
                     new_arity,
                     get_irn_in(n) + 1);
    /* Copy the attributes.  These might point to additional data.  If this
       was allocated on the old obstack the pointers now are dangling.  This
       frees e.g. the memory of the graph_arr allocated in new_immBlock. */
    if (op == op_Block) {
        /* we cannot allow blocks WITHOUT macroblock input */
        set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
    }
    copy_node_attr(n, nn);

    if (env != NULL) {
        /* for easier debugging, we want to copy the node numbers too */
        nn->node_nr = n->node_nr;
    }

    set_new_node(n, nn);
    hook_dead_node_elim_subst(current_ir_graph, n, nn);
}
/**
 * Copies new predecessors of the old node to the new node remembered in link.
 * Spare the Bad predecessors of Phi and Block nodes.
 */
static void copy_preds(ir_node *n, void *env) {
    ir_node *nn, *block;
    int i, j, irn_arity;
    (void) env;

    nn = get_new_node(n);

    if (is_Block(n)) {
        /* copy the macro block header */
        ir_node *mbh = get_Block_MacroBlock(n);

        if (mbh == n) {
            /* this block is a macroblock header */
            set_Block_MacroBlock(nn, nn);
        } else {
            /* get the macro block header */
            ir_node *nmbh = get_new_node(mbh);
            assert(nmbh != NULL);
            set_Block_MacroBlock(nn, nmbh);
        }

        /* Don't copy Bad nodes. */
        j = 0;
        irn_arity = get_irn_arity(n);
        for (i = 0; i < irn_arity; i++) {
            if (! is_Bad(get_irn_n(n, i))) {
                ir_node *pred = get_irn_n(n, i);
                set_irn_n(nn, j, get_new_node(pred));
                j++;
            }
        }
        /* repair the block visited flag from above misuse.  Repair it in both
           graphs so that the old one can still be used. */
        set_Block_block_visited(nn, 0);
        set_Block_block_visited(n, 0);
        /* Local optimization cannot merge two subsequent blocks if the in
           array contained Bads.  Now it's possible.
           We don't call optimize_in_place as it requires
           that the fields in ir_graph are set properly. */
        if (!has_Block_entity(nn) &&
            get_opt_control_flow_straightening() &&
            get_Block_n_cfgpreds(nn) == 1 &&
            is_Jmp(get_Block_cfgpred(nn, 0))) {
            ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
            if (nn == old) {
                /* Jmp jumps into the block it is in -- handle the self cycle. */
                assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
                exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
            } else {
                exchange(nn, old);
            }
        }
    } else if (is_Phi(n) && get_irn_arity(n) > 0) {
        /* Don't copy node if corresponding predecessor in block is Bad.
           The Block itself should not be Bad. */
        block = get_nodes_block(n);
        set_nodes_block(nn, get_new_node(block));
        j = 0;
        irn_arity = get_irn_arity(n);
        for (i = 0; i < irn_arity; i++) {
            if (! is_Bad(get_irn_n(block, i))) {
                ir_node *pred = get_irn_n(n, i);
                set_irn_n(nn, j, get_new_node(pred));
                /*if (is_backedge(n, i)) set_backedge(nn, j);*/
                j++;
            }
        }
        /* If the pre walker reached this Phi after the post walker visited the
           block, block_visited is > 0. */
        set_Block_block_visited(get_nodes_block(n), 0);
        /* Compacting the Phi's ins might generate Phis with only one
           predecessor. */
        if (get_irn_arity(nn) == 1)
            exchange(nn, get_irn_n(nn, 0));
    } else {
        irn_arity = get_irn_arity(n);
        for (i = -1; i < irn_arity; i++)
            set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
    }
    /* Now the new node is complete.  We can add it to the hash table for CSE.
       @@@ inlining aborts if we identify End. Why? */
    add_identities(current_ir_graph->value_table, nn);
}
/**
 * Copies the graph recursively, compacts the keep-alives of the end node.
 *
 * @param irg           the graph to be copied
 * @param copy_node_nr  If non-zero, the node number will be copied
 */
static void copy_graph(ir_graph *irg, int copy_node_nr) {
    ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
    ir_node *ka;      /* keep alive */
    int i, irn_arity;
    unsigned long vfl;

    /* Some nodes must be copied by hand, sigh */
    vfl = get_irg_visited(irg);
    set_irg_visited(irg, vfl + 1);

    oe = get_irg_end(irg);
    mark_irn_visited(oe);
    /* copy the end node by hand, allocate dynamic in array! */
    ne = new_ir_node(get_irn_dbg_info(oe),
                     irg,
                     NULL,
                     op_End,
                     mode_X,
                     -1,
                     NULL);
    /* Copy the attributes.  Well, there might be some in the future... */
    copy_node_attr(oe, ne);
    set_new_node(oe, ne);

    /* copy the Bad node */
    ob = get_irg_bad(irg);
    mark_irn_visited(ob);
    nb = new_ir_node(get_irn_dbg_info(ob),
                     irg,
                     NULL,
                     op_Bad,
                     mode_T,
                     0,
                     NULL);
    copy_node_attr(ob, nb);
    set_new_node(ob, nb);

    /* copy the NoMem node */
    om = get_irg_no_mem(irg);
    mark_irn_visited(om);
    nm = new_ir_node(get_irn_dbg_info(om),
                     irg,
                     NULL,
                     op_NoMem,
                     mode_M,
                     0,
                     NULL);
    copy_node_attr(om, nm);
    set_new_node(om, nm);

    /* copy the live nodes */
    set_irg_visited(irg, vfl);
    irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));

    /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */

    /* visit the anchors as well */
    for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
        ir_node *n = get_irg_anchor(irg, i);

        if (n && (get_irn_visited(n) <= vfl)) {
            set_irg_visited(irg, vfl);
            irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
        }
    }

    /* copy_preds for the end node ... */
    set_nodes_block(ne, get_new_node(get_nodes_block(oe)));

    /*- ... and now the keep alives. -*/
    /* First pick the not marked block nodes and walk them.  We must pick these
       first, as otherwise we will overlook blocks reachable from Phis. */
    irn_arity = get_End_n_keepalives(oe);
    for (i = 0; i < irn_arity; i++) {
        ka = get_End_keepalive(oe, i);
        if (is_Block(ka)) {
            if (get_irn_visited(ka) <= vfl) {
                /* We must keep the block alive and copy everything reachable */
                set_irg_visited(irg, vfl);
                irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
            }
            add_End_keepalive(ne, get_new_node(ka));
        }
    }

    /* Now pick other nodes.  Here we will keep all! */
    irn_arity = get_End_n_keepalives(oe);
    for (i = 0; i < irn_arity; i++) {
        ka = get_End_keepalive(oe, i);
        if (!is_Block(ka)) {
            if (get_irn_visited(ka) <= vfl) {
                /* We didn't copy the node yet. */
                set_irg_visited(irg, vfl);
                irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
            }
            add_End_keepalive(ne, get_new_node(ka));
        }
    }

    /* start block sometimes only reached after keep alives */
    set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
    set_nodes_block(nm, get_new_node(get_nodes_block(om)));
}
/**
 * Copies the graph reachable from current_ir_graph->end to the obstack
 * in current_ir_graph and fixes the environment.
 * Then fixes the fields in current_ir_graph containing nodes of the
 * graph.
 *
 * @param copy_node_nr  If non-zero, the node number will be copied
 */
static void
copy_graph_env(int copy_node_nr) {
    ir_graph *irg = current_ir_graph;
    ir_node *old_end, *new_anchor;
    int i;

    /* remove end_except and end_reg nodes */
    old_end = get_irg_end(irg);
    set_irg_end_except(irg, old_end);
    set_irg_end_reg   (irg, old_end);

    /* Not all nodes remembered in irg might be reachable
       from the end node.  Assure their link is set to NULL, so that
       we can test whether new nodes have been computed. */
    for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
        ir_node *n = get_irg_anchor(irg, i);
        if (n != NULL)
            set_new_node(n, NULL);
    }
    /* we use the block walk flag for removing Bads from Blocks ins. */
    inc_irg_block_visited(irg);

    /* copy the graph */
    copy_graph(irg, copy_node_nr);

    /* fix the anchor */
    old_end    = get_irg_end(irg);
    new_anchor = new_Anchor(irg);

    for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
        ir_node *n = get_irg_anchor(irg, i);
        if (n != NULL)
            set_irn_n(new_anchor, i, get_new_node(n));
    }
    irg->anchor = new_anchor;

    /* ensure the new anchor is placed in the endblock */
    set_nodes_block(new_anchor, get_irg_end_block(irg));
}
/**
 * Copies all reachable nodes to a new obstack.  Removes bad inputs
 * from block nodes and the corresponding inputs from Phi nodes.
 * Merges single exit blocks with single entry blocks and removes
 * 1-input Phis.
 * Adds all new nodes to a new hash table for CSE.  Does not
 * perform CSE, so the hash table might contain common subexpressions.
 */
void dead_node_elimination(ir_graph *irg) {
    ir_graph *rem;
#ifdef INTERPROCEDURAL_VIEW
    int rem_ipview = get_interprocedural_view();
#endif
    struct obstack *graveyard_obst = NULL;
    struct obstack *rebirth_obst   = NULL;

    edges_deactivate(irg);

    /* inform statistics that we started a dead-node elimination run */
    hook_dead_node_elim(irg, 1);

    /* Remember external state of current_ir_graph. */
    rem = current_ir_graph;
    current_ir_graph = irg;
#ifdef INTERPROCEDURAL_VIEW
    set_interprocedural_view(0);
#endif

    assert(get_irg_phase_state(irg) != phase_building);

    /* Handle graph state */
    free_callee_info(irg);

    /* @@@ so far we lose loops when copying */
    free_loop_information(irg);

    set_irg_doms_inconsistent(irg);

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = XMALLOC(struct obstack);
    irg->obst = rebirth_obst;
    obstack_init(irg->obst);
    irg->last_node_idx = 0;

    /* We also need a new value table for CSE */
    del_identities(irg->value_table);
    irg->value_table = new_identities();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env(/*copy_node_nr=*/1);

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
    xfree(graveyard_obst);            /* ... then free it.           */

    /* inform statistics that the run is over */
    hook_dead_node_elim(irg, 0);

    current_ir_graph = rem;
#ifdef INTERPROCEDURAL_VIEW
    set_interprocedural_view(rem_ipview);
#endif
}
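
/*
 * Typical use (a sketch; any fully constructed graph will do):
 *
 *     ir_graph *irg = get_irp_irg(0);
 *     dead_node_elimination(irg);    // live nodes are copied to a fresh
 *                                    // obstack, the old one is freed
 */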
ir_graph_pass_t *dead_node_elimination_pass(const char *name) {
    return def_graph_pass(name ? name : "dce", dead_node_elimination);
}
/**
 * Relink bad predecessors of a block and store the old in array
 * in the link field.  This function is called by relink_bad_predecessors().
 * The array in the link field starts with the block operand at position 0.
 * If the block has bad predecessors, create a new in array without bad preds.
 * Otherwise leave the in array untouched.
 */
static void relink_bad_block_predecessors(ir_node *n, void *env) {
    ir_node **new_in, *irn;
    int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
    (void) env;

    /* if link field of block is NULL, look for bad predecessors, otherwise
       this is already done */
    if (is_Block(n) && get_irn_link(n) == NULL) {
        /* save old predecessors in link field (position 0 is the block operand)*/
        set_irn_link(n, get_irn_in(n));

        /* count predecessors without bad nodes */
        old_irn_arity = get_irn_arity(n);
        for (i = 0; i < old_irn_arity; i++)
            if (!is_Bad(get_irn_n(n, i)))
                ++new_irn_arity;

        /* arity changing: set new predecessors without bad nodes */
        if (new_irn_arity < old_irn_arity) {
            /* Get new predecessor array.  We do not resize the array, as we must
               keep the old one to update Phis. */
            new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));

            /* set new predecessors in array */
            new_in[0] = NULL;
            new_irn_n = 1;
            for (i = 0; i < old_irn_arity; i++) {
                irn = get_irn_n(n, i);
                if (!is_Bad(irn)) {
                    new_in[new_irn_n] = irn;
                    is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
                    ++new_irn_n;
                }
            }
            /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
            ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
            n->in = new_in;
        } /* ir node has bad predecessors */
    } /* Block is not relinked */
}
/**
 * Relinks Bad predecessors from Blocks and Phis, called by walker
 * remove_bad_predecessors().  If n is a Block, call
 * relink_bad_block_predecessors().  If n is a Phi node, call also the
 * relinking function of its Block.  If this block has bad predecessors,
 * relink the preds of the Phi node as well.
 */
static void relink_bad_predecessors(ir_node *n, void *env) {
    ir_node *block, **old_in;
    int i, old_irn_arity, new_irn_arity;

    /* relink bad predecessors of a block */
    if (is_Block(n))
        relink_bad_block_predecessors(n, env);

    /* If Phi node relink its block and its predecessors */
    if (is_Phi(n)) {
        /* Relink predecessors of phi's block */
        block = get_nodes_block(n);
        if (get_irn_link(block) == NULL)
            relink_bad_block_predecessors(block, env);

        old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
        old_irn_arity = ARR_LEN(old_in);

        /* Relink Phi predecessors if count of predecessors changed */
        if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
            /* set new predecessors in array
               n->in[0] remains the same block */
            new_irn_arity = 1;
            for (i = 1; i < old_irn_arity; i++)
                if (!is_Bad(old_in[i])) {
                    n->in[new_irn_arity] = n->in[i];
                    is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
                    ++new_irn_arity;
                }

            ARR_SETLEN(ir_node *, n->in, new_irn_arity);
            ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
        }
    } /* n is a Phi node */
}
/**
 * Removes Bad predecessors from Blocks and the corresponding
 * inputs to Phi nodes as in dead_node_elimination but without
 * copying the graph.
 * On walking up set the link field to NULL, on walking down call
 * relink_bad_predecessors() (this function stores the old in array
 * in the link field and sets a new in array if the arity of the
 * predecessors changes).
 */
void remove_bad_predecessors(ir_graph *irg) {
    panic("Fix backedge handling first");
    irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
}
/*
   __)|_| | \_/ | \_/(/_  |_/\__|__

   The following stuff implements a facility that automatically patches
   registered ir_node pointers to the new node when a dead node elimination
   occurs.
*/
struct _survive_dce_t {
    struct obstack obst;
    pmap           *places;
    pmap           *new_places;
    hook_entry_t   dead_node_elim;
    hook_entry_t   dead_node_elim_subst;
};

typedef struct _survive_dce_list_t {
    struct _survive_dce_list_t *next;
    ir_node                    **place;
} survive_dce_list_t;
static void dead_node_hook(void *context, ir_graph *irg, int start) {
    survive_dce_t *sd = context;
    (void) irg;

    if (start) {
        /* Create a new map before the dead node elimination is performed. */
        sd->new_places = pmap_create_ex(pmap_count(sd->places));
    } else {
        /* Patch back all nodes if dead node elimination is over and something is to be done. */
        pmap_destroy(sd->places);
        sd->places     = sd->new_places;
        sd->new_places = NULL;
    }
}
/**
 * Hook called when dead node elimination replaces old by nw.
 */
static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
    survive_dce_t *sd = context;
    survive_dce_list_t *list = pmap_get(sd->places, old);
    (void) irg;

    /* If the node is to be patched back, write the new address to all registered locations. */
    if (list) {
        survive_dce_list_t *p;

        for (p = list; p; p = p->next)
            *(p->place) = nw;

        pmap_insert(sd->new_places, nw, list);
    }
}
/**
 * Make a new Survive DCE environment.
 */
survive_dce_t *new_survive_dce(void) {
    survive_dce_t *res = XMALLOC(survive_dce_t);
    obstack_init(&res->obst);
    res->places     = pmap_create();
    res->new_places = NULL;

    res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
    res->dead_node_elim.context                   = res;
    res->dead_node_elim.next                      = NULL;

    res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
    res->dead_node_elim_subst.context = res;
    res->dead_node_elim_subst.next    = NULL;

    register_hook(hook_dead_node_elim, &res->dead_node_elim);
    register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
    return res;
}
/**
 * Free a Survive DCE environment.
 */
void free_survive_dce(survive_dce_t *sd) {
    obstack_free(&sd->obst, NULL);
    pmap_destroy(sd->places);
    unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
    unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
    xfree(sd);
}
/**
 * Register a node pointer to be patched upon DCE.
 * When DCE occurs, the node pointer specified by @p place will be
 * patched to the new address of the node it is pointing to.
 *
 * @param sd    The Survive DCE environment.
 * @param place The address of the node pointer.
 */
void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
    if (*place != NULL) {
        ir_node *irn = *place;
        survive_dce_list_t *curr = pmap_get(sd->places, irn);
        survive_dce_list_t *nw   = OALLOC(&sd->obst, survive_dce_list_t);

        nw->next  = curr;
        nw->place = place;

        pmap_insert(sd->places, irn, nw);
    }
}
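
/*
 * Usage sketch: keep a node pointer valid across dead_node_elimination()
 * (variable names are illustrative only):
 *
 *     survive_dce_t *sd = new_survive_dce();
 *     survive_dce_register_irn(sd, &important);  // important: some ir_node *
 *     dead_node_elimination(irg);                // 'important' now points
 *                                                // at the copied node
 *     free_survive_dce(sd);
 */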
/*--------------------------------------------------------------------*/
/*  Functionality for inlining                                        */
/*--------------------------------------------------------------------*/
/**
 * Copy node for inlining.  Updates attributes that change when
 * inlining but not for dead node elimination.
 *
 * Copies the node by calling copy_node() and then updates the entity if
 * it's a local one.  env must be a pointer to the frame type of the
 * inlined procedure.  The new entities must be in the link field of
 * the entities.
 */
static void copy_node_inline(ir_node *n, void *env) {
    ir_node *nn;
    ir_type *frame_tp = (ir_type *)env;

    copy_node(n, NULL);
    if (is_Sel(n)) {
        nn = get_new_node(n);
        /* use copied entities from the new frame */
        if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
            set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
        }
    } else if (is_Block(n)) {
        nn = get_new_node(n);
        nn->attr.block.irg.irg = current_ir_graph;
    }
}
/**
 * Copies new predecessors of the old node and moves constants to
 * the Start Block.
 */
static void copy_preds_inline(ir_node *n, void *env) {
    ir_node *nn;

    copy_preds(n, env);
    nn = skip_Id(get_new_node(n));
    if (is_irn_constlike(nn)) {
        /* move Constants into the start block */
        set_nodes_block(nn, get_irg_start_block(current_ir_graph));

        n = identify_remember(current_ir_graph->value_table, nn);
        if (nn != n) {
            DBG_OPT_CSE(nn, n);
            exchange(nn, n);
        }
    }
}
/**
 * Walker: checks if P_value_arg_base is used.
 */
static void find_addr(ir_node *node, void *env) {
    int *allow_inline = env;
    if (is_Sel(node)) {
        ir_graph *irg = current_ir_graph;
        if (get_Sel_ptr(node) == get_irg_frame(irg)) {
            /* access to frame */
            ir_entity *ent = get_Sel_entity(node);
            if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
                /* access to value_type */
                *allow_inline = 0;
            }
        }
    } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
        /* From GCC:
         * Refuse to inline an alloca call unless the user explicitly forced it,
         * as this may change the program's memory overhead drastically when
         * the function using alloca is called in a loop.  In GCC present in
         * SPEC2000, inlining into schedule_block caused it to require 2GB of
         * RAM instead of 256MB.
         *
         * Sadly, this is true for our implementation as well.
         * Moreover, we cannot differentiate between alloca() and VLA yet, so
         * this disables inlining of functions using VLA (which are completely
         * safe).
         *
         * 2 Solutions:
         * - add a flag to the Alloc node for "real" alloca() calls
         * - add a new Stack-Restore node at the end of a function using
         *   alloca()
         */
        *allow_inline = 0;
    }
}
/**
 * Check if we can inline a given call.
 * Currently, we cannot inline two cases:
 * - call with compound arguments
 * - graphs that take the address of a parameter
 *
 * check these conditions here
 */
static int can_inline(ir_node *call, ir_graph *called_graph) {
    ir_type *call_type = get_Call_type(call);
    int params, ress, i, res;
    assert(is_Method_type(call_type));

    params = get_method_n_params(call_type);
    ress   = get_method_n_ress(call_type);

    /* check parameters for compound arguments */
    for (i = 0; i < params; ++i) {
        ir_type *p_type = get_method_param_type(call_type, i);

        if (is_compound_type(p_type))
            return 0;
    }

    /* check results for compound arguments */
    for (i = 0; i < ress; ++i) {
        ir_type *r_type = get_method_res_type(call_type, i);

        if (is_compound_type(r_type))
            return 0;
    }

    res = 1;
    irg_walk_graph(called_graph, find_addr, NULL, &res);

    return res;
}
enum exc_mode {
    exc_handler,    /**< There is a handler. */
    exc_no_handler  /**< Exception handling not represented. */
};
/* Inlines a method at the given call site. */
int inline_method(ir_node *call, ir_graph *called_graph) {
    ir_node             *pre_call;
    ir_node             *post_call, *post_bl;
    ir_node             *in[pn_Start_max];
    ir_node             *end, *end_bl, *block;
    ir_node             **res_pred;
    ir_node             **cf_pred;
    ir_node             **args_in;
    ir_node             *ret, *phi;
    int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
    int                 n_mem_phi;
    enum exc_mode       exc_handling;
    ir_type             *called_frame, *curr_frame, *mtp, *ctp;
    ir_entity           *ent;
    ir_graph            *rem, *irg;
    irg_inline_property prop = get_irg_inline_property(called_graph);
    unsigned long       visited;
    if (prop == irg_inline_forbidden)
        return 0;

    ent = get_irg_entity(called_graph);
    mtp      = get_entity_type(ent);
    ctp      = get_Call_type(call);
    n_params = get_method_n_params(mtp);
    n_res    = get_method_n_ress(mtp);
    if (n_params > get_method_n_params(ctp)) {
        /* this is a bad feature of C: without a prototype, we can
         * call a function with fewer parameters than needed.  Currently
         * we don't support this, although we could use Unknown then. */
        return 0;
    }
    if (n_res != get_method_n_ress(ctp)) {
        return 0;
    }

    /* Argh, compiling C has some bad consequences:
     * It is implementation dependent what happens in that case.
     * We support inlining if the bitsize of the types matches AND
     * the same arithmetic is used. */
    for (i = n_params - 1; i >= 0; --i) {
        ir_type *param_tp = get_method_param_type(mtp, i);
        ir_type *arg_tp   = get_method_param_type(ctp, i);

        if (param_tp != arg_tp) {
            ir_mode *pmode = get_type_mode(param_tp);
            ir_mode *amode = get_type_mode(arg_tp);

            if (pmode == NULL || amode == NULL)
                return 0;
            if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
                return 0;
            if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
                return 0;
            /* otherwise we can simply "reinterpret" the bits */
        }
    }
    for (i = n_res - 1; i >= 0; --i) {
        ir_type *decl_res_tp = get_method_res_type(mtp, i);
        ir_type *used_res_tp = get_method_res_type(ctp, i);

        if (decl_res_tp != used_res_tp) {
            ir_mode *decl_mode = get_type_mode(decl_res_tp);
            ir_mode *used_mode = get_type_mode(used_res_tp);
            if (decl_mode == NULL || used_mode == NULL)
                return 0;
            if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
                return 0;
            if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
                return 0;
            /* otherwise we can "reinterpret" the bits */
        }
    }
    irg = get_irn_irg(call);

    /*
     * We cannot inline a recursive call.  The graph must be copied before
     * the call to inline_method() using create_irg_copy().
     */
    if (called_graph == irg)
        return 0;

    /*
     * currently, we cannot inline two cases:
     * - call with compound arguments
     * - graphs that take the address of a parameter
     */
    if (! can_inline(call, called_graph))
        return 0;
    rem = current_ir_graph;
    current_ir_graph = irg;

    DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));

    /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
    rem_opt = get_opt_optimize();
    set_optimize(0);
    /* Handle graph state */
    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) == op_pin_state_pinned);
    assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
    set_irg_outs_inconsistent(irg);
    set_irg_extblk_inconsistent(irg);
    set_irg_doms_inconsistent(irg);
    set_irg_loopinfo_inconsistent(irg);
    set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
    set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);

    /* -- Check preconditions -- */
    assert(is_Call(call));

    /* here we know we WILL inline, so inform the statistics */
    hook_inline(call, called_graph);
    /* -- Decide how to handle exception control flow: Is there a handler
       for the Call node, or do we branch directly to End on an exception?
       exc_handling:
       0 There is a handler.
       2 Exception handling not represented in Firm. -- */
    {
        ir_node *proj, *Xproj = NULL;

        for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
            long proj_nr = get_Proj_proj(proj);
            if (proj_nr == pn_Call_X_except) Xproj = proj;
        }
        exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
    }
    /* create the argument tuple */
    NEW_ARR_A(ir_node *, args_in, n_params);

    block = get_nodes_block(call);
    for (i = n_params - 1; i >= 0; --i) {
        ir_node *arg      = get_Call_param(call, i);
        ir_type *param_tp = get_method_param_type(mtp, i);
        ir_mode *mode     = get_type_mode(param_tp);

        if (mode != get_irn_mode(arg)) {
            arg = new_r_Conv(block, arg, mode);
        }
        args_in[i] = arg;
    }
    /* -- Pre_call is the tuple that replaces the parameters of
       the procedure and later replaces the Start node of the called graph.
       Post_call is the old Call node and collects the results of the called
       graph.  Both will end up being a tuple. -- */
    post_bl = get_nodes_block(call);
    set_irg_current_block(irg, post_bl);
    /* XxMxPxPxPxT of Start + parameter of Call */
    in[pn_Start_X_initial_exec] = new_Jmp();
    in[pn_Start_M]              = get_Call_mem(call);
    in[pn_Start_P_frame_base]   = get_irg_frame(irg);
    in[pn_Start_P_tls]          = get_irg_tls(irg);
    in[pn_Start_T_args]         = new_Tuple(n_params, args_in);
    pre_call  = new_Tuple(pn_Start_max, in);
    post_call = call;
    /* -- The new block gets the ins of the old block, pre_call and all its
       predecessors and all Phi nodes. -- */
    part_block(pre_call);
    /* -- Prepare state for dead node elimination -- */
    /* Visited flags in calling irg must be >= flag in called irg.
       Else walker and arity computation will not work. */
    if (get_irg_visited(irg) <= get_irg_visited(called_graph))
        set_irg_visited(irg, get_irg_visited(called_graph) + 1);
    if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
        set_irg_block_visited(irg, get_irg_block_visited(called_graph));
    visited = get_irg_visited(irg);
    /* Set pre_call as new Start node in link field of the start node of
       the calling graph and pre_call's block as new block for the start block
       of the calling graph.
       Further mark these nodes so that they are not visited by the
       copying. */
    set_irn_link(get_irg_start(called_graph), pre_call);
    set_irn_visited(get_irg_start(called_graph), visited);
    set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
    set_irn_visited(get_irg_start_block(called_graph), visited);

    set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
    set_irn_visited(get_irg_bad(called_graph), visited);

    set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
    set_irn_visited(get_irg_no_mem(called_graph), visited);
    /* Initialize for compaction of in arrays */
    inc_irg_block_visited(irg);
    /* -- Replicate local entities of the called_graph -- */
    /* copy the entities. */
    irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
    called_frame = get_irg_frame_type(called_graph);
    curr_frame   = get_irg_frame_type(irg);
    for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
        ir_entity *new_ent, *old_ent;
        old_ent = get_class_member(called_frame, i);
        new_ent = copy_entity_own(old_ent, curr_frame);
        set_entity_link(old_ent, new_ent);
    }
    /* visited is > than that of the called graph.  With this trick visited will
       remain unchanged, so that an outer walker (e.g. one searching for call
       nodes to inline) will not visit the inlined nodes. */
    set_irg_visited(irg, get_irg_visited(irg)-1);
    /* -- Performing dead node elimination inlines the graph -- */
    /* Copies the nodes to the obstack of current_ir_graph.  Updates links to new
       entities. */
    irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
             get_irg_frame_type(called_graph));

    irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
    /* Repair called_graph */
    set_irg_visited(called_graph, get_irg_visited(irg));
    set_irg_block_visited(called_graph, get_irg_block_visited(irg));
    set_Block_block_visited(get_irg_start_block(called_graph), 0);
    /* -- Merge the end of the inlined procedure with the call site -- */
    /* We will turn the old Call node into a Tuple with the following
       predecessors:
       -1: Block of Tuple.
       0: Phi of all Memories of Return statements.
       1: Jmp from new Block that merges the control flow from all exception
          predecessors of the old end block.
       2: Tuple of all arguments.
       3: Phi of Exception memories.
       In case the old Call directly branches to End on an exception we don't
       need the block merging all exceptions nor the Phi of the exception
       memories. */
    /* -- Precompute some values -- */
    end_bl = get_new_node(get_irg_end_block(called_graph));
    end    = get_new_node(get_irg_end(called_graph));
    arity  = get_Block_n_cfgpreds(end_bl);  /* arity = n_exc + n_ret */
    n_res  = get_method_n_ress(get_Call_type(call));

    res_pred = XMALLOCN(ir_node*, n_res);
    cf_pred  = XMALLOCN(ir_node*, arity);

    set_irg_current_block(irg, post_bl);  /* just to make sure */
    /* -- archive keepalives -- */
    irn_arity = get_irn_arity(end);
    for (i = 0; i < irn_arity; i++) {
        ir_node *ka = get_End_keepalive(end, i);
        if (! is_Bad(ka))
            add_End_keepalive(get_irg_end(irg), ka);
    }

    /* The new end node will die.  We need not free it, as the in array is on
       the obstack: copy_node() only generated 'D' arrays. */
    /* -- Replace Return nodes by Jump nodes. -- */
    n_ret = 0;
    for (i = 0; i < arity; i++) {
        ret = get_Block_cfgpred(end_bl, i);
        if (is_Return(ret)) {
            cf_pred[n_ret] = new_r_Jmp(get_nodes_block(ret));
            n_ret++;
        }
    }
    set_irn_in(post_bl, n_ret, cf_pred);
    /* -- Build a Tuple for all results of the method.
       Add a Phi node if there was more than one Return. -- */
    turn_into_tuple(post_call, pn_Call_max);
    /* First the Memory-Phi */
    n_mem_phi = 0;
    for (i = 0; i < arity; i++) {
        ret = get_Block_cfgpred(end_bl, i);
        if (is_Return(ret)) {
            cf_pred[n_mem_phi++] = get_Return_mem(ret);
        } else if (is_Call(ret)) {
            /* memory output for some exceptions is directly connected to End */
            cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
        } else if (is_fragile_op(ret)) {
            /* We rely on all cfops having the memory output at the same position. */
            cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
        } else if (is_Raise(ret)) {
            cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
        }
    }
    phi = new_Phi(n_mem_phi, cf_pred, mode_M);
    set_Tuple_pred(call, pn_Call_M, phi);
    /* Conserve Phi-list for further inlinings -- but might be optimized */
    if (get_nodes_block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
    }
    /* Now the real results */
    if (n_res > 0) {
        for (j = 0; j < n_res; j++) {
            ir_type *res_type = get_method_res_type(ctp, j);
            ir_mode *res_mode = get_type_mode(res_type);
            n_ret = 0;
            for (i = 0; i < arity; i++) {
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
                    ir_node *res = get_Return_res(ret, j);
                    if (get_irn_mode(res) != res_mode) {
                        ir_node *block = get_nodes_block(res);
                        res = new_r_Conv(block, res, res_mode);
                    }
                    cf_pred[n_ret] = res;
                    n_ret++;
                }
            }
            if (n_ret > 0) {
                phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
            } else {
                phi = new_Bad();
            }
            res_pred[j] = phi;
            /* Conserve Phi-list for further inlinings -- but might be optimized */
            if (get_nodes_block(phi) == post_bl) {
                set_Phi_next(phi, get_Block_phis(post_bl));
                set_Block_phis(post_bl, phi);
            }
        }
        set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
    } else {
        set_Tuple_pred(call, pn_Call_T_result, new_Bad());
    }
    /* handle the regular call */
    set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());

    /* For now, we cannot inline calls with value_base */
    set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
    /* Finally the exception control flow.
       We have two possible situations:
       First if the Call branches to an exception handler:
       We need to add a Phi node to
       collect the memory containing the exception objects.  Further we need
       to add another block to get a correct representation of this Phi.  To
       this block we add a Jmp that resolves into the X output of the Call
       when the Call is turned into a tuple.
       Second: There is no exception edge.  Just add all inlined exception
       branches to the End node.
     */
    if (exc_handling == exc_handler) {
        n_exc = 0;
        for (i = 0; i < arity; i++) {
            ir_node *ret, *irn;
            ret = get_Block_cfgpred(end_bl, i);
            irn = skip_Proj(ret);
            if (is_fragile_op(irn) || is_Raise(irn)) {
                cf_pred[n_exc] = ret;
                ++n_exc;
            }
        }
        if (n_exc > 0) {
            if (n_exc == 1) {
                /* simple fix */
                set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
            } else {
                ir_node *block = new_Block(n_exc, cf_pred);
                set_cur_block(block);
                set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
            }
        } else {
            set_Tuple_pred(call, pn_Call_X_except, new_Bad());
        }
    } else {
        ir_node *main_end_bl;
        int main_end_bl_arity;
        ir_node **end_preds;

        /* assert(exc_handling == 1 || no exceptions. ) */
        n_exc = 0;
        for (i = 0; i < arity; i++) {
            ir_node *ret = get_Block_cfgpred(end_bl, i);
            ir_node *irn = skip_Proj(ret);

            if (is_fragile_op(irn) || is_Raise(irn)) {
                cf_pred[n_exc] = ret;
                n_exc++;
            }
        }
        main_end_bl       = get_irg_end_block(irg);
        main_end_bl_arity = get_irn_arity(main_end_bl);
        end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);

        for (i = 0; i < main_end_bl_arity; ++i)
            end_preds[i] = get_irn_n(main_end_bl, i);
        for (i = 0; i < n_exc; ++i)
            end_preds[main_end_bl_arity + i] = cf_pred[i];
        set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
        xfree(end_preds);
    }
    xfree(res_pred);
    xfree(cf_pred);
    /* -- Turn CSE back on. -- */
    set_optimize(rem_opt);
    current_ir_graph = rem;

    return 1;
}
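
/*
 * Usage sketch (names illustrative): inline a known call after collecting
 * the Phi/Proj lists, as the routines above expect them:
 *
 *     ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
 *     collect_phiprojs(irg);
 *     if (inline_method(call, callee_irg)) {
 *         // the call site is now a Tuple; Phi/Proj lists of irg
 *         // must be recomputed before the next inlining
 *     }
 *     ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
 */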
/********************************************************************/
/* Apply inlining to small methods.                                 */
/********************************************************************/

static struct obstack temp_obst;
/** Represents a possible inlinable call in a graph. */
typedef struct _call_entry {
    ir_node   *call;        /**< The Call node. */
    ir_graph  *callee;      /**< The callee IR-graph. */
    list_head list;         /**< List head for linking the next one. */
    int       loop_depth;   /**< The loop depth of this call. */
    int       benefice;     /**< The calculated benefice of this call. */
    unsigned  local_adr:1;  /**< Set if this call gets an address of a local variable. */
    unsigned  all_const:1;  /**< Set if this call has only constant parameters. */
} call_entry;
/**
 * Environment for inlining small irgs.
 */
typedef struct _inline_env_t {
    struct obstack obst;  /**< An obstack where call_entries are allocated on. */
    list_head      calls; /**< The call entry list. */
} inline_env_t;
/**
 * Returns the irg called from a Call node.  If the irg is not
 * known, NULL is returned.
 *
 * @param call  the call node
 */
static ir_graph *get_call_called_irg(ir_node *call) {
    ir_node *addr;

    addr = get_Call_ptr(call);
    if (is_Global(addr)) {
        ir_entity *ent = get_Global_entity(addr);
        return get_entity_irg(ent);
    }

    return NULL;
}
/**
 * Walker: Collect all calls to known graphs inside a graph.
 */
static void collect_calls(ir_node *call, void *env) {
    if (is_Call(call)) {
        ir_graph *called_irg = get_call_called_irg(call);

        if (called_irg != NULL) {
            /* The Call node calls a locally defined method.  Remember to inline. */
            inline_env_t *ienv  = env;
            call_entry   *entry = OALLOC(&ienv->obst, call_entry);
            entry->call       = call;
            entry->callee     = called_irg;
            entry->loop_depth = 0;
            entry->benefice   = 0;
            entry->local_adr  = 0;
            entry->all_const  = 0;

            list_add_tail(&entry->list, &ienv->calls);
        }
    }
}
/**
 * Inlines all small methods at call sites where the called address comes
 * from a Const node that references the entity representing the called
 * method.
 * The size argument is a rough measure for the code size of the method:
 * Methods where the obstack containing the firm graph is smaller than
 * size are inlined.
 */
void inline_small_irgs(ir_graph *irg, int size) {
    ir_graph *rem = current_ir_graph;
    inline_env_t env;
    call_entry *entry;

    current_ir_graph = irg;
    /* Handle graph state */
    assert(get_irg_phase_state(irg) != phase_building);
    free_callee_info(irg);

    /* Find Call nodes to inline.
       (We cannot inline during a walk of the graph, as inlining the same
       method several times changes the visited flag of the walked graph:
       after the first inlining, visited of the callee equals visited of
       the caller.  With the next inlining both are increased.) */
    obstack_init(&env.obst);
    INIT_LIST_HEAD(&env.calls);
    irg_walk_graph(irg, NULL, collect_calls, &env);

    if (! list_empty(&env.calls)) {
        /* There are calls to inline */
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        collect_phiprojs(irg);

        list_for_each_entry(call_entry, entry, &env.calls, list) {
            ir_graph            *callee = entry->callee;
            irg_inline_property prop    = get_irg_inline_property(callee);

            if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
                /* do not inline forbidden / weak graphs */
                continue;
            }

            if (prop >= irg_inline_forced ||
                _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
                inline_method(entry->call, callee);
            }
        }
        ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
    }
    obstack_free(&env.obst, NULL);
    current_ir_graph = rem;
}
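
/*
 * Usage sketch (the threshold is illustrative, not a recommendation):
 *
 *     inline_small_irgs(irg, 50);  // inline callees whose graph obstack
 *                                  // uses fewer than ~50 bytes
 */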
struct inline_small_irgs_pass_t {
    ir_graph_pass_t pass;
    int             size;
};

/**
 * Wrapper to run inline_small_irgs() as a pass.
 */
static int inline_small_irgs_wrapper(ir_graph *irg, void *context) {
    struct inline_small_irgs_pass_t *pass = context;

    inline_small_irgs(irg, pass->size);
    return 0;
}

/* create a pass for inline_small_irgs() */
ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size) {
    struct inline_small_irgs_pass_t *pass =
        XMALLOCZ(struct inline_small_irgs_pass_t);

    pass->size = size;
    return def_graph_pass_constructor(
        &pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
}
/**
 * Environment for inlining irgs.
 */
typedef struct {
    list_head calls;             /**< List of all call nodes in this graph. */
    unsigned  *local_weights;    /**< Once allocated, the beneficial weight for transmitting local addresses. */
    unsigned  n_nodes;           /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
    unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
    unsigned  n_nodes_orig;      /**< for statistics */
    unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
    unsigned  n_call_nodes_orig; /**< for statistics */
    unsigned  n_callers;         /**< Number of known graphs that call this graph. */
    unsigned  n_callers_orig;    /**< for statistics */
    unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
    unsigned  recursive:1;       /**< Set, if this function is self recursive. */
} inline_irg_env;
/**
 * Allocate a new environment for inlining.
 */
static inline_irg_env *alloc_inline_irg_env(void) {
    inline_irg_env *env = OALLOC(&temp_obst, inline_irg_env);
    INIT_LIST_HEAD(&env->calls);
    env->local_weights     = NULL;
    env->n_nodes           = -2; /* do not count Start, End */
    env->n_blocks          = -2; /* do not count Start, End Block */
    env->n_nodes_orig      = -2; /* do not count Start, End */
    env->n_call_nodes      = 0;
    env->n_call_nodes_orig = 0;
    env->n_callers         = 0;
    env->n_callers_orig    = 0;
    env->got_inline        = 0;
    env->recursive         = 0;
    return env;
}
typedef struct walker_env {
    inline_irg_env *x;    /**< the inline environment */
    char ignore_runtime;  /**< the ignore runtime flag */
    char ignore_callers;  /**< if set, do not change callers data */
} wenv_t;

/**
 * post-walker: collect all calls in the inline-environment
 * of a graph and sum some statistics.
 */
static void collect_calls2(ir_node *call, void *ctx) {
    wenv_t         *env = ctx;
    inline_irg_env *x   = env->x;
    ir_opcode      code = get_irn_opcode(call);
    ir_graph       *callee;
    call_entry     *entry;

    /* count meaningful nodes in irg */
    if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
        if (code != iro_Block) {
            ++x->n_nodes;
            ++x->n_nodes_orig;
        } else {
            ++x->n_blocks;
        }
    }

    if (code != iro_Call) return;

    /* check, if it's a runtime call */
    if (env->ignore_runtime) {
        ir_node *symc = get_Call_ptr(call);

        if (is_Global(symc)) {
            ir_entity *ent = get_Global_entity(symc);

            if (get_entity_additional_properties(ent) & mtp_property_runtime)
                return;
        }
    }

    /* collect all call nodes */
    ++x->n_call_nodes;
    ++x->n_call_nodes_orig;

    callee = get_call_called_irg(call);
    if (callee != NULL) {
        if (! env->ignore_callers) {
            inline_irg_env *callee_env = get_irg_link(callee);
            /* count all static callers */
            ++callee_env->n_callers;
            ++callee_env->n_callers_orig;
        }
        if (callee == current_ir_graph)
            x->recursive = 1;

        /* link it in the list of possible inlinable entries */
        entry = OALLOC(&temp_obst, call_entry);
        entry->call       = call;
        entry->callee     = callee;
        entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
        entry->benefice   = 0;
        entry->local_adr  = 0;
        entry->all_const  = 0;

        list_add_tail(&entry->list, &x->calls);
    }
}
/**
 * Returns TRUE if the number of call nodes is 0 in the irg's environment,
 * hence this irg is a leaf (it calls no other graphs).
 */
inline static int is_leave(ir_graph *irg) {
    inline_irg_env *env = get_irg_link(irg);
    return env->n_call_nodes == 0;
}
/**
 * Returns TRUE if the number of nodes in the callee is
 * smaller than size in the irg's environment.
 */
inline static int is_smaller(ir_graph *callee, unsigned size) {
    inline_irg_env *env = get_irg_link(callee);
    return env->n_nodes < size;
}
/**
 * Duplicate a call entry.
 *
 * @param entry     the original entry to duplicate
 * @param new_call  the new call node
 * @param loop_depth_delta
 *                  delta value for the loop depth
 */
static call_entry *duplicate_call_entry(const call_entry *entry,
                                        ir_node *new_call, int loop_depth_delta) {
    call_entry *nentry = OALLOC(&temp_obst, call_entry);
    nentry->call       = new_call;
    nentry->callee     = entry->callee;
    nentry->benefice   = entry->benefice;
    nentry->loop_depth = entry->loop_depth + loop_depth_delta;
    nentry->local_adr  = entry->local_adr;
    nentry->all_const  = entry->all_const;

    return nentry;
}
/**
 * Append all call nodes of the source environment to the list of the
 * destination environment.
 *
 * @param dst         destination environment
 * @param src         source environment
 * @param loop_depth  the loop depth of the call that is replaced by the src list
 */
static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
    call_entry *entry, *nentry;

    /* Note that the src list points to Call nodes in the inlined graph, but
       we need Call nodes in our graph.  Luckily the inliner leaves this
       information in the link field. */
    list_for_each_entry(call_entry, entry, &src->calls, list) {
        nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
        list_add_tail(&nentry->list, &dst->calls);
    }
    dst->n_call_nodes += src->n_call_nodes;
    dst->n_nodes      += src->n_nodes;
}
/**
 * Inlines small leaf methods at call sites where the called address comes
 * from a Const node that references the entity representing the called
 * method.
 * The size argument is a rough measure for the code size of the method:
 * Methods where the obstack containing the firm graph is smaller than
 * size are inlined.
 */
void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                            unsigned size, int ignore_runtime)
{
    inline_irg_env   *env;
    ir_graph         *irg;
    int              i, n_irgs;
    ir_graph         *rem;
    int              did_inline;
    wenv_t           wenv;
    call_entry       *entry, *next;
    const call_entry *centry;
    pmap             *copied_graphs;
    pmap_entry       *pm_entry;

    rem = current_ir_graph;
    obstack_init(&temp_obst);

    /* a map for the copied graphs, used to inline recursive calls */
    copied_graphs = pmap_create();

    /* extend all irgs by a temporary data structure for inlining. */
    n_irgs = get_irp_n_irgs();
    for (i = 0; i < n_irgs; ++i)
        set_irg_link(get_irp_irg(i), alloc_inline_irg_env());

    /* Pre-compute information in temporary data structure. */
    wenv.ignore_runtime = ignore_runtime;
    wenv.ignore_callers = 0;
    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);

        assert(get_irg_phase_state(irg) != phase_building);
        free_callee_info(irg);

        assure_cf_loop(irg);
        wenv.x = get_irg_link(irg);
        irg_walk_graph(irg, NULL, collect_calls2, &wenv);
    }
    /* -- and now inline. -- */

    /* Inline leaves recursively -- we might construct new leaves. */
    do {
        did_inline = 0;

        for (i = 0; i < n_irgs; ++i) {
            ir_node *call;
            int phiproj_computed = 0;

            current_ir_graph = get_irp_irg(i);
            env              = get_irg_link(current_ir_graph);

            ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
            list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
                ir_graph            *callee;
                irg_inline_property prop;

                if (env->n_nodes > maxsize)
                    break;

                call   = entry->call;
                callee = entry->callee;

                prop = get_irg_inline_property(callee);
                if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
                    /* do not inline forbidden / weak graphs */
                    continue;
                }

                if (is_leave(callee) && (
                    is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
                    if (!phiproj_computed) {
                        phiproj_computed = 1;
                        collect_phiprojs(current_ir_graph);
                    }
                    did_inline = inline_method(call, callee);

                    if (did_inline) {
                        inline_irg_env *callee_env = get_irg_link(callee);

                        /* call was inlined, Phi/Projs for current graph must be recomputed */
                        phiproj_computed = 0;

                        /* Do some statistics */
                        env->got_inline = 1;
                        --env->n_call_nodes;
                        env->n_nodes += callee_env->n_nodes;
                        --callee_env->n_callers;

                        /* remove this call from the list */
                        list_del(&entry->list);
                    }
                }
            }
            ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        }
    } while (did_inline);
    /* inline other small functions. */
    for (i = 0; i < n_irgs; ++i) {
        ir_node *call;
        int phiproj_computed = 0;

        current_ir_graph = get_irp_irg(i);
        env              = get_irg_link(current_ir_graph);

        ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

        /* note that the list of possible calls is updated during the process */
        list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
            irg_inline_property prop;
            ir_graph            *callee;
            pmap_entry          *e;

            call   = entry->call;
            callee = entry->callee;

            prop = get_irg_inline_property(callee);
            if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
                /* do not inline forbidden / weak graphs */
                continue;
            }

            e = pmap_find(copied_graphs, callee);
            if (e != NULL) {
                /*
                 * Remap callee if we have a copy.
                 * FIXME: Should we do this only for recursive Calls ?
                 */
                callee = e->value;
            }

            if (prop >= irg_inline_forced ||
                (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
                if (current_ir_graph == callee) {
                    /*
                     * Recursive call: we cannot directly inline because we cannot
                     * walk the graph and change it.  So we have to make a copy of
                     * the graph first.
                     */
                    inline_irg_env *callee_env;
                    ir_graph       *copy;

                    ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

                    /*
                     * No copy yet, create one.
                     * Note that recursive methods are never leaves, so it is
                     * sufficient to test this condition here.
                     */
                    copy = create_irg_copy(callee);

                    /* create_irg_copy() destroys the Proj links, recompute them */
                    phiproj_computed = 0;

                    ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

                    /* allocate new environment */
                    callee_env = alloc_inline_irg_env();
                    set_irg_link(copy, callee_env);

                    assure_cf_loop(copy);
                    wenv.x              = callee_env;
                    wenv.ignore_callers = 1;
                    irg_walk_graph(copy, NULL, collect_calls2, &wenv);

                    /*
                     * Enter the entity of the original graph.  This is needed
                     * for inline_method().  However, note that ent->irg still
                     * points to callee, NOT to copy.
                     */
                    set_irg_entity(copy, get_irg_entity(callee));

                    pmap_insert(copied_graphs, callee, copy);
                    callee = copy;

                    /* we have only one caller: the original graph */
                    callee_env->n_callers      = 1;
                    callee_env->n_callers_orig = 1;
                }
                if (! phiproj_computed) {
                    phiproj_computed = 1;
                    collect_phiprojs(current_ir_graph);
                }
                did_inline = inline_method(call, callee);
                if (did_inline) {
                    inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);

                    /* call was inlined, Phi/Projs for current graph must be recomputed */
                    phiproj_computed = 0;

                    /* callee was inlined.  Append its call list. */
                    env->got_inline = 1;
                    --env->n_call_nodes;
                    append_call_list(env, callee_env, entry->loop_depth);
                    --callee_env->n_callers;

                    /* after we have inlined callee, all called methods inside
                       callee are now called once more */
                    list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
                        inline_irg_env *penv = get_irg_link(centry->callee);
                        ++penv->n_callers;
                    }

                    /* remove this call from the list */
                    list_del(&entry->list);
                }
            }
        }
        ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
    }
    for (i = 0; i < n_irgs; ++i) {
        irg = get_irp_irg(i);
        env = get_irg_link(irg);

        if (env->got_inline) {
            optimize_graph_df(irg);
        }
        if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
            DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
                env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
                env->n_callers_orig, env->n_callers,
                get_entity_name(get_irg_entity(irg))));
        }
    }
    /* kill the copied graphs: we don't need them anymore */
    foreach_pmap(copied_graphs, pm_entry) {
        ir_graph *copy = pm_entry->value;

        /* reset the entity, otherwise it will be deleted in the next step ... */
        set_irg_entity(copy, NULL);
        free_ir_graph(copy);
    }
    pmap_destroy(copied_graphs);

    obstack_free(&temp_obst, NULL);
    current_ir_graph = rem;
}
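
/*
 * Usage sketch: a typical whole-program run (the size thresholds are
 * illustrative, not tuned recommendations):
 *
 *     // maxsize, leavesize, size, ignore_runtime
 *     inline_leave_functions(750, 16, 25, 0);
 */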
struct inline_leave_functions_pass_t {
    ir_prog_pass_t pass;
    unsigned       maxsize;
    unsigned       leavesize;
    unsigned       size;
    int            ignore_runtime;
};

/**
 * Wrapper to run inline_leave_functions() as an ir_prog pass.
 */
static int inline_leave_functions_wrapper(ir_prog *irp, void *context) {
    struct inline_leave_functions_pass_t *pass = context;

    (void) irp;
    inline_leave_functions(
        pass->maxsize, pass->leavesize,
        pass->size, pass->ignore_runtime);
    return 0;
}

/* create a pass for inline_leave_functions() */
ir_prog_pass_t *inline_leave_functions_pass(
    const char *name, unsigned maxsize, unsigned leavesize,
    unsigned size, int ignore_runtime) {
    struct inline_leave_functions_pass_t *pass =
        XMALLOCZ(struct inline_leave_functions_pass_t);

    pass->maxsize        = maxsize;
    pass->leavesize      = leavesize;
    pass->size           = size;
    pass->ignore_runtime = ignore_runtime;

    return def_prog_pass_constructor(
        &pass->pass,
        name ? name : "inline_leave_functions",
        inline_leave_functions_wrapper);
}
/**
 * Calculate the parameter weights for transmitting the address of a local variable.
 */
static unsigned calc_method_local_weight(ir_node *arg) {
    int      i, j, k;
    unsigned v, weight = 0;

    for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
        ir_node *succ = get_irn_out(arg, i);

        switch (get_irn_opcode(succ)) {
        case iro_Load:
        case iro_Store:
            /* Loads and Stores can be removed */
            weight += 3;
            break;
        case iro_Sel:
            /* check if all args are constant */
            for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
                ir_node *idx = get_Sel_index(succ, j);
                if (! is_Const(idx))
                    return 0;
            }
            /* Check users on this Sel.  Note: if a 0 is returned here, there was
               some unsupported node. */
            v = calc_method_local_weight(succ);
            if (v == 0)
                return 0;
            /* we can kill one Sel with constant indexes, this is cheap */
            weight += v + 1;
            break;
        case iro_Id:
            /* when looking backward we might find Id nodes */
            weight += calc_method_local_weight(succ);
            break;
        case iro_Tuple:
            /* unoptimized tuple */
            for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
                ir_node *pred = get_Tuple_pred(succ, j);
                if (pred == arg) {
                    /* look for Proj(j) */
                    for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
                        ir_node *succ_succ = get_irn_out(succ, k);
                        if (is_Proj(succ_succ)) {
                            if (get_Proj_proj(succ_succ) == j) {
                                /* found */
                                weight += calc_method_local_weight(succ_succ);
                            }
                        } else {
                            /* this should NOT happen */
                            return 0;
                        }
                    }
                }
            }
            break;
        default:
            /* any other node: unsupported yet or bad. */
            return 0;
        }
    }
    return weight;
}
/**
 * Calculate the parameter weights for transmitting the address of a local variable.
 */
static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
    ir_entity *ent = get_irg_entity(irg);
    ir_type   *mtp;
    int       nparams, i, proj_nr;
    ir_node   *irg_args, *arg;

    mtp     = get_entity_type(ent);
    nparams = get_method_n_params(mtp);

    /* allocate a new array. currently used as 'analysed' flag */
    env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);

    /* If the method has no parameters we have nothing to do. */
    if (nparams <= 0)
        return;

    assure_irg_outs(irg);
    irg_args = get_irg_args(irg);
    for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
        arg     = get_irn_out(irg_args, i);
        proj_nr = get_Proj_proj(arg);
        env->local_weights[proj_nr] = calc_method_local_weight(arg);
    }
}
/**
 * Calculate the benefice for transmitting a local variable address.
 * After inlining, the local variable might be transformed into an
 * SSA variable by scalar_replacement().
 */
static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
    inline_irg_env *env = get_irg_link(callee);

    if (env->local_weights != NULL) {
        if (pos < ARR_LEN(env->local_weights))
            return env->local_weights[pos];
        return 0;
    }

    analyze_irg_local_weights(env, callee);

    if (pos < ARR_LEN(env->local_weights))
        return env->local_weights[pos];
    return 0;
}
1973 * Calculate a benefice value for inlining the given call.
1975 * @param call the call node we have to inspect
1976 * @param callee the called graph
1978 static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
1980 ir_node *call = entry->call;
1981 ir_entity *ent = get_irg_entity(callee);
1985 int i, n_params, all_const;
1987 irg_inline_property prop;
1989 inline_irg_env *callee_env;
1991 prop = get_irg_inline_property(callee);
1992 if (prop == irg_inline_forbidden) {
1993 DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
1995 return entry->benefice = INT_MIN;
1998 if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
1999 DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
2001 return entry->benefice = INT_MIN;
2004 /* costs for every passed parameter */
2005 n_params = get_Call_n_params(call);
2006 mtp = get_entity_type(ent);
2007 cc = get_method_calling_convention(mtp);
2008 if (cc & cc_reg_param) {
2009 /* register parameter, smaller costs for register parameters */
2010 int max_regs = cc & ~cc_bits;
2012 if (max_regs < n_params)
2013 weight += max_regs * 2 + (n_params - max_regs) * 5;
2014 else
2015 weight += n_params * 2;
2016 } else {
2017 /* parameters are passed on the stack */
2018 weight += 5 * n_params;
2019 }
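/* For example (numbers purely illustrative): with a register calling
 * convention that passes max_regs = 4 parameters in registers, a call
 * with 6 parameters is charged 4 * 2 + 2 * 5 = 18, while a pure stack
 * convention charges 5 * 6 = 30. */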
2021 /* constant parameters improve the benefice */
2022 frame_ptr = get_irg_frame(current_ir_graph);
2023 all_const = 1;
2024 for (i = 0; i < n_params; ++i) {
2025 ir_node *param = get_Call_param(call, i);
2027 if (is_Const(param)) {
2028 weight += get_method_param_weight(ent, i);
2029 } else {
2030 all_const = 0;
2031 if (is_SymConst(param))
2032 weight += get_method_param_weight(ent, i);
2033 else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
2034 /*
2035 * An address of a local variable is transmitted. After
2036 * inlining, scalar_replacement might be able to remove the
2037 * local variable, so honor this.
2038 */
2039 v = get_method_local_adress_weight(callee, i);
2040 weight += v;
2041 if (v > 0)
2042 entry->local_adr = 1;
2043 }
2044 }
2045 }
2046 entry->all_const = all_const;
2048 callee_env = get_irg_link(callee);
2049 if (callee_env->n_callers == 1 &&
2050 callee != current_ir_graph &&
2051 get_entity_visibility(ent) == visibility_local) {
2052 weight += 700;
2053 }
2055 /* give a bonus for functions with one block */
2056 if (callee_env->n_blocks == 1)
2057 weight = weight * 3 / 2;
2059 /* and one for small non-recursive functions: we want them inlined in almost every case */
2060 if (callee_env->n_nodes < 30 && !callee_env->recursive)
2061 weight += 2000;
2063 /* and finally for leaves: they do not increase the register pressure
2064 because of callee-save registers */
2065 if (callee_env->n_call_nodes == 0)
2066 weight += 400;
2068 /* it is important to inline inner loops first */
2069 if (entry->loop_depth > 30)
2070 weight += 30 * 1024;
2071 else
2072 weight += entry->loop_depth * 1024;
2074 /*
2075 * All arguments constant is probably a good sign, give an extra bonus
2076 */
2077 if (all_const)
2078 weight += 100;
2080 return entry->benefice = weight;
2081 }
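/* Worked example (all numbers illustrative; the per-parameter weights from
 * get_method_param_weight() are ignored here): a local callee with a single
 * caller, one block, fewer than 30 nodes, no calls and two register-passed
 * constant arguments at loop depth 1 scores 2 * 2 = 4, +700 (only caller)
 * = 704, * 3 / 2 (one block) = 1056, +2000 (small non-recursive),
 * +400 (leaf), +1024 (loop depth), +100 (all constant) = 4580. */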
2083 static ir_graph **irgs;
2084 static int last_irg;
2086 /**
2087 * Callgraph walker, collect all visited graphs.
2088 */
2089 static void callgraph_walker(ir_graph *irg, void *data) {
2090 (void) data;
2091 irgs[last_irg++] = irg;
2092 }
2094 /**
2095 * Creates an inline order for all graphs.
2096 *
2097 * @return the list of graphs.
2098 */
2099 static ir_graph **create_irg_list(void) {
2100 ir_entity **free_methods;
2101 int arr_len;
2102 int n_irgs = get_irp_n_irgs();
2104 cgana(&arr_len, &free_methods);
2105 xfree(free_methods);
2107 compute_callgraph();
2109 last_irg = 0;
2110 irgs = XMALLOCNZ(ir_graph*, n_irgs);
2112 callgraph_walk(NULL, callgraph_walker, NULL);
2113 assert(n_irgs == last_irg);
2115 return irgs;
2116 }
2118 /**
2119 * Push a call onto the priority list if its benefice is big enough.
2120 *
2121 * @param pqueue the priority queue of calls
2122 * @param call the call entry
2123 * @param inline_threshold
2124 * the threshold value
2125 */
2126 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
2127 int inline_threshold)
2128 {
2129 ir_graph *callee = call->callee;
2130 irg_inline_property prop = get_irg_inline_property(callee);
2131 int benefice = calc_inline_benefice(call, callee);
2133 DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
2134 get_irn_irg(call->call), call->call, callee, benefice));
2136 if (prop < irg_inline_forced && benefice < inline_threshold) {
2137 return;
2138 }
2140 pqueue_put(pqueue, call, benefice);
2141 }
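/* pqueue_pop_front() returns the pending call with the highest priority,
 * i.e. the largest benefice, so the most promising calls are inlined first,
 * before the size budget of the caller is used up. */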
2143 /**
2144 * Try to inline calls into a graph.
2145 *
2146 * @param irg the graph into which we inline
2147 * @param maxsize do NOT inline if the size of irg gets
2148 * bigger than this amount
2149 * @param inline_threshold
2150 * threshold value for inline decision
2151 * @param copied_graphs
2152 * map containing copies of recursive graphs
2153 */
2154 static void inline_into(ir_graph *irg, unsigned maxsize,
2155 int inline_threshold, pmap *copied_graphs)
2156 {
2157 int phiproj_computed = 0;
2158 inline_irg_env *env = get_irg_link(irg);
2159 call_entry *curr_call;
2160 wenv_t wenv;
2161 pqueue_t *pqueue;
2163 if (env->n_call_nodes == 0)
2164 return;
2166 if (env->n_nodes > maxsize) {
2167 DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
2168 return;
2169 }
2171 current_ir_graph = irg;
2172 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2174 /* put irgs into the pqueue */
2175 pqueue = new_pqueue();
2177 list_for_each_entry(call_entry, curr_call, &env->calls, list) {
2178 assert(is_Call(curr_call->call));
2179 maybe_push_call(pqueue, curr_call, inline_threshold);
2180 }
2182 /* note that the list of possible calls is updated during the process */
2183 while (!pqueue_empty(pqueue)) {
2184 int did_inline;
2185 call_entry *curr_call = pqueue_pop_front(pqueue);
2186 ir_graph *callee = curr_call->callee;
2187 ir_node *call_node = curr_call->call;
2188 inline_irg_env *callee_env = get_irg_link(callee);
2189 irg_inline_property prop = get_irg_inline_property(callee);
2190 int loop_depth;
2191 const call_entry *centry;
2192 pmap_entry *e;
2194 if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
2195 DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
2196 env->n_nodes, callee, callee_env->n_nodes));
2197 continue;
2198 }
2200 e = pmap_find(copied_graphs, callee);
2201 if (e != NULL) {
2202 int benefice = curr_call->benefice;
2203 /*
2204 * Reduce the weight for recursive functions if not all arguments are constant;
2205 * inlining recursive functions is rarely profitable.
2206 */
2207 if (!curr_call->all_const)
2208 benefice -= 2000;
2209 if (benefice < inline_threshold)
2210 continue;
2212 /*
2213 * Remap callee if we have a copy.
2214 */
2215 callee = e->value;
2216 callee_env = get_irg_link(callee);
2217 }
2219 if (current_ir_graph == callee) {
2220 /*
2221 * Recursive call: we cannot directly inline because we cannot
2222 * walk the graph and change it. So we have to make a copy of
2223 * the graph first.
2224 */
2225 int benefice = curr_call->benefice;
2226 ir_graph *copy;
2228 /*
2229 * Reduce the weight for recursive functions if not all arguments are constant;
2230 * inlining recursive functions is rarely profitable.
2231 */
2232 if (!curr_call->all_const)
2233 benefice -= 2000;
2234 if (benefice < inline_threshold)
2235 continue;
2237 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2239 /*
2240 * No copy yet, create one.
2241 * Note that recursive methods are never leaves, so it is
2242 * sufficient to test this condition here.
2243 */
2244 copy = create_irg_copy(callee);
2246 /* create_irg_copy() destroys the Proj links, recompute them */
2247 phiproj_computed = 0;
2249 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2251 /* allocate a new environment */
2252 callee_env = alloc_inline_irg_env();
2253 set_irg_link(copy, callee_env);
2255 assure_cf_loop(copy);
2256 wenv.x = callee_env;
2257 wenv.ignore_callers = 1;
2258 irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2260 /*
2261 * Enter the entity of the original graph. This is needed
2262 * for inline_method(). However, note that ent->irg still points
2263 * to callee, NOT to copy.
2264 */
2265 set_irg_entity(copy, get_irg_entity(callee));
2267 pmap_insert(copied_graphs, callee, copy);
2268 callee = copy;
2270 /* we have only one caller: the original graph */
2271 callee_env->n_callers = 1;
2272 callee_env->n_callers_orig = 1;
2273 }
2274 if (! phiproj_computed) {
2275 phiproj_computed = 1;
2276 collect_phiprojs(current_ir_graph);
2277 }
2278 did_inline = inline_method(call_node, callee);
2279 if (!did_inline)
2280 continue;
2282 /* call was inlined, Phi/Projs for current graph must be recomputed */
2283 phiproj_computed = 0;
2285 /* remove it from the caller list */
2286 list_del(&curr_call->list);
2288 /* callee was inlined. Append its call list. */
2289 env->got_inline = 1;
2290 --env->n_call_nodes;
2292 /* we just generate a bunch of new calls */
2293 loop_depth = curr_call->loop_depth;
2294 list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
2295 inline_irg_env *penv = get_irg_link(centry->callee);
2296 ir_node *new_call;
2297 call_entry *new_entry;
2299 /* after we have inlined callee, all called methods inside
2300 * callee are now called once more */
2301 ++penv->n_callers;
2303 /* Note that the src list points to Call nodes in the inlined graph,
2304 * but we need Call nodes in our graph. Luckily the inliner leaves
2305 * this information in the link field. */
2306 new_call = get_irn_link(centry->call);
2307 assert(is_Call(new_call));
2309 new_entry = duplicate_call_entry(centry, new_call, loop_depth);
2310 list_add_tail(&new_entry->list, &env->calls);
2311 maybe_push_call(pqueue, new_entry, inline_threshold);
2312 }
2314 env->n_call_nodes += callee_env->n_call_nodes;
2315 env->n_nodes += callee_env->n_nodes;
2316 --callee_env->n_callers;
2317 }
2318 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2319 del_pqueue(pqueue);
2320 }
2322 /**
2323 * Heuristic inliner. Calculates a benefice value for every call and inlines
2324 * those calls with a value higher than the threshold.
2325 */
2326 void inline_functions(unsigned maxsize, int inline_threshold,
2327 opt_ptr after_inline_opt)
2328 {
2329 inline_irg_env *env;
2330 int i, n_irgs;
2331 ir_graph *rem;
2332 wenv_t wenv;
2333 pmap *copied_graphs;
2334 pmap_entry *pm_entry;
2337 rem = current_ir_graph;
2338 obstack_init(&temp_obst);
2340 irgs = create_irg_list();
2342 /* a map for the copied graphs, used to inline recursive calls */
2343 copied_graphs = pmap_create();
2345 /* extend all irgs by a temporary data structure for inlining. */
2346 n_irgs = get_irp_n_irgs();
2347 for (i = 0; i < n_irgs; ++i)
2348 set_irg_link(irgs[i], alloc_inline_irg_env());
2350 /* Pre-compute information in temporary data structure. */
2351 wenv.ignore_runtime = 0;
2352 wenv.ignore_callers = 0;
2353 for (i = 0; i < n_irgs; ++i) {
2354 ir_graph *irg = irgs[i];
2356 free_callee_info(irg);
2358 wenv.x = get_irg_link(irg);
2359 assure_cf_loop(irg);
2360 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2361 }
2363 /* -- and now inline. -- */
2364 for (i = 0; i < n_irgs; ++i) {
2365 ir_graph *irg = irgs[i];
2367 inline_into(irg, maxsize, inline_threshold, copied_graphs);
2368 }
2370 for (i = 0; i < n_irgs; ++i) {
2371 ir_graph *irg = irgs[i];
2373 env = get_irg_link(irg);
2374 if (env->got_inline && after_inline_opt != NULL) {
2375 /* this irg got calls inlined: optimize it */
2376 after_inline_opt(irg);
2377 }
2378 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2379 DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2380 env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2381 env->n_callers_orig, env->n_callers,
2382 get_entity_name(get_irg_entity(irg))));
2383 }
2384 }
2386 /* kill the copied graphs: we don't need them anymore */
2387 foreach_pmap(copied_graphs, pm_entry) {
2388 ir_graph *copy = pm_entry->value;
2390 /* reset the entity, otherwise it will be deleted in the next step ... */
2391 set_irg_entity(copy, NULL);
2392 free_ir_graph(copy);
2393 }
2394 pmap_destroy(copied_graphs);
2396 xfree(irgs);
2398 obstack_free(&temp_obst, NULL);
2399 current_ir_graph = rem;
2400 }
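/* A minimal usage sketch (the size limit, threshold and cleanup callback
 * below are illustrative assumptions, not requirements of this interface;
 * optimize_graph_df() is merely one function matching the opt_ptr
 * signature):
 *
 *     inline_functions(750, 0, optimize_graph_df);
 *
 * This inlines while graphs stay below roughly 750 nodes, accepts every
 * call whose benefice is non-negative, and runs local optimization on each
 * graph that received inlined code. */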
2402 struct inline_functions_pass_t {
2403 ir_prog_pass_t pass;
2404 unsigned maxsize;
2405 int inline_threshold;
2406 opt_ptr after_inline_opt;
2407 };
2409 /**
2410 * Wrapper to run inline_functions() as an ir_prog pass.
2411 */
2412 static int inline_functions_wrapper(ir_prog *irp, void *context) {
2413 struct inline_functions_pass_t *pass = context;
2415 (void)irp;
2416 inline_functions(pass->maxsize, pass->inline_threshold,
2417 pass->after_inline_opt);
2418 return 0;
2419 }
2421 /* create an ir_prog pass for inline_functions */
2422 ir_prog_pass_t *inline_functions_pass(
2423 const char *name, unsigned maxsize, int inline_threshold,
2424 opt_ptr after_inline_opt) {
2425 struct inline_functions_pass_t *pass =
2426 XMALLOCZ(struct inline_functions_pass_t);
2428 pass->maxsize = maxsize;
2429 pass->inline_threshold = inline_threshold;
2430 pass->after_inline_opt = after_inline_opt;
2432 return def_prog_pass_constructor(
2433 &pass->pass, name ? name : "inline_functions",
2434 inline_functions_wrapper);
2435 }
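/* A minimal usage sketch (a NULL name selects the default
 * "inline_functions"; the maxsize/threshold values are illustrative, and
 * registering the returned pass with a pass manager is left to the
 * surrounding driver):
 *
 *     ir_prog_pass_t *p = inline_functions_pass(NULL, 750, 0, NULL);
 *
 * When the pass runs, inline_functions_wrapper() forwards the stored
 * parameters to inline_functions(). */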
2437 void firm_init_inline(void) {
2438 FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2439 }