disable inline of functions which call alloca(), causes stack overrun in 176.gcc
[libfirm] / ir / opt / opt_inline.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #ifdef HAVE_CONFIG_H
27 # include "config.h"
28 #endif
29
30 #include <limits.h>
31 #include <assert.h>
32
33 #include "irnode_t.h"
34 #include "irgraph_t.h"
35 #include "irprog_t.h"
36
37 #include "iroptimize.h"
38 #include "ircons_t.h"
39 #include "iropt_t.h"
40 #include "irgopt.h"
41 #include "irgmod.h"
42 #include "irgwalk.h"
43
44 #include "adt/array.h"
45 #include "adt/pset.h"
46 #include "adt/pmap.h"
47 #include "adt/pdeq.h"
48 #include "adt/xmalloc.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63
64 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
65
66 /*------------------------------------------------------------------*/
67 /* Routines for dead node elimination / copying garbage collection  */
68 /* of the obstack.                                                  */
69 /*------------------------------------------------------------------*/
70
71 /**
72  * Remember the new node in the old node by using a field all nodes have.
73  */
74 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
75
76 /**
77  * Get this new node, before the old node is forgotten.
78  */
79 #define get_new_node(oldn) get_irn_link(oldn)
80
81 /**
82  * Check if a new node was set.
83  */
84 #define has_new_node(n) (get_new_node(n) != NULL)
85
86 /**
87  * We use the block_visited flag to mark that we have computed the
88  * number of useful predecessors for this block.
89  * Further we encode the new arity in this flag in the old blocks.
90  * Remembering the arity is useful, as it saves a lot of pointer
91  * accesses.  This function is called for all Phi and Block nodes
92  * in a Block.
93  */
94 static INLINE int
95 compute_new_arity(ir_node *b) {
96         int i, res, irn_arity;
97         int irg_v, block_v;
98
99         irg_v = get_irg_block_visited(current_ir_graph);
100         block_v = get_Block_block_visited(b);
101         if (block_v >= irg_v) {
102                 /* we computed the number of preds for this block and saved it in the
103                    block_v flag */
104                 return block_v - irg_v;
105         } else {
106                 /* compute the number of good predecessors */
107                 res = irn_arity = get_irn_arity(b);
108                 for (i = 0; i < irn_arity; i++)
109                         if (is_Bad(get_irn_n(b, i))) res--;
110                 /* save it in the flag. */
111                 set_Block_block_visited(b, irg_v + res);
112                 return res;
113         }
114 }
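/* A worked example of the encoding above (the numbers are hypothetical):
   assume get_irg_block_visited(current_ir_graph) == 100 and a block b with
   four predecessors, one of them Bad.  The first call computes res == 3 and
   stores 100 + 3 in b's block_visited flag; every later call sees
   block_v (103) >= irg_v (100) and simply returns 103 - 100 == 3 without
   touching the predecessor array again. */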
115
116 /**
117  * Copies the node to the new obstack. The Ins of the new node point to
118  * the predecessors on the old obstack.  For block/phi nodes not all
119  * predecessors might be copied.  n->link points to the new node.
120  * For Phi and Block nodes the function allocates in-arrays with an arity
121  * only for useful predecessors.  The arity is determined by counting
122  * the non-bad predecessors of the block.
123  *
124  * @param n    The node to be copied
125  * @param env  if non-NULL, the node number attribute will be copied to the new node
126  *
127  * Note: Also used for loop unrolling.
128  */
129 static void copy_node(ir_node *n, void *env) {
130         ir_node *nn, *block;
131         int new_arity;
132         ir_op *op = get_irn_op(n);
133         (void) env;
134
135         if (op == op_Bad) {
136                 /* node copied already */
137                 return;
138         } else if (op == op_Block) {
139                 block = NULL;
140                 new_arity = compute_new_arity(n);
141                 n->attr.block.graph_arr = NULL;
142         } else {
143                 block = get_nodes_block(n);
144                 if (op == op_Phi) {
145                         new_arity = compute_new_arity(block);
146                 } else {
147                         new_arity = get_irn_arity(n);
148                 }
149         }
150         nn = new_ir_node(get_irn_dbg_info(n),
151                 current_ir_graph,
152                 block,
153                 op,
154                 get_irn_mode(n),
155                 new_arity,
156                 get_irn_in(n) + 1);
157         /* Copy the attributes.  These might point to additional data.  If this
158            was allocated on the old obstack the pointers now are dangling.  This
159            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
160         if (op == op_Block) {
161                 /* we cannot allow blocks WITHOUT macroblock input */
162                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
163         }
164         copy_node_attr(n, nn);
165
166 #ifdef DEBUG_libfirm
167         {
168                 int copy_node_nr = env != NULL;
169                 if (copy_node_nr) {
170                         /* for easier debugging, we want to copy the node numbers too */
171                         nn->node_nr = n->node_nr;
172                 }
173         }
174 #endif
175
176         set_new_node(n, nn);
177         hook_dead_node_elim_subst(current_ir_graph, n, nn);
178 }
179
180 /**
181  * Copies the predecessors of the old node to the new node remembered in the link field.
182  * Bad predecessors of Phi and Block nodes are skipped.
183  */
184 static void copy_preds(ir_node *n, void *env) {
185         ir_node *nn, *block;
186         int i, j, irn_arity;
187         (void) env;
188
189         nn = get_new_node(n);
190
191         if (is_Block(n)) {
192                 /* copy the macro block header */
193                 ir_node *mbh = get_Block_MacroBlock(n);
194
195                 if (mbh == n) {
196                         /* this block is a macroblock header */
197                         set_Block_MacroBlock(nn, nn);
198                 } else {
199                         /* get the macro block header */
200                         ir_node *nmbh = get_new_node(mbh);
201                         assert(nmbh != NULL);
202                         set_Block_MacroBlock(nn, nmbh);
203                 }
204
205                 /* Don't copy Bad nodes. */
206                 j = 0;
207                 irn_arity = get_irn_arity(n);
208                 for (i = 0; i < irn_arity; i++) {
209                         if (! is_Bad(get_irn_n(n, i))) {
210                                 ir_node *pred = get_irn_n(n, i);
211                                 set_irn_n(nn, j, get_new_node(pred));
212                                 j++;
213                         }
214                 }
215                 /* repair the block visited flag from above misuse. Repair it in both
216                    graphs so that the old one can still be used. */
217                 set_Block_block_visited(nn, 0);
218                 set_Block_block_visited(n, 0);
219                 /* Local optimization could not merge two subsequent blocks if the
220                    in array contained Bads.  Now it is possible.
221                    We don't call optimize_in_place as it requires
222                    that the fields in ir_graph are set properly. */
223                 if ((get_opt_control_flow_straightening()) &&
224                         (get_Block_n_cfgpreds(nn) == 1) &&
225                         is_Jmp(get_Block_cfgpred(nn, 0))) {
226                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
227                         if (nn == old) {
228                                 /* Jmp jumps into the block it is in -- a dead self-cycle; replace it by Bad. */
229                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
230                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
231                         } else {
232                                 exchange(nn, old);
233                         }
234                 }
235         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
236                 /* Don't copy node if corresponding predecessor in block is Bad.
237                    The Block itself should not be Bad. */
238                 block = get_nodes_block(n);
239                 set_nodes_block(nn, get_new_node(block));
240                 j = 0;
241                 irn_arity = get_irn_arity(n);
242                 for (i = 0; i < irn_arity; i++) {
243                         if (! is_Bad(get_irn_n(block, i))) {
244                                 ir_node *pred = get_irn_n(n, i);
245                                 set_irn_n(nn, j, get_new_node(pred));
246                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
247                                 j++;
248                         }
249                 }
250                 /* If the pre walker reached this Phi after the post walker visited the
251                    block, block_visited is > 0. */
252                 set_Block_block_visited(get_nodes_block(n), 0);
253                 /* Compacting the Phi's ins might generate Phis with only one
254                    predecessor. */
255                 if (get_irn_arity(nn) == 1)
256                         exchange(nn, get_irn_n(nn, 0));
257         } else {
258                 irn_arity = get_irn_arity(n);
259                 for (i = -1; i < irn_arity; i++)
260                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
261         }
262         /* Now the new node is complete.  We can add it to the hash table for CSE.
263            @@@ inlining aborts if we identify End. Why? */
264         if (!is_End(nn))
265                 add_identities(current_ir_graph->value_table, nn);
266 }
267
268 /**
269  * Copies the graph recursively, compacts the keep-alives of the end node.
270  *
271  * @param irg           the graph to be copied
272  * @param copy_node_nr  If non-zero, the node number will be copied
273  */
274 static void copy_graph(ir_graph *irg, int copy_node_nr) {
275         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
276         ir_node *ka;      /* keep alive */
277         int i, irn_arity;
278         unsigned long vfl;
279
280         /* Some nodes must be copied by hand, sigh */
281         vfl = get_irg_visited(irg);
282         set_irg_visited(irg, vfl + 1);
283
284         oe = get_irg_end(irg);
285         mark_irn_visited(oe);
286         /* copy the end node by hand, allocate dynamic in array! */
287         ne = new_ir_node(get_irn_dbg_info(oe),
288                 irg,
289                 NULL,
290                 op_End,
291                 mode_X,
292                 -1,
293                 NULL);
294         /* Copy the attributes.  Well, there might be some in the future... */
295         copy_node_attr(oe, ne);
296         set_new_node(oe, ne);
297
298         /* copy the Bad node */
299         ob = get_irg_bad(irg);
300         mark_irn_visited(ob);
301         nb = new_ir_node(get_irn_dbg_info(ob),
302                 irg,
303                 NULL,
304                 op_Bad,
305                 mode_T,
306                 0,
307                 NULL);
308         copy_node_attr(ob, nb);
309         set_new_node(ob, nb);
310
311         /* copy the NoMem node */
312         om = get_irg_no_mem(irg);
313         mark_irn_visited(om);
314         nm = new_ir_node(get_irn_dbg_info(om),
315                 irg,
316                 NULL,
317                 op_NoMem,
318                 mode_M,
319                 0,
320                 NULL);
321         copy_node_attr(om, nm);
322         set_new_node(om, nm);
323
324         /* copy the live nodes */
325         set_irg_visited(irg, vfl);
326         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
327
328         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
329
330         /* visit the anchors as well */
331         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
332                 ir_node *n = get_irg_anchor(irg, i);
333
334                 if (n && (get_irn_visited(n) <= vfl)) {
335                         set_irg_visited(irg, vfl);
336                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
337                 }
338         }
339
340         /* copy_preds for the end node ... */
341         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
342
343         /*- ... and now the keep alives. -*/
344         /* First pick the unmarked Block nodes and walk them.  We must pick these
345            first, otherwise we would overlook blocks reachable from Phis. */
346         irn_arity = get_End_n_keepalives(oe);
347         for (i = 0; i < irn_arity; i++) {
348                 ka = get_End_keepalive(oe, i);
349                 if (is_Block(ka)) {
350                         if (get_irn_visited(ka) <= vfl) {
351                                 /* We must keep the block alive and copy everything reachable */
352                                 set_irg_visited(irg, vfl);
353                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
354                         }
355                         add_End_keepalive(ne, get_new_node(ka));
356                 }
357         }
358
359         /* Now pick other nodes.  Here we will keep all! */
360         irn_arity = get_End_n_keepalives(oe);
361         for (i = 0; i < irn_arity; i++) {
362                 ka = get_End_keepalive(oe, i);
363                 if (!is_Block(ka)) {
364                         if (get_irn_visited(ka) <= vfl) {
365                                 /* We didn't copy the node yet.  */
366                                 set_irg_visited(irg, vfl);
367                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
368                         }
369                         add_End_keepalive(ne, get_new_node(ka));
370                 }
371         }
372
373         /* start block sometimes only reached after keep alives */
374         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
375         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
376 }
377
378 /**
379  * Copies the graph reachable from current_ir_graph->end to the obstack
380  * in current_ir_graph and fixes the environment.
381  * Then fixes the fields in current_ir_graph containing nodes of the
382  * graph.
383  *
384  * @param copy_node_nr  If non-zero, the node number will be copied
385  */
386 static void
387 copy_graph_env(int copy_node_nr) {
388         ir_graph *irg = current_ir_graph;
389         ir_node *old_end, *new_anchor;
390         int i;
391
392         /* remove end_except and end_reg nodes */
393         old_end = get_irg_end(irg);
394         set_irg_end_except (irg, old_end);
395         set_irg_end_reg    (irg, old_end);
396
397         /* Not all nodes remembered in irg might be reachable
398            from the end node.  Assure their link is set to NULL, so that
399            we can test whether new nodes have been computed. */
400         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
401                 ir_node *n = get_irg_anchor(irg, i);
402                 if (n != NULL)
403                         set_new_node(n, NULL);
404         }
405         /* we use the block walk flag for removing Bads from Blocks ins. */
406         inc_irg_block_visited(irg);
407
408         /* copy the graph */
409         copy_graph(irg, copy_node_nr);
410
411         /* fix the anchor */
412         old_end    = get_irg_end(irg);
413         new_anchor = new_Anchor(irg);
414
415         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
416                 ir_node *n = get_irg_anchor(irg, i);
417                 if (n)
418                         set_irn_n(new_anchor, i, get_new_node(n));
419         }
420         free_End(old_end);
421         irg->anchor = new_anchor;
422
423         /* ensure the new anchor is placed in the endblock */
424         set_nodes_block(new_anchor, get_irg_end_block(irg));
425 }
426
427 /**
428  * Copies all reachable nodes to a new obstack.  Removes bad inputs
429  * from block nodes and the corresponding inputs from Phi nodes.
430  * Merges single exit blocks with single entry blocks and removes
431  * 1-input Phis.
432  * Adds all new nodes to a new hash table for CSE.  Does not
433  * perform CSE, so the hash table might contain common subexpressions.
434  */
435 void dead_node_elimination(ir_graph *irg) {
436         ir_graph *rem;
437 #ifdef INTERPROCEDURAL_VIEW
438         int rem_ipview = get_interprocedural_view();
439 #endif
440         struct obstack *graveyard_obst = NULL;
441         struct obstack *rebirth_obst   = NULL;
442         assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
443
444         /* inform statistics that we started a dead-node elimination run */
445         hook_dead_node_elim(irg, 1);
446
447         /* Remember external state of current_ir_graph. */
448         rem = current_ir_graph;
449         current_ir_graph = irg;
450 #ifdef INTERPROCEDURAL_VIEW
451         set_interprocedural_view(0);
452 #endif
453
454         assert(get_irg_phase_state(irg) != phase_building);
455
456         /* Handle graph state */
457         free_callee_info(irg);
458         free_irg_outs(irg);
459         free_trouts();
460
461         /* @@@ so far we lose the loop information when copying */
462         free_loop_information(irg);
463
464         set_irg_doms_inconsistent(irg);
465
466         /* A quiet place, where the old obstack can rest in peace,
467            until it will be cremated. */
468         graveyard_obst = irg->obst;
469
470         /* A new obstack, where the reachable nodes will be copied to. */
471         rebirth_obst = xmalloc(sizeof(*rebirth_obst));
472         irg->obst = rebirth_obst;
473         obstack_init(irg->obst);
474         irg->last_node_idx = 0;
475
476         /* We also need a new value table for CSE */
477         del_identities(irg->value_table);
478         irg->value_table = new_identities();
479
480         /* Copy the graph from the old to the new obstack */
481         copy_graph_env(/*copy_node_nr=*/1);
482
483         /* Free memory from old unoptimized obstack */
484         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
485         xfree(graveyard_obst);            /* ... then free it.           */
486
487         /* inform statistics that the run is over */
488         hook_dead_node_elim(irg, 0);
489
490         current_ir_graph = rem;
491 #ifdef INTERPROCEDURAL_VIEW
492         set_interprocedural_view(rem_ipview);
493 #endif
494 }
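/* A minimal usage sketch (the client code is hypothetical; note the assert above:
 * the out-edge information must not be activated when this is called):
 *
 *     if (edges_activated(irg))
 *         edges_deactivate(irg);
 *     dead_node_elimination(irg);
 *
 * Afterwards all reachable nodes live on a fresh obstack; outs, dominance and
 * loop information must be recomputed if they are needed again. */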
495
496 /**
497  * Relink Bad predecessors of a block and store the old in array in the
498  * link field. This function is called by relink_bad_predecessors().
499  * The array stored in the link field starts with the block operand at position 0.
500  * If the block has Bad predecessors, create a new in array without them.
501  * Otherwise leave the in array untouched.
502  */
503 static void relink_bad_block_predecessors(ir_node *n, void *env) {
504         ir_node **new_in, *irn;
505         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
506         (void) env;
507
508         /* if link field of block is NULL, look for bad predecessors otherwise
509            this is already done */
510         if (is_Block(n) && get_irn_link(n) == NULL) {
511                 /* save old predecessors in link field (position 0 is the block operand)*/
512                 set_irn_link(n, get_irn_in(n));
513
514                 /* count predecessors without bad nodes */
515                 old_irn_arity = get_irn_arity(n);
516                 for (i = 0; i < old_irn_arity; i++)
517                         if (!is_Bad(get_irn_n(n, i)))
518                                 ++new_irn_arity;
519
520                 /* arity changing: set new predecessors without bad nodes */
521                 if (new_irn_arity < old_irn_arity) {
522                         /* Get new predecessor array. We do not resize the array, as we must
523                            keep the old one to update Phis. */
524                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
525
526                         /* set new predecessors in array */
527                         new_in[0] = NULL;
528                         new_irn_n = 1;
529                         for (i = 0; i < old_irn_arity; i++) {
530                                 irn = get_irn_n(n, i);
531                                 if (!is_Bad(irn)) {
532                                         new_in[new_irn_n] = irn;
533                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
534                                         ++new_irn_n;
535                                 }
536                         }
537                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
538                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
539                         n->in = new_in;
540                 } /* ir node has bad predecessors */
541         } /* Block is not relinked */
542 }
543
544 /**
545  * Relinks Bad predecessors of Blocks and Phis; called by the walker in
546  * remove_bad_predecessors(). If n is a Block, call
547  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
548  * function on the Phi's Block. If this block has Bad predecessors, relink the
549  * predecessors of the Phi node as well.
550  */
551 static void relink_bad_predecessors(ir_node *n, void *env) {
552         ir_node *block, **old_in;
553         int i, old_irn_arity, new_irn_arity;
554
555         /* relink bad predecessors of a block */
556         if (is_Block(n))
557                 relink_bad_block_predecessors(n, env);
558
559         /* If n is a Phi node, relink its block and its predecessors */
560         if (is_Phi(n)) {
561                 /* Relink predecessors of phi's block */
562                 block = get_nodes_block(n);
563                 if (get_irn_link(block) == NULL)
564                         relink_bad_block_predecessors(block, env);
565
566                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
567                 old_irn_arity = ARR_LEN(old_in);
568
569                 /* Relink Phi predecessors if count of predecessors changed */
570                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
571                         /* set new predecessors in array
572                            n->in[0] remains the same block */
573                         new_irn_arity = 1;
574                         for(i = 1; i < old_irn_arity; i++)
575                                 if (!is_Bad(old_in[i])) {
576                                         n->in[new_irn_arity] = n->in[i];
577                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
578                                         ++new_irn_arity;
579                                 }
580
581                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
582                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
583                 }
584         } /* n is a Phi node */
585 }
586
587 /*
588  * Removes Bad predecessors from Blocks and the corresponding
589  * inputs of Phi nodes as in dead_node_elimination, but without
590  * copying the graph.
591  * The pre walker sets the link field to NULL, the post walker calls
592  * relink_bad_predecessors() (this function stores the old in array
593  * in the link field and sets a new in array if the arity of the
594  * predecessors changes).
595  */
596 void remove_bad_predecessors(ir_graph *irg) {
597         panic("Fix backedge handling first");
598         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
599 }
600
601
602 /*
603    __                      _  __ __
604   (_     __    o     _    | \/  |_
605   __)|_| | \_/ | \_/(/_   |_/\__|__
606
607   The following stuff implements a facility that automatically patches
608   registered ir_node pointers to the new node when a dead node elimination occurs.
609 */
610
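/* A minimal usage sketch (the client code is hypothetical): keep a pointer to a
 * node valid across a dead node elimination by registering its address.
 *
 *     survive_dce_t *sd   = new_survive_dce();
 *     ir_node       *node = ...;            // some node we want to keep track of
 *
 *     survive_dce_register_irn(sd, &node);  // register the *address* of the pointer
 *     dead_node_elimination(irg);           // copies the graph; the hooks below fire
 *     // 'node' now points to the copy of the original node
 *     free_survive_dce(sd);
 */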
611 struct _survive_dce_t {
612         struct obstack obst;
613         pmap *places;
614         pmap *new_places;
615         hook_entry_t dead_node_elim;
616         hook_entry_t dead_node_elim_subst;
617 };
618
619 typedef struct _survive_dce_list_t {
620         struct _survive_dce_list_t *next;
621         ir_node **place;
622 } survive_dce_list_t;
623
624 static void dead_node_hook(void *context, ir_graph *irg, int start) {
625         survive_dce_t *sd = context;
626         (void) irg;
627
628         /* Create a new map before the dead node elimination is performed. */
629         if (start) {
630                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
631         } else {
632                 /* Patch back all nodes if dead node elimination is over and something is to be done. */
633                 pmap_destroy(sd->places);
634                 sd->places     = sd->new_places;
635                 sd->new_places = NULL;
636         }
637 }
638
639 /**
640  * Hook called when dead node elimination replaces old by nw.
641  */
642 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
643         survive_dce_t *sd = context;
644         survive_dce_list_t *list = pmap_get(sd->places, old);
645         (void) irg;
646
647         /* If the node is to be patched back, write the new address to all registered locations. */
648         if (list) {
649                 survive_dce_list_t *p;
650
651                 for (p = list; p; p = p->next)
652                         *(p->place) = nw;
653
654                 pmap_insert(sd->new_places, nw, list);
655         }
656 }
657
658 /**
659  * Make a new Survive DCE environment.
660  */
661 survive_dce_t *new_survive_dce(void) {
662         survive_dce_t *res = xmalloc(sizeof(res[0]));
663         obstack_init(&res->obst);
664         res->places     = pmap_create();
665         res->new_places = NULL;
666
667         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
668         res->dead_node_elim.context                   = res;
669         res->dead_node_elim.next                      = NULL;
670
671         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
672         res->dead_node_elim_subst.context = res;
673         res->dead_node_elim_subst.next    = NULL;
674
675 #ifndef FIRM_ENABLE_HOOKS
676         assert(0 && "need hooks enabled");
677 #endif
678
679         register_hook(hook_dead_node_elim, &res->dead_node_elim);
680         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
681         return res;
682 }
683
684 /**
685  * Free a Survive DCE environment.
686  */
687 void free_survive_dce(survive_dce_t *sd) {
688         obstack_free(&sd->obst, NULL);
689         pmap_destroy(sd->places);
690         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
691         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
692         xfree(sd);
693 }
694
695 /**
696  * Register a node pointer to be patched upon DCE.
697  * When DCE occurs, the node pointer specified by @p place will be
698  * patched to the new address of the node it is pointing to.
699  *
700  * @param sd    The Survive DCE environment.
701  * @param place The address of the node pointer.
702  */
703 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
704         if (*place != NULL) {
705                 ir_node *irn      = *place;
706                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
707                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
708
709                 nw->next  = curr;
710                 nw->place = place;
711
712                 pmap_insert(sd->places, irn, nw);
713         }
714 }
715
716 /*--------------------------------------------------------------------*/
717 /*  Functionality for inlining                                         */
718 /*--------------------------------------------------------------------*/
719
720 /**
721  * Copy node for inlining.  Updates attributes that change when
722  * inlining but not for dead node elimination.
723  *
724  * Copies the node by calling copy_node() and then updates the entity if
725  * it's a local one.  env must be a pointer to the frame type of the
726  * inlined procedure. The new entities must be in the link field of
727  * the entities.
728  */
729 static INLINE void
730 copy_node_inline(ir_node *n, void *env) {
731         ir_node *nn;
732         ir_type *frame_tp = (ir_type *)env;
733
734         copy_node(n, NULL);
735         if (is_Sel(n)) {
736                 nn = get_new_node (n);
737                 assert(is_Sel(nn));
738                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
739                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
740                 }
741         } else if (is_Block(n)) {
742                 nn = get_new_node (n);
743                 nn->attr.block.irg = current_ir_graph;
744         }
745 }
746
747 /**
748  * Walker: checks if P_value_arg_base is used or a stack alloca() occurs.
749  */
750 static void find_addr(ir_node *node, void *env) {
751         int *allow_inline = env;
752         if (is_Proj(node) &&
753                         is_Start(get_Proj_pred(node)) &&
754                         get_Proj_proj(node) == pn_Start_P_value_arg_base) {
755                 *allow_inline = 0;
756         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
757                 /* From GCC:
758                  * Refuse to inline an alloca call unless the user explicitly forced so, as this
759                  * may change the program's memory overhead drastically when the function
760                  * using alloca is called in a loop.  In the GCC present in SPEC2000, inlining
761                  * into schedule_block caused it to require 2GB of RAM instead of 256MB.
762                  *
763                  * Sadly, this is true for our implementation as well.
764                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
765                  * disables inlining of functions using VLA (which are completely safe).
766                  *
767                  * 2 Solutions:
768                  * - add a flag to the Alloc node for "real" alloca() calls
769                  * - add a new Stack-Restore node at the end of a function using alloca()
770                  */
771                 *allow_inline = 0;
772         }
773 }
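/* An illustration of the problem described above (hypothetical C source, use()
 * is a placeholder): after inlining f() into the loop, every alloca() happens
 * in the caller's frame and nothing is released until the caller returns, so
 * the stack grows with the trip count:
 *
 *     static void f(size_t n) { char *buf = alloca(n); use(buf); }
 *
 *     for (i = 0; i < 100000; ++i)
 *             f(4096);          // roughly 400 MB of stack once f() is inlined
 */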
774
775 /**
776  * Check if we can inline a given call.
777  * Currently, we cannot inline these cases:
778  * - calls with compound arguments or results
779  * - graphs that take the address of a parameter or use alloca() (see find_addr())
780  *
781  * check these conditions here
782  */
783 static int can_inline(ir_node *call, ir_graph *called_graph) {
784         ir_type *call_type = get_Call_type(call);
785         int params, ress, i, res;
786         assert(is_Method_type(call_type));
787
788         params = get_method_n_params(call_type);
789         ress   = get_method_n_ress(call_type);
790
791         /* check parameters for compound arguments */
792         for (i = 0; i < params; ++i) {
793                 ir_type *p_type = get_method_param_type(call_type, i);
794
795                 if (is_compound_type(p_type))
796                         return 0;
797         }
798
799         /* check results for compound arguments */
800         for (i = 0; i < ress; ++i) {
801                 ir_type *r_type = get_method_res_type(call_type, i);
802
803                 if (is_compound_type(r_type))
804                         return 0;
805         }
806
807         res = 1;
808         irg_walk_graph(called_graph, find_addr, NULL, &res);
809
810         return res;
811 }
812
813 enum exc_mode {
814         exc_handler    = 0, /**< There is a handler. */
815         exc_to_end     = 1, /**< Branches to End. */
816         exc_no_handler = 2  /**< Exception handling not represented. */
817 };
818
819 /* Inlines a method at the given call site. */
820 int inline_method(ir_node *call, ir_graph *called_graph) {
821         ir_node             *pre_call;
822         ir_node             *post_call, *post_bl;
823         ir_node             *in[pn_Start_max];
824         ir_node             *end, *end_bl;
825         ir_node             **res_pred;
826         ir_node             **cf_pred;
827         ir_node             *ret, *phi;
828         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity;
829         enum exc_mode       exc_handling;
830         ir_type             *called_frame, *curr_frame;
831         ir_entity           *ent;
832         ir_graph            *rem, *irg;
833         irg_inline_property prop = get_irg_inline_property(called_graph);
834
835         if (prop == irg_inline_forbidden)
836                 return 0;
837
838         ent = get_irg_entity(called_graph);
839
840         /* Do not inline variadic functions. */
841         if (get_method_variadicity(get_entity_type(ent)) == variadicity_variadic)
842                 return 0;
843
844         assert(get_method_n_params(get_entity_type(ent)) ==
845                get_method_n_params(get_Call_type(call)));
846
847         irg = get_irn_irg(call);
848
849         /*
850          * We cannot inline a recursive call. The graph must be copied before
851          * the call to inline_method() using create_irg_copy().
852          */
853         if (called_graph == irg)
854                 return 0;
855
856         /*
857          * currently, we cannot inline two cases:
858          * - call with compound arguments
859          * - graphs that take the address of a parameter
860          */
861         if (! can_inline(call, called_graph))
862                 return 0;
863
864         rem = current_ir_graph;
865         current_ir_graph = irg;
866
867         DB((dbg, SET_LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
868
869         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
870         rem_opt = get_opt_optimize();
871         set_optimize(0);
872
873         /* Handle graph state */
874         assert(get_irg_phase_state(irg) != phase_building);
875         assert(get_irg_pinned(irg) == op_pin_state_pinned);
876         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
877         set_irg_outs_inconsistent(irg);
878         set_irg_extblk_inconsistent(irg);
879         set_irg_doms_inconsistent(irg);
880         set_irg_loopinfo_inconsistent(irg);
881         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
882
883         /* -- Check preconditions -- */
884         assert(is_Call(call));
885
886         /* here we know we WILL inline, so inform the statistics */
887         hook_inline(call, called_graph);
888
889         /* -- Decide how to handle exception control flow: Is there a handler
890            for the Call node, or do we branch directly to End on an exception?
891            exc_handling:
892            0 There is a handler.
893            1 Branches to End.
894            2 Exception handling not represented in Firm. -- */
895         {
896                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
897                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
898                         long proj_nr = get_Proj_proj(proj);
899                         if (proj_nr == pn_Call_X_except) Xproj = proj;
900                         if (proj_nr == pn_Call_M_except) Mproj = proj;
901                 }
902                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
903                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
904                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
905         }
906
907         /* -- Pre_call is a Tuple collecting the start values (memory, frame, arguments, ...)
908            of the procedure and later replaces the Start node of the called graph.
909            Post_call is the old Call node and collects the results of the called
910            graph. Both will end up being a tuple.  -- */
911         post_bl = get_nodes_block(call);
912         set_irg_current_block(irg, post_bl);
913         /* XxMxPxPxPxT of Start + parameter of Call */
914         in[pn_Start_X_initial_exec]   = new_Jmp();
915         in[pn_Start_M]                = get_Call_mem(call);
916         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
917         in[pn_Start_P_globals]        = get_irg_globals(irg);
918         in[pn_Start_P_tls]            = get_irg_tls(irg);
919         in[pn_Start_T_args]           = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
920         /* in[pn_Start_P_value_arg_base] = ??? */
921         assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
922         pre_call = new_Tuple(pn_Start_max - 1, in);
923         post_call = call;
924
925         /* --
926            The new block gets the ins of the old block, pre_call and all its
927            predecessors and all Phi nodes. -- */
928         part_block(pre_call);
929
930         /* -- Prepare state for dead node elimination -- */
931         /* Visited flags in calling irg must be >= flag in called irg.
932            Else walker and arity computation will not work. */
933         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
934                 set_irg_visited(irg, get_irg_visited(called_graph)+1);
935         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
936                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
937         /* Set pre_call as new Start node in link field of the start node of
938            calling graph and pre_calls block as new block for the start block
939            of calling graph.
940            Further mark these nodes so that they are not visited by the
941            copying. */
942         set_irn_link(get_irg_start(called_graph), pre_call);
943         set_irn_visited(get_irg_start(called_graph), get_irg_visited(irg));
944         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
945         set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(irg));
946         set_irn_link(get_irg_bad(called_graph), get_irg_bad(irg));
947         set_irn_visited(get_irg_bad(called_graph), get_irg_visited(irg));
948
949         /* Initialize for compaction of in arrays */
950         inc_irg_block_visited(irg);
951
952         /* -- Replicate local entities of the called_graph -- */
953         /* copy the entities. */
954         called_frame = get_irg_frame_type(called_graph);
955         curr_frame   = get_irg_frame_type(irg);
956         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
957                 ir_entity *new_ent, *old_ent;
958                 old_ent = get_class_member(called_frame, i);
959                 new_ent = copy_entity_own(old_ent, curr_frame);
960                 set_entity_link(old_ent, new_ent);
961         }
962
963         /* visited is now greater than that of the called graph.  With this trick, visited
964            will remain unchanged, so an outer walker (e.g., one searching for Call nodes
965            to inline) that triggers this inlining will not visit the inlined nodes. */
966         set_irg_visited(irg, get_irg_visited(irg)-1);
967
968         /* -- Performing dead node elimination inlines the graph -- */
969         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
970            entities. */
971         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
972                  get_irg_frame_type(called_graph));
973
974         /* Repair called_graph */
975         set_irg_visited(called_graph, get_irg_visited(irg));
976         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
977         set_Block_block_visited(get_irg_start_block(called_graph), 0);
978
979         /* -- Merge the end of the inlined procedure with the call site -- */
980         /* We will turn the old Call node into a Tuple with the following
981            predecessors:
982            -1:  Block of Tuple.
983            0: Phi of all Memories of Return statements.
984            1: Jmp from new Block that merges the control flow from all exception
985            predecessors of the old end block.
986            2: Tuple of all arguments.
987            3: Phi of Exception memories.
988            In case the old Call directly branches to End on an exception we don't
989            need the block merging all exceptions nor the Phi of the exception
990            memories.
991         */
992
993         /* -- Precompute some values -- */
994         end_bl = get_new_node(get_irg_end_block(called_graph));
995         end = get_new_node(get_irg_end(called_graph));
996         arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
997         n_res = get_method_n_ress(get_Call_type(call));
998
999         res_pred = xmalloc(n_res * sizeof(*res_pred));
1000         cf_pred  = xmalloc(arity * sizeof(*cf_pred));
1001
1002         set_irg_current_block(irg, post_bl); /* just to make sure */
1003
1004         /* -- archive keepalives -- */
1005         irn_arity = get_irn_arity(end);
1006         for (i = 0; i < irn_arity; i++) {
1007                 ir_node *ka = get_End_keepalive(end, i);
1008                 if (! is_Bad(ka))
1009                         add_End_keepalive(get_irg_end(irg), ka);
1010         }
1011
1012         /* The new end node will die.  We need not free as the in array is on the obstack:
1013            copy_node() only generated 'D' arrays. */
1014
1015         /* -- Replace Return nodes by Jump nodes. -- */
1016         n_ret = 0;
1017         for (i = 0; i < arity; i++) {
1018                 ir_node *ret;
1019                 ret = get_irn_n(end_bl, i);
1020                 if (is_Return(ret)) {
1021                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1022                         n_ret++;
1023                 }
1024         }
1025         set_irn_in(post_bl, n_ret, cf_pred);
1026
1027         /* -- Build a Tuple for all results of the method.
1028            Add Phi node if there was more than one Return.  -- */
1029         turn_into_tuple(post_call, pn_Call_max);
1030         /* First the Memory-Phi */
1031         n_ret = 0;
1032         for (i = 0; i < arity; i++) {
1033                 ret = get_irn_n(end_bl, i);
1034                 if (is_Return(ret)) {
1035                         cf_pred[n_ret] = get_Return_mem(ret);
1036                         n_ret++;
1037                 }
1038         }
1039         phi = new_Phi(n_ret, cf_pred, mode_M);
1040         set_Tuple_pred(call, pn_Call_M_regular, phi);
1041         /* Conserve Phi-list for further inlinings -- but might be optimized */
1042         if (get_nodes_block(phi) == post_bl) {
1043                 set_irn_link(phi, get_irn_link(post_bl));
1044                 set_irn_link(post_bl, phi);
1045         }
1046         /* Now the real results */
1047         if (n_res > 0) {
1048                 for (j = 0; j < n_res; j++) {
1049                         n_ret = 0;
1050                         for (i = 0; i < arity; i++) {
1051                                 ret = get_irn_n(end_bl, i);
1052                                 if (is_Return(ret)) {
1053                                         cf_pred[n_ret] = get_Return_res(ret, j);
1054                                         n_ret++;
1055                                 }
1056                         }
1057                         if (n_ret > 0)
1058                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1059                         else
1060                                 phi = new_Bad();
1061                         res_pred[j] = phi;
1062                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1063                         if (get_nodes_block(phi) == post_bl) {
1064                                 set_Phi_next(phi, get_Block_phis(post_bl));
1065                                 set_Block_phis(post_bl, phi);
1066                         }
1067                 }
1068                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1069         } else {
1070                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1071         }
1072         /* handle the regular call */
1073         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1074
1075         /* For now, we cannot inline calls with value_base */
1076         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1077
1078         /* Finally the exception control flow.
1079            We have two (three) possible situations:
1080            First if the Call branches to an exception handler: We need to add a Phi node to
1081            collect the memory containing the exception objects.  Further we need
1082            to add another block to get a correct representation of this Phi.  To
1083            this block we add a Jmp that resolves into the X output of the Call
1084            when the Call is turned into a tuple.
1085            Second the Call branches to End, the exception is not handled.  Just
1086            add all inlined exception branches to the End node.
1087            Third: there is no Exception edge at all. Handle as case two. */
1088         if (exc_handling == exc_handler) {
1089                 n_exc = 0;
1090                 for (i = 0; i < arity; i++) {
1091                         ir_node *ret, *irn;
1092                         ret = get_irn_n(end_bl, i);
1093                         irn = skip_Proj(ret);
1094                         if (is_fragile_op(irn) || is_Raise(irn)) {
1095                                 cf_pred[n_exc] = ret;
1096                                 ++n_exc;
1097                         }
1098                 }
1099                 if (n_exc > 0) {
1100                         new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
1101                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1102                         /* The Phi for the memories with the exception objects */
1103                         n_exc = 0;
1104                         for (i = 0; i < arity; i++) {
1105                                 ir_node *ret;
1106                                 ret = skip_Proj(get_irn_n(end_bl, i));
1107                                 if (is_Call(ret)) {
1108                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1109                                         n_exc++;
1110                                 } else if (is_fragile_op(ret)) {
1111                                         /* We rely that all cfops have the memory output at the same position. */
1112                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1113                                         n_exc++;
1114                                 } else if (is_Raise(ret)) {
1115                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1116                                         n_exc++;
1117                                 }
1118                         }
1119                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1120                 } else {
1121                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1122                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1123                 }
1124         } else {
1125                 ir_node *main_end_bl;
1126                 int main_end_bl_arity;
1127                 ir_node **end_preds;
1128
1129                 /* assert(exc_handling == 1 || no exceptions. ) */
1130                 n_exc = 0;
1131                 for (i = 0; i < arity; i++) {
1132                         ir_node *ret = get_irn_n(end_bl, i);
1133                         ir_node *irn = skip_Proj(ret);
1134
1135                         if (is_fragile_op(irn) || is_Raise(irn)) {
1136                                 cf_pred[n_exc] = ret;
1137                                 n_exc++;
1138                         }
1139                 }
1140                 main_end_bl = get_irg_end_block(irg);
1141                 main_end_bl_arity = get_irn_arity(main_end_bl);
1142                 end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
1143
1144                 for (i = 0; i < main_end_bl_arity; ++i)
1145                         end_preds[i] = get_irn_n(main_end_bl, i);
1146                 for (i = 0; i < n_exc; ++i)
1147                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1148                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1149                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1150                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1151                 free(end_preds);
1152         }
1153         free(res_pred);
1154         free(cf_pred);
1155
1156         /* --  Turn the optimizations back on. -- */
1157         set_optimize(rem_opt);
1158         current_ir_graph = rem;
1159
1160         return 1;
1161 }
1162
1163 /********************************************************************/
1164 /* Apply inlining to small methods.                                 */
1165 /********************************************************************/
1166
1167 static struct obstack  temp_obst;
1168
1169 /** Represents a possible inlinable call in a graph. */
1170 typedef struct _call_entry call_entry;
1171 struct _call_entry {
1172         ir_node    *call;   /**< the Call */
1173         ir_graph   *callee; /**< the callee called here */
1174         call_entry *next;   /**< for linking the next one */
1175         unsigned   weight;  /**< the weight of the call */
1176 };
1177
1178 /**
1179  * environment for inlining small irgs
1180  */
1181 typedef struct _inline_env_t {
1182         struct obstack obst;  /**< an obstack where call_entries are allocated on. */
1183         call_entry *head;     /**< the head of the call entry list */
1184         call_entry *tail;     /**< the tail of the call entry list */
1185 } inline_env_t;
1186
1187 /**
1188  * Returns the irg called from a Call node. If the irg is not
1189  * known, NULL is returned.
1190  *
1191  * @param call  the call node
1192  */
1193 static ir_graph *get_call_called_irg(ir_node *call) {
1194         ir_node *addr;
1195
1196         addr = get_Call_ptr(call);
1197         if (is_Global(addr)) {
1198                 ir_entity *ent = get_Global_entity(addr);
1199                 return get_entity_irg(ent);
1200         }
1201
1202         return NULL;
1203 }
1204
1205 /**
1206  * Walker: Collect all calls to known graphs inside a graph.
1207  */
1208 static void collect_calls(ir_node *call, void *env) {
1209         if (is_Call(call)) {
1210                 ir_graph *called_irg = get_call_called_irg(call);
1211
1212                 if (called_irg != NULL) {
1213                         /* The Call node calls a locally defined method.  Remember to inline. */
1214                         inline_env_t *ienv  = env;
1215                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1216                         entry->call   = call;
1217                         entry->callee = called_irg;
1218                         entry->next   = NULL;
1219                         entry->weight = 0;
1220
1221                         if (ienv->tail == NULL)
1222                                 ienv->head = entry;
1223                         else
1224                                 ienv->tail->next = entry;
1225                         ienv->tail = entry;
1226                 }
1227         }
1228 }
1229
1230 /**
1231  * Inlines all small methods at call sites where the called address comes
1232  * from a Const node that references the entity representing the called
1233  * method.
1234  * The size argument is a rough measure for the code size of the method:
1235  * Methods where the obstack containing the firm graph is smaller than
1236  * size are inlined.
1237  */
1238 void inline_small_irgs(ir_graph *irg, int size) {
1239         ir_graph *rem = current_ir_graph;
1240         inline_env_t env;
1241         call_entry *entry;
1242
1243         current_ir_graph = irg;
1244         /* Handle graph state */
1245         assert(get_irg_phase_state(irg) != phase_building);
1246         free_callee_info(irg);
1247
1248         /* Find Call nodes to inline.
1249            (We cannot inline during a walk of the graph, as inlining the same
1250            method several times changes the visited flag of the walked graph:
1251            after the first inlining the visited flag of the callee equals that of
1252            the caller.  With the next inlining both are increased.) */
1253         obstack_init(&env.obst);
1254         env.head = env.tail = NULL;
1255         irg_walk_graph(irg, NULL, collect_calls, &env);
1256
1257         if (env.head != NULL) {
1258                 /* There are calls to inline */
1259                 collect_phiprojs(irg);
1260                 for (entry = env.head; entry != NULL; entry = entry->next) {
1261                         ir_graph *callee = entry->callee;
1262                         if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
1263                             (get_irg_inline_property(callee) >= irg_inline_forced)) {
1264                                 inline_method(entry->call, callee);
1265                         }
1266                 }
1267         }
1268         obstack_free(&env.obst, NULL);
1269         current_ir_graph = rem;
1270 }
1271
1272 /**
1273  * Environment for inlining irgs.
1274  */
1275 typedef struct {
1276         int n_nodes;             /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
1277         int n_blocks;            /**< Number of Blocks in graph without Start and End block. */
1278         int n_nodes_orig;        /**< for statistics */
1279         call_entry *call_head;   /**< The head of the list of all call nodes in this graph. */
1280         call_entry *call_tail;   /**< The tail of the list of all call nodes in this graph. */
1281         int n_call_nodes;        /**< Number of Call nodes in the graph. */
1282         int n_call_nodes_orig;   /**< for statistics */
1283         int n_callers;           /**< Number of known graphs that call this graph. */
1284         int n_callers_orig;      /**< for statistics */
1285         unsigned got_inline:1;   /**< Set, if at least one call inside this graph was inlined. */
1286         unsigned local_vars:1;   /**< Set, if an inlined function gets the address of a local variable. */
1287         unsigned *local_weights; /**< Once allocated, the beneficial weight for transmitting local addresses. */
1288 } inline_irg_env;
1289
1290 /**
1291  * Allocate a new environment for inlining.
1292  */
1293 static inline_irg_env *alloc_inline_irg_env(void) {
1294         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1295         env->n_nodes           = -2; /* do not count Start, End */
1296         env->n_blocks          = -2; /* do not count Start and End Block */
1297         env->n_nodes_orig      = -2; /* do not count Start, End */
1298         env->call_head         = NULL;
1299         env->call_tail         = NULL;
1300         env->n_call_nodes      = 0;
1301         env->n_call_nodes_orig = 0;
1302         env->n_callers         = 0;
1303         env->n_callers_orig    = 0;
1304         env->got_inline        = 0;
1305         env->local_vars        = 0;
1306         env->local_weights     = NULL;
1307         return env;
1308 }
1309
1310 typedef struct walker_env {
1311         inline_irg_env *x;    /**< the inline environment */
1312         char ignore_runtime;  /**< the ignore runtime flag */
1313         char ignore_callers;  /**< if set, do not update the callers data */
1314 } wenv_t;
1315
1316 /**
1317  * post-walker: collect all calls in the inline-environment
1318  * of a graph and sum some statistics.
1319  */
1320 static void collect_calls2(ir_node *call, void *ctx) {
1321         wenv_t         *env = ctx;
1322         inline_irg_env *x = env->x;
1323         ir_opcode      code = get_irn_opcode(call);
1324         ir_graph       *callee;
1325         call_entry     *entry;
1326
1327         /* count meaningful nodes in irg */
1328         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1329                 if (code != iro_Block) {
1330                         ++x->n_nodes;
1331                         ++x->n_nodes_orig;
1332                 } else {
1333                         ++x->n_blocks;
1334                 }
1335         }
1336
1337         if (code != iro_Call) return;
1338
1339         /* check, if it's a runtime call */
1340         if (env->ignore_runtime) {
1341                 ir_node *symc = get_Call_ptr(call);
1342
1343                 if (is_Global(symc)) {
1344                         ir_entity *ent = get_Global_entity(symc);
1345
1346                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1347                                 return;
1348                 }
1349         }
1350
1351         /* collect all call nodes */
1352         ++x->n_call_nodes;
1353         ++x->n_call_nodes_orig;
1354
1355         callee = get_call_called_irg(call);
1356         if (callee != NULL) {
1357                 if (! env->ignore_callers) {
1358                         inline_irg_env *callee_env = get_irg_link(callee);
1359                         /* count all static callers */
1360                         ++callee_env->n_callers;
1361                         ++callee_env->n_callers_orig;
1362                 }
1363
1364                 /* link it into the list of possibly inlinable entries */
1365                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1366                 entry->call   = call;
1367                 entry->callee = callee;
1368                 entry->next   = NULL;
1369                 if (x->call_tail == NULL)
1370                         x->call_head = entry;
1371                 else
1372                         x->call_tail->next = entry;
1373                 x->call_tail = entry;
1374         }
1375 }
1376
1377 /**
1378  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
1379  * hence this irg is a leaf of the call graph (it calls no other methods).
1380  */
1381 INLINE static int is_leave(ir_graph *irg) {
1382         inline_irg_env *env = get_irg_link(irg);
1383         return env->n_call_nodes == 0;
1384 }
1385
1386 /**
1387  * Returns TRUE if the number of nodes in the callee is
1388  * smaller than size in the irg's environment.
1389  */
1390 INLINE static int is_smaller(ir_graph *callee, int size) {
1391         inline_irg_env *env = get_irg_link(callee);
1392         return env->n_nodes < size;
1393 }
1394
1395 /**
1396  * Append the nodes of the list src to the nodes of the list in environment dst.
1397  */
1398 static void append_call_list(inline_irg_env *dst, call_entry *src) {
1399         call_entry *entry, *nentry;
1400
1401         /* Note that the src list points to Call nodes in the inlined graph, but
1402            we need Call nodes in our graph. Luckily the inliner leaves this information
1403            in the link field. */
1404         for (entry = src; entry != NULL; entry = entry->next) {
1405                 nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1406                 nentry->call   = get_irn_link(entry->call);
1407                 nentry->callee = entry->callee;
1408                 nentry->next   = NULL;
1409                 dst->call_tail->next = nentry;
1410                 dst->call_tail       = nentry;
1411         }
1412 }
1413
1414 /*
1415  * Inlines small leaf ("leave") methods at call sites where the called
1416  * address comes from a Const node that references the entity representing
1417  * the called method.
1418  * The arguments are rough measures for code size, counted in IR nodes:
1419  * leaf methods smaller than leavesize and other methods smaller than size
1420  * are inlined, as long as the caller does not grow beyond maxsize nodes.
1421  */
1422 void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime) {
1423         inline_irg_env   *env;
1424         ir_graph         *irg;
1425         int              i, n_irgs;
1426         ir_graph         *rem;
1427         int              did_inline;
1428         wenv_t           wenv;
1429         call_entry       *entry, *tail;
1430         const call_entry *centry;
1431         pmap             *copied_graphs;
1432         pmap_entry       *pm_entry;
1433
1434         rem = current_ir_graph;
1435         obstack_init(&temp_obst);
1436
1437         /* a map for the copied graphs, used to inline recursive calls */
1438         copied_graphs = pmap_create();
1439
1440         /* extend all irgs by a temporary data structure for inlining. */
1441         n_irgs = get_irp_n_irgs();
1442         for (i = 0; i < n_irgs; ++i)
1443                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1444
1445         /* Precompute information in temporary data structure. */
1446         wenv.ignore_runtime = ignore_runtime;
1447         wenv.ignore_callers = 0;
1448         for (i = 0; i < n_irgs; ++i) {
1449                 ir_graph *irg = get_irp_irg(i);
1450
1451                 assert(get_irg_phase_state(irg) != phase_building);
1452                 free_callee_info(irg);
1453
1454                 wenv.x = get_irg_link(irg);
1455                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1456         }
1457
1458         /* -- and now inline. -- */
1459
1460         /* Inline leaves recursively -- we might construct new leaves. */
1461         do {
1462                 did_inline = 0;
1463
1464                 for (i = 0; i < n_irgs; ++i) {
1465                         ir_node *call;
1466                         int phiproj_computed = 0;
1467
1468                         current_ir_graph = get_irp_irg(i);
1469                         env = (inline_irg_env *)get_irg_link(current_ir_graph);
1470
1471                         tail = NULL;
1472                         for (entry = env->call_head; entry != NULL; entry = entry->next) {
1473                                 ir_graph *callee;
1474
1475                                 if (env->n_nodes > maxsize) break;
1476
1477                                 call   = entry->call;
1478                                 callee = entry->callee;
1479
1480                                 if (is_leave(callee) && (
1481                                     is_smaller(callee, leavesize) || (get_irg_inline_property(callee) >= irg_inline_forced))) {
1482                                         if (!phiproj_computed) {
1483                                                 phiproj_computed = 1;
1484                                                 collect_phiprojs(current_ir_graph);
1485                                         }
1486                                         did_inline = inline_method(call, callee);
1487
1488                                         if (did_inline) {
1489                                                 inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1490
1491                                                 /* was inlined, must be recomputed */
1492                                                 phiproj_computed = 0;
1493
1494                                                 /* Do some statistics */
1495                                                 env->got_inline = 1;
1496                                                 --env->n_call_nodes;
1497                                                 env->n_nodes += callee_env->n_nodes;
1498                                                 --callee_env->n_callers;
1499
1500                                                 /* remove this call from the list */
1501                                                 if (tail != NULL)
1502                                                         tail->next = entry->next;
1503                                                 else
1504                                                         env->call_head = entry->next;
1505                                                 continue;
1506                                         }
1507                                 }
1508                                 tail = entry;
1509                         }
1510                         env->call_tail = tail;
1511                 }
1512         } while (did_inline);
1513
1514         /* inline other small functions. */
1515         for (i = 0; i < n_irgs; ++i) {
1516                 ir_node *call;
1517                 int phiproj_computed = 0;
1518
1519                 current_ir_graph = get_irp_irg(i);
1520                 env = (inline_irg_env *)get_irg_link(current_ir_graph);
1521
1522                 /* note that the list of possible calls is updated during the process */
1523                 tail = NULL;
1524                 for (entry = env->call_head; entry != NULL; entry = entry->next) {
1525                         ir_graph   *callee;
1526                         pmap_entry *e;
1527
1528                         call   = entry->call;
1529                         callee = entry->callee;
1530
1531                         e = pmap_find(copied_graphs, callee);
1532                         if (e != NULL) {
1533                                 /*
1534                                  * Remap callee if we have a copy.
1535                                  * FIXME: Should we do this only for recursive Calls ?
1536                                  */
1537                                 callee = e->value;
1538                         }
1539
1540                         if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) ||    /* small function */
1541                                 (get_irg_inline_property(callee) >= irg_inline_forced))) {
1542                                 if (current_ir_graph == callee) {
1543                                         /*
1544                                          * Recursive call: we cannot directly inline because we cannot walk
1545                                          * the graph and change it. So we have to make a copy of the graph
1546                                          * first.
1547                                          */
1548
1549                                         inline_irg_env *callee_env;
1550                                         ir_graph       *copy;
1551
1552                                         /*
1553                                          * No copy yet, create one.
1554                                          * Note that recursive methods are never leaves, so it is sufficient
1555                                          * to test this condition here.
1556                                          */
1557                                         copy = create_irg_copy(callee);
1558
1559                                         /* create_irg_copy() destroys the Proj links, recompute them */
1560                                         phiproj_computed = 0;
1561
1562                                         /* allocate new environment */
1563                                         callee_env = alloc_inline_irg_env();
1564                                         set_irg_link(copy, callee_env);
1565
1566                                         wenv.x              = callee_env;
1567                                         wenv.ignore_callers = 1;
1568                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1569
1570                                         /*
1571                                          * Enter the entity of the original graph. This is needed
1572                                          * for inline_method(). However, note that ent->irg still points
1573                                          * to callee, NOT to copy.
1574                                          */
1575                                         set_irg_entity(copy, get_irg_entity(callee));
1576
1577                                         pmap_insert(copied_graphs, callee, copy);
1578                                         callee = copy;
1579
1580                                         /* we have only one caller: the original graph */
1581                                         callee_env->n_callers      = 1;
1582                                         callee_env->n_callers_orig = 1;
1583                                 }
1584                                 if (! phiproj_computed) {
1585                                         phiproj_computed = 1;
1586                                         collect_phiprojs(current_ir_graph);
1587                                 }
1588                                 did_inline = inline_method(call, callee);
1589                                 if (did_inline) {
1590                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1591
1592                                         /* was inlined, must be recomputed */
1593                                         phiproj_computed = 0;
1594
1595                                         /* callee was inlined. Append its call list. */
1596                                         env->got_inline = 1;
1597                                         --env->n_call_nodes;
1598                                         append_call_list(env, callee_env->call_head);
1599                                         env->n_call_nodes += callee_env->n_call_nodes;
1600                                         env->n_nodes += callee_env->n_nodes;
1601                                         --callee_env->n_callers;
1602
1603                                         /* after we have inlined callee, all called methods inside callee
1604                                            are now called once more */
1605                                         for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
1606                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1607                                                 ++penv->n_callers;
1608                                         }
1609
1610                                         /* remove this call from the list */
1611                                         if (tail != NULL)
1612                                                 tail->next = entry->next;
1613                                         else
1614                                                 env->call_head = entry->next;
1615                                         continue;
1616                                 }
1617                         }
1618                         tail = entry;
1619                 }
1620                 env->call_tail = tail;
1621         }
1622
1623         for (i = 0; i < n_irgs; ++i) {
1624                 irg = get_irp_irg(i);
1625                 env = (inline_irg_env *)get_irg_link(irg);
1626
1627                 if (env->got_inline) {
1628                         optimize_graph_df(irg);
1629                         optimize_cf(irg);
1630                 }
1631                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1632                         DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1633                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1634                         env->n_callers_orig, env->n_callers,
1635                         get_entity_name(get_irg_entity(irg))));
1636                 }
1637         }
1638
1639         /* kill the copied graphs: we don't need them anymore */
1640         foreach_pmap(copied_graphs, pm_entry) {
1641                 ir_graph *copy = pm_entry->value;
1642
1643                 /* reset the entity, otherwise it will be deleted in the next step ... */
1644                 set_irg_entity(copy, NULL);
1645                 free_ir_graph(copy);
1646         }
1647         pmap_destroy(copied_graphs);
1648
1649         obstack_free(&temp_obst, NULL);
1650         current_ir_graph = rem;
1651 }
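/*
 * Usage sketch for the leaf inliner (the numbers are illustrative, not
 * recommended defaults): let callers grow to at most 500 nodes, inline
 * leaf methods below 80 nodes and other methods below 30 nodes, and skip
 * calls to entities that carry the mtp_property_runtime flag.
 *
 *     inline_leave_functions(500, 80, 30, 1);
 */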
1652
1653 /**
1654  * Calculate the weight of all uses of one argument that carries the address of a local variable.
1655  */
1656 static unsigned calc_method_local_weight(ir_node *arg) {
1657         int      i, j, k;
1658         unsigned v, weight = 0;
1659
1660         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1661                 ir_node *succ = get_irn_out(arg, i);
1662
1663                 switch (get_irn_opcode(succ)) {
1664                 case iro_Load:
1665                 case iro_Store:
1666                         /* Loads and Stores can be removed */
1667                         weight += 3;
1668                         break;
1669                 case iro_Sel:
1670                         /* check if all args are constant */
1671                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1672                                 ir_node *idx = get_Sel_index(succ, j);
1673                                 if (! is_Const(idx))
1674                                         return 0;
1675                         }
1676                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1677                            some unsupported node. */
1678                         v = calc_method_local_weight(succ);
1679                         if (v == 0)
1680                                 return 0;
1681                         /* we can kill one Sel with constant indexes, this is cheap */
1682                         weight += v + 1;
1683                         break;
1684                 case iro_Id:
1685                         /* when looking backward we might find Id nodes */
1686                         weight += calc_method_local_weight(succ);
1687                         break;
1688                 case iro_Tuple:
1689                         /* unoptimized tuple */
1690                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1691                                 ir_node *pred = get_Tuple_pred(succ, j);
1692                                 if (pred == arg) {
1693                                         /* look for Proj(j) */
1694                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1695                                                 ir_node *succ_succ = get_irn_out(succ, k);
1696                                                 if (is_Proj(succ_succ)) {
1697                                                         if (get_Proj_proj(succ_succ) == j) {
1698                                                                 /* found */
1699                                                                 weight += calc_method_local_weight(succ_succ);
1700                                                         }
1701                                                 } else {
1702                                                         /* this should NOT happen */
1703                                                         return 0;
1704                                                 }
1705                                         }
1706                                 }
1707                         }
                             break; /* do not fall through into the default case */
1708                 default:
1709                         /* any other node: unsupported yet or bad. */
1710                         return 0;
1711                 }
1712         }
1713         return weight;
1714 }
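/*
 * Worked example for the weights above (heuristic numbers, sketched here
 * for illustration): if the address of a local variable is used by two
 * Loads and one Store, its weight is 3 + 3 + 3 = 9.  If it only feeds a
 * Sel with constant indexes whose result is read by a single Load, the
 * weight is (3) + 1 = 4.  Any unsupported user makes the whole chain
 * return 0, i.e. passing that address earns no inlining bonus.
 */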
1715
1716 /**
1717  * Calculate the parameter weights for transmitting the address of a local variable.
1718  */
1719 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1720         ir_entity *ent = get_irg_entity(irg);
1721         ir_type  *mtp;
1722         int      nparams, i, proj_nr;
1723         ir_node  *irg_args, *arg;
1724
1725         mtp      = get_entity_type(ent);
1726         nparams  = get_method_n_params(mtp);
1727
1728         /* allocate a new array; its existence also serves as the 'already analysed' flag */
1729         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1730
1731         /* If the method has no parameters, there is nothing to do. */
1732         if (nparams <= 0)
1733                 return;
1734
1735         assure_irg_outs(irg);
1736         irg_args = get_irg_args(irg);
1737         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1738                 arg     = get_irn_out(irg_args, i);
1739                 proj_nr = get_Proj_proj(arg);
1740                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1741         }
1742 }
1743
1744 /**
1745  * Calculate the benefice for transmitting a local variable address.
1746  * After inlining, the local variable might be transformed into a
1747  * SSA variable by scalar_replacement().
1748  */
1749 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1750         inline_irg_env *env = get_irg_link(callee);
1751
1752         if (env->local_weights != NULL) {
1753                 if (pos < ARR_LEN(env->local_weights))
1754                         return env->local_weights[pos];
1755                 return 0;
1756         }
1757
1758         analyze_irg_local_weights(env, callee);
1759
1760         if (pos < ARR_LEN(env->local_weights))
1761                 return env->local_weights[pos];
1762         return 0;
1763 }
1764
1765 /**
1766  * calculate a benefice value for inlining the given call.
1767  */
1768 static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local_adr) {
1769         ir_entity *ent = get_irg_entity(callee);
1770         ir_node   *frame_ptr;
1771         ir_type   *mtp;
1772         int       weight = 0;
1773         int       i, n_params;
1774         unsigned  cc, v;
1775
1776         inline_irg_env *curr_env, *callee_env;
1777
1778         if (get_entity_additional_properties(ent) & mtp_property_noreturn) {
1779                 /* do NOT inline noreturn calls */
1780                 return INT_MIN;
1781         }
1782
1783         /* costs for every passed parameter */
1784         n_params = get_Call_n_params(call);
1785         mtp      = get_entity_type(ent);
1786         cc       = get_method_calling_convention(mtp);
1787         if (cc & cc_reg_param) {
1788                 /* register parameter, smaller costs for register parameters */
1789                 int max_regs = cc & ~cc_bits;
1790
1791                 if (max_regs < n_params)
1792                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1793                 else
1794                         weight += n_params * 2;
1795         } else {
1796                 /* parameters are passed on the stack */
1797                 weight += 5 * n_params;
1798         }
1799
1800         /* constant parameters improve the benefice */
1801         frame_ptr = get_irg_frame(current_ir_graph);
1802         for (i = 0; i < n_params; ++i) {
1803                 ir_node *param = get_Call_param(call, i);
1804
1805                 if (is_Const(param) || is_SymConst(param))
1806                         weight += get_method_param_weight(ent, i);
1807                 else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1808                         /*
1809                          * An address of a local variable is transmitted. After inlining,
1810                          * scalar_replacement might be able to remove the local variable,
1811                          * so honor this.
1812                          */
1813                         v = get_method_local_adress_weight(callee, i);
1814                         weight += v;
1815                         if (v > 0)
1816                                 *local_adr = 1;
1817                 }
1818         }
1819
1820         callee_env = get_irg_link(callee);
1821         if (get_entity_visibility(ent) == visibility_local &&
1822             callee_env->n_callers_orig == 1 &&
1823             callee != current_ir_graph) {
1824                 /* we are the only caller, give big bonus */
1825                 weight += 5000;
1826         }
1827
1828         /* do not inline big functions */
1829         weight -= callee_env->n_nodes;
1830
1831         /* reduce the benefice if the current function is already big */
1832         curr_env = get_irg_link(current_ir_graph);
1833         weight -= curr_env->n_nodes / 100;
1834
1835         /* give a bonus for functions with one block */
1836         if (callee_env->n_blocks == 1)
1837                 weight = weight * 3 / 2;
1838
1839         return weight;
1840 }
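/*
 * Back-of-the-envelope example for the benefice computation (a sketch
 * using the constants above): a call with three stack-passed parameters,
 * one of them a Const, to a local entity with a single caller and a
 * 40-node, single-block body is rated roughly as
 *
 *     weight  = 5 * 3               (stack parameters)
 *     weight += param_weight        (one constant argument)
 *     weight += 5000                (local entity, only one caller)
 *     weight -= 40                  (callee size)
 *     weight -= caller_nodes / 100  (penalty for an already big caller)
 *     weight  = weight * 3 / 2      (single-block callee bonus)
 *
 * The call is inlined later if weight > -inline_threshold or inlining
 * is forced for the callee.
 */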
1841
1842 /**
1843  * Heuristic inliner. Calculates a benefice value for every call and inlines
1844  * those calls whose benefice exceeds -inline_threshold (or which are forced).
1845  */
1846 void inline_functions(int inline_threshold) {
1847         inline_irg_env   *env;
1848         int              i, n_irgs;
1849         ir_graph         *rem;
1850         int              did_inline;
1851         wenv_t           wenv;
1852         call_entry       *entry, *tail;
1853         const call_entry *centry;
1854         pmap             *copied_graphs;
1855         pmap_entry       *pm_entry;
1856
1857         rem = current_ir_graph;
1858         obstack_init(&temp_obst);
1859
1860         /* a map for the copied graphs, used to inline recursive calls */
1861         copied_graphs = pmap_create();
1862
1863         /* extend all irgs by a temporary data structure for inlining. */
1864         n_irgs = get_irp_n_irgs();
1865         for (i = 0; i < n_irgs; ++i)
1866                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1867
1868         /* Precompute information in temporary data structure. */
1869         wenv.ignore_runtime = 0;
1870         wenv.ignore_callers = 0;
1871         for (i = 0; i < n_irgs; ++i) {
1872                 ir_graph *irg = get_irp_irg(i);
1873
1874                 assert(get_irg_phase_state(irg) != phase_building);
1875                 free_callee_info(irg);
1876
1877                 wenv.x = get_irg_link(irg);
1878                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1879         }
1880
1881         /* -- and now inline. -- */
1882         for (i = 0; i < n_irgs; ++i) {
1883                 int      phiproj_computed = 0;
1884                 ir_node  *call;
1885                 ir_graph *irg = get_irp_irg(i);
1886
1887                 current_ir_graph = irg;
1888                 env = get_irg_link(irg);
1889
1890                 /* note that the list of possible calls is updated during the process */
1891                 tail = NULL;
1892                 for (entry = env->call_head; entry != NULL; entry = entry->next) {
1893                         ir_graph   *callee;
1894                         pmap_entry *e;
1895                         int        benefice;
1896                         unsigned   local_adr;
1897
1898                         call   = entry->call;
1899                         callee = entry->callee;
1900
1901                         /* calculate the benefice on the original call to prevent excessive inlining */
1902                         local_adr = 0;
1903                         benefice = calc_inline_benefice(call, callee, &local_adr);
1904                         DB((dbg, SET_LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
1905
1906                         e = pmap_find(copied_graphs, callee);
1907                         if (e != NULL) {
1908                                 /*
1909                                  * Remap callee if we have a copy.
1910                                  * FIXME: Should we do this only for recursive Calls ?
1911                                  */
1912                                 callee = e->value;
1913                         }
1914
1915                         if (benefice > -inline_threshold ||
1916                                 (get_irg_inline_property(callee) >= irg_inline_forced)) {
1917                                 if (current_ir_graph == callee) {
1918                                         /*
1919                                          * Recursive call: we cannot directly inline because we cannot walk
1920                                          * the graph and change it. So we have to make a copy of the graph
1921                                          * first.
1922                                          */
1923
1924                                         inline_irg_env *callee_env;
1925                                         ir_graph       *copy;
1926
1927                                         /*
1928                                          * No copy yet, create one.
1929                                          * Note that recursive methods are never leaves, so it is sufficient
1930                                          * to test this condition here.
1931                                          */
1932                                         copy = create_irg_copy(callee);
1933
1934                                         /* create_irg_copy() destroys the Proj links, recompute them */
1935                                         phiproj_computed = 0;
1936
1937                                         /* allocate new environment */
1938                                         callee_env = alloc_inline_irg_env();
1939                                         set_irg_link(copy, callee_env);
1940
1941                                         wenv.x              = callee_env;
1942                                         wenv.ignore_callers = 1;
1943                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1944
1945                                         /*
1946                                          * Enter the entity of the original graph. This is needed
1947                                          * for inline_method(). However, note that ent->irg still points
1948                                          * to callee, NOT to copy.
1949                                          */
1950                                         set_irg_entity(copy, get_irg_entity(callee));
1951
1952                                         pmap_insert(copied_graphs, callee, copy);
1953                                         callee = copy;
1954
1955                                         /* we have only one caller: the original graph */
1956                                         callee_env->n_callers      = 1;
1957                                         callee_env->n_callers_orig = 1;
1958                                 }
1959                                 if (! phiproj_computed) {
1960                                         phiproj_computed = 1;
1961                                         collect_phiprojs(current_ir_graph);
1962                                 }
1963                                 did_inline = inline_method(call, callee);
1964                                 if (did_inline) {
1965                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1966
1967                                         /* was inlined, must be recomputed */
1968                                         phiproj_computed = 0;
1969
1970                                         /* callee was inlined. Append its call list. */
1971                                         env->got_inline = 1;
1972                                         if (local_adr)
1973                                                 env->local_vars = 1;
1974                                         --env->n_call_nodes;
1975                                         append_call_list(env, callee_env->call_head);
1976                                         env->n_call_nodes += callee_env->n_call_nodes;
1977                                         env->n_nodes += callee_env->n_nodes;
1978                                         --callee_env->n_callers;
1979
1980                                         /* after we have inlined callee, all called methods inside callee
1981                                            are now called once more */
1982                                         for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
1983                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1984                                                 ++penv->n_callers;
1985                                         }
1986
1987                                         /* remove this call from the list */
1988                                         if (tail != NULL)
1989                                                 tail->next = entry->next;
1990                                         else
1991                                                 env->call_head = entry->next;
1992                                         continue;
1993                                 }
1994                         }
1995                         tail = entry;
1996                 }
1997                 env->call_tail = tail;
1998
1999                 if (env->got_inline) {
2000                         /* this irg got calls inlined: optimize it */
2001
2002                         /* scalar replacement does not work well with Tuple nodes, so optimize them away */
2003                         optimize_graph_df(irg);
2004
2005                         if (env->local_vars) {
2006                                 if (scalar_replacement_opt(irg)) {
2007                                         optimize_graph_df(irg);
2008                                 }
2009                         }
2010
2011                         optimize_cf(irg);
2012                 }
2013                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2014                         DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2015                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2016                         env->n_callers_orig, env->n_callers,
2017                         get_entity_name(get_irg_entity(irg))));
2018                 }
2019         }
2020
2021         /* kill the copied graphs: we don't need them anymore */
2022         foreach_pmap(copied_graphs, pm_entry) {
2023                 ir_graph *copy = pm_entry->value;
2024
2025                 /* reset the entity, otherwise it will be deleted in the next step ... */
2026                 set_irg_entity(copy, NULL);
2027                 free_ir_graph(copy);
2028         }
2029         pmap_destroy(copied_graphs);
2030
2031         obstack_free(&temp_obst, NULL);
2032         current_ir_graph = rem;
2033 }
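/*
 * Usage sketch (the threshold is illustrative, not a recommended value):
 *
 *     firm_init_inline();      -- registers the debug module, normally done
 *                                 once during libFirm initialisation
 *     inline_functions(750);   -- inlines calls whose benefice exceeds -750
 */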
2034
2035 void firm_init_inline(void) {
2036         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2037 }