cleanup space generation logic and make it more robust for union constructs
[libfirm] / ir / opt / opt_inline.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #ifdef HAVE_CONFIG_H
27 # include "config.h"
28 #endif
29
30 #include <limits.h>
31 #include <assert.h>
32
33 #include "irnode_t.h"
34 #include "irgraph_t.h"
35 #include "irprog_t.h"
36
37 #include "iroptimize.h"
38 #include "ircons_t.h"
39 #include "iropt_t.h"
40 #include "irgopt.h"
41 #include "irgmod.h"
42 #include "irgwalk.h"
43
44 #include "adt/array.h"
45 #include "adt/pset.h"
46 #include "adt/pmap.h"
47 #include "adt/pdeq.h"
48 #include "adt/xmalloc.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63
64 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
65
66 /*------------------------------------------------------------------*/
67 /* Routines for dead node elimination / copying garbage collection  */
68 /* of the obstack.                                                  */
69 /*------------------------------------------------------------------*/
70
71 /**
72  * Remember the new node in the old node by using a field all nodes have.
73  */
74 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
75
76 /**
77  * Get this new node, before the old node is forgotten.
78  */
79 #define get_new_node(oldn) get_irn_link(oldn)
80
81 /**
82  * Check if a new node was set.
83  */
84 #define has_new_node(n) (get_new_node(n) != NULL)
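
/*
 * A minimal usage sketch (illustrative only; the helper below is not part
 * of this file): the copy routines maintain the old->new mapping entirely
 * through the link field, so a lookup is just a guarded get_new_node().
 */
static ir_node *lookup_copy_example(ir_node *oldn) {
        /* NULL means copy_node() has not processed oldn yet */
        return has_new_node(oldn) ? get_new_node(oldn) : NULL;
}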
85
86 /**
87  * We use the block_visited flag to mark that we have computed the
88  * number of useful predecessors for this block.
89  * Further we encode the new arity in this flag in the old blocks.
90  * Remembering the arity is useful, as it saves a lot of pointer
91  * accesses.  This function is called for all Phi and Block nodes
92  * in a Block.
93  */
94 static INLINE int
95 compute_new_arity(ir_node *b) {
96         int i, res, irn_arity;
97         int irg_v, block_v;
98
99         irg_v = get_irg_block_visited(current_ir_graph);
100         block_v = get_Block_block_visited(b);
101         if (block_v >= irg_v) {
102                 /* we computed the number of preds for this block and saved it in the
103                    block_v flag */
104                 return block_v - irg_v;
105         } else {
106                 /* compute the number of good predecessors */
107                 res = irn_arity = get_irn_arity(b);
108                 for (i = 0; i < irn_arity; i++)
109                         if (is_Bad(get_irn_n(b, i))) res--;
110                 /* save it in the flag. */
111                 set_Block_block_visited(b, irg_v + res);
112                 return res;
113         }
114 }
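
/*
 * Worked example of the encoding above (illustrative): assume
 * get_irg_block_visited() == 100 and a block with 5 predecessors, of which
 * 2 are Bad.  The first call stores 100 + 3 in the block_visited flag and
 * returns 3; every later call sees 103 >= 100 and recovers the arity as
 * 103 - 100 = 3 without rescanning the in array.
 */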
115
116 /**
117  * Copies the node to the new obstack. The Ins of the new node point to
118  * the predecessors on the old obstack.  For block/phi nodes not all
119  * predecessors might be copied.  n->link points to the new node.
120  * For Phi and Block nodes the function allocates in-arrays with an arity
121  * only for useful predecessors.  The arity is determined by counting
122  * the non-bad predecessors of the block.
123  *
124  * @param n    The node to be copied
125  * @param env  if non-NULL, the node number attribute will be copied to the new node
126  *
127  * Note: Also used for loop unrolling.
128  */
129 static void copy_node(ir_node *n, void *env) {
130         ir_node *nn, *block;
131         int new_arity;
132         ir_op *op = get_irn_op(n);
133         (void) env;
134
135         if (op == op_Bad) {
136                 /* node copied already */
137                 return;
138         } else if (op == op_Block) {
139                 block = NULL;
140                 new_arity = compute_new_arity(n);
141                 n->attr.block.graph_arr = NULL;
142         } else {
143                 block = get_nodes_block(n);
144                 if (op == op_Phi) {
145                         new_arity = compute_new_arity(block);
146                 } else {
147                         new_arity = get_irn_arity(n);
148                 }
149         }
150         nn = new_ir_node(get_irn_dbg_info(n),
151                 current_ir_graph,
152                 block,
153                 op,
154                 get_irn_mode(n),
155                 new_arity,
156                 get_irn_in(n) + 1);
157         /* Copy the attributes.  These might point to additional data.  If this
158            was allocated on the old obstack the pointers now are dangling.  This
159            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
160         if (op == op_Block) {
161                 /* we cannot allow blocks WITHOUT macroblock input */
162                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
163         }
164         copy_node_attr(n, nn);
165
166 #ifdef DEBUG_libfirm
167         {
168                 int copy_node_nr = env != NULL;
169                 if (copy_node_nr) {
170                         /* for easier debugging, we want to copy the node numbers too */
171                         nn->node_nr = n->node_nr;
172                 }
173         }
174 #endif
175
176         set_new_node(n, nn);
177         hook_dead_node_elim_subst(current_ir_graph, n, nn);
178 }
179
180 /**
181  * Copies new predecessors of old node to new node remembered in link.
182  * Spare the Bad predecessors of Phi and Block nodes.
183  */
184 static void copy_preds(ir_node *n, void *env) {
185         ir_node *nn, *block;
186         int i, j, irn_arity;
187         (void) env;
188
189         nn = get_new_node(n);
190
191         if (is_Block(n)) {
192                 /* copy the macro block header */
193                 ir_node *mbh = get_Block_MacroBlock(n);
194
195                 if (mbh == n) {
196                         /* this block is a macroblock header */
197                         set_Block_MacroBlock(nn, nn);
198                 } else {
199                         /* get the macro block header */
200                         ir_node *nmbh = get_new_node(mbh);
201                         assert(nmbh != NULL);
202                         set_Block_MacroBlock(nn, nmbh);
203                 }
204
205                 /* Don't copy Bad nodes. */
206                 j = 0;
207                 irn_arity = get_irn_arity(n);
208                 for (i = 0; i < irn_arity; i++) {
209                         if (! is_Bad(get_irn_n(n, i))) {
210                                 ir_node *pred = get_irn_n(n, i);
211                                 set_irn_n(nn, j, get_new_node(pred));
212                                 j++;
213                         }
214                 }
215                 /* repair the block visited flag from above misuse. Repair it in both
216                    graphs so that the old one can still be used. */
217                 set_Block_block_visited(nn, 0);
218                 set_Block_block_visited(n, 0);
219                 /* Local optimization could not merge two subsequent blocks while the
220                    in array contained Bads.  Now it is possible.
221                    We don't call optimize_in_place as it requires
222                    that the fields in ir_graph are set properly. */
223                 if ((get_opt_control_flow_straightening()) &&
224                         (get_Block_n_cfgpreds(nn) == 1) &&
225                         is_Jmp(get_Block_cfgpred(nn, 0))) {
226                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
227                         if (nn == old) {
228                                 /* Jmp jumps into the block it is in -- resolve the self cycle. */
229                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
230                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
231                         } else {
232                                 exchange(nn, old);
233                         }
234                 }
235         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
236                 /* Don't copy node if corresponding predecessor in block is Bad.
237                    The Block itself should not be Bad. */
238                 block = get_nodes_block(n);
239                 set_nodes_block(nn, get_new_node(block));
240                 j = 0;
241                 irn_arity = get_irn_arity(n);
242                 for (i = 0; i < irn_arity; i++) {
243                         if (! is_Bad(get_irn_n(block, i))) {
244                                 ir_node *pred = get_irn_n(n, i);
245                                 set_irn_n(nn, j, get_new_node(pred));
246                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
247                                 j++;
248                         }
249                 }
250                 /* If the pre walker reached this Phi after the post walker visited the
251                    block, block_visited is > 0. */
252                 set_Block_block_visited(get_nodes_block(n), 0);
253                 /* Compacting the Phi's ins might generate Phis with only one
254                    predecessor. */
255                 if (get_irn_arity(nn) == 1)
256                         exchange(nn, get_irn_n(nn, 0));
257         } else {
258                 irn_arity = get_irn_arity(n);
259                 for (i = -1; i < irn_arity; i++)
260                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
261         }
262         /* Now the new node is complete.  We can add it to the hash table for CSE.
263            @@@ inlining aborts if we identify End. Why? */
264         if (!is_End(nn))
265                 add_identities(current_ir_graph->value_table, nn);
266 }
267
268 /**
269  * Copies the graph recursively, compacts the keep-alives of the end node.
270  *
271  * @param irg           the graph to be copied
272  * @param copy_node_nr  If non-zero, the node number will be copied
273  */
274 static void copy_graph(ir_graph *irg, int copy_node_nr) {
275         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
276         ir_node *ka;      /* keep alive */
277         int i, irn_arity;
278         unsigned long vfl;
279
280         /* Some nodes must be copied by hand, sigh */
281         vfl = get_irg_visited(irg);
282         set_irg_visited(irg, vfl + 1);
283
284         oe = get_irg_end(irg);
285         mark_irn_visited(oe);
286         /* copy the end node by hand, allocate dynamic in array! */
287         ne = new_ir_node(get_irn_dbg_info(oe),
288                 irg,
289                 NULL,
290                 op_End,
291                 mode_X,
292                 -1,
293                 NULL);
294         /* Copy the attributes.  Well, there might be some in the future... */
295         copy_node_attr(oe, ne);
296         set_new_node(oe, ne);
297
298         /* copy the Bad node */
299         ob = get_irg_bad(irg);
300         mark_irn_visited(ob);
301         nb = new_ir_node(get_irn_dbg_info(ob),
302                 irg,
303                 NULL,
304                 op_Bad,
305                 mode_T,
306                 0,
307                 NULL);
308         copy_node_attr(ob, nb);
309         set_new_node(ob, nb);
310
311         /* copy the NoMem node */
312         om = get_irg_no_mem(irg);
313         mark_irn_visited(om);
314         nm = new_ir_node(get_irn_dbg_info(om),
315                 irg,
316                 NULL,
317                 op_NoMem,
318                 mode_M,
319                 0,
320                 NULL);
321         copy_node_attr(om, nm);
322         set_new_node(om, nm);
323
324         /* copy the live nodes */
325         set_irg_visited(irg, vfl);
326         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
327
328         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
329
330         /* visit the anchors as well */
331         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
332                 ir_node *n = get_irg_anchor(irg, i);
333
334                 if (n && (get_irn_visited(n) <= vfl)) {
335                         set_irg_visited(irg, vfl);
336                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
337                 }
338         }
339
340         /* copy_preds for the end node ... */
341         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
342
343         /*- ... and now the keep alives. -*/
344         /* First pick the unmarked block nodes and walk them.  We must pick these
345            first, as otherwise we would overlook blocks reachable from Phis. */
346         irn_arity = get_End_n_keepalives(oe);
347         for (i = 0; i < irn_arity; i++) {
348                 ka = get_End_keepalive(oe, i);
349                 if (is_Block(ka)) {
350                         if (get_irn_visited(ka) <= vfl) {
351                                 /* We must keep the block alive and copy everything reachable */
352                                 set_irg_visited(irg, vfl);
353                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
354                         }
355                         add_End_keepalive(ne, get_new_node(ka));
356                 }
357         }
358
359         /* Now pick other nodes.  Here we will keep all! */
360         irn_arity = get_End_n_keepalives(oe);
361         for (i = 0; i < irn_arity; i++) {
362                 ka = get_End_keepalive(oe, i);
363                 if (!is_Block(ka)) {
364                         if (get_irn_visited(ka) <= vfl) {
365                                 /* We didn't copy the node yet.  */
366                                 set_irg_visited(irg, vfl);
367                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
368                         }
369                         add_End_keepalive(ne, get_new_node(ka));
370                 }
371         }
372
373         /* the start block is sometimes only reached after the keep alives */
374         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
375         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
376 }
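
/*
 * Note on the visited-flag protocol above (a recap, not new machinery):
 * with vfl the counter on entry, the hand-copied nodes (End, Bad, NoMem)
 * are marked with vfl + 1.  Resetting the counter to vfl before each walk
 * makes the walker skip exactly those hand-copied nodes while still
 * reaching everything else.
 */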
377
378 /**
379  * Copies the graph reachable from current_ir_graph->end to the obstack
380  * in current_ir_graph and fixes the environment.
381  * Then fixes the fields in current_ir_graph containing nodes of the
382  * graph.
383  *
384  * @param copy_node_nr  If non-zero, the node number will be copied
385  */
386 static void
387 copy_graph_env(int copy_node_nr) {
388         ir_graph *irg = current_ir_graph;
389         ir_node *old_end, *new_anchor;
390         int i;
391
392         /* remove end_except and end_reg nodes */
393         old_end = get_irg_end(irg);
394         set_irg_end_except (irg, old_end);
395         set_irg_end_reg    (irg, old_end);
396
397         /* Not all nodes remembered in irg might be reachable
398            from the end node.  Assure their link is set to NULL, so that
399            we can test whether new nodes have been computed. */
400         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
401                 ir_node *n = get_irg_anchor(irg, i);
402                 if (n != NULL)
403                         set_new_node(n, NULL);
404         }
405         /* we use the block walk flag for removing Bads from Blocks ins. */
406         inc_irg_block_visited(irg);
407
408         /* copy the graph */
409         copy_graph(irg, copy_node_nr);
410
411         /* fix the anchor */
412         old_end    = get_irg_end(irg);
413         new_anchor = new_Anchor(irg);
414
415         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
416                 ir_node *n = get_irg_anchor(irg, i);
417                 if (n)
418                         set_irn_n(new_anchor, i, get_new_node(n));
419         }
420         free_End(old_end);
421         irg->anchor = new_anchor;
422
423         /* ensure the new anchor is placed in the endblock */
424         set_nodes_block(new_anchor, get_irg_end_block(irg));
425 }
426
427 /**
428  * Copies all reachable nodes to a new obstack.  Removes bad inputs
429  * from block nodes and the corresponding inputs from Phi nodes.
430  * Merges single exit blocks with single entry blocks and removes
431  * 1-input Phis.
432  * Adds all new nodes to a new hash table for CSE.  Does not
433  * perform CSE, so the hash table might contain common subexpressions.
434  */
435 void dead_node_elimination(ir_graph *irg) {
436         ir_graph *rem;
437 #ifdef INTERPROCEDURAL_VIEW
438         int rem_ipview = get_interprocedural_view();
439 #endif
440         struct obstack *graveyard_obst = NULL;
441         struct obstack *rebirth_obst   = NULL;
442         assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
443
444         /* inform statistics that we started a dead-node elimination run */
445         hook_dead_node_elim(irg, 1);
446
447         /* Remember external state of current_ir_graph. */
448         rem = current_ir_graph;
449         current_ir_graph = irg;
450 #ifdef INTERPROCEDURAL_VIEW
451         set_interprocedural_view(0);
452 #endif
453
454         assert(get_irg_phase_state(irg) != phase_building);
455
456         /* Handle graph state */
457         free_callee_info(irg);
458         free_irg_outs(irg);
459         free_trouts();
460
461         /* @@@ so far we lose loops when copying */
462         free_loop_information(irg);
463
464         set_irg_doms_inconsistent(irg);
465
466         /* A quiet place, where the old obstack can rest in peace,
467            until it will be cremated. */
468         graveyard_obst = irg->obst;
469
470         /* A new obstack, where the reachable nodes will be copied to. */
471         rebirth_obst = xmalloc(sizeof(*rebirth_obst));
472         irg->obst = rebirth_obst;
473         obstack_init(irg->obst);
474         irg->last_node_idx = 0;
475
476         /* We also need a new value table for CSE */
477         del_identities(irg->value_table);
478         irg->value_table = new_identities();
479
480         /* Copy the graph from the old to the new obstack */
481         copy_graph_env(/*copy_node_nr=*/1);
482
483         /* Free memory from old unoptimized obstack */
484         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
485         xfree(graveyard_obst);            /* ... then free it.           */
486
487         /* inform statistics that the run is over */
488         hook_dead_node_elim(irg, 0);
489
490         current_ir_graph = rem;
491 #ifdef INTERPROCEDURAL_VIEW
492         set_interprocedural_view(rem_ipview);
493 #endif
494 }
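
/*
 * A sketch of a typical driver (hypothetical helper, assuming the usual
 * irp accessors): run dead node elimination over all graphs of the program.
 */
static void dead_node_elimination_all_example(void) {
        int i;
        for (i = get_irp_n_irgs() - 1; i >= 0; --i)
                dead_node_elimination(get_irp_irg(i));
}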
495
496 /**
497  * Relink the bad predecessors of a block and store the old in array in the
498  * link field. This function is called by relink_bad_predecessors().
499  * The array in the link field starts with the block operand at position 0.
500  * If the block has bad predecessors, create a new in array without the bad
501  * preds. Otherwise leave the in array untouched.
502  */
503 static void relink_bad_block_predecessors(ir_node *n, void *env) {
504         ir_node **new_in, *irn;
505         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
506         (void) env;
507
508         /* if link field of block is NULL, look for bad predecessors otherwise
509            this is already done */
510         if (is_Block(n) && get_irn_link(n) == NULL) {
511                 /* save old predecessors in link field (position 0 is the block operand)*/
512                 set_irn_link(n, get_irn_in(n));
513
514                 /* count predecessors without bad nodes */
515                 old_irn_arity = get_irn_arity(n);
516                 for (i = 0; i < old_irn_arity; i++)
517                         if (!is_Bad(get_irn_n(n, i)))
518                                 ++new_irn_arity;
519
520                 /* arity changing: set new predecessors without bad nodes */
521                 if (new_irn_arity < old_irn_arity) {
522                         /* Get new predecessor array. We do not resize the array, as we must
523                            keep the old one to update Phis. */
524                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
525
526                         /* set new predecessors in array */
527                         new_in[0] = NULL;
528                         new_irn_n = 1;
529                         for (i = 0; i < old_irn_arity; i++) {
530                                 irn = get_irn_n(n, i);
531                                 if (!is_Bad(irn)) {
532                                         new_in[new_irn_n] = irn;
533                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
534                                         ++new_irn_n;
535                                 }
536                         }
537                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
538                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
539                         n->in = new_in;
540                 } /* ir node has bad predecessors */
541         } /* Block is not relinked */
542 }
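
/*
 * Example of the transformation above (illustrative): for a block whose in
 * array contains a Bad among its control-flow preds, the old array is saved
 * in the link field and a new array is installed with NULL in the block
 * operand slot 0 followed by only the non-Bad predecessors.
 */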
543
544 /**
545  * Relinks Bad predecessors of Blocks and Phis; called by the walker in
546  * remove_bad_predecessors(). If n is a Block, call
547  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
548  * function on the Phi's Block. If this block has bad predecessors, relink the
549  * preds of the Phi node.
550  */
551 static void relink_bad_predecessors(ir_node *n, void *env) {
552         ir_node *block, **old_in;
553         int i, old_irn_arity, new_irn_arity;
554
555         /* relink bad predecessors of a block */
556         if (is_Block(n))
557                 relink_bad_block_predecessors(n, env);
558
559         /* If Phi node relink its block and its predecessors */
560         if (is_Phi(n)) {
561                 /* Relink predecessors of phi's block */
562                 block = get_nodes_block(n);
563                 if (get_irn_link(block) == NULL)
564                         relink_bad_block_predecessors(block, env);
565
566                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
567                 old_irn_arity = ARR_LEN(old_in);
568
569                 /* Relink Phi predecessors if count of predecessors changed */
570                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
571                         /* set new predecessors in array
572                            n->in[0] remains the same block */
573                         new_irn_arity = 1;
574                         for (i = 1; i < old_irn_arity; i++)
575                                 if (!is_Bad(old_in[i])) {
576                                         n->in[new_irn_arity] = n->in[i];
577                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
578                                         ++new_irn_arity;
579                                 }
580
581                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
582                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
583                 }
584         } /* n is a Phi node */
585 }
586
587 /*
588  * Removes Bad predecessors from Blocks and the corresponding
589  * inputs to Phi nodes as in dead_node_elimination but without
590  * copying the graph.
591  * On the pre-order visit set the link field to NULL; on the post-order
592  * visit call relink_bad_predecessors() (this function stores the old in
593  * array in the link field and sets a new in array if the arity of the
594  * predecessors changes).
595  */
596 void remove_bad_predecessors(ir_graph *irg) {
597         panic("Fix backedge handling first");
598         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
599 }
600
601
602 /*
603    __                      _  __ __
604   (_     __    o     _    | \/  |_
605   __)|_| | \_/ | \_/(/_   |_/\__|__
606
607   The following stuff implements a facility that automatically patches
608   registered ir_node pointers to the new node when a dead node elimination occurs.
609 */
610
611 struct _survive_dce_t {
612         struct obstack obst;
613         pmap *places;
614         pmap *new_places;
615         hook_entry_t dead_node_elim;
616         hook_entry_t dead_node_elim_subst;
617 };
618
619 typedef struct _survive_dce_list_t {
620         struct _survive_dce_list_t *next;
621         ir_node **place;
622 } survive_dce_list_t;
623
624 static void dead_node_hook(void *context, ir_graph *irg, int start) {
625         survive_dce_t *sd = context;
626         (void) irg;
627
628         /* Create a new map before the dead node elimination is performed. */
629         if (start) {
630                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
631         } else {
632                 /* Patch back all nodes if dead node elimination is over and something is to be done. */
633                 pmap_destroy(sd->places);
634                 sd->places     = sd->new_places;
635                 sd->new_places = NULL;
636         }
637 }
638
639 /**
640  * Hook called when dead node elimination replaces old by nw.
641  */
642 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
643         survive_dce_t *sd = context;
644         survive_dce_list_t *list = pmap_get(sd->places, old);
645         (void) irg;
646
647         /* If the node is to be patched back, write the new address to all registered locations. */
648         if (list) {
649                 survive_dce_list_t *p;
650
651                 for (p = list; p; p = p->next)
652                         *(p->place) = nw;
653
654                 pmap_insert(sd->new_places, nw, list);
655         }
656 }
657
658 /**
659  * Make a new Survive DCE environment.
660  */
661 survive_dce_t *new_survive_dce(void) {
662         survive_dce_t *res = xmalloc(sizeof(res[0]));
663         obstack_init(&res->obst);
664         res->places     = pmap_create();
665         res->new_places = NULL;
666
667         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
668         res->dead_node_elim.context                   = res;
669         res->dead_node_elim.next                      = NULL;
670
671         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
672         res->dead_node_elim_subst.context = res;
673         res->dead_node_elim_subst.next    = NULL;
674
675 #ifndef FIRM_ENABLE_HOOKS
676         assert(0 && "need hooks enabled");
677 #endif
678
679         register_hook(hook_dead_node_elim, &res->dead_node_elim);
680         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
681         return res;
682 }
683
684 /**
685  * Free a Survive DCE environment.
686  */
687 void free_survive_dce(survive_dce_t *sd) {
688         obstack_free(&sd->obst, NULL);
689         pmap_destroy(sd->places);
690         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
691         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
692         xfree(sd);
693 }
694
695 /**
696  * Register a node pointer to be patched upon DCE.
697  * When DCE occurs, the node pointer specified by @p place will be
698  * patched to the new address of the node it is pointing to.
699  *
700  * @param sd    The Survive DCE environment.
701  * @param place The address of the node pointer.
702  */
703 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
704         if (*place != NULL) {
705                 ir_node *irn      = *place;
706                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
707                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
708
709                 nw->next  = curr;
710                 nw->place = place;
711
712                 pmap_insert(sd->places, irn, nw);
713         }
714 }
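
/*
 * Illustrative use of the facility (the wrapper is hypothetical): register
 * a pointer, run dead node elimination, and the hooks above patch the
 * pointer to the surviving copy.
 */
static void survive_dce_example(ir_graph *irg, ir_node **place) {
        survive_dce_t *sd = new_survive_dce();
        survive_dce_register_irn(sd, place);
        dead_node_elimination(irg);  /* *place now points to the copied node */
        free_survive_dce(sd);
}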
715
716 /*--------------------------------------------------------------------*/
717 /*  Functionality for inlining                                         */
718 /*--------------------------------------------------------------------*/
719
720 /**
721  * Copy node for inlining.  Updates attributes that change when
722  * inlining but not for dead node elimination.
723  *
724  * Copies the node by calling copy_node() and then updates the entity if
725  * it's a local one.  env must be a pointer to the frame type of the
726  * inlined procedure. The new entities must be in the link field of
727  * the entities.
728  */
729 static INLINE void
730 copy_node_inline(ir_node *n, void *env) {
731         ir_node *nn;
732         ir_type *frame_tp = (ir_type *)env;
733
734         copy_node(n, NULL);
735         if (is_Sel(n)) {
736                 nn = get_new_node (n);
737                 assert(is_Sel(nn));
738                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
739                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
740                 }
741         } else if (is_Block(n)) {
742                 nn = get_new_node (n);
743                 nn->attr.block.irg = current_ir_graph;
744         }
745 }
746
747 /**
748  * Walker: checks if P_value_arg_base is used.
749  */
750 static void find_addr(ir_node *node, void *env) {
751         int *allow_inline = env;
752         if (is_Proj(node) &&
753                         is_Start(get_Proj_pred(node)) &&
754                         get_Proj_proj(node) == pn_Start_P_value_arg_base) {
755                 *allow_inline = 0;
756         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
757                 /* From GCC:
758                  * Refuse to inline an alloca call unless the user explicitly forced it,
759                  * as this may change the program's memory overhead drastically when the
760                  * function using alloca is called in a loop.  For the GCC in SPEC2000,
761                  * inlining into schedule_block caused it to need 2GB of RAM instead of 256MB.
762                  *
763                  * Sadly, this is true for our implementation as well.
764                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
765                  * disables inlining of functions using VLA (which are completely safe).
766                  *
767                  * 2 Solutions:
768                  * - add a flag to the Alloc node for "real" alloca() calls
769                  * - add a new Stack-Restore node at the end of a function using alloca()
770                  */
771                 *allow_inline = 0;
772         }
773 }
774
775 /**
776  * Check if we can inline a given call.
777  * Currently, we cannot inline two cases:
778  * - call with compound arguments
779  * - graphs that take the address of a parameter
780  *
781  * check these conditions here
782  */
783 static int can_inline(ir_node *call, ir_graph *called_graph) {
784         ir_type *call_type = get_Call_type(call);
785         int params, ress, i, res;
786         assert(is_Method_type(call_type));
787
788         params = get_method_n_params(call_type);
789         ress   = get_method_n_ress(call_type);
790
791         /* check parameters for compound arguments */
792         for (i = 0; i < params; ++i) {
793                 ir_type *p_type = get_method_param_type(call_type, i);
794
795                 if (is_compound_type(p_type))
796                         return 0;
797         }
798
799         /* check results for compound return types */
800         for (i = 0; i < ress; ++i) {
801                 ir_type *r_type = get_method_res_type(call_type, i);
802
803                 if (is_compound_type(r_type))
804                         return 0;
805         }
806
807         res = 1;
808         irg_walk_graph(called_graph, find_addr, NULL, &res);
809
810         return res;
811 }
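
/*
 * Sketch of the intended call sequence (hypothetical wrapper): filter a
 * candidate with can_inline() before inline_method(), which re-checks it.
 * Assumes collect_phiprojs() was run on the caller, as inline_method()
 * relies on the Proj links of the Call.
 */
static int try_inline_example(ir_node *call, ir_graph *callee) {
        if (! can_inline(call, callee))
                return 0;
        return inline_method(call, callee);
}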
812
813 enum exc_mode {
814         exc_handler    = 0, /**< There is a handler. */
815         exc_to_end     = 1, /**< Branches to End. */
816         exc_no_handler = 2  /**< Exception handling not represented. */
817 };
818
819 /* Inlines a method at the given call site. */
820 int inline_method(ir_node *call, ir_graph *called_graph) {
821         ir_node             *pre_call;
822         ir_node             *post_call, *post_bl;
823         ir_node             *in[pn_Start_max];
824         ir_node             *end, *end_bl;
825         ir_node             **res_pred;
826         ir_node             **cf_pred;
827         ir_node             *ret, *phi;
828         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity;
829         enum exc_mode       exc_handling;
830         ir_type             *called_frame, *curr_frame;
831         ir_entity           *ent;
832         ir_graph            *rem, *irg;
833         irg_inline_property prop = get_irg_inline_property(called_graph);
834
835         if (prop == irg_inline_forbidden)
836                 return 0;
837
838         ent = get_irg_entity(called_graph);
839
840         /* Do not inline variadic functions. */
841         if (get_method_variadicity(get_entity_type(ent)) == variadicity_variadic) {
842                 /* Argh, K&R functions are marked as variadic ones, so check further */
843                 ir_type *mtp     = get_entity_type(ent);
844                 ir_type *ctp     = get_Call_type(call);
845                 int     n_params = get_method_n_params(mtp);
846                 int     i;
847
848                 /* This is too strong, but probably ok. Function calls with a wrong number of
849                    parameters should not be inlined. */
850                 if (n_params != get_method_n_params(ctp))
851                         return 0;
852
853                 /* check types: for K&R calls, this was not done by the compiler. Again, this is
854                    too strong, but ok for now. */
855                 for (i = n_params - 1; i >= 0; --i) {
856                         ir_type *param_tp = get_method_param_type(mtp, i);
857                         ir_type *arg_tp   = get_method_param_type(ctp, i);
858
859                         if (param_tp != arg_tp)
860                                 return 0;
861                 }
862                 DB((dbg, LEVEL_1, "Inlining allowed for variadic function %+F\n", called_graph));
863                 /* types match, fine: when the frame is accessed, the inliner stops at can_inline() */
864         }
865
866         assert(get_method_n_params(get_entity_type(ent)) ==
867                get_method_n_params(get_Call_type(call)));
868
869         irg = get_irn_irg(call);
870
871         /*
872          * We cannot inline a recursive call. The graph must be copied before
873          * the call to inline_method(), using create_irg_copy().
874          */
875         if (called_graph == irg)
876                 return 0;
877
878         /*
879          * currently, we cannot inline two cases:
880          * - call with compound arguments
881          * - graphs that take the address of a parameter
882          */
883         if (! can_inline(call, called_graph))
884                 return 0;
885
886         rem = current_ir_graph;
887         current_ir_graph = irg;
888
889         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
890
891         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
892         rem_opt = get_opt_optimize();
893         set_optimize(0);
894
895         /* Handle graph state */
896         assert(get_irg_phase_state(irg) != phase_building);
897         assert(get_irg_pinned(irg) == op_pin_state_pinned);
898         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
899         set_irg_outs_inconsistent(irg);
900         set_irg_extblk_inconsistent(irg);
901         set_irg_doms_inconsistent(irg);
902         set_irg_loopinfo_inconsistent(irg);
903         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
904
905         /* -- Check preconditions -- */
906         assert(is_Call(call));
907
908         /* here we know we WILL inline, so inform the statistics */
909         hook_inline(call, called_graph);
910
911         /* -- Decide how to handle exception control flow: Is there a handler
912            for the Call node, or do we branch directly to End on an exception?
913            exc_handling:
914            0 There is a handler.
915            1 Branches to End.
916            2 Exception handling not represented in Firm. -- */
917         {
918                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
919                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
920                         long proj_nr = get_Proj_proj(proj);
921                         if (proj_nr == pn_Call_X_except) Xproj = proj;
922                         if (proj_nr == pn_Call_M_except) Mproj = proj;
923                 }
924                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
925                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
926                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
927         }
928
929         /* -- Pre_call is a Tuple that collects the start values (initial control,
930            memory, frame base, and arguments) of the procedure and later replaces
931            the Start node of the called graph.  Post_call is the old Call node and
932            collects the results of the called graph.  Both will end up being tuples. -- */
933         post_bl = get_nodes_block(call);
934         set_irg_current_block(irg, post_bl);
935         /* XxMxPxPxPxT of Start + parameter of Call */
936         in[pn_Start_X_initial_exec]   = new_Jmp();
937         in[pn_Start_M]                = get_Call_mem(call);
938         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
939         in[pn_Start_P_tls]            = get_irg_tls(irg);
940         in[pn_Start_T_args]           = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
941         /* in[pn_Start_P_value_arg_base] = ??? */
942         assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
943         pre_call = new_Tuple(pn_Start_max - 1, in);
944         post_call = call;
945
946         /* --
947            The new block gets the ins of the old block, pre_call and all its
948            predecessors and all Phi nodes. -- */
949         part_block(pre_call);
950
951         /* -- Prepare state for dead node elimination -- */
952         /* Visited flags in calling irg must be >= flag in called irg.
953            Else walker and arity computation will not work. */
954         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
955                 set_irg_visited(irg, get_irg_visited(called_graph)+1);
956         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
957                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
958         /* Set pre_call as new Start node in link field of the start node of
959            calling graph and pre_calls block as new block for the start block
960            of calling graph.
961            Further mark these nodes so that they are not visited by the
962            copying. */
963         set_irn_link(get_irg_start(called_graph), pre_call);
964         set_irn_visited(get_irg_start(called_graph), get_irg_visited(irg));
965         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
966         set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(irg));
967         set_irn_link(get_irg_bad(called_graph), get_irg_bad(irg));
968         set_irn_visited(get_irg_bad(called_graph), get_irg_visited(irg));
969
970         /* Initialize for compaction of in arrays */
971         inc_irg_block_visited(irg);
972
973         /* -- Replicate local entities of the called_graph -- */
974         /* copy the entities. */
975         called_frame = get_irg_frame_type(called_graph);
976         curr_frame   = get_irg_frame_type(irg);
977         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
978                 ir_entity *new_ent, *old_ent;
979                 old_ent = get_class_member(called_frame, i);
980                 new_ent = copy_entity_own(old_ent, curr_frame);
981                 set_entity_link(old_ent, new_ent);
982         }
983
984         /* visited is now greater than that of the called graph.  With this trick
985            visited remains unchanged, so an outer walker (e.g., one searching for call
986            nodes to inline) that triggers this inlining will not visit the inlined nodes. */
987         set_irg_visited(irg, get_irg_visited(irg)-1);
988
989         /* -- Performing dead node elimination inlines the graph -- */
990         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
991            entities. */
992         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
993                  get_irg_frame_type(called_graph));
994
995         /* Repair called_graph */
996         set_irg_visited(called_graph, get_irg_visited(irg));
997         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
998         set_Block_block_visited(get_irg_start_block(called_graph), 0);
999
1000         /* -- Merge the end of the inlined procedure with the call site -- */
1001         /* We will turn the old Call node into a Tuple with the following
1002            predecessors:
1003            -1:  Block of Tuple.
1004            0: Phi of all Memories of Return statements.
1005            1: Jmp from new Block that merges the control flow from all exception
1006            predecessors of the old end block.
1007            2: Tuple of all arguments.
1008            3: Phi of Exception memories.
1009            In case the old Call directly branches to End on an exception we don't
1010            need the block merging all exceptions nor the Phi of the exception
1011            memories.
1012         */
1013
1014         /* -- Precompute some values -- */
1015         end_bl = get_new_node(get_irg_end_block(called_graph));
1016         end = get_new_node(get_irg_end(called_graph));
1017         arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
1018         n_res = get_method_n_ress(get_Call_type(call));
1019
1020         res_pred = xmalloc(n_res * sizeof(*res_pred));
1021         cf_pred  = xmalloc(arity * sizeof(*cf_pred));
1022
1023         set_irg_current_block(irg, post_bl); /* just to make sure */
1024
1025         /* -- archive keepalives -- */
1026         irn_arity = get_irn_arity(end);
1027         for (i = 0; i < irn_arity; i++) {
1028                 ir_node *ka = get_End_keepalive(end, i);
1029                 if (! is_Bad(ka))
1030                         add_End_keepalive(get_irg_end(irg), ka);
1031         }
1032
1033         /* The new end node will die.  We need not free as the in array is on the obstack:
1034            copy_node() only generated 'D' arrays. */
1035
1036         /* -- Replace Return nodes by Jump nodes. -- */
1037         n_ret = 0;
1038         for (i = 0; i < arity; i++) {
1039                 ir_node *ret;
1040                 ret = get_irn_n(end_bl, i);
1041                 if (is_Return(ret)) {
1042                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1043                         n_ret++;
1044                 }
1045         }
1046         set_irn_in(post_bl, n_ret, cf_pred);
1047
1048         /* -- Build a Tuple for all results of the method.
1049            Add Phi node if there was more than one Return.  -- */
1050         turn_into_tuple(post_call, pn_Call_max);
1051         /* First the Memory-Phi */
1052         n_ret = 0;
1053         for (i = 0; i < arity; i++) {
1054                 ret = get_irn_n(end_bl, i);
1055                 if (is_Return(ret)) {
1056                         cf_pred[n_ret] = get_Return_mem(ret);
1057                         n_ret++;
1058                 }
1059         }
1060         phi = new_Phi(n_ret, cf_pred, mode_M);
1061         set_Tuple_pred(call, pn_Call_M_regular, phi);
1062         /* Conserve Phi-list for further inlinings -- but might be optimized */
1063         if (get_nodes_block(phi) == post_bl) {
1064                 set_irn_link(phi, get_irn_link(post_bl));
1065                 set_irn_link(post_bl, phi);
1066         }
1067         /* Now the real results */
1068         if (n_res > 0) {
1069                 for (j = 0; j < n_res; j++) {
1070                         n_ret = 0;
1071                         for (i = 0; i < arity; i++) {
1072                                 ret = get_irn_n(end_bl, i);
1073                                 if (is_Return(ret)) {
1074                                         cf_pred[n_ret] = get_Return_res(ret, j);
1075                                         n_ret++;
1076                                 }
1077                         }
1078                         if (n_ret > 0)
1079                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1080                         else
1081                                 phi = new_Bad();
1082                         res_pred[j] = phi;
1083                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1084                         if (get_nodes_block(phi) == post_bl) {
1085                                 set_Phi_next(phi, get_Block_phis(post_bl));
1086                                 set_Block_phis(post_bl, phi);
1087                         }
1088                 }
1089                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1090         } else {
1091                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1092         }
1093         /* handle the regular call */
1094         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1095
1096         /* For now, we cannot inline calls with value_base */
1097         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1098
1099         /* Finally the exception control flow.
1100            We have two (three) possible situations:
1101            First if the Call branches to an exception handler: We need to add a Phi node to
1102            collect the memory containing the exception objects.  Further we need
1103            to add another block to get a correct representation of this Phi.  To
1104            this block we add a Jmp that resolves into the X output of the Call
1105            when the Call is turned into a tuple.
1106            Second the Call branches to End, the exception is not handled.  Just
1107            add all inlined exception branches to the End node.
1108            Third: there is no Exception edge at all. Handle as case two. */
1109         if (exc_handling == exc_handler) {
1110                 n_exc = 0;
1111                 for (i = 0; i < arity; i++) {
1112                         ir_node *ret, *irn;
1113                         ret = get_irn_n(end_bl, i);
1114                         irn = skip_Proj(ret);
1115                         if (is_fragile_op(irn) || is_Raise(irn)) {
1116                                 cf_pred[n_exc] = ret;
1117                                 ++n_exc;
1118                         }
1119                 }
1120                 if (n_exc > 0) {
1121                         new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
1122                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1123                         /* The Phi for the memories with the exception objects */
1124                         n_exc = 0;
1125                         for (i = 0; i < arity; i++) {
1126                                 ir_node *ret;
1127                                 ret = skip_Proj(get_irn_n(end_bl, i));
1128                                 if (is_Call(ret)) {
1129                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1130                                         n_exc++;
1131                                 } else if (is_fragile_op(ret)) {
1132                                         /* We rely on all cfops having the memory output at the same position. */
1133                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1134                                         n_exc++;
1135                                 } else if (is_Raise(ret)) {
1136                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1137                                         n_exc++;
1138                                 }
1139                         }
1140                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1141                 } else {
1142                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1143                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1144                 }
1145         } else {
1146                 ir_node *main_end_bl;
1147                 int main_end_bl_arity;
1148                 ir_node **end_preds;
1149
1150                 /* assert(exc_handling == 1 || no exceptions. ) */
1151                 n_exc = 0;
1152                 for (i = 0; i < arity; i++) {
1153                         ir_node *ret = get_irn_n(end_bl, i);
1154                         ir_node *irn = skip_Proj(ret);
1155
1156                         if (is_fragile_op(irn) || is_Raise(irn)) {
1157                                 cf_pred[n_exc] = ret;
1158                                 n_exc++;
1159                         }
1160                 }
1161                 main_end_bl = get_irg_end_block(irg);
1162                 main_end_bl_arity = get_irn_arity(main_end_bl);
1163                 end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
1164
1165                 for (i = 0; i < main_end_bl_arity; ++i)
1166                         end_preds[i] = get_irn_n(main_end_bl, i);
1167                 for (i = 0; i < n_exc; ++i)
1168                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1169                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1170                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1171                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1172                 free(end_preds);
1173         }
1174         free(res_pred);
1175         free(cf_pred);
1176
1177         /* --  Turn CSE back on. -- */
1178         set_optimize(rem_opt);
1179         current_ir_graph = rem;
1180
1181         return 1;
1182 }
1183
1184 /********************************************************************/
1185 /* Apply inlining to small methods.                                 */
1186 /********************************************************************/
1187
1188 static struct obstack  temp_obst;
1189
1190 /** Represents a possible inlinable call in a graph. */
1191 typedef struct _call_entry call_entry;
1192 struct _call_entry {
1193         ir_node    *call;      /**< the Call node */
1194         ir_graph   *callee;    /**< the callee IR-graph called here */
1195         call_entry *next;      /**< for linking the next one */
1196         int        loop_depth; /**< the loop depth of this call */
1197 };
1198
1199 /**
1200  * environment for inlining small irgs
1201  */
1202 typedef struct _inline_env_t {
1203         struct obstack obst;  /**< an obstack where call_entries are allocated on. */
1204         call_entry *head;     /**< the head of the call entry list */
1205         call_entry *tail;     /**< the tail of the call entry list */
1206 } inline_env_t;
1207
1208 /**
1209  * Returns the irg called from a Call node. If the irg is not
1210  * known, NULL is returned.
1211  *
1212  * @param call  the call node
1213  */
1214 static ir_graph *get_call_called_irg(ir_node *call) {
1215         ir_node *addr;
1216
1217         addr = get_Call_ptr(call);
1218         if (is_Global(addr)) {
1219                 ir_entity *ent = get_Global_entity(addr);
1220                 return get_entity_irg(ent);
1221         }
1222
1223         return NULL;
1224 }
1225
1226 /**
1227  * Walker: Collect all calls to known graphs inside a graph.
1228  */
1229 static void collect_calls(ir_node *call, void *env) {
1230         if (is_Call(call)) {
1231                 ir_graph *called_irg = get_call_called_irg(call);
1232
1233                 if (called_irg != NULL) {
1234                         /* The Call node calls a locally defined method.  Remember to inline. */
1235                         inline_env_t *ienv  = env;
1236                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1237                         entry->call       = call;
1238                         entry->callee     = called_irg;
1239                         entry->next       = NULL;
1240                         entry->loop_depth = 0;
1241
1242                         if (ienv->tail == NULL)
1243                                 ienv->head = entry;
1244                         else
1245                                 ienv->tail->next = entry;
1246                         ienv->tail = entry;
1247                 }
1248         }
1249 }
1250
1251 /**
1252  * Inlines all small methods at call sites where the called address comes
1253  * from a Const node that references the entity representing the called
1254  * method.
1255  * The size argument is a rough measure for the code size of the method:
1256  * Methods where the obstack containing the firm graph is smaller than
1257  * size are inlined.
1258  */
1259 void inline_small_irgs(ir_graph *irg, int size) {
1260         ir_graph *rem = current_ir_graph;
1261         inline_env_t env;
1262         call_entry *entry;
1263
1264         current_ir_graph = irg;
1265         /* Handle graph state */
1266         assert(get_irg_phase_state(irg) != phase_building);
1267         free_callee_info(irg);
1268
1269         /* Find Call nodes to inline.
1270            (We cannot inline during a walk of the graph, as inlining the same
1271            method several times changes the visited flag of the walked graph:
1272            after the first inlining, visited of the callee equals visited of
1273            the caller.  With the next inlining both are increased.) */
1274         obstack_init(&env.obst);
1275         env.head = env.tail = NULL;
1276         irg_walk_graph(irg, NULL, collect_calls, &env);
1277
1278         if (env.head != NULL) {
1279                 /* There are calls to inline */
1280                 collect_phiprojs(irg);
1281                 for (entry = env.head; entry != NULL; entry = entry->next) {
1282                         ir_graph *callee = entry->callee;
1283                         if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
1284                             (get_irg_inline_property(callee) >= irg_inline_forced)) {
1285                                 inline_method(entry->call, callee);
1286                         }
1287                 }
1288         }
1289         obstack_free(&env.obst, NULL);
1290         current_ir_graph = rem;
1291 }
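
/*
 * Whole-program driver sketch (hypothetical helper, assuming the usual irp
 * accessors): apply inline_small_irgs() with a fixed size budget to every graph.
 */
static void inline_small_irgs_all_example(int size) {
        int i;
        for (i = get_irp_n_irgs() - 1; i >= 0; --i)
                inline_small_irgs(get_irp_irg(i), size);
}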
1292
/**
 * Environment for inlining irgs.
 */
typedef struct {
	int n_nodes;             /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
	int n_blocks;            /**< Number of Blocks in graph without Start and End block. */
	int n_nodes_orig;        /**< for statistics */
	int n_call_nodes;        /**< Number of Call nodes in the graph. */
	int n_call_nodes_orig;   /**< for statistics */
	int n_callers;           /**< Number of known graphs that call this graph. */
	int n_callers_orig;      /**< for statistics */
	unsigned got_inline:1;   /**< Set, if at least one call inside this graph was inlined. */
	unsigned local_vars:1;   /**< Set, if an inlined function receives the address of a local variable. */
	unsigned recursive:1;    /**< Set, if this function is self recursive. */
	call_entry *call_head;   /**< The head of the list of all call nodes in this graph. */
	call_entry *call_tail;   /**< The tail of the list of all call nodes in this graph. */
	unsigned *local_weights; /**< Once allocated, the beneficial weight for transmitting local addresses. */
} inline_irg_env;

/**
 * Allocate a new environment for inlining.
 */
static inline_irg_env *alloc_inline_irg_env(void) {
	inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
	env->n_nodes           = -2; /* do not count Start, End */
	env->n_blocks          = -2; /* do not count Start, End Block */
	env->n_nodes_orig      = -2; /* do not count Start, End */
	env->call_head         = NULL;
	env->call_tail         = NULL;
	env->n_call_nodes      = 0;
	env->n_call_nodes_orig = 0;
	env->n_callers         = 0;
	env->n_callers_orig    = 0;
	env->got_inline        = 0;
	env->local_vars        = 0;
	env->recursive         = 0;
	env->local_weights     = NULL;
	return env;
}

typedef struct walker_env {
	inline_irg_env *x;     /**< the inline environment */
	call_entry *last_call; /**< points to the last inserted call */
	char ignore_runtime;   /**< the ignore runtime flag */
	char ignore_callers;   /**< if set, do NOT update the caller data of callees */
} wenv_t;

/**
 * post-walker: collect all calls in the inline environment
 * of a graph and gather some statistics.
 */
static void collect_calls2(ir_node *call, void *ctx) {
	wenv_t         *env = ctx;
	inline_irg_env *x = env->x;
	ir_opcode      code = get_irn_opcode(call);
	ir_graph       *callee;
	call_entry     *entry;

	/* count meaningful nodes in irg */
	if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
		if (code != iro_Block) {
			++x->n_nodes;
			++x->n_nodes_orig;
		} else {
			++x->n_blocks;
		}
	}

	if (code != iro_Call) return;

	/* check if it is a runtime call */
	if (env->ignore_runtime) {
		ir_node *symc = get_Call_ptr(call);

		if (is_Global(symc)) {
			ir_entity *ent = get_Global_entity(symc);

			if (get_entity_additional_properties(ent) & mtp_property_runtime)
				return;
		}
	}

	/* collect all call nodes */
	++x->n_call_nodes;
	++x->n_call_nodes_orig;

	callee = get_call_called_irg(call);
	if (callee != NULL) {
		if (! env->ignore_callers) {
			inline_irg_env *callee_env = get_irg_link(callee);
			/* count all static callers */
			++callee_env->n_callers;
			++callee_env->n_callers_orig;
		}
		if (callee == current_ir_graph)
			x->recursive = 1;

		/* link it into the list of possible inlinable entries */
		entry = obstack_alloc(&temp_obst, sizeof(*entry));
		entry->call       = call;
		entry->callee     = callee;
		entry->next       = NULL;
		entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;

		/* note: we use last_call here as a pointer to the last inserted entry */
		if (x->call_head == NULL) {
			x->call_head = entry;
		} else {
			if (entry->loop_depth == env->last_call->loop_depth) {
				/* same depth as the last one, enqueue after it */
				entry->next          = env->last_call->next;
				env->last_call->next = entry;
			} else if (entry->loop_depth > x->call_head->loop_depth) {
				/* put first */
				entry->next  = x->call_head;
				x->call_head = entry;
			} else {
				/* search the insertion point */
				call_entry *p;

				for (p = x->call_head; p->next != NULL; p = p->next)
					if (entry->loop_depth > p->next->loop_depth)
						break;
				entry->next = p->next;
				p->next     = entry;
			}
		}
		env->last_call = entry;
		if (entry->next == NULL) {
			/* keep tail up to date */
			x->call_tail = entry;
		}
	}
}

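/*
 * Worked example (illustration only): if calls are visited with loop
 * depths 0, 2, 1, 2, the resulting list is sorted by descending depth,
 * keeping visit order among equal depths: [2, 2, 1, 0].  The deepest
 * (hottest) calls are therefore considered first by the inliners below.
 */
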
/**
 * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
 * hence this irg is a leaf: it calls no other graphs.
 */
INLINE static int is_leave(ir_graph *irg) {
	inline_irg_env *env = get_irg_link(irg);
	return env->n_call_nodes == 0;
}

/**
 * Returns TRUE if the number of nodes in the callee is
 * smaller than size in the irg's environment.
 */
INLINE static int is_smaller(ir_graph *callee, int size) {
	inline_irg_env *env = get_irg_link(callee);
	return env->n_nodes < size;
}

/**
 * Append the entries of the list src to the call list of the environment dst.
 */
static void append_call_list(inline_irg_env *dst, call_entry *src) {
	call_entry *entry, *nentry;

	/* Note that the src list points to Call nodes in the inlined graph, but
	   we need Call nodes in our graph. Luckily the inliner leaves this information
	   in the link field. */
	for (entry = src; entry != NULL; entry = entry->next) {
		nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
		nentry->call         = get_irn_link(entry->call);
		nentry->callee       = entry->callee;
		nentry->next         = NULL;
		nentry->loop_depth   = entry->loop_depth;
		dst->call_tail->next = nentry;
		dst->call_tail       = nentry;
	}
}

/**
 * Replace the list entry dst by (copies of) the entries of the list src,
 * adding dst's loop depth to each copied entry.  Returns the new list head.
 */
static call_entry *replace_entry_by_call_list(call_entry *dst, call_entry *src) {
	call_entry *entry, *nentry, *head, *tail;

	/* Note that the src list points to Call nodes in the inlined graph, but
	   we need Call nodes in our graph. Luckily the inliner leaves this information
	   in the link field. */
	head = tail = NULL;
	for (entry = src; entry != NULL; entry = entry->next) {
		nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
		nentry->call         = get_irn_link(entry->call);
		nentry->callee       = entry->callee;
		nentry->next         = NULL;
		nentry->loop_depth   = entry->loop_depth + dst->loop_depth;
		if (head == NULL)
			head = nentry;
		else
			tail->next = nentry;
		tail = nentry;
	}
	/* the copied list replaces dst: skip dst itself */
	if (head != NULL) {
		tail->next = dst->next;
	} else {
		head = dst->next;
	}
	return head;
}

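/*
 * Illustration (not code from libFirm): if dst is the entry E in the list
 * ... -> E -> N -> ... and src is [s1, s2], the function returns
 * s1' -> s2' -> N, where s1' and s2' are copies with adjusted loop depths.
 * The caller is responsible for re-linking E's predecessor to the
 * returned head.
 */
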
/*
 * Inlines small leaf methods at call sites where the called address comes
 * from a SymConst node that references the entity representing the called
 * method.
 * The size arguments are rough measures for the code size of a method:
 * leaf methods with fewer nodes than leavesize and other methods with
 * fewer nodes than size are inlined, as long as the caller does not grow
 * beyond maxsize nodes.
 */
void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime) {
	inline_irg_env   *env;
	ir_graph         *irg;
	int              i, n_irgs;
	ir_graph         *rem;
	int              did_inline;
	wenv_t           wenv;
	call_entry       *entry, *tail;
	const call_entry *centry;
	pmap             *copied_graphs;
	pmap_entry       *pm_entry;

	rem = current_ir_graph;
	obstack_init(&temp_obst);

	/* a map for the copied graphs, used to inline recursive calls */
	copied_graphs = pmap_create();

	/* extend all irgs by a temporary data structure for inlining. */
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i)
		set_irg_link(get_irp_irg(i), alloc_inline_irg_env());

	/* Precompute information in temporary data structure. */
	wenv.ignore_runtime = ignore_runtime;
	wenv.ignore_callers = 0;
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);

		assert(get_irg_phase_state(irg) != phase_building);
		free_callee_info(irg);

		assure_cf_loop(irg);
		wenv.x = get_irg_link(irg);
		irg_walk_graph(irg, NULL, collect_calls2, &wenv);
	}

	/* -- and now inline. -- */

	/* Inline leaves recursively -- we might construct new leaves. */
	do {
		did_inline = 0;

		for (i = 0; i < n_irgs; ++i) {
			ir_node *call;
			int phiproj_computed = 0;

			current_ir_graph = get_irp_irg(i);
			env = (inline_irg_env *)get_irg_link(current_ir_graph);

			tail = NULL;
			for (entry = env->call_head; entry != NULL; entry = entry->next) {
				ir_graph *callee;

				if (env->n_nodes > maxsize) break;

				call   = entry->call;
				callee = entry->callee;

				if (is_leave(callee) &&
				    (is_smaller(callee, leavesize) || (get_irg_inline_property(callee) >= irg_inline_forced))) {
					if (!phiproj_computed) {
						phiproj_computed = 1;
						collect_phiprojs(current_ir_graph);
					}
					did_inline = inline_method(call, callee);

					if (did_inline) {
						inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);

						/* was inlined, must be recomputed */
						phiproj_computed = 0;

						/* Do some statistics */
						env->got_inline = 1;
						--env->n_call_nodes;
						env->n_nodes += callee_env->n_nodes;
						--callee_env->n_callers;

						/* remove this call from the list */
						if (tail != NULL)
							tail->next = entry->next;
						else
							env->call_head = entry->next;
						continue;
					}
				}
				tail = entry;
			}
			env->call_tail = tail;
		}
	} while (did_inline);

	/* inline other small functions. */
	for (i = 0; i < n_irgs; ++i) {
		ir_node *call;
		int phiproj_computed = 0;

		current_ir_graph = get_irp_irg(i);
		env = (inline_irg_env *)get_irg_link(current_ir_graph);

		/* note that the list of possible calls is updated during the process */
		tail = NULL;
		for (entry = env->call_head; entry != NULL; entry = entry->next) {
			ir_graph   *callee;
			pmap_entry *e;

			call   = entry->call;
			callee = entry->callee;

			e = pmap_find(copied_graphs, callee);
			if (e != NULL) {
				/*
				 * Remap callee if we have a copy.
				 * FIXME: Should we do this only for recursive Calls ?
				 */
				callee = e->value;
			}

			if ((is_smaller(callee, size) && (env->n_nodes < maxsize)) ||    /* small function */
			    (get_irg_inline_property(callee) >= irg_inline_forced)) {
				if (current_ir_graph == callee) {
					/*
					 * Recursive call: we cannot directly inline because we cannot walk
					 * the graph and change it. So we have to make a copy of the graph
					 * first.
					 */

					inline_irg_env *callee_env;
					ir_graph       *copy;

					/*
					 * No copy yet, create one.
					 * Note that recursive methods are never leaves, so it is sufficient
					 * to test this condition here.
					 */
					copy = create_irg_copy(callee);

					/* create_irg_copy() destroys the Proj links, recompute them */
					phiproj_computed = 0;

					/* allocate new environment */
					callee_env = alloc_inline_irg_env();
					set_irg_link(copy, callee_env);

					assure_cf_loop(copy);
					wenv.x              = callee_env;
					wenv.ignore_callers = 1;
					irg_walk_graph(copy, NULL, collect_calls2, &wenv);

					/*
					 * Enter the entity of the original graph. This is needed
					 * for inline_method(). However, note that ent->irg still points
					 * to callee, NOT to copy.
					 */
					set_irg_entity(copy, get_irg_entity(callee));

					pmap_insert(copied_graphs, callee, copy);
					callee = copy;

					/* we have only one caller: the original graph */
					callee_env->n_callers      = 1;
					callee_env->n_callers_orig = 1;
				}
				if (! phiproj_computed) {
					phiproj_computed = 1;
					collect_phiprojs(current_ir_graph);
				}
				did_inline = inline_method(call, callee);
				if (did_inline) {
					inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);

					/* was inlined, must be recomputed */
					phiproj_computed = 0;

					/* callee was inlined. Append its call list. */
					env->got_inline = 1;
					--env->n_call_nodes;
					append_call_list(env, callee_env->call_head);
					env->n_call_nodes += callee_env->n_call_nodes;
					env->n_nodes += callee_env->n_nodes;
					--callee_env->n_callers;

					/* after we have inlined callee, all called methods inside callee
					   are now called once more */
					for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
						inline_irg_env *penv = get_irg_link(centry->callee);
						++penv->n_callers;
					}

					/* remove this call from the list */
					if (tail != NULL)
						tail->next = entry->next;
					else
						env->call_head = entry->next;
					continue;
				}
			}
			tail = entry;
		}
		env->call_tail = tail;
	}

	for (i = 0; i < n_irgs; ++i) {
		irg = get_irp_irg(i);
		env = (inline_irg_env *)get_irg_link(irg);

		if (env->got_inline) {
			optimize_graph_df(irg);
			optimize_cf(irg);
		}
		if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
			DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
			env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
			env->n_callers_orig, env->n_callers,
			get_entity_name(get_irg_entity(irg))));
		}
	}

	/* kill the copied graphs: we don't need them anymore */
	foreach_pmap(copied_graphs, pm_entry) {
		ir_graph *copy = pm_entry->value;

		/* reset the entity, otherwise it will be deleted in the next step ... */
		set_irg_entity(copy, NULL);
		free_ir_graph(copy);
	}
	pmap_destroy(copied_graphs);

	obstack_free(&temp_obst, NULL);
	current_ir_graph = rem;
}

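/*
 * Usage sketch (illustration only; the bounds are arbitrary example values,
 * not tuned defaults):
 *
 *   inline_leave_functions(750, 400, 50, 0);
 *
 * This stops growing any caller beyond 750 nodes, inlines leaf callees of
 * up to 400 nodes and other callees of up to 50 nodes, and with the last
 * argument 0 it does not skip runtime calls.
 */
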
/**
 * Calculate the parameter weight for transmitting the address of a local variable.
 */
static unsigned calc_method_local_weight(ir_node *arg) {
	int      i, j, k;
	unsigned v, weight = 0;

	for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
		ir_node *succ = get_irn_out(arg, i);

		switch (get_irn_opcode(succ)) {
		case iro_Load:
		case iro_Store:
			/* Loads and Stores can be removed */
			weight += 3;
			break;
		case iro_Sel:
			/* check if all args are constant */
			for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
				ir_node *idx = get_Sel_index(succ, j);
				if (! is_Const(idx))
					return 0;
			}
			/* Check users on this Sel. Note: if a 0 is returned here, there was
			   some unsupported node. */
			v = calc_method_local_weight(succ);
			if (v == 0)
				return 0;
			/* we can kill one Sel with constant indices, this is cheap */
			weight += v + 1;
			break;
		case iro_Id:
			/* when looking backward we might find Id nodes */
			weight += calc_method_local_weight(succ);
			break;
		case iro_Tuple:
			/* unoptimized tuple */
			for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
				ir_node *pred = get_Tuple_pred(succ, j);
				if (pred == arg) {
					/* look for Proj(j) */
					for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
						ir_node *succ_succ = get_irn_out(succ, k);
						if (is_Proj(succ_succ)) {
							if (get_Proj_proj(succ_succ) == j) {
								/* found */
								weight += calc_method_local_weight(succ_succ);
							}
						} else {
							/* this should NOT happen */
							return 0;
						}
					}
				}
			}
			break;
		default:
			/* any other node: unsupported yet or bad. */
			return 0;
		}
	}
	return weight;
}

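/*
 * Worked example (illustration only): an address argument that is used by
 * two Loads and one Store yields a weight of 3 + 3 + 3 = 9; if it only
 * feeds a Sel with constant indices whose single user is a Load, the
 * weight is 3 + 1 = 4.  Any unsupported user makes the weight collapse
 * to 0, i.e. no scalar-replacement benefit is assumed for the address.
 */
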
/**
 * Calculate the parameter weights for transmitting the addresses of local variables.
 */
static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
	ir_entity *ent = get_irg_entity(irg);
	ir_type  *mtp;
	int      nparams, i, proj_nr;
	ir_node  *irg_args, *arg;

	mtp      = get_entity_type(ent);
	nparams  = get_method_n_params(mtp);

	/* allocate a new array. A non-NULL pointer also serves as the 'analysed' flag. */
	env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);

	/* If the method has no parameters we have nothing to do. */
	if (nparams <= 0)
		return;

	assure_irg_outs(irg);
	irg_args = get_irg_args(irg);
	for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
		arg     = get_irn_out(irg_args, i);
		proj_nr = get_Proj_proj(arg);
		env->local_weights[proj_nr] = calc_method_local_weight(arg);
	}
}

/**
 * Calculate the benefice for transmitting a local variable address.
 * After inlining, the local variable might be transformed into an
 * SSA variable by scalar_replacement().
 */
static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
	inline_irg_env *env = get_irg_link(callee);

	if (env->local_weights != NULL) {
		if (pos < ARR_LEN(env->local_weights))
			return env->local_weights[pos];
		return 0;
	}

	analyze_irg_local_weights(env, callee);

	if (pos < ARR_LEN(env->local_weights))
		return env->local_weights[pos];
	return 0;
}

/**
 * Calculate a benefice value for inlining the given call.
 */
static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local_adr) {
	ir_entity *ent = get_irg_entity(callee);
	ir_node   *frame_ptr;
	ir_type   *mtp;
	int       weight = 0;
	int       i, n_params, all_const;
	unsigned  cc, v;

	inline_irg_env *curr_env, *callee_env;

	if (get_entity_additional_properties(ent) & mtp_property_noreturn) {
		/* do NOT inline noreturn calls */
		return INT_MIN;
	}

	/* costs for every passed parameter */
	n_params = get_Call_n_params(call);
	mtp      = get_entity_type(ent);
	cc       = get_method_calling_convention(mtp);
	if (cc & cc_reg_param) {
		/* register parameters: smaller costs for them */
		int max_regs = cc & ~cc_bits;

		if (max_regs < n_params)
			weight += max_regs * 2 + (n_params - max_regs) * 5;
		else
			weight += n_params * 2;
	} else {
		/* parameters are passed on the stack */
		weight += 5 * n_params;
	}

	/* constant parameters improve the benefice */
	frame_ptr = get_irg_frame(current_ir_graph);
	all_const = 1;
	for (i = 0; i < n_params; ++i) {
		ir_node *param = get_Call_param(call, i);

		if (is_Const(param))
			weight += get_method_param_weight(ent, i);
		else {
			all_const = 0;
			if (is_SymConst(param))
				weight += get_method_param_weight(ent, i);
			else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
				/*
				 * An address of a local variable is transmitted. After inlining,
				 * scalar_replacement might be able to remove the local variable,
				 * so honor this.
				 */
				v = get_method_local_adress_weight(callee, i);
				weight += v;
				if (v > 0)
					*local_adr = 1;
			}
		}
	}

	callee_env = get_irg_link(callee);
	if (get_entity_visibility(ent) == visibility_local &&
	    callee_env->n_callers_orig == 1 &&
	    callee != current_ir_graph) {
		/* we are the only caller, give a big bonus */
		weight += 5000;
	}

	/* do not inline big functions */
	weight -= callee_env->n_nodes;

	/* reduce the benefice if the current function is already big */
	curr_env = get_irg_link(current_ir_graph);
	weight -= curr_env->n_nodes / 50;

	/* give a bonus for functions with one block */
	if (callee_env->n_blocks == 1)
		weight = weight * 3 / 2;

	/* and one for small non-recursive functions: we want them to be inlined in almost every case */
	else if (callee_env->n_nodes < 20 && !callee_env->recursive)
		weight += 5000;

	/* and finally one for leaves: they do not increase the register pressure
	   because of callee-save registers */
	else if (callee_env->n_call_nodes == 0)
		weight += 25;

	/*
	 * Reduce the weight for recursive functions if not all arguments are constant:
	 * inlining recursive functions is rarely good.
	 */
	if (callee_env->recursive && !all_const)
		weight -= 500;

	/*
	 * All arguments constant is probably a good sign, give an extra bonus.
	 */
	if (all_const)
		weight += 100;

	return weight;
}

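/*
 * Worked example (illustration only; the parameter weight 10 is an assumed
 * value of get_method_param_weight()): a call with 3 stack parameters, one
 * of them a Const, to a local single-block callee of 40 nodes with exactly
 * one caller starts at 3*5 + 10 = 25, gains the only-caller bonus (+5000),
 * loses the callee size (-40) and the caller-size penalty, and the
 * one-block bonus scales the sum:
 *
 *   benefice = (25 + 5000 - 40 - curr_nodes/50) * 3 / 2
 */
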
/**
 * Heuristic inliner. Calculates a benefice value for every call and inlines
 * those calls whose benefice is higher than -inline_threshold (or where
 * inlining is forced).
 */
void inline_functions(int maxsize, int inline_threshold) {
	inline_irg_env   *env;
	int              i, n_irgs;
	ir_graph         *rem;
	int              did_inline;
	wenv_t           wenv;
	call_entry       *curr_call, **last_call;
	const call_entry *centry;
	pmap             *copied_graphs;
	pmap_entry       *pm_entry;

	rem = current_ir_graph;
	obstack_init(&temp_obst);

	/* a map for the copied graphs, used to inline recursive calls */
	copied_graphs = pmap_create();

	/* extend all irgs by a temporary data structure for inlining. */
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i)
		set_irg_link(get_irp_irg(i), alloc_inline_irg_env());

	/* Precompute information in temporary data structure. */
	wenv.ignore_runtime = 0;
	wenv.ignore_callers = 0;
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);

		assert(get_irg_phase_state(irg) != phase_building);
		free_callee_info(irg);

		wenv.x         = get_irg_link(irg);
		wenv.last_call = NULL;
		assure_cf_loop(irg);
		irg_walk_graph(irg, NULL, collect_calls2, &wenv);
	}

	/* -- and now inline. -- */
	for (i = 0; i < n_irgs; ++i) {
		int      phiproj_computed = 0;
		ir_node  *call;
		ir_graph *irg = get_irp_irg(i);

		current_ir_graph = irg;
		env = get_irg_link(irg);

		/* note that the list of possible calls is updated during the process */
		last_call = &env->call_head;
		for (curr_call = env->call_head; curr_call != NULL;) {
			ir_graph   *callee;
			pmap_entry *e;
			int        benefice;
			unsigned   local_adr;

			if (env->n_nodes > maxsize) break;

			call   = curr_call->call;
			callee = curr_call->callee;

			e = pmap_find(copied_graphs, callee);
			if (e != NULL) {
				/*
				 * Remap callee if we have a copy.
				 * FIXME: Should we do this only for recursive Calls ?
				 */
				callee = e->value;
			}

			/* calculate the benefice on the original call to prevent excessive inlining */
			local_adr = 0;
			benefice = calc_inline_benefice(call, callee, &local_adr);
			DB((dbg, LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));

			if (benefice > -inline_threshold ||
			    (get_irg_inline_property(callee) >= irg_inline_forced)) {
				if (current_ir_graph == callee) {
					/*
					 * Recursive call: we cannot directly inline because we cannot walk
					 * the graph and change it. So we have to make a copy of the graph
					 * first.
					 */

					inline_irg_env *callee_env;
					ir_graph       *copy;

					/*
					 * No copy yet, create one.
					 * Note that recursive methods are never leaves, so it is sufficient
					 * to test this condition here.
					 */
					copy = create_irg_copy(callee);

					/* create_irg_copy() destroys the Proj links, recompute them */
					phiproj_computed = 0;

					/* allocate new environment */
					callee_env = alloc_inline_irg_env();
					set_irg_link(copy, callee_env);

					assure_cf_loop(copy);
					wenv.x              = callee_env;
					wenv.ignore_callers = 1;
					irg_walk_graph(copy, NULL, collect_calls2, &wenv);

					/*
					 * Enter the entity of the original graph. This is needed
					 * for inline_method(). However, note that ent->irg still points
					 * to callee, NOT to copy.
					 */
					set_irg_entity(copy, get_irg_entity(callee));

					pmap_insert(copied_graphs, callee, copy);
					callee = copy;

					/* we have only one caller: the original graph */
					callee_env->n_callers      = 1;
					callee_env->n_callers_orig = 1;
				}
				if (! phiproj_computed) {
					phiproj_computed = 1;
					collect_phiprojs(current_ir_graph);
				}
				did_inline = inline_method(call, callee);
				if (did_inline) {
					inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);

					/* was inlined, must be recomputed */
					phiproj_computed = 0;

					/* after we have inlined callee, all called methods inside callee
					   are now called once more */
					for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
						inline_irg_env *penv = get_irg_link(centry->callee);
						++penv->n_callers;
					}

					/* callee was inlined. Append its call list. */
					env->got_inline = 1;
					if (local_adr)
						env->local_vars = 1;
					--env->n_call_nodes;
					curr_call = replace_entry_by_call_list(curr_call, callee_env->call_head);
					env->n_call_nodes += callee_env->n_call_nodes;
					env->n_nodes += callee_env->n_nodes;
					--callee_env->n_callers;

					/* splice the copied callee list in place of the inlined entry */
					*last_call = curr_call;
					continue;
				}
			}
			last_call = &curr_call->next;
			curr_call = curr_call->next;
		}

		if (env->got_inline) {
			/* this irg got calls inlined: optimize it */

			/* scalar replacement does not work well with Tuple nodes, so optimize them away */
			optimize_graph_df(irg);

			if (env->local_vars) {
				if (scalar_replacement_opt(irg)) {
					optimize_graph_df(irg);
				}
			}
			optimize_cf(irg);
		}
		if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
			DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
			env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
			env->n_callers_orig, env->n_callers,
			get_entity_name(get_irg_entity(irg))));
		}
	}

	/* kill the copied graphs: we don't need them anymore */
	foreach_pmap(copied_graphs, pm_entry) {
		ir_graph *copy = pm_entry->value;

		/* reset the entity, otherwise it will be deleted in the next step ... */
		set_irg_entity(copy, NULL);
		free_ir_graph(copy);
	}
	pmap_destroy(copied_graphs);

	obstack_free(&temp_obst, NULL);
	current_ir_graph = rem;
}

void firm_init_inline(void) {
	FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
}
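
/*
 * Usage sketch (illustration only; the constants are arbitrary example
 * values): after the debug module has been registered, a driver could run
 * the heuristic inliner over the whole program like this:
 *
 *   firm_init_inline();
 *   inline_functions(750, 0);
 *
 * With inline_threshold == 0 only calls whose computed benefice is
 * positive are inlined, unless inlining is forced for the callee.
 */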