BugFix: map all anchored nodes of the called graph to nodes of the caller's graph
[libfirm] / ir / opt / opt_inline.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #ifdef HAVE_CONFIG_H
27 # include "config.h"
28 #endif
29
30 #include <limits.h>
31 #include <assert.h>
32
33 #include "irnode_t.h"
34 #include "irgraph_t.h"
35 #include "irprog_t.h"
36
37 #include "iroptimize.h"
38 #include "ircons_t.h"
39 #include "iropt_t.h"
40 #include "irgopt.h"
41 #include "irgmod.h"
42 #include "irgwalk.h"
43
44 #include "adt/array.h"
45 #include "adt/pset.h"
46 #include "adt/pmap.h"
47 #include "adt/pdeq.h"
48 #include "adt/xmalloc.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63
64 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
65
66 /*------------------------------------------------------------------*/
67 /* Routines for dead node elimination / copying garbage collection  */
68 /* of the obstack.                                                  */
69 /*------------------------------------------------------------------*/
70
71 /**
72  * Remember the new node in the old node by using a field all nodes have.
73  */
74 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
75
76 /**
77  * Get this new node, before the old node is forgotten.
78  */
79 #define get_new_node(oldn) get_irn_link(oldn)
80
81 /**
82  * Check if a new node was set.
83  */
84 #define has_new_node(n) (get_new_node(n) != NULL)
85
86 /**
87  * We use the block_visited flag to mark that we have computed the
88  * number of useful predecessors for this block.
89  * Further we encode the new arity in this flag in the old blocks.
90  * Remembering the arity is useful, as it saves a lot of pointer
91  * accesses.  This function is called for all Phi and Block nodes
92  * in a Block.
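 *  For example, if the graph's block-visited counter is 100 and the block has
 *  three non-Bad predecessors, block_visited is set to 103, and a later call
 *  simply returns 103 - 100 = 3.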
93  */
94 static INLINE int
95 compute_new_arity(ir_node *b) {
96         int i, res, irn_arity;
97         int irg_v, block_v;
98
99         irg_v = get_irg_block_visited(current_ir_graph);
100         block_v = get_Block_block_visited(b);
101         if (block_v >= irg_v) {
102                 /* we computed the number of preds for this block and saved it in the
103                    block_v flag */
104                 return block_v - irg_v;
105         } else {
106                 /* compute the number of good predecessors */
107                 res = irn_arity = get_irn_arity(b);
108                 for (i = 0; i < irn_arity; i++)
109                         if (is_Bad(get_irn_n(b, i))) res--;
110                 /* save it in the flag. */
111                 set_Block_block_visited(b, irg_v + res);
112                 return res;
113         }
114 }
115
116 /**
117  * Copies the node to the new obstack. The Ins of the new node point to
118  * the predecessors on the old obstack.  For block/phi nodes not all
119  * predecessors might be copied.  n->link points to the new node.
120  * For Phi and Block nodes the function allocates in-arrays with an arity
121  * only for useful predecessors.  The arity is determined by counting
122  * the non-bad predecessors of the block.
123  *
124  * @param n    The node to be copied
125  * @param env  if non-NULL, the node number attribute will be copied to the new node
126  *
127  * Note: Also used for loop unrolling.
128  */
129 static void copy_node(ir_node *n, void *env) {
130         ir_node *nn, *block;
131         int new_arity;
132         ir_op *op = get_irn_op(n);
133         (void) env;
134
135         if (op == op_Bad) {
136                 /* node copied already */
137                 return;
138         } else if (op == op_Block) {
139                 block = NULL;
140                 new_arity = compute_new_arity(n);
141                 n->attr.block.graph_arr = NULL;
142         } else {
143                 block = get_nodes_block(n);
144                 if (op == op_Phi) {
145                         new_arity = compute_new_arity(block);
146                 } else {
147                         new_arity = get_irn_arity(n);
148                 }
149         }
150         nn = new_ir_node(get_irn_dbg_info(n),
151                 current_ir_graph,
152                 block,
153                 op,
154                 get_irn_mode(n),
155                 new_arity,
156                 get_irn_in(n) + 1);
157         /* Copy the attributes.  These might point to additional data.  If this
158            was allocated on the old obstack the pointers now are dangling.  This
159            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
160         if (op == op_Block) {
161                 /* we cannot allow blocks WITHOUT macroblock input */
162                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
163         }
164         copy_node_attr(n, nn);
165
166 #ifdef DEBUG_libfirm
167         {
168                 int copy_node_nr = env != NULL;
169                 if (copy_node_nr) {
170                         /* for easier debugging, we want to copy the node numbers too */
171                         nn->node_nr = n->node_nr;
172                 }
173         }
174 #endif
175
176         set_new_node(n, nn);
177         hook_dead_node_elim_subst(current_ir_graph, n, nn);
178 }
179
180 /**
181  * Copies new predecessors of old node to new node remembered in link.
182  * Spare the Bad predecessors of Phi and Block nodes.
183  */
184 static void copy_preds(ir_node *n, void *env) {
185         ir_node *nn, *block;
186         int i, j, irn_arity;
187         (void) env;
188
189         nn = get_new_node(n);
190
191         if (is_Block(n)) {
192                 /* copy the macro block header */
193                 ir_node *mbh = get_Block_MacroBlock(n);
194
195                 if (mbh == n) {
196                         /* this block is a macroblock header */
197                         set_Block_MacroBlock(nn, nn);
198                 } else {
199                         /* get the macro block header */
200                         ir_node *nmbh = get_new_node(mbh);
201                         assert(nmbh != NULL);
202                         set_Block_MacroBlock(nn, nmbh);
203                 }
204
205                 /* Don't copy Bad nodes. */
206                 j = 0;
207                 irn_arity = get_irn_arity(n);
208                 for (i = 0; i < irn_arity; i++) {
209                         if (! is_Bad(get_irn_n(n, i))) {
210                                 ir_node *pred = get_irn_n(n, i);
211                                 set_irn_n(nn, j, get_new_node(pred));
212                                 j++;
213                         }
214                 }
215                 /* repair the block visited flag from above misuse. Repair it in both
216                    graphs so that the old one can still be used. */
217                 set_Block_block_visited(nn, 0);
218                 set_Block_block_visited(n, 0);
219                 /* Local optimization could not merge two subsequent blocks if
220                    the in array contained Bads.  Now it's possible.
221                    We don't call optimize_in_place as it requires
222                    that the fields in ir_graph are set properly. */
223                 if ((get_opt_control_flow_straightening()) &&
224                         (get_Block_n_cfgpreds(nn) == 1) &&
225                         is_Jmp(get_Block_cfgpred(nn, 0))) {
226                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
227                         if (nn == old) {
228                                 /* Jmp jumps into the block it is in -- deal with this self cycle. */
229                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
230                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
231                         } else {
232                                 exchange(nn, old);
233                         }
234                 }
235         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
236                 /* Don't copy node if corresponding predecessor in block is Bad.
237                    The Block itself should not be Bad. */
238                 block = get_nodes_block(n);
239                 set_nodes_block(nn, get_new_node(block));
240                 j = 0;
241                 irn_arity = get_irn_arity(n);
242                 for (i = 0; i < irn_arity; i++) {
243                         if (! is_Bad(get_irn_n(block, i))) {
244                                 ir_node *pred = get_irn_n(n, i);
245                                 set_irn_n(nn, j, get_new_node(pred));
246                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
247                                 j++;
248                         }
249                 }
250                 /* If the pre walker reached this Phi after the post walker visited the
251                    block, block_visited is > 0. */
252                 set_Block_block_visited(get_nodes_block(n), 0);
253                 /* Compacting the Phi's ins might generate Phis with only one
254                    predecessor. */
255                 if (get_irn_arity(nn) == 1)
256                         exchange(nn, get_irn_n(nn, 0));
257         } else {
258                 irn_arity = get_irn_arity(n);
259                 for (i = -1; i < irn_arity; i++)
260                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
261         }
262         /* Now the new node is complete.  We can add it to the hash table for CSE.
263            @@@ inlining aborts if we identify End. Why? */
264         if (!is_End(nn))
265                 add_identities(current_ir_graph->value_table, nn);
266 }
267
268 /**
269  * Copies the graph recursively, compacts the keep-alives of the end node.
270  *
271  * @param irg           the graph to be copied
272  * @param copy_node_nr  If non-zero, the node number will be copied
273  */
274 static void copy_graph(ir_graph *irg, int copy_node_nr) {
275         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
276         ir_node *ka;      /* keep alive */
277         int i, irn_arity;
278         unsigned long vfl;
279
280         /* Some nodes must be copied by hand, sigh */
281         vfl = get_irg_visited(irg);
282         set_irg_visited(irg, vfl + 1);
283
284         oe = get_irg_end(irg);
285         mark_irn_visited(oe);
286         /* copy the end node by hand, allocate dynamic in array! */
287         ne = new_ir_node(get_irn_dbg_info(oe),
288                 irg,
289                 NULL,
290                 op_End,
291                 mode_X,
292                 -1,
293                 NULL);
294         /* Copy the attributes.  Well, there might be some in the future... */
295         copy_node_attr(oe, ne);
296         set_new_node(oe, ne);
297
298         /* copy the Bad node */
299         ob = get_irg_bad(irg);
300         mark_irn_visited(ob);
301         nb = new_ir_node(get_irn_dbg_info(ob),
302                 irg,
303                 NULL,
304                 op_Bad,
305                 mode_T,
306                 0,
307                 NULL);
308         copy_node_attr(ob, nb);
309         set_new_node(ob, nb);
310
311         /* copy the NoMem node */
312         om = get_irg_no_mem(irg);
313         mark_irn_visited(om);
314         nm = new_ir_node(get_irn_dbg_info(om),
315                 irg,
316                 NULL,
317                 op_NoMem,
318                 mode_M,
319                 0,
320                 NULL);
321         copy_node_attr(om, nm);
322         set_new_node(om, nm);
323
324         /* copy the live nodes */
325         set_irg_visited(irg, vfl);
326         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
327
328         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
329
330         /* visit the anchors as well */
331         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
332                 ir_node *n = get_irg_anchor(irg, i);
333
334                 if (n && (get_irn_visited(n) <= vfl)) {
335                         set_irg_visited(irg, vfl);
336                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
337                 }
338         }
339
340         /* copy_preds for the end node ... */
341         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
342
343         /*- ... and now the keep alives. -*/
344         /* First pick the unmarked block nodes and walk them.  We must pick these
345            first, as otherwise we would overlook blocks reachable from Phis. */
346         irn_arity = get_End_n_keepalives(oe);
347         for (i = 0; i < irn_arity; i++) {
348                 ka = get_End_keepalive(oe, i);
349                 if (is_Block(ka)) {
350                         if (get_irn_visited(ka) <= vfl) {
351                                 /* We must keep the block alive and copy everything reachable */
352                                 set_irg_visited(irg, vfl);
353                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
354                         }
355                         add_End_keepalive(ne, get_new_node(ka));
356                 }
357         }
358
359         /* Now pick other nodes.  Here we will keep all! */
360         irn_arity = get_End_n_keepalives(oe);
361         for (i = 0; i < irn_arity; i++) {
362                 ka = get_End_keepalive(oe, i);
363                 if (!is_Block(ka)) {
364                         if (get_irn_visited(ka) <= vfl) {
365                                 /* We didn't copy the node yet.  */
366                                 set_irg_visited(irg, vfl);
367                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
368                         }
369                         add_End_keepalive(ne, get_new_node(ka));
370                 }
371         }
372
373         /* start block sometimes only reached after keep alives */
374         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
375         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
376 }
377
378 /**
379  * Copies the graph reachable from current_ir_graph->end to the obstack
380  * in current_ir_graph and fixes the environment.
381  * Then fixes the fields in current_ir_graph containing nodes of the
382  * graph.
383  *
384  * @param copy_node_nr  If non-zero, the node number will be copied
385  */
386 static void
387 copy_graph_env(int copy_node_nr) {
388         ir_graph *irg = current_ir_graph;
389         ir_node *old_end, *new_anchor;
390         int i;
391
392         /* remove end_except and end_reg nodes */
393         old_end = get_irg_end(irg);
394         set_irg_end_except (irg, old_end);
395         set_irg_end_reg    (irg, old_end);
396
397         /* Not all nodes remembered in irg might be reachable
398            from the end node.  Ensure their link is set to NULL, so that
399            we can test whether new nodes have been computed. */
400         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
401                 ir_node *n = get_irg_anchor(irg, i);
402                 if (n != NULL)
403                         set_new_node(n, NULL);
404         }
405         /* we use the block walk flag for removing Bads from Blocks ins. */
406         inc_irg_block_visited(irg);
407
408         /* copy the graph */
409         copy_graph(irg, copy_node_nr);
410
411         /* fix the anchor */
412         old_end    = get_irg_end(irg);
413         new_anchor = new_Anchor(irg);
414
415         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
416                 ir_node *n = get_irg_anchor(irg, i);
417                 if (n)
418                         set_irn_n(new_anchor, i, get_new_node(n));
419         }
420         free_End(old_end);
421         irg->anchor = new_anchor;
422
423         /* ensure the new anchor is placed in the endblock */
424         set_nodes_block(new_anchor, get_irg_end_block(irg));
425 }
426
427 /**
428  * Copies all reachable nodes to a new obstack.  Removes bad inputs
429  * from block nodes and the corresponding inputs from Phi nodes.
430  * Merges single exit blocks with single entry blocks and removes
431  * 1-input Phis.
432  * Adds all new nodes to a new hash table for CSE.  Does not
433  * perform CSE, so the hash table might contain common subexpressions.
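 *
 *  Note that ir_node pointers held outside the graph become stale after this
 *  pass, since all live nodes are copied to a fresh obstack and the old one is
 *  freed; the survive_dce facility further down in this file can be used to
 *  have such pointers patched automatically.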
434  */
435 void dead_node_elimination(ir_graph *irg) {
436         ir_graph *rem;
437 #ifdef INTERPROCEDURAL_VIEW
438         int rem_ipview = get_interprocedural_view();
439 #endif
440         struct obstack *graveyard_obst = NULL;
441         struct obstack *rebirth_obst   = NULL;
442
443         edges_deactivate(irg);
444
445         /* inform statistics that we started a dead-node elimination run */
446         hook_dead_node_elim(irg, 1);
447
448         /* Remember external state of current_ir_graph. */
449         rem = current_ir_graph;
450         current_ir_graph = irg;
451 #ifdef INTERPROCEDURAL_VIEW
452         set_interprocedural_view(0);
453 #endif
454
455         assert(get_irg_phase_state(irg) != phase_building);
456
457         /* Handle graph state */
458         free_callee_info(irg);
459         free_irg_outs(irg);
460         free_trouts();
461
462         /* @@@ so far we lose loops when copying */
463         free_loop_information(irg);
464
465         set_irg_doms_inconsistent(irg);
466
467         /* A quiet place, where the old obstack can rest in peace,
468            until it will be cremated. */
469         graveyard_obst = irg->obst;
470
471         /* A new obstack, where the reachable nodes will be copied to. */
472         rebirth_obst = xmalloc(sizeof(*rebirth_obst));
473         irg->obst = rebirth_obst;
474         obstack_init(irg->obst);
475         irg->last_node_idx = 0;
476
477         /* We also need a new value table for CSE */
478         del_identities(irg->value_table);
479         irg->value_table = new_identities();
480
481         /* Copy the graph from the old to the new obstack */
482         copy_graph_env(/*copy_node_nr=*/1);
483
484         /* Free memory from old unoptimized obstack */
485         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
486         xfree(graveyard_obst);            /* ... then free it.           */
487
488         /* inform statistics that the run is over */
489         hook_dead_node_elim(irg, 0);
490
491         current_ir_graph = rem;
492 #ifdef INTERPROCEDURAL_VIEW
493         set_interprocedural_view(rem_ipview);
494 #endif
495 }
496
497 /**
498  * Relink bad predecessors of a block and store the old in array in the
499  * link field. This function is called by relink_bad_predecessors().
500  * The array in the link field starts with the block operand at position 0.
501  * If the block has bad predecessors, create a new in array without bad preds.
502  * Otherwise leave the in array untouched.
503  */
504 static void relink_bad_block_predecessors(ir_node *n, void *env) {
505         ir_node **new_in, *irn;
506         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
507         (void) env;
508
509         /* if the link field of the block is NULL, look for bad predecessors;
510            otherwise this is already done */
511         if (is_Block(n) && get_irn_link(n) == NULL) {
512                 /* save old predecessors in link field (position 0 is the block operand)*/
513                 set_irn_link(n, get_irn_in(n));
514
515                 /* count predecessors without bad nodes */
516                 old_irn_arity = get_irn_arity(n);
517                 for (i = 0; i < old_irn_arity; i++)
518                         if (!is_Bad(get_irn_n(n, i)))
519                                 ++new_irn_arity;
520
521                 /* arity changing: set new predecessors without bad nodes */
522                 if (new_irn_arity < old_irn_arity) {
523                         /* Get new predecessor array. We do not resize the array, as we must
524                            keep the old one to update Phis. */
525                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
526
527                         /* set new predecessors in array */
528                         new_in[0] = NULL;
529                         new_irn_n = 1;
530                         for (i = 0; i < old_irn_arity; i++) {
531                                 irn = get_irn_n(n, i);
532                                 if (!is_Bad(irn)) {
533                                         new_in[new_irn_n] = irn;
534                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
535                                         ++new_irn_n;
536                                 }
537                         }
538                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
539                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
540                         n->in = new_in;
541                 } /* ir node has bad predecessors */
542         } /* Block is not relinked */
543 }
544
545 /**
546  * Relinks Bad predecessors of Blocks and Phis; called by the walker in
547  * remove_bad_predecessors(). If n is a Block, call
548  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
549  * function for the Phi's Block. If this block has bad predecessors, relink the
550  * predecessors of the Phi node.
551  */
552 static void relink_bad_predecessors(ir_node *n, void *env) {
553         ir_node *block, **old_in;
554         int i, old_irn_arity, new_irn_arity;
555
556         /* relink bad predecessors of a block */
557         if (is_Block(n))
558                 relink_bad_block_predecessors(n, env);
559
560         /* If Phi node relink its block and its predecessors */
561         if (is_Phi(n)) {
562                 /* Relink predecessors of phi's block */
563                 block = get_nodes_block(n);
564                 if (get_irn_link(block) == NULL)
565                         relink_bad_block_predecessors(block, env);
566
567                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
568                 old_irn_arity = ARR_LEN(old_in);
569
570                 /* Relink Phi predecessors if count of predecessors changed */
571                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
572                         /* set new predecessors in array
573                            n->in[0] remains the same block */
574                         new_irn_arity = 1;
575                         for (i = 1; i < old_irn_arity; i++)
576                                 if (!is_Bad(old_in[i])) {
577                                         n->in[new_irn_arity] = n->in[i];
578                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
579                                         ++new_irn_arity;
580                                 }
581
582                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
583                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
584                 }
585         } /* n is a Phi node */
586 }
587
588 /*
589  * Removes Bad predecessors from Blocks and the corresponding
590  * inputs to Phi nodes as in dead_node_elimination, but without
591  * copying the graph.
592  * The pre-walker sets the link field to NULL, the post-walker calls
593  * relink_bad_predecessors() (this function stores the old in array
594  * in the link field and sets a new in array if the arity of the
595  * predecessors changes).
596  */
597 void remove_bad_predecessors(ir_graph *irg) {
598         panic("Fix backedge handling first");
599         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
600 }
601
602
603 /*
604    __                      _  __ __
605   (_     __    o     _    | \/  |_
606   __)|_| | \_/ | \_/(/_   |_/\__|__
607
608   The following stuff implements a facility that automatically patches
609   registered ir_node pointers to the new node when a dead node elimination occurs.
610 */
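
/*
 * A minimal usage sketch of this facility (variable names are illustrative
 * only):
 *
 *   survive_dce_t *sd = new_survive_dce();
 *   survive_dce_register_irn(sd, &my_node);   -- my_node: an ir_node* the caller keeps
 *   dead_node_elimination(irg);               -- my_node now points to the copied node
 *   free_survive_dce(sd);
 */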
611
612 struct _survive_dce_t {
613         struct obstack obst;
614         pmap *places;
615         pmap *new_places;
616         hook_entry_t dead_node_elim;
617         hook_entry_t dead_node_elim_subst;
618 };
619
620 typedef struct _survive_dce_list_t {
621         struct _survive_dce_list_t *next;
622         ir_node **place;
623 } survive_dce_list_t;
624
625 static void dead_node_hook(void *context, ir_graph *irg, int start) {
626         survive_dce_t *sd = context;
627         (void) irg;
628
629         /* Create a new map before the dead node elimination is performed. */
630         if (start) {
631                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
632         } else {
633                 /* Dead node elimination is over; the subst hook has already patched the registered pointers, so take over the new map. */
634                 pmap_destroy(sd->places);
635                 sd->places     = sd->new_places;
636                 sd->new_places = NULL;
637         }
638 }
639
640 /**
641  * Hook called when dead node elimination replaces old by nw.
642  */
643 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
644         survive_dce_t *sd = context;
645         survive_dce_list_t *list = pmap_get(sd->places, old);
646         (void) irg;
647
648         /* If the node is to be patched back, write the new address to all registered locations. */
649         if (list) {
650                 survive_dce_list_t *p;
651
652                 for (p = list; p; p = p->next)
653                         *(p->place) = nw;
654
655                 pmap_insert(sd->new_places, nw, list);
656         }
657 }
658
659 /**
660  * Make a new Survive DCE environment.
661  */
662 survive_dce_t *new_survive_dce(void) {
663         survive_dce_t *res = xmalloc(sizeof(res[0]));
664         obstack_init(&res->obst);
665         res->places     = pmap_create();
666         res->new_places = NULL;
667
668         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
669         res->dead_node_elim.context                   = res;
670         res->dead_node_elim.next                      = NULL;
671
672         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
673         res->dead_node_elim_subst.context = res;
674         res->dead_node_elim_subst.next    = NULL;
675
676 #ifndef FIRM_ENABLE_HOOKS
677         assert(0 && "need hooks enabled");
678 #endif
679
680         register_hook(hook_dead_node_elim, &res->dead_node_elim);
681         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
682         return res;
683 }
684
685 /**
686  * Free a Survive DCE environment.
687  */
688 void free_survive_dce(survive_dce_t *sd) {
689         obstack_free(&sd->obst, NULL);
690         pmap_destroy(sd->places);
691         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
692         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
693         xfree(sd);
694 }
695
696 /**
697  * Register a node pointer to be patched upon DCE.
698  * When DCE occurs, the node pointer specified by @p place will be
699  * patched to the new address of the node it is pointing to.
700  *
701  * @param sd    The Survive DCE environment.
702  * @param place The address of the node pointer.
703  */
704 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
705         if (*place != NULL) {
706                 ir_node *irn      = *place;
707                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
708                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
709
710                 nw->next  = curr;
711                 nw->place = place;
712
713                 pmap_insert(sd->places, irn, nw);
714         }
715 }
716
717 /*--------------------------------------------------------------------*/
718 /*  Functionality for inlining                                         */
719 /*--------------------------------------------------------------------*/
720
721 /**
722  * Copy node for inlining.  Updates attributes that change when
723  * inlining but not for dead node elimination.
724  *
725  * Copies the node by calling copy_node() and then updates the entity if
726  * it's a local one.  env must be a pointer to the frame type of the
727  * inlined procedure. The new entities must be in the link fields of
728  * the old entities.
729  */
730 static void copy_node_inline(ir_node *n, void *env) {
731         ir_node *nn;
732         ir_type *frame_tp = (ir_type *)env;
733
734         copy_node(n, NULL);
735         if (is_Sel(n)) {
736                 nn = get_new_node (n);
737                 assert(is_Sel(nn));
738                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
739                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
740                 }
741         } else if (is_Block(n)) {
742                 nn = get_new_node (n);
743                 nn->attr.block.irg = current_ir_graph;
744         }
745 }
746
747 /**
748  * Copies new predecessors of the old node and moves constants to
749  * the Start Block.
750  */
751 static void copy_preds_inline(ir_node *n, void *env) {
752         ir_node *nn;
753
754         copy_preds(n, env);
755         nn = skip_Id(get_new_node(n));
756         if (is_irn_constlike(nn)) {
757                 /* move Constants into the start block */
758                 set_nodes_block(nn, get_irg_start_block(current_ir_graph));
759
760                 n = identify_remember(current_ir_graph->value_table, nn);
761                 if (nn != n) {
762                         exchange(nn, n);
763                 }
764         }
765 }
766
767 /**
768  * Walker: checks if P_value_arg_base is used.
769  */
770 static void find_addr(ir_node *node, void *env) {
771         int *allow_inline = env;
772         if (is_Proj(node) &&
773                         is_Start(get_Proj_pred(node)) &&
774                         get_Proj_proj(node) == pn_Start_P_value_arg_base) {
775                 *allow_inline = 0;
776         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
777                 /* From GCC:
778                  * Refuse to inline alloca call unless user explicitly forced so as this
779                  * may change program's memory overhead drastically when the function
780                  * using alloca is called in a loop.  In the GCC sources present in SPEC2000,
781                  * inlining into schedule_block caused it to require 2GB of RAM instead of 256MB.
782                  *
783                  * Sadly this is true for our implementation as well.
784                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
785                  * disables inlining of functions using VLA (which are completely safe).
786                  *
787                  * 2 Solutions:
788                  * - add a flag to the Alloc node for "real" alloca() calls
789                  * - add a new Stack-Restore node at the end of a function using alloca()
790                  */
791                 *allow_inline = 0;
792         }
793 }
794
795 /**
796  * Check if we can inline a given call.
797  * Currently, we cannot inline two cases:
798  * - calls with compound arguments
799  * - graphs that take the address of a parameter
800  *
801  * Check these conditions here.
802  */
803 static int can_inline(ir_node *call, ir_graph *called_graph) {
804         ir_type *call_type = get_Call_type(call);
805         int params, ress, i, res;
806         assert(is_Method_type(call_type));
807
808         params = get_method_n_params(call_type);
809         ress   = get_method_n_ress(call_type);
810
811         /* check parameters for compound arguments */
812         for (i = 0; i < params; ++i) {
813                 ir_type *p_type = get_method_param_type(call_type, i);
814
815                 if (is_compound_type(p_type))
816                         return 0;
817         }
818
819         /* check results for compound types */
820         for (i = 0; i < ress; ++i) {
821                 ir_type *r_type = get_method_res_type(call_type, i);
822
823                 if (is_compound_type(r_type))
824                         return 0;
825         }
826
827         res = 1;
828         irg_walk_graph(called_graph, find_addr, NULL, &res);
829
830         return res;
831 }
832
833 enum exc_mode {
834         exc_handler    = 0, /**< There is a handler. */
835         exc_to_end     = 1, /**< Branches to End. */
836         exc_no_handler = 2  /**< Exception handling not represented. */
837 };
838
839 /* Inlines a method at the given call site. */
840 int inline_method(ir_node *call, ir_graph *called_graph) {
841         ir_node             *pre_call;
842         ir_node             *post_call, *post_bl;
843         ir_node             *in[pn_Start_max];
844         ir_node             *end, *end_bl, *block;
845         ir_node             **res_pred;
846         ir_node             **cf_pred;
847         ir_node             **args_in;
848         ir_node             *ret, *phi;
849         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
850         enum exc_mode       exc_handling;
851         ir_type             *called_frame, *curr_frame, *mtp, *ctp;
852         ir_entity           *ent;
853         ir_graph            *rem, *irg;
854         irg_inline_property prop = get_irg_inline_property(called_graph);
855         unsigned long       visited;
856
857         if (prop == irg_inline_forbidden)
858                 return 0;
859
860         ent = get_irg_entity(called_graph);
861
862         mtp = get_entity_type(ent);
863         ctp = get_Call_type(call);
864         if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
865                 /* this is a bad feature of C: without a prototype, we can call a function with fewer
866                    parameters than declared. Currently we don't support this, although it would be
867                    possible to use Unknown then. */
868                 return 0;
869         }
870
871         /* Argh, compiling C has some bad consequences:
872            the call type AND the method type might be different.
873            It is implementation dependent what happens in that case.
874            We support inlining if the bitsize of the types matches AND
875            the same arithmetic is used. */
876         n_params = get_method_n_params(mtp);
877         for (i = n_params - 1; i >= 0; --i) {
878                 ir_type *param_tp = get_method_param_type(mtp, i);
879                 ir_type *arg_tp   = get_method_param_type(ctp, i);
880
881                 if (param_tp != arg_tp) {
882                         ir_mode *pmode = get_type_mode(param_tp);
883                         ir_mode *amode = get_type_mode(arg_tp);
884
885                         if (pmode == NULL || amode == NULL)
886                                 return 0;
887                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
888                                 return 0;
889                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
890                                 return 0;
891                         /* otherwise we can simply "reinterpret" the bits */
892                 }
893         }
894
895         irg = get_irn_irg(call);
896
897         /*
898          * We cannot inline a recursive call. The graph must be copied before
899          * the call to inline_method() using create_irg_copy().
900          */
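        /* A sketch of that copy step (the variable name is illustrative only):
         *   ir_graph *copy = create_irg_copy(called_graph);
         *   inline_method(call, copy);
         */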
901         if (called_graph == irg)
902                 return 0;
903
904         /*
905          * currently, we cannot inline two cases:
906          * - call with compound arguments
907          * - graphs that take the address of a parameter
908          */
909         if (! can_inline(call, called_graph))
910                 return 0;
911
912         rem = current_ir_graph;
913         current_ir_graph = irg;
914
915         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
916
917         /* --  Turn off optimizations, as they can cause problems when allocating new nodes. -- */
918         rem_opt = get_opt_optimize();
919         set_optimize(0);
920
921         /* Handle graph state */
922         assert(get_irg_phase_state(irg) != phase_building);
923         assert(get_irg_pinned(irg) == op_pin_state_pinned);
924         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
925         set_irg_outs_inconsistent(irg);
926         set_irg_extblk_inconsistent(irg);
927         set_irg_doms_inconsistent(irg);
928         set_irg_loopinfo_inconsistent(irg);
929         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
930
931         /* -- Check preconditions -- */
932         assert(is_Call(call));
933
934         /* here we know we WILL inline, so inform the statistics */
935         hook_inline(call, called_graph);
936
937         /* -- Decide how to handle exception control flow: Is there a handler
938            for the Call node, or do we branch directly to End on an exception?
939            exc_handling:
940            0 There is a handler.
941            1 Branches to End.
942            2 Exception handling not represented in Firm. -- */
943         {
944                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
945                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
946                         long proj_nr = get_Proj_proj(proj);
947                         if (proj_nr == pn_Call_X_except) Xproj = proj;
948                         if (proj_nr == pn_Call_M_except) Mproj = proj;
949                 }
950                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
951                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
952                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
953         }
954
955         /* create the argument tuple */
956         NEW_ARR_A(ir_node *, args_in, n_params);
957
958         block = get_nodes_block(call);
959         for (i = n_params - 1; i >= 0; --i) {
960                 ir_node *arg      = get_Call_param(call, i);
961                 ir_type *param_tp = get_method_param_type(mtp, i);
962                 ir_mode *mode     = get_type_mode(param_tp);
963
964                 if (mode != get_irn_mode(arg)) {
965                         arg = new_r_Conv(irg, block, arg, mode);
966                 }
967                 args_in[i] = arg;
968         }
969
970         /* -- Pre_call is a Tuple mimicking the outputs of the Start node of the called
971            graph (including the call arguments); it later replaces that Start node.
972            Post_call is the old Call node and collects the results of the called
973            graph. Both will end up being a tuple.  -- */
974         post_bl = get_nodes_block(call);
975         set_irg_current_block(irg, post_bl);
976         /* XxMxPxPxPxT of Start + parameter of Call */
977         in[pn_Start_X_initial_exec]   = new_Jmp();
978         in[pn_Start_M]                = get_Call_mem(call);
979         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
980         in[pn_Start_P_tls]            = get_irg_tls(irg);
981         in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
982         /* in[pn_Start_P_value_arg_base] = ??? */
983         assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
984         pre_call = new_Tuple(pn_Start_max - 1, in);
985         post_call = call;
986
987         /* --
988            The new block gets the ins of the old block, pre_call and all its
989            predecessors and all Phi nodes. -- */
990         part_block(pre_call);
991
992         /* -- Prepare state for dead node elimination -- */
993         /* Visited flags in calling irg must be >= flag in called irg.
994            Else walker and arity computation will not work. */
995         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
996                 set_irg_visited(irg, get_irg_visited(called_graph) + 1);
997         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
998                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
999         visited = get_irg_visited(irg);
1000
1001         /* Set pre_call as the new node for the Start node of the called graph (via
1002            the link field) and pre_call's block as the new block for the start block
1003            of the called graph.
1004            Further mark these nodes so that they are not visited by the
1005            copying. */
1006         set_irn_link(get_irg_start(called_graph), pre_call);
1007         set_irn_visited(get_irg_start(called_graph), visited);
1008         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
1009         set_irn_visited(get_irg_start_block(called_graph), visited);
1010
1011         assert(get_irg_n_anchors(called_graph) == get_irg_n_anchors(irg));
1012
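        /* Map every remaining anchor of the called graph (e.g. Bad, NoMem) to the
           corresponding anchor of the caller's graph, so that the copy walk below
           never drags the callee's own anchor nodes into the caller. */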
1013         for (i = get_irg_n_anchors(called_graph) - 1; i >= 0; --i) {
1014                 ir_node *anchor = get_irg_anchor(called_graph, i);
1015
1016                 if (get_irn_visited(anchor) >= visited) {
1017                         /* already set above */
1018                         continue;
1019                 }
1020                 set_irn_link(anchor, get_irg_anchor(irg, i));
1021                 set_irn_visited(anchor, visited);
1022         }
1023
1024         /* Initialize for compaction of in arrays */
1025         inc_irg_block_visited(irg);
1026
1027         /* -- Replicate local entities of the called_graph -- */
1028         /* copy the entities. */
1029         called_frame = get_irg_frame_type(called_graph);
1030         curr_frame   = get_irg_frame_type(irg);
1031         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
1032                 ir_entity *new_ent, *old_ent;
1033                 old_ent = get_class_member(called_frame, i);
1034                 new_ent = copy_entity_own(old_ent, curr_frame);
1035                 set_entity_link(old_ent, new_ent);
1036         }
1037
1038         /* visited is > than that of the called graph.  With this trick visited will
1039            remain unchanged, so that an outer walker (e.g., one searching for Call nodes
1040            to inline) that invokes this inliner will not visit the inlined nodes. */
1041         set_irg_visited(irg, get_irg_visited(irg)-1);
1042
1043         /* -- Performing dead node elimination inlines the graph -- */
1044         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
1045            entities. */
1046         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
1047                  get_irg_frame_type(called_graph));
1048
1049         /* Repair called_graph */
1050         set_irg_visited(called_graph, get_irg_visited(irg));
1051         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
1052         set_Block_block_visited(get_irg_start_block(called_graph), 0);
1053
1054         /* -- Merge the end of the inlined procedure with the call site -- */
1055         /* We will turn the old Call node into a Tuple with the following
1056            predecessors:
1057            -1:  Block of Tuple.
1058            0: Phi of all Memories of Return statements.
1059            1: Jmp from new Block that merges the control flow from all exception
1060            predecessors of the old end block.
1061            2: Tuple of all arguments.
1062            3: Phi of Exception memories.
1063            In case the old Call directly branches to End on an exception we don't
1064            need the block merging all exceptions nor the Phi of the exception
1065            memories.
1066         */
1067
1068         /* -- Precompute some values -- */
1069         end_bl = get_new_node(get_irg_end_block(called_graph));
1070         end = get_new_node(get_irg_end(called_graph));
1071         arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
1072         n_res = get_method_n_ress(get_Call_type(call));
1073
1074         res_pred = xmalloc(n_res * sizeof(*res_pred));
1075         cf_pred  = xmalloc(arity * sizeof(*cf_pred));
1076
1077         set_irg_current_block(irg, post_bl); /* just to make sure */
1078
1079         /* -- archive keepalives -- */
1080         irn_arity = get_irn_arity(end);
1081         for (i = 0; i < irn_arity; i++) {
1082                 ir_node *ka = get_End_keepalive(end, i);
1083                 if (! is_Bad(ka))
1084                         add_End_keepalive(get_irg_end(irg), ka);
1085         }
1086
1087         /* The new end node will die.  We need not free as the in array is on the obstack:
1088            copy_node() only generated 'D' arrays. */
1089
1090         /* -- Replace Return nodes by Jump nodes. -- */
1091         n_ret = 0;
1092         for (i = 0; i < arity; i++) {
1093                 ir_node *ret;
1094                 ret = get_irn_n(end_bl, i);
1095                 if (is_Return(ret)) {
1096                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1097                         n_ret++;
1098                 }
1099         }
1100         set_irn_in(post_bl, n_ret, cf_pred);
1101
1102         /* -- Build a Tuple for all results of the method.
1103            Add Phi node if there was more than one Return.  -- */
1104         turn_into_tuple(post_call, pn_Call_max);
1105         /* First the Memory-Phi */
1106         n_ret = 0;
1107         for (i = 0; i < arity; i++) {
1108                 ret = get_irn_n(end_bl, i);
1109                 if (is_Return(ret)) {
1110                         cf_pred[n_ret] = get_Return_mem(ret);
1111                         n_ret++;
1112                 }
1113         }
1114         phi = new_Phi(n_ret, cf_pred, mode_M);
1115         set_Tuple_pred(call, pn_Call_M_regular, phi);
1116         /* Conserve Phi-list for further inlinings -- but might be optimized */
1117         if (get_nodes_block(phi) == post_bl) {
1118                 set_irn_link(phi, get_irn_link(post_bl));
1119                 set_irn_link(post_bl, phi);
1120         }
1121         /* Now the real results */
1122         if (n_res > 0) {
1123                 for (j = 0; j < n_res; j++) {
1124                         n_ret = 0;
1125                         for (i = 0; i < arity; i++) {
1126                                 ret = get_irn_n(end_bl, i);
1127                                 if (is_Return(ret)) {
1128                                         cf_pred[n_ret] = get_Return_res(ret, j);
1129                                         n_ret++;
1130                                 }
1131                         }
1132                         if (n_ret > 0)
1133                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1134                         else
1135                                 phi = new_Bad();
1136                         res_pred[j] = phi;
1137                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1138                         if (get_nodes_block(phi) == post_bl) {
1139                                 set_Phi_next(phi, get_Block_phis(post_bl));
1140                                 set_Block_phis(post_bl, phi);
1141                         }
1142                 }
1143                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1144         } else {
1145                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1146         }
1147         /* handle the regular call */
1148         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1149
1150         /* For now, we cannot inline calls with value_base */
1151         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1152
1153         /* Finally the exception control flow.
1154            We have two (three) possible situations:
1155            First if the Call branches to an exception handler: We need to add a Phi node to
1156            collect the memory containing the exception objects.  Further we need
1157            to add another block to get a correct representation of this Phi.  To
1158            this block we add a Jmp that resolves into the X output of the Call
1159            when the Call is turned into a tuple.
1160            Second the Call branches to End, the exception is not handled.  Just
1161            add all inlined exception branches to the End node.
1162            Third: there is no Exception edge at all. Handle as case two. */
1163         if (exc_handling == exc_handler) {
1164                 n_exc = 0;
1165                 for (i = 0; i < arity; i++) {
1166                         ir_node *ret, *irn;
1167                         ret = get_irn_n(end_bl, i);
1168                         irn = skip_Proj(ret);
1169                         if (is_fragile_op(irn) || is_Raise(irn)) {
1170                                 cf_pred[n_exc] = ret;
1171                                 ++n_exc;
1172                         }
1173                 }
1174                 if (n_exc > 0) {
1175                         new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
1176                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1177                         /* The Phi for the memories with the exception objects */
1178                         n_exc = 0;
1179                         for (i = 0; i < arity; i++) {
1180                                 ir_node *ret;
1181                                 ret = skip_Proj(get_irn_n(end_bl, i));
1182                                 if (is_Call(ret)) {
1183                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1184                                         n_exc++;
1185                                 } else if (is_fragile_op(ret)) {
1186                                         /* We rely on the fact that all cfops have the memory output at the same position. */
1187                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1188                                         n_exc++;
1189                                 } else if (is_Raise(ret)) {
1190                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1191                                         n_exc++;
1192                                 }
1193                         }
1194                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1195                 } else {
1196                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1197                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1198                 }
1199         } else {
1200                 ir_node *main_end_bl;
1201                 int main_end_bl_arity;
1202                 ir_node **end_preds;
1203
1204                 /* assert(exc_handling == 1 || no exceptions. ) */
1205                 n_exc = 0;
1206                 for (i = 0; i < arity; i++) {
1207                         ir_node *ret = get_irn_n(end_bl, i);
1208                         ir_node *irn = skip_Proj(ret);
1209
1210                         if (is_fragile_op(irn) || is_Raise(irn)) {
1211                                 cf_pred[n_exc] = ret;
1212                                 n_exc++;
1213                         }
1214                 }
1215                 main_end_bl = get_irg_end_block(irg);
1216                 main_end_bl_arity = get_irn_arity(main_end_bl);
1217                 end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
1218
1219                 for (i = 0; i < main_end_bl_arity; ++i)
1220                         end_preds[i] = get_irn_n(main_end_bl, i);
1221                 for (i = 0; i < n_exc; ++i)
1222                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1223                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1224                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1225                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1226                 free(end_preds);
1227         }
1228         free(res_pred);
1229         free(cf_pred);
1230
1231         /* --  Turn optimizations back on. -- */
1232         set_optimize(rem_opt);
1233         current_ir_graph = rem;
1234
1235         return 1;
1236 }
1237
1238 /********************************************************************/
1239 /* Apply inlining to small methods.                                 */
1240 /********************************************************************/
1241
1242 static struct obstack  temp_obst;
1243
1244 /** Represents a possible inlinable call in a graph. */
1245 typedef struct _call_entry call_entry;
1246 struct _call_entry {
1247         ir_node    *call;      /**< the Call node */
1248         ir_graph   *callee;    /**< the callee IR-graph called here */
1249         call_entry *next;      /**< for linking the next one */
1250         int        loop_depth; /**< the loop depth of this call */
1251 };
1252
1253 /**
1254  * environment for inlining small irgs
1255  */
1256 typedef struct _inline_env_t {
1257         struct obstack obst;  /**< an obstack where call_entries are allocated on. */
1258         call_entry *head;     /**< the head of the call entry list */
1259         call_entry *tail;     /**< the tail of the call entry list */
1260 } inline_env_t;
1261
1262 /**
1263  * Returns the irg called from a Call node. If the irg is not
1264  * known, NULL is returned.
1265  *
1266  * @param call  the call node
1267  */
1268 static ir_graph *get_call_called_irg(ir_node *call) {
1269         ir_node *addr;
1270
1271         addr = get_Call_ptr(call);
1272         if (is_Global(addr)) {
1273                 ir_entity *ent = get_Global_entity(addr);
1274                 return get_entity_irg(ent);
1275         }
1276
1277         return NULL;
1278 }
1279
1280 /**
1281  * Walker: Collect all calls to known graphs inside a graph.
1282  */
1283 static void collect_calls(ir_node *call, void *env) {
1284         if (is_Call(call)) {
1285                 ir_graph *called_irg = get_call_called_irg(call);
1286
1287                 if (called_irg != NULL) {
1288                         /* The Call node calls a locally defined method.  Remember to inline. */
1289                         inline_env_t *ienv  = env;
1290                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1291                         entry->call       = call;
1292                         entry->callee     = called_irg;
1293                         entry->next       = NULL;
1294                         entry->loop_depth = 0;
1295
1296                         if (ienv->tail == NULL)
1297                                 ienv->head = entry;
1298                         else
1299                                 ienv->tail->next = entry;
1300                         ienv->tail = entry;
1301                 }
1302         }
1303 }
1304
1305 /**
1306  * Inlines all small methods at call sites where the called address comes
1307  * from a Const node that references the entity representing the called
1308  * method.
1309  * The size argument is a rough measure for the code size of the method:
1310  * Methods where the obstack containing the firm graph is smaller than
1311  * size are inlined.
1312  */
1313 void inline_small_irgs(ir_graph *irg, int size) {
1314         ir_graph *rem = current_ir_graph;
1315         inline_env_t env;
1316         call_entry *entry;
1317
1318         current_ir_graph = irg;
1319         /* Handle graph state */
1320         assert(get_irg_phase_state(irg) != phase_building);
1321         free_callee_info(irg);
1322
1323         /* Find Call nodes to inline.
1324            (We cannot inline during a walk of the graph, as inlining the same
1325            method several times changes the visited flag of the walked graph:
1326            after the first inlining the visited counter of the callee equals that
1327            of the caller.  With the next inlining both are increased.) */
1328         obstack_init(&env.obst);
1329         env.head = env.tail = NULL;
1330         irg_walk_graph(irg, NULL, collect_calls, &env);
1331
1332         if (env.head != NULL) {
1333                 /* There are calls to inline */
1334                 collect_phiprojs(irg);
1335                 for (entry = env.head; entry != NULL; entry = entry->next) {
1336                         ir_graph *callee = entry->callee;
1337                         if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
1338                             (get_irg_inline_property(callee) >= irg_inline_forced)) {
1339                                 inline_method(entry->call, callee);
1340                         }
1341                 }
1342         }
1343         obstack_free(&env.obst, NULL);
1344         current_ir_graph = rem;
1345 }
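
/*
 * Minimal usage sketch (illustrative only, not part of this pass): run
 * inline_small_irgs() over every graph of the program once construction has
 * finished.  The threshold of 5000 obstack bytes is an arbitrary example
 * value, not a recommendation.
 *
 *     void example_inline_all_small_irgs(void) {
 *         int i, n_irgs = get_irp_n_irgs();
 *
 *         for (i = 0; i < n_irgs; ++i)
 *             inline_small_irgs(get_irp_irg(i), 5000);
 *     }
 */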
1346
1347 /**
1348  * Environment for inlining irgs.
1349  */
1350 typedef struct {
1351         int n_nodes;             /**< Number of nodes in graph, not counting Proj, Tuple, Sync, Block, Start and End nodes. */
1352         int n_blocks;            /**< Number of Blocks in graph, not counting the Start and End block. */
1353         int n_nodes_orig;        /**< for statistics */
1354         int n_call_nodes;        /**< Number of Call nodes in the graph. */
1355         int n_call_nodes_orig;   /**< for statistics */
1356         int n_callers;           /**< Number of known graphs that call this graph. */
1357         int n_callers_orig;      /**< for statistics */
1358         unsigned got_inline:1;   /**< Set, if at least one call inside this graph was inlined. */
1359         unsigned local_vars:1;   /**< Set, if the address of one of this graph's local variables was passed to an inlined callee. */
1360         unsigned recursive:1;    /**< Set, if this function is self recursive. */
1361         call_entry *call_head;   /**< The head of the list of all call nodes in this graph. */
1362         call_entry *call_tail;   /**< The tail of the list of all call nodes in this graph. */
1363         unsigned *local_weights; /**< Once allocated, the beneficial weight for transmitting local addresses. */
1364 } inline_irg_env;
1365
1366 /**
1367  * Allocate a new environment for inlining.
1368  */
1369 static inline_irg_env *alloc_inline_irg_env(void) {
1370         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1371         env->n_nodes           = -2; /* do not count Start, End */
1372         env->n_blocks          = -2; /* do not count the Start and End block */
1373         env->n_nodes_orig      = -2; /* do not count Start, End */
1374         env->call_head         = NULL;
1375         env->call_tail         = NULL;
1376         env->n_call_nodes      = 0;
1377         env->n_call_nodes_orig = 0;
1378         env->n_callers         = 0;
1379         env->n_callers_orig    = 0;
1380         env->got_inline        = 0;
1381         env->local_vars        = 0;
1382         env->recursive         = 0;
1383         env->local_weights     = NULL;
1384         return env;
1385 }
1386
1387 typedef struct walker_env {
1388         inline_irg_env *x;     /**< the inline environment */
1389         call_entry *last_call; /**< points to the last inserted call */
1390         char ignore_runtime;   /**< the ignore runtime flag */
1391         char ignore_callers;   /**< if set, do NOT update the callers' data */
1392 } wenv_t;
1393
1394 /**
1395  * post-walker: collect all calls in the inline-environment
1396  * of a graph and sum some statistics.
1397  */
1398 static void collect_calls2(ir_node *call, void *ctx) {
1399         wenv_t         *env = ctx;
1400         inline_irg_env *x = env->x;
1401         ir_opcode      code = get_irn_opcode(call);
1402         ir_graph       *callee;
1403         call_entry     *entry;
1404
1405         /* count meaningful nodes in irg */
1406         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1407                 if (code != iro_Block) {
1408                         ++x->n_nodes;
1409                         ++x->n_nodes_orig;
1410                 } else {
1411                         ++x->n_blocks;
1412                 }
1413         }
1414
1415         if (code != iro_Call) return;
1416
1417         /* check, if it's a runtime call */
1418         if (env->ignore_runtime) {
1419                 ir_node *symc = get_Call_ptr(call);
1420
1421                 if (is_Global(symc)) {
1422                         ir_entity *ent = get_Global_entity(symc);
1423
1424                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1425                                 return;
1426                 }
1427         }
1428
1429         /* collect all call nodes */
1430         ++x->n_call_nodes;
1431         ++x->n_call_nodes_orig;
1432
1433         callee = get_call_called_irg(call);
1434         if (callee != NULL) {
1435                 if (! env->ignore_callers) {
1436                         inline_irg_env *callee_env = get_irg_link(callee);
1437                         /* count all static callers */
1438                         ++callee_env->n_callers;
1439                         ++callee_env->n_callers_orig;
1440                 }
1441                 if (callee == current_ir_graph)
1442                         x->recursive = 1;
1443
1444                 /* link it into the list of possibly inlinable entries */
1445                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1446                 entry->call       = call;
1447                 entry->callee     = callee;
1448                 entry->next       = NULL;
1449                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
1450
1451                 /* note: last_call points to the last inserted entry */
1452                 if (x->call_head == NULL) {
1453                         x->call_head = entry;
1454                 } else {
1455                         if (entry->loop_depth == env->last_call->loop_depth) {
1456                                 /* same depth as the last one, enqueue after it */
1457                                 entry->next          = env->last_call->next;
1458                                 env->last_call->next = entry;
1459                         } else if (entry->loop_depth > x->call_head->loop_depth) {
1460                                 /* put first */
1461                                 entry->next  = x->call_head;
1462                                 x->call_head = entry;
1463                         } else {
1464                                 /* search the insertion point */
1465                                 call_entry *p;
1466
1467                                 for (p = x->call_head; p->next != NULL; p = p->next)
1468                                         if (entry->loop_depth > p->next->loop_depth)
1469                                                 break;
1470                                 entry->next = p->next;
1471                                 p->next     = entry;
1472                         }
1473                 }
1474                 env->last_call = entry;
1475                 if (entry->next == NULL) {
1476                         /* keep tail up to date */
1477                         x->call_tail = entry;
1478                 }
1479         }
1480 }
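
/*
 * Illustrative note on the order produced by collect_calls2(): the call list
 * is kept sorted by descending loop depth, so calls in deeper loops are
 * considered first by the inliners below.  For example, visiting calls with
 * loop depths 1, 3, 2, 3 (in walk order) yields the list order 3, 3, 2, 1.
 */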
1481
1482 /**
1483  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
1484  * hence this irg does not call any other graph and is a leaf.
1485  */
1486 INLINE static int is_leave(ir_graph *irg) {
1487         inline_irg_env *env = get_irg_link(irg);
1488         return env->n_call_nodes == 0;
1489 }
1490
1491 /**
1492  * Returns TRUE if the number of nodes in the callee is
1493  * smaller than size, according to the irg's environment.
1494  */
1495 INLINE static int is_smaller(ir_graph *callee, int size) {
1496         inline_irg_env *env = get_irg_link(callee);
1497         return env->n_nodes < size;
1498 }
1499
1500 /**
1501  * Append the entries of the list src to the call list of the environment dst.
1502  */
1503 static void append_call_list(inline_irg_env *dst, call_entry *src) {
1504         call_entry *entry, *nentry;
1505
1506         /* Note that the src list points to Call nodes in the inlined graph, but
1507            we need Call nodes in our graph. Luckily the inliner leaves this information
1508            in the link field. */
1509         for (entry = src; entry != NULL; entry = entry->next) {
1510                 nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1511                 nentry->call         = get_irn_link(entry->call);
1512                 nentry->callee       = entry->callee;
1513                 nentry->next         = NULL;
1514                 nentry->loop_depth   = entry->loop_depth;
1515                 dst->call_tail->next = nentry;
1516                 dst->call_tail       = nentry;
1517         }
1518 }
1519
1520 /**
1521  * Replace the entry dst by copies of all entries of the list src.
1522  */
1523 static call_entry *replace_entry_by_call_list(call_entry *dst, call_entry *src) {
1524         call_entry *entry, *nentry, *head, *tail;
1525
1526         /* Note that the src list points to Call nodes in the inlined graph, but
1527            we need Call nodes in our graph. Luckily the inliner leaves this information
1528            in the link field. */
1529         head = tail = NULL;
1530         for (entry = src; entry != NULL; entry = entry->next) {
1531                 nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1532                 nentry->call         = get_irn_link(entry->call);
1533                 nentry->callee       = entry->callee;
1534                 nentry->next         = NULL;
1535                 nentry->loop_depth   = entry->loop_depth + dst->loop_depth;
1536                 if (head == NULL)
1537                         head = nentry;
1538                 else
1539                         tail->next = nentry;
1540                 tail = nentry;
1541         }
1542         /* splice in the rest of dst's list; the entry dst itself is dropped */
1543         if (head != NULL) {
1544                 tail->next = dst->next;
1545         } else {
1546                 head = dst->next;
1547         }
1548         return head;
1549 }
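
/*
 * Illustrative example: if the caller's call list is A -> C -> B and C is the
 * call just inlined, replace_entry_by_call_list(C, X -> Y) returns the list
 * X' -> Y' -> B, where X' and Y' are copies of the callee's entries remapped
 * to the caller's Call nodes and with C's loop depth added.
 */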
1550
1551 /*
1552  * Inlines small leaf methods at call sites where the called address comes
1553  * from a SymConst node that references the entity representing the called
1554  * method.
1555  * The size arguments are rough measures of code size in IR nodes: leaf
1556  * methods smaller than leavesize and other methods smaller than size are
1557  * inlined, as long as the caller does not grow beyond maxsize nodes.
1558  */
1559 void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime) {
1560         inline_irg_env   *env;
1561         ir_graph         *irg;
1562         int              i, n_irgs;
1563         ir_graph         *rem;
1564         int              did_inline;
1565         wenv_t           wenv;
1566         call_entry       *entry, *tail;
1567         const call_entry *centry;
1568         pmap             *copied_graphs;
1569         pmap_entry       *pm_entry;
1570
1571         rem = current_ir_graph;
1572         obstack_init(&temp_obst);
1573
1574         /* a map for the copied graphs, used to inline recursive calls */
1575         copied_graphs = pmap_create();
1576
1577         /* extend all irgs by a temporary data structure for inlining. */
1578         n_irgs = get_irp_n_irgs();
1579         for (i = 0; i < n_irgs; ++i)
1580                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1581
1582         /* Precompute information in temporary data structure. */
1583         wenv.ignore_runtime = ignore_runtime;
1584         wenv.ignore_callers = 0;
1585         for (i = 0; i < n_irgs; ++i) {
1586                 ir_graph *irg = get_irp_irg(i);
1587
1588                 assert(get_irg_phase_state(irg) != phase_building);
1589                 free_callee_info(irg);
1590
1591                 assure_cf_loop(irg);
1592                 wenv.x = get_irg_link(irg);
1593                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1594         }
1595
1596         /* -- and now inline. -- */
1597
1598         /* Inline leaf functions repeatedly -- inlining might create new leaf functions. */
1599         do {
1600                 did_inline = 0;
1601
1602                 for (i = 0; i < n_irgs; ++i) {
1603                         ir_node *call;
1604                         int phiproj_computed = 0;
1605
1606                         current_ir_graph = get_irp_irg(i);
1607                         env = (inline_irg_env *)get_irg_link(current_ir_graph);
1608
1609                         tail = NULL;
1610                         for (entry = env->call_head; entry != NULL; entry = entry->next) {
1611                                 ir_graph *callee;
1612
1613                                 if (env->n_nodes > maxsize) break;
1614
1615                                 call   = entry->call;
1616                                 callee = entry->callee;
1617
1618                                 if (is_leave(callee) && (
1619                                     is_smaller(callee, leavesize) || (get_irg_inline_property(callee) >= irg_inline_forced))) {
1620                                         if (!phiproj_computed) {
1621                                                 phiproj_computed = 1;
1622                                                 collect_phiprojs(current_ir_graph);
1623                                         }
1624                                         did_inline = inline_method(call, callee);
1625
1626                                         if (did_inline) {
1627                                                 inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1628
1629                                                 /* was inlined, must be recomputed */
1630                                                 phiproj_computed = 0;
1631
1632                                                 /* Do some statistics */
1633                                                 env->got_inline = 1;
1634                                                 --env->n_call_nodes;
1635                                                 env->n_nodes += callee_env->n_nodes;
1636                                                 --callee_env->n_callers;
1637
1638                                                 /* remove this call from the list */
1639                                                 if (tail != NULL)
1640                                                         tail->next = entry->next;
1641                                                 else
1642                                                         env->call_head = entry->next;
1643                                                 continue;
1644                                         }
1645                                 }
1646                                 tail = entry;
1647                         }
1648                         env->call_tail = tail;
1649                 }
1650         } while (did_inline);
1651
1652         /* inline other small functions. */
1653         for (i = 0; i < n_irgs; ++i) {
1654                 ir_node *call;
1655                 int phiproj_computed = 0;
1656
1657                 current_ir_graph = get_irp_irg(i);
1658                 env = (inline_irg_env *)get_irg_link(current_ir_graph);
1659
1660                 /* note that the list of possible calls is updated during the process */
1661                 tail = NULL;
1662                 for (entry = env->call_head; entry != NULL; entry = entry->next) {
1663                         ir_graph   *callee;
1664                         pmap_entry *e;
1665
1666                         call   = entry->call;
1667                         callee = entry->callee;
1668
1669                         e = pmap_find(copied_graphs, callee);
1670                         if (e != NULL) {
1671                                 /*
1672                                  * Remap callee if we have a copy.
1673                                  * FIXME: Should we do this only for recursive Calls ?
1674                                  */
1675                                 callee = e->value;
1676                         }
1677
1678                         if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) ||    /* small function */
1679                                 (get_irg_inline_property(callee) >= irg_inline_forced))) {
1680                                 if (current_ir_graph == callee) {
1681                                         /*
1682                                          * Recursive call: we cannot directly inline because we cannot walk
1683                                          * the graph and change it. So we have to make a copy of the graph
1684                                          * first.
1685                                          */
1686
1687                                         inline_irg_env *callee_env;
1688                                         ir_graph       *copy;
1689
1690                                         /*
1691                                          * No copy yet, create one.
1692                                          * Note that recursive methods are never leaves, so it is sufficient
1693                                          * to test this condition here.
1694                                          */
1695                                         copy = create_irg_copy(callee);
1696
1697                                         /* create_irg_copy() destroys the Proj links, recompute them */
1698                                         phiproj_computed = 0;
1699
1700                                         /* allocate new environment */
1701                                         callee_env = alloc_inline_irg_env();
1702                                         set_irg_link(copy, callee_env);
1703
1704                                         assure_cf_loop(copy);
1705                                         wenv.x              = callee_env;
1706                                         wenv.ignore_callers = 1;
1707                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1708
1709                                         /*
1710                                          * Enter the entity of the original graph. This is needed
1711                                          * for inline_method(). However, note that ent->irg still points
1712                                          * to callee, NOT to copy.
1713                                          */
1714                                         set_irg_entity(copy, get_irg_entity(callee));
1715
1716                                         pmap_insert(copied_graphs, callee, copy);
1717                                         callee = copy;
1718
1719                                         /* we have only one caller: the original graph */
1720                                         callee_env->n_callers      = 1;
1721                                         callee_env->n_callers_orig = 1;
1722                                 }
1723                                 if (! phiproj_computed) {
1724                                         phiproj_computed = 1;
1725                                         collect_phiprojs(current_ir_graph);
1726                                 }
1727                                 did_inline = inline_method(call, callee);
1728                                 if (did_inline) {
1729                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1730
1731                                         /* was inlined, must be recomputed */
1732                                         phiproj_computed = 0;
1733
1734                                         /* callee was inlined. Append its call list. */
1735                                         env->got_inline = 1;
1736                                         --env->n_call_nodes;
1737                                         append_call_list(env, callee_env->call_head);
1738                                         env->n_call_nodes += callee_env->n_call_nodes;
1739                                         env->n_nodes += callee_env->n_nodes;
1740                                         --callee_env->n_callers;
1741
1742                                         /* after we have inlined callee, all called methods inside callee
1743                                            are now called once more */
1744                                         for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
1745                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1746                                                 ++penv->n_callers;
1747                                         }
1748
1749                                         /* remove this call from the list */
1750                                         if (tail != NULL)
1751                                                 tail->next = entry->next;
1752                                         else
1753                                                 env->call_head = entry->next;
1754                                         continue;
1755                                 }
1756                         }
1757                         tail = entry;
1758                 }
1759                 env->call_tail = tail;
1760         }
1761
1762         for (i = 0; i < n_irgs; ++i) {
1763                 irg = get_irp_irg(i);
1764                 env = (inline_irg_env *)get_irg_link(irg);
1765
1766                 if (env->got_inline) {
1767                         optimize_graph_df(irg);
1768                         optimize_cf(irg);
1769                 }
1770                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1771                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1772                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1773                         env->n_callers_orig, env->n_callers,
1774                         get_entity_name(get_irg_entity(irg))));
1775                 }
1776         }
1777
1778         /* kill the copied graphs: we don't need them anymore */
1779         foreach_pmap(copied_graphs, pm_entry) {
1780                 ir_graph *copy = pm_entry->value;
1781
1782                 /* reset the entity, otherwise it will be deleted in the next step ... */
1783                 set_irg_entity(copy, NULL);
1784                 free_ir_graph(copy);
1785         }
1786         pmap_destroy(copied_graphs);
1787
1788         obstack_free(&temp_obst, NULL);
1789         current_ir_graph = rem;
1790 }
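
/*
 * Minimal usage sketch (illustrative only): a single call inlines leaf
 * functions program-wide.  The arguments are maxsize, leavesize, size and
 * ignore_runtime; the values below are arbitrary examples, not
 * recommendations.
 *
 *     void example_inline_leaf_functions(void) {
 *         inline_leave_functions(500, 80, 30, 0);
 *     }
 */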
1791
1792 /**
1793  * Calculate the weight of an address argument, i.e. how much of its usage could be optimized away by scalar replacement; returns 0 if an unsupported use is found.
1794  */
1795 static unsigned calc_method_local_weight(ir_node *arg) {
1796         int      i, j, k;
1797         unsigned v, weight = 0;
1798
1799         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1800                 ir_node *succ = get_irn_out(arg, i);
1801
1802                 switch (get_irn_opcode(succ)) {
1803                 case iro_Load:
1804                 case iro_Store:
1805                         /* Loads and Stores can be removed */
1806                         weight += 3;
1807                         break;
1808                 case iro_Sel:
1809                         /* check if all Sel indexes are constant */
1810                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1811                                 ir_node *idx = get_Sel_index(succ, j);
1812                                 if (! is_Const(idx))
1813                                         return 0;
1814                         }
1815                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1816                            some unsupported node. */
1817                         v = calc_method_local_weight(succ);
1818                         if (v == 0)
1819                                 return 0;
1820                         /* we can kill one Sel with constant indexes, this is cheap */
1821                         weight += v + 1;
1822                         break;
1823                 case iro_Id:
1824                         /* when looking backward we might find Id nodes */
1825                         weight += calc_method_local_weight(succ);
1826                         break;
1827                 case iro_Tuple:
1828                         /* unoptimized tuple */
1829                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1830                                 ir_node *pred = get_Tuple_pred(succ, j);
1831                                 if (pred == arg) {
1832                                         /* look for Proj(j) */
1833                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1834                                                 ir_node *succ_succ = get_irn_out(succ, k);
1835                                                 if (is_Proj(succ_succ)) {
1836                                                         if (get_Proj_proj(succ_succ) == j) {
1837                                                                 /* found */
1838                                                                 weight += calc_method_local_weight(succ_succ);
1839                                                         }
1840                                                 } else {
1841                                                         /* this should NOT happen */
1842                                                         return 0;
1843                                                 }
1844                                         }
1845                                 }
1846                         }
1847                         break;
1848                 default:
1849                         /* any other node: unsupported yet or bad. */
1850                         return 0;
1851                 }
1852         }
1853         return weight;
1854 }
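
/*
 * Worked example (illustrative): if a caller passes the address of a local
 * variable to a callee like
 *
 *     static void set(int *p) { *p = 1; }
 *
 * the parameter p is used directly by a single Store, so it gets weight 3.
 * If the callee accessed a field through a Sel with constant indexes feeding
 * that Store instead, the Sel would add one more, giving 4.  Any unsupported
 * use (e.g. pointer arithmetic) drops the weight to 0.
 */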
1855
1856 /**
1857  * Calculate, for every parameter of irg, the weight for transmitting the address of a local variable at that position.
1858  */
1859 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1860         ir_entity *ent = get_irg_entity(irg);
1861         ir_type  *mtp;
1862         int      nparams, i, proj_nr;
1863         ir_node  *irg_args, *arg;
1864
1865         mtp      = get_entity_type(ent);
1866         nparams  = get_method_n_params(mtp);
1867
1868         /* allocate a new array; its presence also serves as the 'already analysed' flag */
1869         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1870
1871         /* If the method has no parameters, there is nothing to do. */
1872         if (nparams <= 0)
1873                 return;
1874
1875         assure_irg_outs(irg);
1876         irg_args = get_irg_args(irg);
1877         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1878                 arg     = get_irn_out(irg_args, i);
1879                 proj_nr = get_Proj_proj(arg);
1880                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1881         }
1882 }
1883
1884 /**
1885  * Calculate the benefice for transmitting a local variable address.
1886  * After inlining, the local variable might be transformed into an
1887  * SSA variable by scalar_replacement().
1888  */
1889 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1890         inline_irg_env *env = get_irg_link(callee);
1891
1892         if (env->local_weights != NULL) {
1893                 if (pos < ARR_LEN(env->local_weights))
1894                         return env->local_weights[pos];
1895                 return 0;
1896         }
1897
1898         analyze_irg_local_weights(env, callee);
1899
1900         if (pos < ARR_LEN(env->local_weights))
1901                 return env->local_weights[pos];
1902         return 0;
1903 }
1904
1905 /**
1906  * Calculate a benefice value for inlining the given call.
1907  *
1908  * @param call       the call node we have to inspect
1909  * @param callee     the called graph
1910  * @param local_adr  set to non-zero on return if the address of a local variable is
1911  *                   transmitted as a parameter
1912  */
1913 static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local_adr) {
1914         ir_entity *ent = get_irg_entity(callee);
1915         ir_node   *frame_ptr;
1916         ir_type   *mtp;
1917         int       weight = 0;
1918         int       i, n_params, all_const;
1919         unsigned  cc, v;
1920
1921         inline_irg_env *curr_env, *callee_env;
1922
1923         if (get_entity_additional_properties(ent) & mtp_property_noreturn) {
1924                 /* do NOT inline noreturn calls */
1925                 return INT_MIN;
1926         }
1927
1928         /* costs for every passed parameter */
1929         n_params = get_Call_n_params(call);
1930         mtp      = get_entity_type(ent);
1931         cc       = get_method_calling_convention(mtp);
1932         if (cc & cc_reg_param) {
1933                 /* register parameter, smaller costs for register parameters */
1934                 int max_regs = cc & ~cc_bits;
1935
1936                 if (max_regs < n_params)
1937                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1938                 else
1939                         weight += n_params * 2;
1940         } else {
1941                 /* parameters are passed on the stack */
1942                 weight += 5 * n_params;
1943         }
1944
1945         /* constant parameters improve the benefice */
1946         frame_ptr = get_irg_frame(current_ir_graph);
1947         all_const = 1;
1948         for (i = 0; i < n_params; ++i) {
1949                 ir_node *param = get_Call_param(call, i);
1950
1951                 if (is_Const(param)) {
1952                         weight += get_method_param_weight(ent, i);
1953                 } else {
1954                         all_const = 0;
1955                         if (is_SymConst(param))
1956                                 weight += get_method_param_weight(ent, i);
1957                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1958                                 /*
1959                                  * An address of a local variable is transmitted. After inlining,
1960                                  * scalar_replacement might be able to remove the local variable,
1961                                  * so honor this.
1962                                  */
1963                                 v = get_method_local_adress_weight(callee, i);
1964                                 weight += v;
1965                                 if (v > 0)
1966                                         *local_adr = 1;
1967                         }
1968                 }
1969         }
1970
1971         callee_env = get_irg_link(callee);
1972         if (get_entity_visibility(ent) == visibility_local &&
1973             callee_env->n_callers_orig == 1 &&
1974             callee != current_ir_graph) {
1975                 /* we are the only caller, give big bonus */
1976                 weight += 5000;
1977         }
1978
1979         /* do not inline big functions */
1980         weight -= callee_env->n_nodes;
1981
1982         /* reduce the benefice if the current function is already big */
1983         curr_env = get_irg_link(current_ir_graph);
1984         weight -= curr_env->n_nodes / 50;
1985
1986         /* give a bonus for functions with one block */
1987         if (callee_env->n_blocks == 1)
1988                 weight = weight * 3 / 2;
1989
1990         /* and one for small non-recursive functions: we want them to be inlined in almost every case */
1991         else if (callee_env->n_nodes < 20 && !callee_env->recursive)
1992                 weight += 5000;
1993
1994         /* and finally for leaf functions: they do not increase the register pressure
1995            because of callee-save registers */
1996         else if (callee_env->n_call_nodes == 0)
1997                 weight += 25;
1998
1999         /*
2000          * Reduce the weight for recursive functions IFF not all arguments are const.
2001          * Inlining recursive functions is rarely good.
2002          */
2003         if (callee_env->recursive && !all_const)
2004                 weight -= 500;
2005
2006         /*
2007          * All arguments constant is probably a good sign, give an extra bonus
2008          */
2009         if (all_const)
2010                 weight += 100;
2011
2012         return weight;
2013 }
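
/*
 * Worked example (illustrative, with made-up numbers): a call passing two
 * non-constant parameters on the stack starts with weight 2 * 5 = 10.  For a
 * callee of 120 nodes this drops to 10 - 120 = -110; a caller of 500 nodes
 * subtracts another 500 / 50 = 10, giving -120.  If the callee has several
 * blocks, at least 20 nodes and contains calls, none of the bonuses apply, so
 * the benefice is -120 and the call is inlined only for inline_threshold
 * values above 120 (or if inlining is forced).
 */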
2014
2015 /**
2016  * Heuristic inliner. Calculates a benefice value for every call and inlines
2017  * those calls whose benefice exceeds -inline_threshold (or whose inlining is forced).
2018  */
2019 void inline_functions(int maxsize, int inline_threshold) {
2020         inline_irg_env   *env;
2021         int              i, n_irgs;
2022         ir_graph         *rem;
2023         int              did_inline;
2024         wenv_t           wenv;
2025         call_entry       *curr_call, **last_call;
2026         const call_entry *centry;
2027         pmap             *copied_graphs;
2028         pmap_entry       *pm_entry;
2029
2030         rem = current_ir_graph;
2031         obstack_init(&temp_obst);
2032
2033         /* a map for the copied graphs, used to inline recursive calls */
2034         copied_graphs = pmap_create();
2035
2036         /* extend all irgs by a temporary data structure for inlining. */
2037         n_irgs = get_irp_n_irgs();
2038         for (i = 0; i < n_irgs; ++i)
2039                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
2040
2041         /* Precompute information in temporary data structure. */
2042         wenv.ignore_runtime = 0;
2043         wenv.ignore_callers = 0;
2044         for (i = 0; i < n_irgs; ++i) {
2045                 ir_graph *irg = get_irp_irg(i);
2046
2047                 assert(get_irg_phase_state(irg) != phase_building);
2048                 free_callee_info(irg);
2049
2050                 wenv.x         = get_irg_link(irg);
2051                 wenv.last_call = NULL;
2052                 assure_cf_loop(irg);
2053                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2054         }
2055
2056         /* -- and now inline. -- */
2057         for (i = 0; i < n_irgs; ++i) {
2058                 int      phiproj_computed = 0;
2059                 ir_node  *call;
2060                 ir_graph *irg = get_irp_irg(i);
2061
2062                 current_ir_graph = irg;
2063                 env = get_irg_link(irg);
2064
2065                 /* note that the list of possible calls is updated during the process */
2066                 last_call = &env->call_head;
2067                 for (curr_call = env->call_head; curr_call != NULL;) {
2068                         ir_graph   *callee;
2069                         pmap_entry *e;
2070                         int        benefice;
2071                         unsigned   local_adr;
2072
2073                         if (env->n_nodes > maxsize) break;
2074
2075                         call   = curr_call->call;
2076                         callee = curr_call->callee;
2077
2078                         e = pmap_find(copied_graphs, callee);
2079                         if (e != NULL) {
2080                                 /*
2081                                 * Remap callee if we have a copy.
2082                                 * FIXME: Should we do this only for recursive Calls ?
2083                                 */
2084                                 callee = e->value;
2085                         }
2086
2087                         /* calculate the benefice on the original call to prevent excessive inlining */
2088                         local_adr = 0;
2089                         benefice = calc_inline_benefice(call, callee, &local_adr);
2090                         DB((dbg, LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
2091
2092                         if (benefice > -inline_threshold ||
2093                                 (get_irg_inline_property(callee) >= irg_inline_forced)) {
2094                                 if (current_ir_graph == callee) {
2095                                         /*
2096                                          * Recursive call: we cannot directly inline because we cannot walk
2097                                          * the graph and change it. So we have to make a copy of the graph
2098                                          * first.
2099                                          */
2100
2101                                         inline_irg_env *callee_env;
2102                                         ir_graph       *copy;
2103
2104                                         /*
2105                                          * No copy yet, create one.
2106                                          * Note that recursive methods are never leaves, so it is sufficient
2107                                          * to test this condition here.
2108                                          */
2109                                         copy = create_irg_copy(callee);
2110
2111                                         /* create_irg_copy() destroys the Proj links, recompute them */
2112                                         phiproj_computed = 0;
2113
2114                                         /* allocate new environment */
2115                                         callee_env = alloc_inline_irg_env();
2116                                         set_irg_link(copy, callee_env);
2117
2118                                         assure_cf_loop(copy);
2119                                         wenv.x              = callee_env;
2120                                         wenv.ignore_callers = 1;
2121                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2122
2123                                         /*
2124                                          * Enter the entity of the original graph. This is needed
2125                                          * for inline_method(). However, note that ent->irg still points
2126                                          * to callee, NOT to copy.
2127                                          */
2128                                         set_irg_entity(copy, get_irg_entity(callee));
2129
2130                                         pmap_insert(copied_graphs, callee, copy);
2131                                         callee = copy;
2132
2133                                         /* we have only one caller: the original graph */
2134                                         callee_env->n_callers      = 1;
2135                                         callee_env->n_callers_orig = 1;
2136                                 }
2137                                 if (! phiproj_computed) {
2138                                         phiproj_computed = 1;
2139                                         collect_phiprojs(current_ir_graph);
2140                                 }
2141                                 did_inline = inline_method(call, callee);
2142                                 if (did_inline) {
2143                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
2144
2145                                         /* was inlined, must be recomputed */
2146                                         phiproj_computed = 0;
2147
2148                                         /* after we have inlined callee, all called methods inside callee
2149                                         are now called once more */
2150                                         for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
2151                                                 inline_irg_env *penv = get_irg_link(centry->callee);
2152                                                 ++penv->n_callers;
2153                                         }
2154
2155                                         /* callee was inlined. Append its call list. */
2156                                         env->got_inline = 1;
2157                                         if (local_adr)
2158                                                 env->local_vars = 1;
2159                                         --env->n_call_nodes;
2160                                         curr_call = replace_entry_by_call_list(curr_call, callee_env->call_head);
2161                                         env->n_call_nodes += callee_env->n_call_nodes;
2162                                         env->n_nodes += callee_env->n_nodes;
2163                                         --callee_env->n_callers;
2164
2165                                         /* the inlined call entry was replaced by the callee's call list; re-link it */
2166                                         *last_call = curr_call;
2167                                         continue;
2168                                 }
2169                         }
2170                         last_call = &curr_call->next;
2171                         curr_call = curr_call->next;
2172                 }
2173
2174                 if (env->got_inline) {
2175                         /* this irg got calls inlined: optimize it */
2176
2177                         /* scalar replacement does not work well with Tuple nodes, so optimize them away */
2178                         optimize_graph_df(irg);
2179
2180                         if (env->local_vars) {
2181                                 if (scalar_replacement_opt(irg)) {
2182                                         optimize_graph_df(irg);
2183                                 }
2184                         }
2185                         optimize_cf(irg);
2186                 }
2187                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2188                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2189                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2190                         env->n_callers_orig, env->n_callers,
2191                         get_entity_name(get_irg_entity(irg))));
2192                 }
2193         }
2194
2195         /* kill the copied graphs: we don't need them anymore */
2196         foreach_pmap(copied_graphs, pm_entry) {
2197                 ir_graph *copy = pm_entry->value;
2198
2199                 /* reset the entity, otherwise it will be deleted in the next step ... */
2200                 set_irg_entity(copy, NULL);
2201                 free_ir_graph(copy);
2202         }
2203         pmap_destroy(copied_graphs);
2204
2205         obstack_free(&temp_obst, NULL);
2206         current_ir_graph = rem;
2207 }
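
/*
 * Minimal usage sketch (illustrative only): the heuristic inliner is driven by
 * a caller growth limit (maxsize) and a benefice threshold.  The values below
 * are arbitrary examples, not recommendations.
 *
 *     void example_run_heuristic_inliner(void) {
 *         inline_functions(750, 150);
 *     }
 */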
2208
2209 void firm_init_inline(void) {
2210         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2211 }