- implemented mtp_property_weak
[libfirm] / ir / opt / opt_inline.c
1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #ifdef HAVE_CONFIG_H
27 # include "config.h"
28 #endif
29
30 #include <limits.h>
31 #include <assert.h>
32
33 #include "irnode_t.h"
34 #include "irgraph_t.h"
35 #include "irprog_t.h"
36
37 #include "iroptimize.h"
38 #include "ircons_t.h"
39 #include "iropt_t.h"
40 #include "irgopt.h"
41 #include "irgmod.h"
42 #include "irgwalk.h"
43
44 #include "adt/array.h"
45 #include "adt/pset.h"
46 #include "adt/pmap.h"
47 #include "adt/pdeq.h"
48 #include "adt/xmalloc.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63 #include "iropt_dbg.h"
64
65 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
66
67 /*------------------------------------------------------------------*/
68 /* Routines for dead node elimination / copying garbage collection  */
69 /* of the obstack.                                                  */
70 /*------------------------------------------------------------------*/
71
72 /**
73  * Remember the new node in the old node by using a field all nodes have.
74  */
75 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
76
77 /**
78  * Get this new node, before the old node is forgotten.
79  */
80 #define get_new_node(oldn) get_irn_link(oldn)
81
82 /**
83  * Check if a new node was set.
84  */
85 #define has_new_node(n) (get_new_node(n) != NULL)
86
87 /**
88  * We use the block_visited flag to mark that we have computed the
89  * number of useful predecessors for this block.
90  * Further we encode the new arity in this flag in the old blocks.
91  * Remembering the arity is useful, as it saves a lot of pointer
92  * accesses.  This function is called for all Phi and Block nodes
93  * in a Block.
94  */
95 static INLINE int
96 compute_new_arity(ir_node *b) {
97         int i, res, irn_arity;
98         int irg_v, block_v;
99
100         irg_v = get_irg_block_visited(current_ir_graph);
101         block_v = get_Block_block_visited(b);
102         if (block_v >= irg_v) {
103                 /* we computed the number of preds for this block and saved it in the
104                    block_v flag */
105                 return block_v - irg_v;
106         } else {
107                 /* compute the number of good predecessors */
108                 res = irn_arity = get_irn_arity(b);
109                 for (i = 0; i < irn_arity; i++)
110                         if (is_Bad(get_irn_n(b, i))) res--;
111                 /* save it in the flag. */
112                 set_Block_block_visited(b, irg_v + res);
113                 return res;
114         }
115 }
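
/* Example: if get_irg_block_visited() is 100 and a block has three non-Bad
 * predecessors, the first call stores 103 in the block's visited flag and
 * returns 3; every later call simply returns 103 - 100 = 3. */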
116
117 /**
118  * Copies the node to the new obstack. The Ins of the new node point to
119  * the predecessors on the old obstack.  For block/phi nodes not all
120  * predecessors might be copied.  n->link points to the new node.
121  * For Phi and Block nodes the function allocates in-arrays with an arity
122  * only for useful predecessors.  The arity is determined by counting
123  * the non-bad predecessors of the block.
124  *
125  * @param n    The node to be copied
126  * @param env  if non-NULL, the node number attribute will be copied to the new node
127  *
128  * Note: Also used for loop unrolling.
129  */
130 static void copy_node(ir_node *n, void *env) {
131         ir_node *nn, *block;
132         int new_arity;
133         ir_op *op = get_irn_op(n);
134         (void) env;
135
136         if (op == op_Bad) {
137                 /* node copied already */
138                 return;
139         } else if (op == op_Block) {
140                 block = NULL;
141                 new_arity = compute_new_arity(n);
142                 n->attr.block.graph_arr = NULL;
143         } else {
144                 block = get_nodes_block(n);
145                 if (op == op_Phi) {
146                         new_arity = compute_new_arity(block);
147                 } else {
148                         new_arity = get_irn_arity(n);
149                 }
150         }
151         nn = new_ir_node(get_irn_dbg_info(n),
152                 current_ir_graph,
153                 block,
154                 op,
155                 get_irn_mode(n),
156                 new_arity,
157                 get_irn_in(n) + 1);
158         /* Copy the attributes.  These might point to additional data.  If this
159            was allocated on the old obstack the pointers now are dangling.  This
160            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
161         if (op == op_Block) {
162                 /* we cannot allow blocks WITHOUT macroblock input */
163                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
164         }
165         copy_node_attr(n, nn);
166
167 #ifdef DEBUG_libfirm
168         {
169                 int copy_node_nr = env != NULL;
170                 if (copy_node_nr) {
171                         /* for easier debugging, we want to copy the node numbers too */
172                         nn->node_nr = n->node_nr;
173                 }
174         }
175 #endif
176
177         set_new_node(n, nn);
178         hook_dead_node_elim_subst(current_ir_graph, n, nn);
179 }
180
181 /**
182  * Copies new predecessors of old node to new node remembered in link.
183  * Spare the Bad predecessors of Phi and Block nodes.
184  */
185 static void copy_preds(ir_node *n, void *env) {
186         ir_node *nn, *block;
187         int i, j, irn_arity;
188         (void) env;
189
190         nn = get_new_node(n);
191
192         if (is_Block(n)) {
193                 /* copy the macro block header */
194                 ir_node *mbh = get_Block_MacroBlock(n);
195
196                 if (mbh == n) {
197                         /* this block is a macroblock header */
198                         set_Block_MacroBlock(nn, nn);
199                 } else {
200                         /* get the macro block header */
201                         ir_node *nmbh = get_new_node(mbh);
202                         assert(nmbh != NULL);
203                         set_Block_MacroBlock(nn, nmbh);
204                 }
205
206                 /* Don't copy Bad nodes. */
207                 j = 0;
208                 irn_arity = get_irn_arity(n);
209                 for (i = 0; i < irn_arity; i++) {
210                         if (! is_Bad(get_irn_n(n, i))) {
211                                 ir_node *pred = get_irn_n(n, i);
212                                 set_irn_n(nn, j, get_new_node(pred));
213                                 j++;
214                         }
215                 }
216                 /* repair the block visited flag from above misuse. Repair it in both
217                    graphs so that the old one can still be used. */
218                 set_Block_block_visited(nn, 0);
219                 set_Block_block_visited(n, 0);
220                 /* Local optimization could not merge two subsequent blocks if the
221                    in array contained Bads.  Now it is possible.
222                    We do not call optimize_in_place() as it requires
223                    that the fields in ir_graph are set properly. */
224                 if ((get_opt_control_flow_straightening()) &&
225                         (get_Block_n_cfgpreds(nn) == 1) &&
226                         is_Jmp(get_Block_cfgpred(nn, 0))) {
227                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
228                         if (nn == old) {
229                                 /* The Jmp jumps into the block it is in -- a self cycle; replace the block by Bad. */
230                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
231                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
232                         } else {
233                                 exchange(nn, old);
234                         }
235                 }
236         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
237                 /* Don't copy a Phi input if the corresponding predecessor of the
238                    block is Bad.  The Block itself should not be Bad. */
239                 block = get_nodes_block(n);
240                 set_nodes_block(nn, get_new_node(block));
241                 j = 0;
242                 irn_arity = get_irn_arity(n);
243                 for (i = 0; i < irn_arity; i++) {
244                         if (! is_Bad(get_irn_n(block, i))) {
245                                 ir_node *pred = get_irn_n(n, i);
246                                 set_irn_n(nn, j, get_new_node(pred));
247                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
248                                 j++;
249                         }
250                 }
251                 /* If the pre walker reached this Phi after the post walker visited the
252                    block, block_visited is > 0. */
253                 set_Block_block_visited(get_nodes_block(n), 0);
254                 /* Compacting the Phi's ins might generate Phis with only one
255                    predecessor. */
256                 if (get_irn_arity(nn) == 1)
257                         exchange(nn, get_irn_n(nn, 0));
258         } else {
259                 irn_arity = get_irn_arity(n);
260                 for (i = -1; i < irn_arity; i++)
261                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
262         }
263         /* Now the new node is complete.  We can add it to the hash table for CSE.
264            @@@ inlining aborts if we identify End. Why? */
265         if (!is_End(nn))
266                 add_identities(current_ir_graph->value_table, nn);
267 }
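
/* copy_node() and copy_preds() form the two passes of the walks in copy_graph():
 * the pre-pass allocates the copies and records them with set_new_node(), the
 * post-pass wires up the predecessors of the copies using those links. */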
268
269 /**
270  * Copies the graph recursively, compacts the keep-alives of the end node.
271  *
272  * @param irg           the graph to be copied
273  * @param copy_node_nr  If non-zero, the node number will be copied
274  */
275 static void copy_graph(ir_graph *irg, int copy_node_nr) {
276         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
277         ir_node *ka;      /* keep alive */
278         int i, irn_arity;
279         unsigned long vfl;
280
281         /* Some nodes must be copied by hand, sigh */
282         vfl = get_irg_visited(irg);
283         set_irg_visited(irg, vfl + 1);
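        /* The bump makes End, Bad and NoMem, which are marked by hand below,
           count as already visited when the walkers run; the counter is reset
           to vfl before each walk so all other nodes appear unvisited. */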
284
285         oe = get_irg_end(irg);
286         mark_irn_visited(oe);
287         /* copy the end node by hand, allocate dynamic in array! */
288         ne = new_ir_node(get_irn_dbg_info(oe),
289                 irg,
290                 NULL,
291                 op_End,
292                 mode_X,
293                 -1,
294                 NULL);
295         /* Copy the attributes.  Well, there might be some in the future... */
296         copy_node_attr(oe, ne);
297         set_new_node(oe, ne);
298
299         /* copy the Bad node */
300         ob = get_irg_bad(irg);
301         mark_irn_visited(ob);
302         nb = new_ir_node(get_irn_dbg_info(ob),
303                 irg,
304                 NULL,
305                 op_Bad,
306                 mode_T,
307                 0,
308                 NULL);
309         copy_node_attr(ob, nb);
310         set_new_node(ob, nb);
311
312         /* copy the NoMem node */
313         om = get_irg_no_mem(irg);
314         mark_irn_visited(om);
315         nm = new_ir_node(get_irn_dbg_info(om),
316                 irg,
317                 NULL,
318                 op_NoMem,
319                 mode_M,
320                 0,
321                 NULL);
322         copy_node_attr(om, nm);
323         set_new_node(om, nm);
324
325         /* copy the live nodes */
326         set_irg_visited(irg, vfl);
327         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
328
329         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
330
331         /* visit the anchors as well */
332         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
333                 ir_node *n = get_irg_anchor(irg, i);
334
335                 if (n && (get_irn_visited(n) <= vfl)) {
336                         set_irg_visited(irg, vfl);
337                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
338                 }
339         }
340
341         /* copy_preds for the end node ... */
342         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
343
344         /*- ... and now the keep alives. -*/
345         /* First pick the unmarked block nodes and walk them.  We must pick these
346            first, as otherwise we would overlook blocks reachable only from Phis. */
347         irn_arity = get_End_n_keepalives(oe);
348         for (i = 0; i < irn_arity; i++) {
349                 ka = get_End_keepalive(oe, i);
350                 if (is_Block(ka)) {
351                         if (get_irn_visited(ka) <= vfl) {
352                                 /* We must keep the block alive and copy everything reachable */
353                                 set_irg_visited(irg, vfl);
354                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
355                         }
356                         add_End_keepalive(ne, get_new_node(ka));
357                 }
358         }
359
360         /* Now pick other nodes.  Here we will keep all! */
361         irn_arity = get_End_n_keepalives(oe);
362         for (i = 0; i < irn_arity; i++) {
363                 ka = get_End_keepalive(oe, i);
364                 if (!is_Block(ka)) {
365                         if (get_irn_visited(ka) <= vfl) {
366                                 /* We didn't copy the node yet.  */
367                                 set_irg_visited(irg, vfl);
368                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
369                         }
370                         add_End_keepalive(ne, get_new_node(ka));
371                 }
372         }
373
374         /* start block sometimes only reached after keep alives */
375         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
376         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
377 }
378
379 /**
380  * Copies the graph reachable from current_ir_graph->end to the obstack
381  * in current_ir_graph and fixes the environment.
382  * Then fixes the fields in current_ir_graph containing nodes of the
383  * graph.
384  *
385  * @param copy_node_nr  If non-zero, the node number will be copied
386  */
387 static void
388 copy_graph_env(int copy_node_nr) {
389         ir_graph *irg = current_ir_graph;
390         ir_node *old_end, *new_anchor;
391         int i;
392
393         /* remove end_except and end_reg nodes */
394         old_end = get_irg_end(irg);
395         set_irg_end_except (irg, old_end);
396         set_irg_end_reg    (irg, old_end);
397
398         /* Not all nodes remembered in irg might be reachable
399            from the end node.  Assure their link is set to NULL, so that
400            we can test whether new nodes have been computed. */
401         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
402                 ir_node *n = get_irg_anchor(irg, i);
403                 if (n != NULL)
404                         set_new_node(n, NULL);
405         }
406         /* we use the block walk flag for removing Bads from Blocks ins. */
407         inc_irg_block_visited(irg);
408
409         /* copy the graph */
410         copy_graph(irg, copy_node_nr);
411
412         /* fix the anchor */
413         old_end    = get_irg_end(irg);
414         new_anchor = new_Anchor(irg);
415
416         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
417                 ir_node *n = get_irg_anchor(irg, i);
418                 if (n)
419                         set_irn_n(new_anchor, i, get_new_node(n));
420         }
421         free_End(old_end);
422         irg->anchor = new_anchor;
423
424         /* ensure the new anchor is placed in the endblock */
425         set_nodes_block(new_anchor, get_irg_end_block(irg));
426 }
427
428 /**
429  * Copies all reachable nodes to a new obstack.  Removes bad inputs
430  * from block nodes and the corresponding inputs from Phi nodes.
431  * Merges single exit blocks with single entry blocks and removes
432  * 1-input Phis.
433  * Adds all new nodes to a new hash table for CSE.  Does not
434  * perform CSE, so the hash table might contain common subexpressions.
435  */
436 void dead_node_elimination(ir_graph *irg) {
437         ir_graph *rem;
438 #ifdef INTERPROCEDURAL_VIEW
439         int rem_ipview = get_interprocedural_view();
440 #endif
441         struct obstack *graveyard_obst = NULL;
442         struct obstack *rebirth_obst   = NULL;
443
444         edges_deactivate(irg);
445
446         /* inform statistics that we started a dead-node elimination run */
447         hook_dead_node_elim(irg, 1);
448
449         /* Remember external state of current_ir_graph. */
450         rem = current_ir_graph;
451         current_ir_graph = irg;
452 #ifdef INTERPROCEDURAL_VIEW
453         set_interprocedural_view(0);
454 #endif
455
456         assert(get_irg_phase_state(irg) != phase_building);
457
458         /* Handle graph state */
459         free_callee_info(irg);
460         free_irg_outs(irg);
461         free_trouts();
462
463         /* @@@ so far we lose loops when copying */
464         free_loop_information(irg);
465
466         set_irg_doms_inconsistent(irg);
467
468         /* A quiet place, where the old obstack can rest in peace,
469            until it will be cremated. */
470         graveyard_obst = irg->obst;
471
472         /* A new obstack, where the reachable nodes will be copied to. */
473         rebirth_obst = xmalloc(sizeof(*rebirth_obst));
474         irg->obst = rebirth_obst;
475         obstack_init(irg->obst);
476         irg->last_node_idx = 0;
477
478         /* We also need a new value table for CSE */
479         del_identities(irg->value_table);
480         irg->value_table = new_identities();
481
482         /* Copy the graph from the old to the new obstack */
483         copy_graph_env(/*copy_node_nr=*/1);
484
485         /* Free memory from old unoptimized obstack */
486         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
487         xfree(graveyard_obst);            /* ... then free it.           */
488
489         /* inform statistics that the run is over */
490         hook_dead_node_elim(irg, 0);
491
492         current_ir_graph = rem;
493 #ifdef INTERPROCEDURAL_VIEW
494         set_interprocedural_view(rem_ipview);
495 #endif
496 }
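
/* Usage sketch (illustrative only): dead node elimination is typically run
 * after optimizations that leave many dead nodes on the obstack, e.g.
 *
 *     optimize_graph_df(irg);
 *     dead_node_elimination(irg);
 */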
497
498 /**
499  * Relink bad predecessors of a block and store the old in array in the
500  * link field. This function is called by relink_bad_predecessors().
501  * The array in the link field starts with the block operand at position 0.
502  * If the block has bad predecessors, a new in array without the bad preds
503  * is created; otherwise the in array is left untouched.
504  */
505 static void relink_bad_block_predecessors(ir_node *n, void *env) {
506         ir_node **new_in, *irn;
507         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
508         (void) env;
509
510         /* if link field of block is NULL, look for bad predecessors otherwise
511            this is already done */
512         if (is_Block(n) && get_irn_link(n) == NULL) {
513                 /* save old predecessors in link field (position 0 is the block operand)*/
514                 set_irn_link(n, get_irn_in(n));
515
516                 /* count predecessors without bad nodes */
517                 old_irn_arity = get_irn_arity(n);
518                 for (i = 0; i < old_irn_arity; i++)
519                         if (!is_Bad(get_irn_n(n, i)))
520                                 ++new_irn_arity;
521
522                 /* arity changing: set new predecessors without bad nodes */
523                 if (new_irn_arity < old_irn_arity) {
524                         /* Get new predecessor array. We do not resize the array, as we must
525                            keep the old one to update Phis. */
526                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
527
528                         /* set new predecessors in array */
529                         new_in[0] = NULL;
530                         new_irn_n = 1;
531                         for (i = 0; i < old_irn_arity; i++) {
532                                 irn = get_irn_n(n, i);
533                                 if (!is_Bad(irn)) {
534                                         new_in[new_irn_n] = irn;
535                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
536                                         ++new_irn_n;
537                                 }
538                         }
539                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
540                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
541                         n->in = new_in;
542                 } /* ir node has bad predecessors */
543         } /* Block is not relinked */
544 }
545
546 /**
547  * Relinks Bad predecessors of Blocks and Phis, called by the walker in
548  * remove_bad_predecessors(). If n is a Block, call
549  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
550  * function for the Phi's Block. If this block has bad predecessors, relink the
551  * preds of the Phi node as well.
552  */
553 static void relink_bad_predecessors(ir_node *n, void *env) {
554         ir_node *block, **old_in;
555         int i, old_irn_arity, new_irn_arity;
556
557         /* relink bad predecessors of a block */
558         if (is_Block(n))
559                 relink_bad_block_predecessors(n, env);
560
561         /* If Phi node relink its block and its predecessors */
562         if (is_Phi(n)) {
563                 /* Relink predecessors of phi's block */
564                 block = get_nodes_block(n);
565                 if (get_irn_link(block) == NULL)
566                         relink_bad_block_predecessors(block, env);
567
568                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
569                 old_irn_arity = ARR_LEN(old_in);
570
571                 /* Relink Phi predecessors if count of predecessors changed */
572                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
573                         /* set new predecessors in array
574                            n->in[0] remains the same block */
575                         new_irn_arity = 1;
576                         for (i = 1; i < old_irn_arity; i++)
577                                 if (!is_Bad(old_in[i])) {
578                                         n->in[new_irn_arity] = n->in[i];
579                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
580                                         ++new_irn_arity;
581                                 }
582
583                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
584                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
585                 }
586         } /* n is a Phi node */
587 }
588
589 /*
590  * Removes Bad predecessors from Blocks and the corresponding
591  * inputs to Phi nodes as in dead_node_elimination, but without
592  * copying the graph.
593  * The pre-walker sets the link field to NULL, the post-walker calls
594  * relink_bad_predecessors() (this function stores the old in array
595  * in the link field and sets a new in array if the arity of the
596  * predecessors changes).
597  */
598 void remove_bad_predecessors(ir_graph *irg) {
599         panic("Fix backedge handling first");
600         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
601 }
602
603
604 /*
605    __                      _  __ __
606   (_     __    o     _    | \/  |_
607   __)|_| | \_/ | \_/(/_   |_/\__|__
608
609   The following stuff implements a facility that automatically patches
610   registered ir_node pointers to the new node when a dead node elimination occurs.
611 */
612
613 struct _survive_dce_t {
614         struct obstack obst;
615         pmap *places;
616         pmap *new_places;
617         hook_entry_t dead_node_elim;
618         hook_entry_t dead_node_elim_subst;
619 };
620
621 typedef struct _survive_dce_list_t {
622         struct _survive_dce_list_t *next;
623         ir_node **place;
624 } survive_dce_list_t;
625
626 static void dead_node_hook(void *context, ir_graph *irg, int start) {
627         survive_dce_t *sd = context;
628         (void) irg;
629
630         /* Create a new map before the dead node elimination is performed. */
631         if (start) {
632                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
633         } else {
634                 /* Dead node elimination is over; the subst hook has already patched the registered places, so switch to the new map. */
635                 pmap_destroy(sd->places);
636                 sd->places     = sd->new_places;
637                 sd->new_places = NULL;
638         }
639 }
640
641 /**
642  * Hook called when dead node elimination replaces old by nw.
643  */
644 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
645         survive_dce_t *sd = context;
646         survive_dce_list_t *list = pmap_get(sd->places, old);
647         (void) irg;
648
649         /* If the node is to be patched back, write the new address to all registered locations. */
650         if (list) {
651                 survive_dce_list_t *p;
652
653                 for (p = list; p; p = p->next)
654                         *(p->place) = nw;
655
656                 pmap_insert(sd->new_places, nw, list);
657         }
658 }
659
660 /**
661  * Make a new Survive DCE environment.
662  */
663 survive_dce_t *new_survive_dce(void) {
664         survive_dce_t *res = xmalloc(sizeof(res[0]));
665         obstack_init(&res->obst);
666         res->places     = pmap_create();
667         res->new_places = NULL;
668
669         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
670         res->dead_node_elim.context                   = res;
671         res->dead_node_elim.next                      = NULL;
672
673         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
674         res->dead_node_elim_subst.context = res;
675         res->dead_node_elim_subst.next    = NULL;
676
677 #ifndef FIRM_ENABLE_HOOKS
678         assert(0 && "need hooks enabled");
679 #endif
680
681         register_hook(hook_dead_node_elim, &res->dead_node_elim);
682         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
683         return res;
684 }
685
686 /**
687  * Free a Survive DCE environment.
688  */
689 void free_survive_dce(survive_dce_t *sd) {
690         obstack_free(&sd->obst, NULL);
691         pmap_destroy(sd->places);
692         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
693         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
694         xfree(sd);
695 }
696
697 /**
698  * Register a node pointer to be patched upon DCE.
699  * When DCE occurs, the node pointer specified by @p place will be
700  * patched to the new address of the node it is pointing to.
701  *
702  * @param sd    The Survive DCE environment.
703  * @param place The address of the node pointer.
704  */
705 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
706         if (*place != NULL) {
707                 ir_node *irn      = *place;
708                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
709                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
710
711                 nw->next  = curr;
712                 nw->place = place;
713
714                 pmap_insert(sd->places, irn, nw);
715         }
716 }
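
/* Usage sketch (hypothetical client code; my_node and irg are placeholders):
 *
 *     survive_dce_t *sd = new_survive_dce();
 *     survive_dce_register_irn(sd, &my_node);
 *     dead_node_elimination(irg);
 *     free_survive_dce(sd);
 *
 * After dead_node_elimination() returns, my_node points to the copy of the
 * node it referenced before.
 */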
717
718 /*--------------------------------------------------------------------*/
719 /*  Functionality for inlining                                         */
720 /*--------------------------------------------------------------------*/
721
722 /**
723  * Copy a node for inlining.  Updates attributes that change when
724  * inlining but not during dead node elimination.
725  *
726  * Copies the node by calling copy_node() and then updates the entity if
727  * it is a local one.  env must be a pointer to the frame type of the
728  * inlined procedure. The new entities must be stored in the link fields
729  * of the old entities.
730  */
731 static void copy_node_inline(ir_node *n, void *env) {
732         ir_node *nn;
733         ir_type *frame_tp = (ir_type *)env;
734
735         copy_node(n, NULL);
736         if (is_Sel(n)) {
737                 nn = get_new_node (n);
738                 assert(is_Sel(nn));
739                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
740                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
741                 }
742         } else if (is_Block(n)) {
743                 nn = get_new_node (n);
744                 nn->attr.block.irg = current_ir_graph;
745         }
746 }
747
748 /**
749  * Copies the new predecessors of the old node and moves constants into
750  * the Start Block.
751  */
752 static void copy_preds_inline(ir_node *n, void *env) {
753         ir_node *nn;
754
755         copy_preds(n, env);
756         nn = skip_Id(get_new_node(n));
757         if (is_irn_constlike(nn)) {
758                 /* move Constants into the start block */
759                 set_nodes_block(nn, get_irg_start_block(current_ir_graph));
760
761                 n = identify_remember(current_ir_graph->value_table, nn);
762                 if (nn != n) {
763                         DBG_OPT_CSE(nn, n);
764                         exchange(nn, n);
765                 }
766         }
767 }
768
769 /**
770  * Walker: checks whether P_value_arg_base is used or a stack Alloc occurs.
771  */
772 static void find_addr(ir_node *node, void *env) {
773         int *allow_inline = env;
774         if (is_Proj(node) &&
775                         is_Start(get_Proj_pred(node)) &&
776                         get_Proj_proj(node) == pn_Start_P_value_arg_base) {
777                 *allow_inline = 0;
778         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
779                 /* From GCC:
780                  * Refuse to inline alloca call unless user explicitly forced so as this
781                  * may change program's memory overhead drastically when the function
782                  * using alloca is called in loop.  In GCC present in SPEC2000 inlining
783                  * into schedule_block cause it to require 2GB of ram instead of 256MB.
784                  *
785                  * Sadly, this is true for our implementation as well.
786                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
787                  * disables inlining of functions using VLA (which are completely safe).
788                  *
789                  * 2 Solutions:
790                  * - add a flag to the Alloc node for "real" alloca() calls
791                  * - add a new Stack-Restore node at the end of a function using alloca()
792                  */
793                 *allow_inline = 0;
794         }
795 }
796
797 /**
798  * Check if we can inline a given call.
799  * Currently, we cannot inline two cases:
800  * - call with compound arguments
801  * - graphs that take the address of a parameter
802  *
803  * These conditions are checked here.
804  */
805 static int can_inline(ir_node *call, ir_graph *called_graph) {
806         ir_type *call_type = get_Call_type(call);
807         int params, ress, i, res;
808         assert(is_Method_type(call_type));
809
810         params = get_method_n_params(call_type);
811         ress   = get_method_n_ress(call_type);
812
813         /* check parameters for compound arguments */
814         for (i = 0; i < params; ++i) {
815                 ir_type *p_type = get_method_param_type(call_type, i);
816
817                 if (is_compound_type(p_type))
818                         return 0;
819         }
820
821         /* check results for compound return types */
822         for (i = 0; i < ress; ++i) {
823                 ir_type *r_type = get_method_res_type(call_type, i);
824
825                 if (is_compound_type(r_type))
826                         return 0;
827         }
828
829         res = 1;
830         irg_walk_graph(called_graph, find_addr, NULL, &res);
831
832         return res;
833 }
834
835 enum exc_mode {
836         exc_handler    = 0, /**< There is a handler. */
837         exc_to_end     = 1, /**< Branches to End. */
838         exc_no_handler = 2  /**< Exception handling not represented. */
839 };
840
841 /* Inlines a method at the given call site. */
842 int inline_method(ir_node *call, ir_graph *called_graph) {
843         ir_node             *pre_call;
844         ir_node             *post_call, *post_bl;
845         ir_node             *in[pn_Start_max];
846         ir_node             *end, *end_bl, *block;
847         ir_node             **res_pred;
848         ir_node             **cf_pred;
849         ir_node             **args_in;
850         ir_node             *ret, *phi;
851         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
852         enum exc_mode       exc_handling;
853         ir_type             *called_frame, *curr_frame, *mtp, *ctp;
854         ir_entity           *ent;
855         ir_graph            *rem, *irg;
856         irg_inline_property prop = get_irg_inline_property(called_graph);
857         unsigned long       visited;
858
859         if (prop == irg_inline_forbidden)
860                 return 0;
861
862         ent = get_irg_entity(called_graph);
863
864         mtp = get_entity_type(ent);
865         ctp = get_Call_type(call);
866         if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
867                 /* this is a bad feature of C: without a prototype, we can call a function with fewer
868                 parameters than it expects. Currently we do not support this, although it would be
869                 possible to pass Unknown for the missing arguments. */
870                 return 0;
871         }
872
873         /* Argh, compiling C has some bad consequences:
874            the call type AND the method type might be different.
875            It is implementation dependent what happens in that case.
876            We support inlining, if the bit size of the types matches AND
877            the same arithmetic is used. */
878         n_params = get_method_n_params(mtp);
879         for (i = n_params - 1; i >= 0; --i) {
880                 ir_type *param_tp = get_method_param_type(mtp, i);
881                 ir_type *arg_tp   = get_method_param_type(ctp, i);
882
883                 if (param_tp != arg_tp) {
884                         ir_mode *pmode = get_type_mode(param_tp);
885                         ir_mode *amode = get_type_mode(arg_tp);
886
887                         if (pmode == NULL || amode == NULL)
888                                 return 0;
889                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
890                                 return 0;
891                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
892                                 return 0;
893                         /* otherwise we can simply "reinterpret" the bits */
894                 }
895         }
896
897         irg = get_irn_irg(call);
898
899         /*
900          * We cannot inline a recursive call. The graph must be copied before
901          * the call to inline_method() using create_irg_copy().
902          */
903         if (called_graph == irg)
904                 return 0;
905
906         /*
907          * currently, we cannot inline two cases:
908          * - call with compound arguments
909          * - graphs that take the address of a parameter
910          */
911         if (! can_inline(call, called_graph))
912                 return 0;
913
914         rem = current_ir_graph;
915         current_ir_graph = irg;
916
917         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
918
919         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
920         rem_opt = get_opt_optimize();
921         set_optimize(0);
922
923         /* Handle graph state */
924         assert(get_irg_phase_state(irg) != phase_building);
925         assert(get_irg_pinned(irg) == op_pin_state_pinned);
926         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
927         set_irg_outs_inconsistent(irg);
928         set_irg_extblk_inconsistent(irg);
929         set_irg_doms_inconsistent(irg);
930         set_irg_loopinfo_inconsistent(irg);
931         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
932
933         /* -- Check preconditions -- */
934         assert(is_Call(call));
935
936         /* here we know we WILL inline, so inform the statistics */
937         hook_inline(call, called_graph);
938
939         /* -- Decide how to handle exception control flow: Is there a handler
940            for the Call node, or do we branch directly to End on an exception?
941            exc_handling:
942            0 There is a handler.
943            1 Branches to End.
944            2 Exception handling not represented in Firm. -- */
945         {
946                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
947                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
948                         long proj_nr = get_Proj_proj(proj);
949                         if (proj_nr == pn_Call_X_except) Xproj = proj;
950                         if (proj_nr == pn_Call_M_except) Mproj = proj;
951                 }
952                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
953                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
954                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
955         }
956
957         /* create the argument tuple */
958         NEW_ARR_A(ir_node *, args_in, n_params);
959
960         block = get_nodes_block(call);
961         for (i = n_params - 1; i >= 0; --i) {
962                 ir_node *arg      = get_Call_param(call, i);
963                 ir_type *param_tp = get_method_param_type(mtp, i);
964                 ir_mode *mode     = get_type_mode(param_tp);
965
966                 if (mode != get_irn_mode(arg)) {
967                         arg = new_r_Conv(irg, block, arg, mode);
968                 }
969                 args_in[i] = arg;
970         }
971
972         /* --
973            Pre_call collects the start values of the procedure and later replaces the Start node of the called graph.
974            Post_call is the old Call node and collects the results of the called
975            graph. Both will end up being a tuple.  -- */
976         post_bl = get_nodes_block(call);
977         set_irg_current_block(irg, post_bl);
978         /* XxMxPxPxPxT of Start + parameter of Call */
979         in[pn_Start_X_initial_exec]   = new_Jmp();
980         in[pn_Start_M]                = get_Call_mem(call);
981         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
982         in[pn_Start_P_tls]            = get_irg_tls(irg);
983         in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
984         /* in[pn_Start_P_value_arg_base] = ??? */
985         assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
986         pre_call = new_Tuple(pn_Start_max - 1, in);
987         post_call = call;
988
989         /* --
990            The new block gets the ins of the old block, pre_call and all its
991            predecessors and all Phi nodes. -- */
992         part_block(pre_call);
993
994         /* -- Prepare state for dead node elimination -- */
995         /* Visited flags in calling irg must be >= flag in called irg.
996            Else walker and arity computation will not work. */
997         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
998                 set_irg_visited(irg, get_irg_visited(called_graph) + 1);
999         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
1000                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
1001         visited = get_irg_visited(irg);
1002
1003         /* Set pre_call as new Start node in the link field of the Start node of
1004            the called graph and pre_call's block as new block for the Start block
1005            of the called graph.
1006            Further mark these nodes so that they are not visited by the
1007            copying. */
1008         set_irn_link(get_irg_start(called_graph), pre_call);
1009         set_irn_visited(get_irg_start(called_graph), visited);
1010         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
1011         set_irn_visited(get_irg_start_block(called_graph), visited);
1012
1013         set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
1014         set_irn_visited(get_irg_bad(called_graph), visited);
1015
1016         set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
1017         set_irn_visited(get_irg_no_mem(called_graph), visited);
1018
1019         /* Initialize for compaction of in arrays */
1020         inc_irg_block_visited(irg);
1021
1022         /* -- Replicate local entities of the called_graph -- */
1023         /* copy the entities. */
1024         called_frame = get_irg_frame_type(called_graph);
1025         curr_frame   = get_irg_frame_type(irg);
1026         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
1027                 ir_entity *new_ent, *old_ent;
1028                 old_ent = get_class_member(called_frame, i);
1029                 new_ent = copy_entity_own(old_ent, curr_frame);
1030                 set_entity_link(old_ent, new_ent);
1031         }
1032
1033         /* visited is now greater than that of the called graph.  With this trick
1034            visited will remain unchanged, so that an outer walker (e.g., one searching
1035            for call nodes to inline and calling this function) will not visit the inlined nodes. */
1036         set_irg_visited(irg, get_irg_visited(irg)-1);
1037
1038         /* -- Performing dead node elimination inlines the graph -- */
1039         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
1040            entities. */
1041         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
1042                  get_irg_frame_type(called_graph));
1043
1044         /* Repair called_graph */
1045         set_irg_visited(called_graph, get_irg_visited(irg));
1046         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
1047         set_Block_block_visited(get_irg_start_block(called_graph), 0);
1048
1049         /* -- Merge the end of the inlined procedure with the call site -- */
1050         /* We will turn the old Call node into a Tuple with the following
1051            predecessors:
1052            -1:  Block of Tuple.
1053            0: Phi of all Memories of Return statements.
1054            1: Jmp from new Block that merges the control flow from all exception
1055            predecessors of the old end block.
1056            2: Tuple of all arguments.
1057            3: Phi of Exception memories.
1058            In case the old Call directly branches to End on an exception we don't
1059            need the block merging all exceptions nor the Phi of the exception
1060            memories.
1061         */
1062
1063         /* -- Precompute some values -- */
1064         end_bl = get_new_node(get_irg_end_block(called_graph));
1065         end = get_new_node(get_irg_end(called_graph));
1066         arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
1067         n_res = get_method_n_ress(get_Call_type(call));
1068
1069         res_pred = xmalloc(n_res * sizeof(*res_pred));
1070         cf_pred  = xmalloc(arity * sizeof(*cf_pred));
1071
1072         set_irg_current_block(irg, post_bl); /* just to make sure */
1073
1074         /* -- preserve the keep-alives of the inlined graph -- */
1075         irn_arity = get_irn_arity(end);
1076         for (i = 0; i < irn_arity; i++) {
1077                 ir_node *ka = get_End_keepalive(end, i);
1078                 if (! is_Bad(ka))
1079                         add_End_keepalive(get_irg_end(irg), ka);
1080         }
1081
1082         /* The new end node will die.  We need not free as the in array is on the obstack:
1083            copy_node() only generated 'D' arrays. */
1084
1085         /* -- Replace Return nodes by Jump nodes. -- */
1086         n_ret = 0;
1087         for (i = 0; i < arity; i++) {
1088                 ir_node *ret;
1089                 ret = get_Block_cfgpred(end_bl, i);
1090                 if (is_Return(ret)) {
1091                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1092                         n_ret++;
1093                 }
1094         }
1095         set_irn_in(post_bl, n_ret, cf_pred);
1096
1097         /* -- Build a Tuple for all results of the method.
1098            Add Phi node if there was more than one Return.  -- */
1099         turn_into_tuple(post_call, pn_Call_max);
1100         /* First the Memory-Phi */
1101         n_ret = 0;
1102         for (i = 0; i < arity; i++) {
1103                 ret = get_Block_cfgpred(end_bl, i);
1104                 if (is_Return(ret)) {
1105                         cf_pred[n_ret] = get_Return_mem(ret);
1106                         n_ret++;
1107                 }
1108         }
1109         phi = new_Phi(n_ret, cf_pred, mode_M);
1110         set_Tuple_pred(call, pn_Call_M_regular, phi);
1111         /* Conserve Phi-list for further inlinings -- but might be optimized */
1112         if (get_nodes_block(phi) == post_bl) {
1113                 set_irn_link(phi, get_irn_link(post_bl));
1114                 set_irn_link(post_bl, phi);
1115         }
1116         /* Now the real results */
1117         if (n_res > 0) {
1118                 for (j = 0; j < n_res; j++) {
1119                         n_ret = 0;
1120                         for (i = 0; i < arity; i++) {
1121                                 ret = get_Block_cfgpred(end_bl, i);
1122                                 if (is_Return(ret)) {
1123                                         cf_pred[n_ret] = get_Return_res(ret, j);
1124                                         n_ret++;
1125                                 }
1126                         }
1127                         if (n_ret > 0)
1128                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1129                         else
1130                                 phi = new_Bad();
1131                         res_pred[j] = phi;
1132                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1133                         if (get_nodes_block(phi) == post_bl) {
1134                                 set_Phi_next(phi, get_Block_phis(post_bl));
1135                                 set_Block_phis(post_bl, phi);
1136                         }
1137                 }
1138                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1139         } else {
1140                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1141         }
1142         /* handle the regular call */
1143         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1144
1145         /* For now, we cannot inline calls with value_base */
1146         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1147
1148         /* Finally the exception control flow.
1149            We have two (three) possible situations:
1150            First if the Call branches to an exception handler: We need to add a Phi node to
1151            collect the memory containing the exception objects.  Further we need
1152            to add another block to get a correct representation of this Phi.  To
1153            this block we add a Jmp that resolves into the X output of the Call
1154            when the Call is turned into a tuple.
1155            Second the Call branches to End, the exception is not handled.  Just
1156            add all inlined exception branches to the End node.
1157            Third: there is no Exception edge at all. Handle as case two. */
1158         if (exc_handling == exc_handler) {
1159                 n_exc = 0;
1160                 for (i = 0; i < arity; i++) {
1161                         ir_node *ret, *irn;
1162                         ret = get_Block_cfgpred(end_bl, i);
1163                         irn = skip_Proj(ret);
1164                         if (is_fragile_op(irn) || is_Raise(irn)) {
1165                                 cf_pred[n_exc] = ret;
1166                                 ++n_exc;
1167                         }
1168                 }
1169                 if (n_exc > 0) {
1170                         new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
1171                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1172                         /* The Phi for the memories with the exception objects */
1173                         n_exc = 0;
1174                         for (i = 0; i < arity; i++) {
1175                                 ir_node *ret;
1176                                 ret = skip_Proj(get_Block_cfgpred(end_bl, i));
1177                                 if (is_Call(ret)) {
1178                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1179                                         n_exc++;
1180                                 } else if (is_fragile_op(ret)) {
1181                                         /* We rely on all fragile ops having their memory output at the same position. */
1182                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1183                                         n_exc++;
1184                                 } else if (is_Raise(ret)) {
1185                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1186                                         n_exc++;
1187                                 }
1188                         }
1189                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1190                 } else {
1191                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1192                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1193                 }
1194         } else {
1195                 ir_node *main_end_bl;
1196                 int main_end_bl_arity;
1197                 ir_node **end_preds;
1198
1199                 /* assert(exc_handling == 1 || no exceptions. ) */
1200                 n_exc = 0;
1201                 for (i = 0; i < arity; i++) {
1202                         ir_node *ret = get_Block_cfgpred(end_bl, i);
1203                         ir_node *irn = skip_Proj(ret);
1204
1205                         if (is_fragile_op(irn) || is_Raise(irn)) {
1206                                 cf_pred[n_exc] = ret;
1207                                 n_exc++;
1208                         }
1209                 }
1210                 main_end_bl = get_irg_end_block(irg);
1211                 main_end_bl_arity = get_irn_arity(main_end_bl);
1212                 end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
1213
1214                 for (i = 0; i < main_end_bl_arity; ++i)
1215                         end_preds[i] = get_irn_n(main_end_bl, i);
1216                 for (i = 0; i < n_exc; ++i)
1217                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1218                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1219                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1220                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1221                 free(end_preds);
1222         }
1223         free(res_pred);
1224         free(cf_pred);
1225
1226         /* --  Turn the optimizations back on. -- */
1227         set_optimize(rem_opt);
1228         current_ir_graph = rem;
1229
1230         return 1;
1231 }
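
/* Note: inline_method() walks the Proj users of the Call via the link fields
 * (see the exception handling detection above), so callers such as
 * inline_small_irgs() below run collect_phiprojs() on the graph beforehand. */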
1232
1233 /********************************************************************/
1234 /* Apply inlining to small methods.                                 */
1235 /********************************************************************/
1236
1237 static struct obstack  temp_obst;
1238
1239 /** Represents a possible inlinable call in a graph. */
1240 typedef struct _call_entry call_entry;
1241 struct _call_entry {
1242         ir_node    *call;      /**< the Call node */
1243         ir_graph   *callee;    /**< the callee IR-graph called here */
1244         call_entry *next;      /**< for linking the next one */
1245         int        loop_depth; /**< the loop depth of this call */
1246 };
1247
1248 /**
1249  * environment for inlining small irgs
1250  */
1251 typedef struct _inline_env_t {
1252         struct obstack obst;  /**< an obstack where call_entries are allocated on. */
1253         call_entry *head;     /**< the head of the call entry list */
1254         call_entry *tail;     /**< the tail of the call entry list */
1255 } inline_env_t;
1256
1257 /**
1258  * Returns the irg called from a Call node. If the irg is not
1259  * known, NULL is returned.
1260  *
1261  * @param call  the call node
1262  */
1263 static ir_graph *get_call_called_irg(ir_node *call) {
1264         ir_node *addr;
1265
1266         addr = get_Call_ptr(call);
1267         if (is_Global(addr)) {
1268                 ir_entity *ent = get_Global_entity(addr);
1269                 return get_entity_irg(ent);
1270         }
1271
1272         return NULL;
1273 }
1274
1275 /**
1276  * Walker: Collect all calls to known graphs inside a graph.
1277  */
1278 static void collect_calls(ir_node *call, void *env) {
1279         if (is_Call(call)) {
1280                 ir_graph *called_irg = get_call_called_irg(call);
1281
1282                 if (called_irg != NULL) {
1283                         /* The Call node calls a locally defined method.  Remember to inline. */
1284                         inline_env_t *ienv  = env;
1285                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1286                         entry->call       = call;
1287                         entry->callee     = called_irg;
1288                         entry->next       = NULL;
1289                         entry->loop_depth = 0;
1290
1291                         if (ienv->tail == NULL)
1292                                 ienv->head = entry;
1293                         else
1294                                 ienv->tail->next = entry;
1295                         ienv->tail = entry;
1296                 }
1297         }
1298 }
1299
1300 /**
1301  * Inlines all small methods at call sites where the called address comes
1302  * from a Const node that references the entity representing the called
1303  * method.
1304  * The size argument is a rough measure for the code size of the method:
1305  * Methods where the obstack containing the firm graph is smaller than
1306  * size are inlined.
1307  */
1308 void inline_small_irgs(ir_graph *irg, int size) {
1309         ir_graph *rem = current_ir_graph;
1310         inline_env_t env;
1311         call_entry *entry;
1312
1313         current_ir_graph = irg;
1314         /* Handle graph state */
1315         assert(get_irg_phase_state(irg) != phase_building);
1316         free_callee_info(irg);
1317
1318         /* Find Call nodes to inline.
1319            (We cannot inline during a walk of the graph, as inlining the same
1320            method several times changes the visited flag of the walked graph:
1321            after the first inlining, visited of the callee equals visited of
1322            the caller.  With the next inlining both are increased.) */
1323         obstack_init(&env.obst);
1324         env.head = env.tail = NULL;
1325         irg_walk_graph(irg, NULL, collect_calls, &env);
1326
1327         if (env.head != NULL) {
1328                 /* There are calls to inline */
1329                 collect_phiprojs(irg);
1330
1331                 for (entry = env.head; entry != NULL; entry = entry->next) {
1332                         ir_graph            *callee = entry->callee;
1333                         irg_inline_property prop    = get_irg_inline_property(callee);
1334
1335                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1336                                 /* do not inline forbidden / weak graphs */
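                                /* (a weak entity may be replaced by a different definition
                                   at link time, so its current graph must not be inlined) */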
1337                                 continue;
1338                         }
1339
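                        /* The memory used by the callee's graph obstack serves as a
                           cheap proxy for its code size. */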
1340                         if (prop >= irg_inline_forced ||
1341                             _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
1342                                 inline_method(entry->call, callee);
1343                         }
1344                 }
1345         }
1346         obstack_free(&env.obst, NULL);
1347         current_ir_graph = rem;
1348 }
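/*
 * Usage sketch (hypothetical driver code, the size is made up): after graph
 * construction a front end might run
 *
 *     inline_small_irgs(irg, 100);
 *
 * which inlines every known callee whose graph obstack occupies roughly fewer
 * than 100 bytes.
 */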
1349
1350 /**
1351  * Environment for inlining irgs.
1352  */
1353 typedef struct {
1354         int n_nodes;             /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
1355         int n_blocks;            /**< Number of Blocks in graph without Start and End block. */
1356         int n_nodes_orig;        /**< for statistics */
1357         int n_call_nodes;        /**< Number of Call nodes in the graph. */
1358         int n_call_nodes_orig;   /**< for statistics */
1359         int n_callers;           /**< Number of known graphs that call this graph. */
1360         int n_callers_orig;      /**< for statistics */
1361         unsigned got_inline:1;   /**< Set, if at least one call inside this graph was inlined. */
1362         unsigned local_vars:1;   /**< Set, if an inlined call received the address of a local variable as argument. */
1363         unsigned recursive:1;    /**< Set, if this function is self recursive. */
1364         call_entry *call_head;   /**< The head of the list of all call nodes in this graph. */
1365         call_entry *call_tail;   /**< The tail of the list of all call nodes in this graph. */
1366         unsigned *local_weights; /**< Once allocated, the beneficial weight for transmitting local addresses. */
1367 } inline_irg_env;
1368
1369 /**
1370  * Allocate a new environment for inlining.
1371  */
1372 static inline_irg_env *alloc_inline_irg_env(void) {
1373         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1374         env->n_nodes           = -2; /* do not count Start, End */
1375         env->n_blocks          = -2; /* do not count Start, End Block */
1376         env->n_nodes_orig      = -2; /* do not count Start, End */
1377         env->call_head         = NULL;
1378         env->call_tail         = NULL;
1379         env->n_call_nodes      = 0;
1380         env->n_call_nodes_orig = 0;
1381         env->n_callers         = 0;
1382         env->n_callers_orig    = 0;
1383         env->got_inline        = 0;
1384         env->local_vars        = 0;
1385         env->recursive         = 0;
1386         env->local_weights     = NULL;
1387         return env;
1388 }
1389
1390 typedef struct walker_env {
1391         inline_irg_env *x;     /**< the inline environment */
1392         call_entry *last_call; /**< points to the last inserted call */
1393         char ignore_runtime;   /**< the ignore runtime flag */
1394         char ignore_callers;   /**< if set, do not update the callers' data */
1395 } wenv_t;
1396
1397 /**
1398  * post-walker: collect all calls in the inline-environment
1399  * of a graph and sum some statistics.
1400  */
1401 static void collect_calls2(ir_node *call, void *ctx) {
1402         wenv_t         *env = ctx;
1403         inline_irg_env *x = env->x;
1404         ir_opcode      code = get_irn_opcode(call);
1405         ir_graph       *callee;
1406         call_entry     *entry;
1407
1408         /* count meaningful nodes in irg */
1409         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1410                 if (code != iro_Block) {
1411                         ++x->n_nodes;
1412                         ++x->n_nodes_orig;
1413                 } else {
1414                         ++x->n_blocks;
1415                 }
1416         }
1417
1418         if (code != iro_Call) return;
1419
1420         /* check if it's a runtime call */
1421         if (env->ignore_runtime) {
1422                 ir_node *symc = get_Call_ptr(call);
1423
1424                 if (is_Global(symc)) {
1425                         ir_entity *ent = get_Global_entity(symc);
1426
1427                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1428                                 return;
1429                 }
1430         }
1431
1432         /* collect all call nodes */
1433         ++x->n_call_nodes;
1434         ++x->n_call_nodes_orig;
1435
1436         callee = get_call_called_irg(call);
1437         if (callee != NULL) {
1438                 if (! env->ignore_callers) {
1439                         inline_irg_env *callee_env = get_irg_link(callee);
1440                         /* count all static callers */
1441                         ++callee_env->n_callers;
1442                         ++callee_env->n_callers_orig;
1443                 }
1444                 if (callee == current_ir_graph)
1445                         x->recursive = 1;
1446
1447                 /* link it in the list of possible inlinable entries */
1448                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1449                 entry->call       = call;
1450                 entry->callee     = callee;
1451                 entry->next       = NULL;
1452                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
1453
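                /* The call list is kept sorted by decreasing loop depth, so calls
                   inside inner loops are considered for inlining first. */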
1454                 /* note: last_call points to the most recently inserted entry */
1455                 if (x->call_head == NULL) {
1456                         x->call_head = entry;
1457                 } else {
1458                         if (entry->loop_depth == env->last_call->loop_depth) {
1459                                 /* same depth as the last one, enqueue after it */
1460                                 entry->next          = env->last_call->next;
1461                                 env->last_call->next = entry;
1462                         } else if (entry->loop_depth > x->call_head->loop_depth) {
1463                                 /* put first */
1464                                 entry->next  = x->call_head;
1465                                 x->call_head = entry;
1466                         } else {
1467                                 /* search the insertion point */
1468                                 call_entry *p;
1469
1470                                 for (p = x->call_head; p->next != NULL; p = p->next)
1471                                         if (entry->loop_depth > p->next->loop_depth)
1472                                                 break;
1473                                 entry->next = p->next;
1474                                 p->next     = entry;
1475                         }
1476                 }
1477                 env->last_call = entry;
1478                 if (entry->next == NULL) {
1479                         /* keep tail up to date */
1480                         x->call_tail = entry;
1481                 }
1482         }
1483 }
1484
1485 /**
1486  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
1487  * hence this irg calls no other graphs and is a leave.
1488  */
1489 INLINE static int is_leave(ir_graph *irg) {
1490         inline_irg_env *env = get_irg_link(irg);
1491         return env->n_call_nodes == 0;
1492 }
1493
1494 /**
1495  * Returns TRUE if the number of nodes in the callee is
1496  * smaller than size in the irg's environment.
1497  */
1498 INLINE static int is_smaller(ir_graph *callee, int size) {
1499         inline_irg_env *env = get_irg_link(callee);
1500         return env->n_nodes < size;
1501 }
1502
1503 /**
1504  * Append the nodes of the list src to the nodes of the list in environment dst.
1505  */
1506 static void append_call_list(inline_irg_env *dst, call_entry *src) {
1507         call_entry *entry, *nentry;
1508
1509         /* Note that the src list points to Call nodes in the inlined graph, but
1510            we need Call nodes in our graph. Luckily the inliner leaves this information
1511            in the link field. */
1512         for (entry = src; entry != NULL; entry = entry->next) {
1513                 nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1514                 nentry->call         = get_irn_link(entry->call);
1515                 nentry->callee       = entry->callee;
1516                 nentry->next         = NULL;
1517                 nentry->loop_depth   = entry->loop_depth;
1518                 dst->call_tail->next = nentry;
1519                 dst->call_tail       = nentry;
1520         }
1521 }
1522
1523 /**
1524  * Replace the entry dst by copies of the entries of list src and return the head of the resulting list.
1525  */
1526 static call_entry *replace_entry_by_call_list(call_entry *dst, call_entry *src) {
1527         call_entry *entry, *nentry, *head, *tail;
1528
1529         /* Note that the src list points to Call nodes in the inlined graph, but
1530            we need Call nodes in our graph. Luckily the inliner leaves this information
1531            in the link field. */
1532         head = tail = NULL;
1533         for (entry = src; entry != NULL; entry = entry->next) {
1534                 nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1535                 nentry->call         = get_irn_link(entry->call);
1536                 nentry->callee       = entry->callee;
1537                 nentry->next         = NULL;
1538                 nentry->loop_depth   = entry->loop_depth + dst->loop_depth;
1539                 if (head == NULL)
1540                         head = nentry;
1541                 else
1542                         tail->next = nentry;
1543                 tail = nentry;
1544         }
1545         /* skip the head of dst */
1546         if (head != NULL) {
1547                 tail->next = dst->next;
1548         } else {
1549                 head = dst->next;
1550         }
1551         return head;
1552 }
1553
1554 /*
1555  * Inlines small leave methods at call sites where the called address comes
1556  * from a SymConst node that references the entity representing the called
1557  * method.
1558  * The size argument is a rough measure for the code size of the method:
1559  * Methods where the obstack containing the firm graph is smaller than
1560  * size are inlined.
1561  */
1562 void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime) {
1563         inline_irg_env   *env;
1564         ir_graph         *irg;
1565         int              i, n_irgs;
1566         ir_graph         *rem;
1567         int              did_inline;
1568         wenv_t           wenv;
1569         call_entry       *entry, *tail;
1570         const call_entry *centry;
1571         pmap             *copied_graphs;
1572         pmap_entry       *pm_entry;
1573
1574         rem = current_ir_graph;
1575         obstack_init(&temp_obst);
1576
1577         /* a map for the copied graphs, used to inline recursive calls */
1578         copied_graphs = pmap_create();
1579
1580         /* extend all irgs by a temporary data structure for inlining. */
1581         n_irgs = get_irp_n_irgs();
1582         for (i = 0; i < n_irgs; ++i)
1583                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1584
1585         /* Precompute information in temporary data structure. */
1586         wenv.ignore_runtime = ignore_runtime;
1587         wenv.ignore_callers = 0;
1588         for (i = 0; i < n_irgs; ++i) {
1589                 ir_graph *irg = get_irp_irg(i);
1590
1591                 assert(get_irg_phase_state(irg) != phase_building);
1592                 free_callee_info(irg);
1593
1594                 assure_cf_loop(irg);
1595                 wenv.x = get_irg_link(irg);
1596                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1597         }
1598
1599         /* -- and now inline. -- */
1600
1601         /* Inline leaves recursively -- we might construct new leaves. */
1602         do {
1603                 did_inline = 0;
1604
1605                 for (i = 0; i < n_irgs; ++i) {
1606                         ir_node *call;
1607                         int phiproj_computed = 0;
1608
1609                         current_ir_graph = get_irp_irg(i);
1610                         env = (inline_irg_env *)get_irg_link(current_ir_graph);
1611
1612                         tail = NULL;
1613                         for (entry = env->call_head; entry != NULL; entry = entry->next) {
1614                                 ir_graph            *callee;
1615                                 irg_inline_property prop;
1616
1617                                 if (env->n_nodes > maxsize)
1618                                         break;
1619
1620                                 call   = entry->call;
1621                                 callee = entry->callee;
1622
1623                                 prop = get_irg_inline_property(callee);
1624                                 if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1625                                         /* do not inline forbidden / weak graphs */
1626                                         continue;
1627                                 }
1628
1629                                 if (is_leave(callee) && (
1630                                     is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
1631                                         if (!phiproj_computed) {
1632                                                 phiproj_computed = 1;
1633                                                 collect_phiprojs(current_ir_graph);
1634                                         }
1635                                         did_inline = inline_method(call, callee);
1636
1637                                         if (did_inline) {
1638                                                 inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1639
1640                                                 /* was inlined, must be recomputed */
1641                                                 phiproj_computed = 0;
1642
1643                                                 /* Do some statistics */
1644                                                 env->got_inline = 1;
1645                                                 --env->n_call_nodes;
1646                                                 env->n_nodes += callee_env->n_nodes;
1647                                                 --callee_env->n_callers;
1648
1649                                                 /* remove this call from the list */
1650                                                 if (tail != NULL)
1651                                                         tail->next = entry->next;
1652                                                 else
1653                                                         env->call_head = entry->next;
1654                                                 continue;
1655                                         }
1656                                 }
1657                                 tail = entry;
1658                         }
1659                         env->call_tail = tail;
1660                 }
1661         } while (did_inline);
1662
1663         /* inline other small functions. */
1664         for (i = 0; i < n_irgs; ++i) {
1665                 ir_node *call;
1666                 int phiproj_computed = 0;
1667
1668                 current_ir_graph = get_irp_irg(i);
1669                 env = (inline_irg_env *)get_irg_link(current_ir_graph);
1670
1671                 /* note that the list of possible calls is updated during the process */
1672                 tail = NULL;
1673                 for (entry = env->call_head; entry != NULL; entry = entry->next) {
1674                         irg_inline_property prop;
1675                         ir_graph            *callee;
1676                         pmap_entry          *e;
1677
1678                         call   = entry->call;
1679                         callee = entry->callee;
1680
1681                         prop = get_irg_inline_property(callee);
1682                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1683                                 /* do not inline forbidden / weak graphs */
1684                                 continue;
1685                         }
1686
1687                         e = pmap_find(copied_graphs, callee);
1688                         if (e != NULL) {
1689                                 /*
1690                                  * Remap callee if we have a copy.
1691                                  * FIXME: Should we do this only for recursive Calls ?
1692                                  */
1693                                 callee = e->value;
1694                         }
1695
1696                         if (prop >= irg_inline_forced ||
1697                             (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
1698                                 if (current_ir_graph == callee) {
1699                                         /*
1700                                          * Recursive call: we cannot directly inline because we cannot walk
1701                                          * the graph and change it. So we have to make a copy of the graph
1702                                          * first.
1703                                          */
1704
1705                                         inline_irg_env *callee_env;
1706                                         ir_graph       *copy;
1707
1708                                         /*
1709                                          * No copy yet, create one.
1710                                          * Note that recursive methods are never leaves, so it is sufficient
1711                                          * to test this condition here.
1712                                          */
1713                                         copy = create_irg_copy(callee);
1714
1715                                         /* create_irg_copy() destroys the Proj links, recompute them */
1716                                         phiproj_computed = 0;
1717
1718                                         /* allocate new environment */
1719                                         callee_env = alloc_inline_irg_env();
1720                                         set_irg_link(copy, callee_env);
1721
1722                                         assure_cf_loop(copy);
1723                                         wenv.x              = callee_env;
1724                                         wenv.ignore_callers = 1;
1725                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1726
1727                                         /*
1728                                          * Enter the entity of the original graph. This is needed
1729                                          * for inline_method(). However, note that ent->irg still points
1730                                          * to callee, NOT to copy.
1731                                          */
1732                                         set_irg_entity(copy, get_irg_entity(callee));
1733
1734                                         pmap_insert(copied_graphs, callee, copy);
1735                                         callee = copy;
1736
1737                                         /* we have only one caller: the original graph */
1738                                         callee_env->n_callers      = 1;
1739                                         callee_env->n_callers_orig = 1;
1740                                 }
1741                                 if (! phiproj_computed) {
1742                                         phiproj_computed = 1;
1743                                         collect_phiprojs(current_ir_graph);
1744                                 }
1745                                 did_inline = inline_method(call, callee);
1746                                 if (did_inline) {
1747                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1748
1749                                         /* was inlined, must be recomputed */
1750                                         phiproj_computed = 0;
1751
1752                                         /* callee was inlined. Append its call list. */
1753                                         env->got_inline = 1;
1754                                         --env->n_call_nodes;
1755                                         append_call_list(env, callee_env->call_head);
1756                                         env->n_call_nodes += callee_env->n_call_nodes;
1757                                         env->n_nodes += callee_env->n_nodes;
1758                                         --callee_env->n_callers;
1759
1760                                         /* after we have inlined callee, all called methods inside callee
1761                                            are now called once more */
1762                                         for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
1763                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1764                                                 ++penv->n_callers;
1765                                         }
1766
1767                                         /* remove this call from the list */
1768                                         if (tail != NULL)
1769                                                 tail->next = entry->next;
1770                                         else
1771                                                 env->call_head = entry->next;
1772                                         continue;
1773                                 }
1774                         }
1775                         tail = entry;
1776                 }
1777                 env->call_tail = tail;
1778         }
1779
1780         for (i = 0; i < n_irgs; ++i) {
1781                 irg = get_irp_irg(i);
1782                 env = (inline_irg_env *)get_irg_link(irg);
1783
1784                 if (env->got_inline) {
1785                         optimize_graph_df(irg);
1786                         optimize_cf(irg);
1787                 }
1788                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1789                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1790                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1791                         env->n_callers_orig, env->n_callers,
1792                         get_entity_name(get_irg_entity(irg))));
1793                 }
1794         }
1795
1796         /* kill the copied graphs: we don't need them anymore */
1797         foreach_pmap(copied_graphs, pm_entry) {
1798                 ir_graph *copy = pm_entry->value;
1799
1800                 /* reset the entity, otherwise it will be deleted in the next step ... */
1801                 set_irg_entity(copy, NULL);
1802                 free_ir_graph(copy);
1803         }
1804         pmap_destroy(copied_graphs);
1805
1806         obstack_free(&temp_obst, NULL);
1807         current_ir_graph = rem;
1808 }
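/*
 * Usage sketch (hypothetical driver code, all numbers made up):
 *
 *     inline_leave_functions(500, 80, 30, 0);
 *
 * first inlines leave methods with fewer than 80 nodes until no more can be
 * inlined, then inlines remaining known callees with fewer than 30 nodes, as
 * long as the caller stays below 500 nodes; with ignore_runtime == 0 runtime
 * calls are treated like any other call.
 */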
1809
1810 /**
1811  * Recursively calculate the weight of a parameter that transmits the address of a local variable by inspecting all users of that address.
1812  */
1813 static unsigned calc_method_local_weight(ir_node *arg) {
1814         int      i, j, k;
1815         unsigned v, weight = 0;
1816
1817         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1818                 ir_node *succ = get_irn_out(arg, i);
1819
1820                 switch (get_irn_opcode(succ)) {
1821                 case iro_Load:
1822                 case iro_Store:
1823                         /* Loads and Stores can be removed */
1824                         weight += 3;
1825                         break;
1826                 case iro_Sel:
1827                         /* check if all args are constant */
1828                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1829                                 ir_node *idx = get_Sel_index(succ, j);
1830                                 if (! is_Const(idx))
1831                                         return 0;
1832                         }
1833                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1834                            some unsupported node. */
1835                         v = calc_method_local_weight(succ);
1836                         if (v == 0)
1837                                 return 0;
1838                         /* we can kill one Sel with constant indexes, this is cheap */
1839                         weight += v + 1;
1840                         break;
1841                 case iro_Id:
1842                         /* when looking backward we might find Id nodes */
1843                         weight += calc_method_local_weight(succ);
1844                         break;
1845                 case iro_Tuple:
1846                         /* unoptimized tuple */
1847                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1848                                 ir_node *pred = get_Tuple_pred(succ, j);
1849                                 if (pred == arg) {
1850                                         /* look for Proj(j) */
1851                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1852                                                 ir_node *succ_succ = get_irn_out(succ, k);
1853                                                 if (is_Proj(succ_succ)) {
1854                                                         if (get_Proj_proj(succ_succ) == j) {
1855                                                                 /* found */
1856                                                                 weight += calc_method_local_weight(succ_succ);
1857                                                         }
1858                                                 } else {
1859                                                         /* this should NOT happen */
1860                                                         return 0;
1861                                                 }
1862                                         }
1863                                 }
1864                         }
1865                         break;
1866                 default:
1867                         /* any other node: unsupported yet or bad. */
1868                         return 0;
1869                 }
1870         }
1871         return weight;
1872 }
1873
1874 /**
1875  * Calculate the parameter weights for transmitting the address of a local variable.
1876  */
1877 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1878         ir_entity *ent = get_irg_entity(irg);
1879         ir_type  *mtp;
1880         int      nparams, i, proj_nr;
1881         ir_node  *irg_args, *arg;
1882
1883         mtp      = get_entity_type(ent);
1884         nparams  = get_method_n_params(mtp);
1885
1886         /* allocate a new array; its mere existence serves as the 'analysed' flag */
1887         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1888
1889         /* If the method has no parameters we have nothing to do. */
1890         if (nparams <= 0)
1891                 return;
1892
1893         assure_irg_outs(irg);
1894         irg_args = get_irg_args(irg);
1895         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1896                 arg     = get_irn_out(irg_args, i);
1897                 proj_nr = get_Proj_proj(arg);
1898                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1899         }
1900 }
1901
1902 /**
1903  * Calculate the benefice for transmitting a local variable address.
1904  * After inlining, the local variable might be transformed into a
1905  * SSA variable by scalar_replacement().
1906  */
1907 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1908         inline_irg_env *env = get_irg_link(callee);
1909
1910         if (env->local_weights != NULL) {
1911                 if (pos < ARR_LEN(env->local_weights))
1912                         return env->local_weights[pos];
1913                 return 0;
1914         }
1915
1916         analyze_irg_local_weights(env, callee);
1917
1918         if (pos < ARR_LEN(env->local_weights))
1919                 return env->local_weights[pos];
1920         return 0;
1921 }
1922
1923 /**
1924  * Calculate a benefice value for inlining the given call.
1925  *
1926  * @param call       the call node we have to inspect
1927  * @param callee     the called graph
1928  * @param local_adr  set after return if an address of a local variable is
1929  *                   transmitted as a parameter
1930  */
1931 static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local_adr) {
1932         ir_entity *ent = get_irg_entity(callee);
1933         ir_node   *frame_ptr;
1934         ir_type   *mtp;
1935         int       weight = 0;
1936         int       i, n_params, all_const;
1937         unsigned  cc, v;
1938
1939         inline_irg_env *curr_env, *callee_env;
1940
1941         if (get_entity_additional_properties(ent) & (mtp_property_noreturn | mtp_property_weak)) {
1942                 /* do NOT inline noreturn or weak calls */
1943                 return INT_MIN;
1944         }
1945
1946         /* costs for every passed parameter */
1947         n_params = get_Call_n_params(call);
1948         mtp      = get_entity_type(ent);
1949         cc       = get_method_calling_convention(mtp);
1950         if (cc & cc_reg_param) {
1951                 /* register parameter, smaller costs for register parameters */
1952                 int max_regs = cc & ~cc_bits;
1953
1954                 if (max_regs < n_params)
1955                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1956                 else
1957                         weight += n_params * 2;
1958         } else {
1959                 /* parameters are passed on the stack */
1960                 weight += 5 * n_params;
1961         }
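        /* Example with made-up numbers: 5 parameters and 3 register slots cost
           3*2 + 2*5 = 16, while 5 pure stack parameters cost 5*5 = 25. */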
1962
1963         /* constant parameters improve the benefice */
1964         frame_ptr = get_irg_frame(current_ir_graph);
1965         all_const = 1;
1966         for (i = 0; i < n_params; ++i) {
1967                 ir_node *param = get_Call_param(call, i);
1968
1969                 if (is_Const(param)) {
1970                         weight += get_method_param_weight(ent, i);
1971                 } else {
1972                         all_const = 0;
1973                         if (is_SymConst(param))
1974                                 weight += get_method_param_weight(ent, i);
1975                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1976                                 /*
1977                                  * An address of a local variable is transmitted. After inlining,
1978                                  * scalar_replacement might be able to remove the local variable,
1979                                  * so honor this.
1980                                  */
1981                                 v = get_method_local_adress_weight(callee, i);
1982                                 weight += v;
1983                                 if (v > 0)
1984                                         *local_adr = 1;
1985                         }
1986                 }
1987         }
1988
1989         callee_env = get_irg_link(callee);
1990         if (callee_env->n_callers == 1 && callee != current_ir_graph) {
1991                 /* we are the only caller, give big bonus */
1992                 if (get_entity_visibility(ent) == visibility_local) {
1993                         weight += 5000;
1994                 } else {
1995                         weight += 200;
1996                 }
1997         }
1998
1999         /* do not inline big functions */
2000         weight -= callee_env->n_nodes;
2001
2002         /* reduce the benefice if the current function is already big */
2003         curr_env = get_irg_link(current_ir_graph);
2004         weight -= curr_env->n_nodes / 50;
2005
2006         /* give a bonus for functions with one block */
2007         if (callee_env->n_blocks == 1)
2008                 weight = weight * 3 / 2;
2009
2010         /* and one for small non-recursive functions: we want them to be inlined in almost every case */
2011         else if (callee_env->n_nodes < 20 && !callee_env->recursive)
2012                 weight += 5000;
2013
2014         /* and finally for leaves: they do not increase the register pressure
2015            because of callee-save registers */
2016         else if (callee_env->n_call_nodes == 0)
2017                 weight += 25;
2018
2019         /*
2020          * Reduce the weight for recursive functions if not all arguments are const.
2021          * inlining recursive functions is rarely good.
2022          */
2023         if (callee_env->recursive && !all_const)
2024                 weight -= 500;
2025
2026         /*
2027          * All arguments constant is probably a good sign, give an extra bonus
2028          */
2029         if (all_const)
2030                 weight += 100;
2031
2032         return weight;
2033 }
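/*
 * Illustrative example (all sizes made up): a non-recursive callee with one
 * block and 12 nodes, called with two register parameters from a caller with
 * 400 nodes and no other special properties, gets
 *     weight = 2*2 - 12 - 400/50 = -16,  then *3/2 for the single block = -24.
 * inline_functions() below accepts a call if its benefice is greater than
 * -inline_threshold, so with a hypothetical threshold of 150 this call would
 * still be inlined.
 */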
2034
2035 static ir_graph **irgs;
2036 static int      last_irg;
2037
2038 static void callgraph_walker(ir_graph *irg, void *data)
2039 {
2040         (void) data;
2041         irgs[last_irg++] = irg;
2042 }
2043
2044 static ir_graph **create_irg_list(void)
2045 {
2046         ir_entity **free_methods;
2047         int       arr_len;
2048         int       n_irgs = get_irp_n_irgs();
2049
2050         cgana(&arr_len, &free_methods);
2051         xfree(free_methods);
2052
2053         compute_callgraph();
2054
2055         last_irg = 0;
2056         irgs     = xmalloc(n_irgs * sizeof(*irgs));
2057         memset(irgs, 0, n_irgs * sizeof(*irgs));
2058
2059         callgraph_walk(NULL, callgraph_walker, NULL);
2060         assert(n_irgs == last_irg);
2061
2062         return irgs;
2063 }
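/* callgraph_walker() is registered as the post-walker above, so the returned
   list should roughly order callees before their callers (bottom-up); this way
   inline_functions() tends to process leaves first. */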
2064
2065 /**
2066  * Heuristic inliner. Calculates a benefice value for every call and inlines
2067  * those calls with a value higher than the threshold.
2068  */
2069 void inline_functions(int maxsize, int inline_threshold) {
2070         inline_irg_env   *env;
2071         int              i, n_irgs;
2072         ir_graph         *rem;
2073         int              did_inline;
2074         wenv_t           wenv;
2075         call_entry       *curr_call, **last_call;
2076         const call_entry *centry;
2077         pmap             *copied_graphs;
2078         pmap_entry       *pm_entry;
2079         ir_graph         **irgs;
2080
2081         rem = current_ir_graph;
2082         obstack_init(&temp_obst);
2083
2084         irgs = create_irg_list();
2085
2086         /* a map for the copied graphs, used to inline recursive calls */
2087         copied_graphs = pmap_create();
2088
2089         /* extend all irgs by a temporary data structure for inlining. */
2090         n_irgs = get_irp_n_irgs();
2091         for (i = 0; i < n_irgs; ++i)
2092                 set_irg_link(irgs[i], alloc_inline_irg_env());
2093
2094         /* Precompute information in temporary data structure. */
2095         wenv.ignore_runtime = 0;
2096         wenv.ignore_callers = 0;
2097         for (i = 0; i < n_irgs; ++i) {
2098                 ir_graph *irg = irgs[i];
2099
2100                 free_callee_info(irg);
2101
2102                 wenv.x         = get_irg_link(irg);
2103                 wenv.last_call = NULL;
2104                 assure_cf_loop(irg);
2105                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2106         }
2107
2108         /* -- and now inline. -- */
2109         for (i = 0; i < n_irgs; ++i) {
2110                 int      phiproj_computed = 0;
2111                 ir_node  *call;
2112                 ir_graph *irg = irgs[i];
2113
2114                 current_ir_graph = irg;
2115                 env = get_irg_link(irg);
2116
2117                 /* note that the list of possible calls is updated during the process */
2118                 last_call = &env->call_head;
2119                 for (curr_call = env->call_head; curr_call != NULL;) {
2120                         irg_inline_property prop;
2121                         ir_graph            *callee;
2122                         pmap_entry          *e;
2123                         int                 benefice;
2124                         unsigned            local_adr;
2125
2126                         if (env->n_nodes > maxsize) break;
2127
2128                         call   = curr_call->call;
2129                         callee = curr_call->callee;
2130
2131                         prop = get_irg_inline_property(callee);
2132                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
2133                                 /* do not inline forbidden / weak graphs */
2134                                 last_call = &curr_call->next;
                                     curr_call = curr_call->next;
                                     continue;
2135                         }
2136
2137                         e = pmap_find(copied_graphs, callee);
2138                         if (e != NULL) {
2139                                 /*
2140                                 * Remap callee if we have a copy.
2141                                 * FIXME: Should we do this only for recursive Calls ?
2142                                 */
2143                                 callee = e->value;
2144                         }
2145
2146                         /* calculate the benefice on the original call to prevent excessive inlining */
2147                         local_adr = 0;
2148                         benefice = calc_inline_benefice(call, callee, &local_adr);
2149                         DB((dbg, LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
2150
2151                         if (benefice > -inline_threshold || prop >= irg_inline_forced) {
2152                                 if (current_ir_graph == callee) {
2153                                         /*
2154                                          * Recursive call: we cannot directly inline because we cannot walk
2155                                          * the graph and change it. So we have to make a copy of the graph
2156                                          * first.
2157                                          */
2158
2159                                         inline_irg_env *callee_env;
2160                                         ir_graph       *copy;
2161
2162                                         /*
2163                                          * No copy yet, create one.
2164                                          * Note that recursive methods are never leaves, so it is sufficient
2165                                          * to test this condition here.
2166                                          */
2167                                         copy = create_irg_copy(callee);
2168
2169                                         /* create_irg_copy() destroys the Proj links, recompute them */
2170                                         phiproj_computed = 0;
2171
2172                                         /* allocate new environment */
2173                                         callee_env = alloc_inline_irg_env();
2174                                         set_irg_link(copy, callee_env);
2175
2176                                         assure_cf_loop(copy);
2177                                         wenv.x              = callee_env;
2178                                         wenv.ignore_callers = 1;
2179                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2180
2181                                         /*
2182                                          * Enter the entity of the original graph. This is needed
2183                                          * for inline_method(). However, note that ent->irg still points
2184                                          * to callee, NOT to copy.
2185                                          */
2186                                         set_irg_entity(copy, get_irg_entity(callee));
2187
2188                                         pmap_insert(copied_graphs, callee, copy);
2189                                         callee = copy;
2190
2191                                         /* we have only one caller: the original graph */
2192                                         callee_env->n_callers      = 1;
2193                                         callee_env->n_callers_orig = 1;
2194                                 }
2195                                 if (! phiproj_computed) {
2196                                         phiproj_computed = 1;
2197                                         collect_phiprojs(current_ir_graph);
2198                                 }
2199                                 did_inline = inline_method(call, callee);
2200                                 if (did_inline) {
2201                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
2202
2203                                         /* was inlined, must be recomputed */
2204                                         phiproj_computed = 0;
2205
2206                                         /* after we have inlined callee, all called methods inside callee
2207                                         are now called once more */
2208                                         for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
2209                                                 inline_irg_env *penv = get_irg_link(centry->callee);
2210                                                 ++penv->n_callers;
2211                                         }
2212
2213                                         /* callee was inlined. Append its call list. */
2214                                         env->got_inline = 1;
2215                                         if (local_adr)
2216                                                 env->local_vars = 1;
2217                                         --env->n_call_nodes;
2218                                         curr_call = replace_entry_by_call_list(curr_call, callee_env->call_head);
2219                                         env->n_call_nodes += callee_env->n_call_nodes;
2220                                         env->n_nodes += callee_env->n_nodes;
2221                                         --callee_env->n_callers;
2222
2223                                         /* remove the current call entry from the list by splicing in the callee's (copied) call list */
2224                                         *last_call = curr_call;
2225                                         continue;
2226                                 }
2227                         }
2228                         last_call = &curr_call->next;
2229                         curr_call = curr_call->next;
2230                 }
2231
2232         }
2233
2234         for (i = 0; i < n_irgs; ++i) {
2235                 ir_graph *irg = irgs[i];
2236
2237                 env = get_irg_link(irg);
2238                 if (env->got_inline) {
2239                         /* this irg got calls inlined: optimize it */
2240
2241                         /* scalar replacement does not work well with Tuple nodes, so optimize them away */
2242                         optimize_graph_df(irg);
2243
2244                         if (env->local_vars) {
2245                                 if (scalar_replacement_opt(irg)) {
2246                                         optimize_graph_df(irg);
2247                                 }
2248                         }
2249                         optimize_cf(irg);
2250                 }
2251                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2252                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2253                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2254                         env->n_callers_orig, env->n_callers,
2255                         get_entity_name(get_irg_entity(irg))));
2256                 }
2257         }
2258
2259         /* kill the copied graphs: we don't need them anymore */
2260         foreach_pmap(copied_graphs, pm_entry) {
2261                 ir_graph *copy = pm_entry->value;
2262
2263                 /* reset the entity, otherwise it will be deleted in the next step ... */
2264                 set_irg_entity(copy, NULL);
2265                 free_ir_graph(copy);
2266         }
2267         pmap_destroy(copied_graphs);
2268
2269         xfree(irgs);
2270
2271         obstack_free(&temp_obst, NULL);
2272         current_ir_graph = rem;
2273 }
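/*
 * Usage sketch (hypothetical driver code, numbers made up):
 *
 *     inline_functions(750, 150);
 *
 * stops growing a caller beyond roughly 750 nodes and inlines every call whose
 * computed benefice is greater than -150.
 */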
2274
2275 void firm_init_inline(void) {
2276         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2277 }