1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #include "config.h"
27
28 #include <limits.h>
29 #include <assert.h>
30
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irprog_t.h"
34
35 #include "iroptimize.h"
36 #include "ircons_t.h"
37 #include "iropt_t.h"
38 #include "irgopt.h"
39 #include "irgmod.h"
40 #include "irgwalk.h"
41
42 #include "array_t.h"
43 #include "list.h"
44 #include "pset.h"
45 #include "pmap.h"
46 #include "pdeq.h"
47 #include "xmalloc.h"
48 #include "pqueue.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63 #include "iropt_dbg.h"
64
65 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
66
67 /*------------------------------------------------------------------*/
68 /* Routines for dead node elimination / copying garbage collection  */
69 /* of the obstack.                                                  */
70 /*------------------------------------------------------------------*/
71
72 /**
73  * Remember the new node in the old node by using a field all nodes have.
74  */
75 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
76
77 /**
78  * Get this new node, before the old node is forgotten.
79  */
80 #define get_new_node(oldn) get_irn_link(oldn)
81
82 /**
83  * Check if a new node was set.
84  */
85 #define has_new_node(n) (get_new_node(n) != NULL)
86
87 /**
88  * We use the block_visited flag to mark that we have computed the
89  * number of useful predecessors for this block.
90  * Further we encode the new arity in this flag in the old blocks.
91  * Remembering the arity is useful, as it saves a lot of pointer
92  * accesses.  This function is called for all Phi and Block nodes
93  * in a Block.
94  */
95 static inline int
96 compute_new_arity(ir_node *b) {
97         int i, res, irn_arity;
98         int irg_v, block_v;
99
100         irg_v = get_irg_block_visited(current_ir_graph);
101         block_v = get_Block_block_visited(b);
102         if (block_v >= irg_v) {
103                 /* we computed the number of preds for this block and saved it in the
104                    block_v flag */
105                 return block_v - irg_v;
106         } else {
107                 /* compute the number of good predecessors */
108                 res = irn_arity = get_irn_arity(b);
109                 for (i = 0; i < irn_arity; i++)
110                         if (is_Bad(get_irn_n(b, i))) res--;
111                 /* save it in the flag. */
112                 set_Block_block_visited(b, irg_v + res);
113                 return res;
114         }
115 }
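/*
 * Illustrative example (values are made up): if the graph's block_visited
 * counter irg_v is 10 and a block has 4 predecessors, one of them Bad,
 * compute_new_arity() stores 10 + 3 = 13 in the block's block_visited flag.
 * A second call for the same block then simply returns 13 - 10 = 3 without
 * rescanning the predecessors.
 */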
116
117 /**
118  * Copies the node to the new obstack. The Ins of the new node point to
119  * the predecessors on the old obstack.  For block/phi nodes not all
120  * predecessors might be copied.  n->link points to the new node.
121  * For Phi and Block nodes the function allocates in-arrays with an arity
122  * only for useful predecessors.  The arity is determined by counting
123  * the non-bad predecessors of the block.
124  *
125  * @param n    The node to be copied
126  * @param env  if non-NULL, the node number attribute will be copied to the new node
127  *
128  * Note: Also used for loop unrolling.
129  */
130 static void copy_node(ir_node *n, void *env) {
131         ir_node *nn, *block;
132         int new_arity;
133         ir_op *op = get_irn_op(n);
134         (void) env;
135
136         if (op == op_Bad) {
137                 /* node copied already */
138                 return;
139         } else if (op == op_Block) {
140                 block = NULL;
141                 new_arity = compute_new_arity(n);
142                 n->attr.block.graph_arr = NULL;
143         } else {
144                 block = get_nodes_block(n);
145                 if (op == op_Phi) {
146                         new_arity = compute_new_arity(block);
147                 } else {
148                         new_arity = get_irn_arity(n);
149                 }
150         }
151         nn = new_ir_node(get_irn_dbg_info(n),
152                 current_ir_graph,
153                 block,
154                 op,
155                 get_irn_mode(n),
156                 new_arity,
157                 get_irn_in(n) + 1);
158         /* Copy the attributes.  These might point to additional data.  If this
159            was allocated on the old obstack the pointers now are dangling.  This
160            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
161         if (op == op_Block) {
162                 /* we cannot allow blocks WITHOUT macroblock input */
163                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
164         }
165         copy_node_attr(n, nn);
166
167 #ifdef DEBUG_libfirm
168         {
169                 int copy_node_nr = env != NULL;
170                 if (copy_node_nr) {
171                         /* for easier debugging, we want to copy the node numbers too */
172                         nn->node_nr = n->node_nr;
173                 }
174         }
175 #endif
176
177         set_new_node(n, nn);
178         hook_dead_node_elim_subst(current_ir_graph, n, nn);
179 }
180
181 /**
182  * Copies new predecessors of old node to new node remembered in link.
183  * Spare the Bad predecessors of Phi and Block nodes.
184  */
185 static void copy_preds(ir_node *n, void *env) {
186         ir_node *nn, *block;
187         int i, j, irn_arity;
188         (void) env;
189
190         nn = get_new_node(n);
191
192         if (is_Block(n)) {
193                 /* copy the macro block header */
194                 ir_node *mbh = get_Block_MacroBlock(n);
195
196                 if (mbh == n) {
197                         /* this block is a macroblock header */
198                         set_Block_MacroBlock(nn, nn);
199                 } else {
200                         /* get the macro block header */
201                         ir_node *nmbh = get_new_node(mbh);
202                         assert(nmbh != NULL);
203                         set_Block_MacroBlock(nn, nmbh);
204                 }
205
206                 /* Don't copy Bad nodes. */
207                 j = 0;
208                 irn_arity = get_irn_arity(n);
209                 for (i = 0; i < irn_arity; i++) {
210                         if (! is_Bad(get_irn_n(n, i))) {
211                                 ir_node *pred = get_irn_n(n, i);
212                                 set_irn_n(nn, j, get_new_node(pred));
213                                 j++;
214                         }
215                 }
216                 /* repair the block visited flag from above misuse. Repair it in both
217                    graphs so that the old one can still be used. */
218                 set_Block_block_visited(nn, 0);
219                 set_Block_block_visited(n, 0);
220                 /* Local optimization could not merge two subsequent blocks if the
221                    in array contained Bads.  Now it is possible.
222                    We don't call optimize_in_place as it requires
223                    that the fields in ir_graph are set properly. */
224                 if ((get_opt_control_flow_straightening()) &&
225                         (get_Block_n_cfgpreds(nn) == 1) &&
226                         is_Jmp(get_Block_cfgpred(nn, 0))) {
227                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
228                         if (nn == old) {
229                                 /* Jmp jumps into the block it is in -- deal with this self cycle. */
230                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
231                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
232                         } else {
233                                 exchange(nn, old);
234                         }
235                 }
236         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
237                 /* Don't copy node if corresponding predecessor in block is Bad.
238                    The Block itself should not be Bad. */
239                 block = get_nodes_block(n);
240                 set_nodes_block(nn, get_new_node(block));
241                 j = 0;
242                 irn_arity = get_irn_arity(n);
243                 for (i = 0; i < irn_arity; i++) {
244                         if (! is_Bad(get_irn_n(block, i))) {
245                                 ir_node *pred = get_irn_n(n, i);
246                                 set_irn_n(nn, j, get_new_node(pred));
247                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
248                                 j++;
249                         }
250                 }
251                 /* If the pre walker reached this Phi after the post walker visited the
252                    block, block_visited is > 0. */
253                 set_Block_block_visited(get_nodes_block(n), 0);
254                 /* Compacting the Phi's ins might generate Phis with only one
255                    predecessor. */
256                 if (get_irn_arity(nn) == 1)
257                         exchange(nn, get_irn_n(nn, 0));
258         } else {
259                 irn_arity = get_irn_arity(n);
260                 for (i = -1; i < irn_arity; i++)
261                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
262         }
263         /* Now the new node is complete.  We can add it to the hash table for CSE.
264            @@@ inlining aborts if we identify End. Why? */
265         if (!is_End(nn))
266                 add_identities(current_ir_graph->value_table, nn);
267 }
268
269 /**
270  * Copies the graph recursively, compacts the keep-alives of the end node.
271  *
272  * @param irg           the graph to be copied
273  * @param copy_node_nr  If non-zero, the node number will be copied
274  */
275 static void copy_graph(ir_graph *irg, int copy_node_nr) {
276         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
277         ir_node *ka;      /* keep alive */
278         int i, irn_arity;
279         unsigned long vfl;
280
281         /* Some nodes must be copied by hand, sigh */
282         vfl = get_irg_visited(irg);
283         set_irg_visited(irg, vfl + 1);
284
285         oe = get_irg_end(irg);
286         mark_irn_visited(oe);
287         /* copy the end node by hand, allocate dynamic in array! */
288         ne = new_ir_node(get_irn_dbg_info(oe),
289                 irg,
290                 NULL,
291                 op_End,
292                 mode_X,
293                 -1,
294                 NULL);
295         /* Copy the attributes.  Well, there might be some in the future... */
296         copy_node_attr(oe, ne);
297         set_new_node(oe, ne);
298
299         /* copy the Bad node */
300         ob = get_irg_bad(irg);
301         mark_irn_visited(ob);
302         nb = new_ir_node(get_irn_dbg_info(ob),
303                 irg,
304                 NULL,
305                 op_Bad,
306                 mode_T,
307                 0,
308                 NULL);
309         copy_node_attr(ob, nb);
310         set_new_node(ob, nb);
311
312         /* copy the NoMem node */
313         om = get_irg_no_mem(irg);
314         mark_irn_visited(om);
315         nm = new_ir_node(get_irn_dbg_info(om),
316                 irg,
317                 NULL,
318                 op_NoMem,
319                 mode_M,
320                 0,
321                 NULL);
322         copy_node_attr(om, nm);
323         set_new_node(om, nm);
324
325         /* copy the live nodes */
326         set_irg_visited(irg, vfl);
327         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
328
329         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
330
331         /* visit the anchors as well */
332         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
333                 ir_node *n = get_irg_anchor(irg, i);
334
335                 if (n && (get_irn_visited(n) <= vfl)) {
336                         set_irg_visited(irg, vfl);
337                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
338                 }
339         }
340
341         /* copy_preds for the end node ... */
342         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
343
344         /*- ... and now the keep alives. -*/
345         /* First pick the unmarked block nodes and walk them.  We must pick these
346            first, as otherwise we would overlook blocks reachable from Phis. */
347         irn_arity = get_End_n_keepalives(oe);
348         for (i = 0; i < irn_arity; i++) {
349                 ka = get_End_keepalive(oe, i);
350                 if (is_Block(ka)) {
351                         if (get_irn_visited(ka) <= vfl) {
352                                 /* We must keep the block alive and copy everything reachable */
353                                 set_irg_visited(irg, vfl);
354                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
355                         }
356                         add_End_keepalive(ne, get_new_node(ka));
357                 }
358         }
359
360         /* Now pick other nodes.  Here we will keep all! */
361         irn_arity = get_End_n_keepalives(oe);
362         for (i = 0; i < irn_arity; i++) {
363                 ka = get_End_keepalive(oe, i);
364                 if (!is_Block(ka)) {
365                         if (get_irn_visited(ka) <= vfl) {
366                                 /* We didn't copy the node yet.  */
367                                 set_irg_visited(irg, vfl);
368                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
369                         }
370                         add_End_keepalive(ne, get_new_node(ka));
371                 }
372         }
373
374         /* start block sometimes only reached after keep alives */
375         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
376         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
377 }
378
379 /**
380  * Copies the graph reachable from current_ir_graph->end to the obstack
381  * in current_ir_graph and fixes the environment.
382  * Then fixes the fields in current_ir_graph containing nodes of the
383  * graph.
384  *
385  * @param copy_node_nr  If non-zero, the node number will be copied
386  */
387 static void
388 copy_graph_env(int copy_node_nr) {
389         ir_graph *irg = current_ir_graph;
390         ir_node *old_end, *new_anchor;
391         int i;
392
393         /* remove end_except and end_reg nodes */
394         old_end = get_irg_end(irg);
395         set_irg_end_except (irg, old_end);
396         set_irg_end_reg    (irg, old_end);
397
398         /* Not all nodes remembered in irg might be reachable
399            from the end node.  Assure their link is set to NULL, so that
400            we can test whether new nodes have been computed. */
401         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
402                 ir_node *n = get_irg_anchor(irg, i);
403                 if (n != NULL)
404                         set_new_node(n, NULL);
405         }
406         /* we use the block walk flag for removing Bads from Blocks ins. */
407         inc_irg_block_visited(irg);
408
409         /* copy the graph */
410         copy_graph(irg, copy_node_nr);
411
412         /* fix the anchor */
413         old_end    = get_irg_end(irg);
414         new_anchor = new_Anchor(irg);
415
416         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
417                 ir_node *n = get_irg_anchor(irg, i);
418                 if (n)
419                         set_irn_n(new_anchor, i, get_new_node(n));
420         }
421         free_End(old_end);
422         irg->anchor = new_anchor;
423
424         /* ensure the new anchor is placed in the endblock */
425         set_nodes_block(new_anchor, get_irg_end_block(irg));
426 }
427
428 /**
429  * Copies all reachable nodes to a new obstack.  Removes bad inputs
430  * from block nodes and the corresponding inputs from Phi nodes.
431  * Merges single exit blocks with single entry blocks and removes
432  * 1-input Phis.
433  * Adds all new nodes to a new hash table for CSE.  Does not
434  * perform CSE, so the hash table might contain common subexpressions.
435  */
436 void dead_node_elimination(ir_graph *irg) {
437         ir_graph *rem;
438 #ifdef INTERPROCEDURAL_VIEW
439         int rem_ipview = get_interprocedural_view();
440 #endif
441         struct obstack *graveyard_obst = NULL;
442         struct obstack *rebirth_obst   = NULL;
443
444         edges_deactivate(irg);
445
446         /* inform statistics that we started a dead-node elimination run */
447         hook_dead_node_elim(irg, 1);
448
449         /* Remember external state of current_ir_graph. */
450         rem = current_ir_graph;
451         current_ir_graph = irg;
452 #ifdef INTERPROCEDURAL_VIEW
453         set_interprocedural_view(0);
454 #endif
455
456         assert(get_irg_phase_state(irg) != phase_building);
457
458         /* Handle graph state */
459         free_callee_info(irg);
460         free_irg_outs(irg);
461         free_trouts();
462
463         /* @@@ so far we lose loops when copying */
464         free_loop_information(irg);
465
466         set_irg_doms_inconsistent(irg);
467
468         /* A quiet place, where the old obstack can rest in peace,
469            until it will be cremated. */
470         graveyard_obst = irg->obst;
471
472         /* A new obstack, where the reachable nodes will be copied to. */
473         rebirth_obst = XMALLOC(struct obstack);
474         irg->obst = rebirth_obst;
475         obstack_init(irg->obst);
476         irg->last_node_idx = 0;
477
478         /* We also need a new value table for CSE */
479         del_identities(irg->value_table);
480         irg->value_table = new_identities();
481
482         /* Copy the graph from the old to the new obstack */
483         copy_graph_env(/*copy_node_nr=*/1);
484
485         /* Free memory from old unoptimized obstack */
486         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
487         xfree(graveyard_obst);            /* ... then free it.           */
488
489         /* inform statistics that the run is over */
490         hook_dead_node_elim(irg, 0);
491
492         current_ir_graph = rem;
493 #ifdef INTERPROCEDURAL_VIEW
494         set_interprocedural_view(rem_ipview);
495 #endif
496 }
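/*
 * Usage sketch (illustrative only; local_optimize_graph() is merely one
 * example of a pass that leaves dead nodes behind):
 *
 *   local_optimize_graph(irg);    // may render many nodes unreachable
 *   dead_node_elimination(irg);   // copy the live nodes to a fresh obstack
 */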
497
498 /**
499  * Relink bad predecessors of a block and store the old in array in the
500  * link field. This function is called by relink_bad_predecessors().
501  * The array in the link field starts with the block operand at position 0.
502  * If the block has bad predecessors, create a new in array without bad preds.
503  * Otherwise leave the in array untouched.
504  */
505 static void relink_bad_block_predecessors(ir_node *n, void *env) {
506         ir_node **new_in, *irn;
507         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
508         (void) env;
509
510         /* if link field of block is NULL, look for bad predecessors otherwise
511            this is already done */
512         if (is_Block(n) && get_irn_link(n) == NULL) {
513                 /* save old predecessors in link field (position 0 is the block operand)*/
514                 set_irn_link(n, get_irn_in(n));
515
516                 /* count predecessors without bad nodes */
517                 old_irn_arity = get_irn_arity(n);
518                 for (i = 0; i < old_irn_arity; i++)
519                         if (!is_Bad(get_irn_n(n, i)))
520                                 ++new_irn_arity;
521
522                 /* arity changing: set new predecessors without bad nodes */
523                 if (new_irn_arity < old_irn_arity) {
524                         /* Get new predecessor array. We do not resize the array, as we must
525                            keep the old one to update Phis. */
526                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
527
528                         /* set new predecessors in array */
529                         new_in[0] = NULL;
530                         new_irn_n = 1;
531                         for (i = 0; i < old_irn_arity; i++) {
532                                 irn = get_irn_n(n, i);
533                                 if (!is_Bad(irn)) {
534                                         new_in[new_irn_n] = irn;
535                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
536                                         ++new_irn_n;
537                                 }
538                         }
539                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
540                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
541                         n->in = new_in;
542                 } /* ir node has bad predecessors */
543         } /* Block is not relinked */
544 }
545
546 /**
547  * Relinks Bad predecessors of Blocks and Phis; called by the walker
548  * remove_bad_predecessors(). If n is a Block, call
549  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
550  * function of the Phi's Block. If this block has bad predecessors, relink the
551  * preds of the Phi node.
552  */
553 static void relink_bad_predecessors(ir_node *n, void *env) {
554         ir_node *block, **old_in;
555         int i, old_irn_arity, new_irn_arity;
556
557         /* relink bad predecessors of a block */
558         if (is_Block(n))
559                 relink_bad_block_predecessors(n, env);
560
561         /* If Phi node relink its block and its predecessors */
562         if (is_Phi(n)) {
563                 /* Relink predecessors of phi's block */
564                 block = get_nodes_block(n);
565                 if (get_irn_link(block) == NULL)
566                         relink_bad_block_predecessors(block, env);
567
568                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
569                 old_irn_arity = ARR_LEN(old_in);
570
571                 /* Relink Phi predecessors if count of predecessors changed */
572                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
573                         /* set new predecessors in array
574                            n->in[0] remains the same block */
575                         new_irn_arity = 1;
576                         for(i = 1; i < old_irn_arity; i++)
577                                 if (!is_Bad(old_in[i])) {
578                                         n->in[new_irn_arity] = n->in[i];
579                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
580                                         ++new_irn_arity;
581                                 }
582
583                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
584                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
585                 }
586         } /* n is a Phi node */
587 }
588
589 /*
590  * Removes Bad predecessors from Blocks and the corresponding
591  * inputs to Phi nodes as in dead_node_elimination but without
592  * copying the graph.
593  * On walking up set the link field to NULL, on walking down call
594  * relink_bad_predecessors() (This function stores the old in array
595  * to the link field and sets a new in array if arity of predecessors
596  * changes).
597  */
598 void remove_bad_predecessors(ir_graph *irg) {
599         panic("Fix backedge handling first");
600         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
601 }
602
603
604 /*
605    __                      _  __ __
606   (_     __    o     _    | \/  |_
607   __)|_| | \_/ | \_/(/_   |_/\__|__
608
609   The following stuff implements a facility that automatically patches
610   registered ir_node pointers to the new node when a dead node elimination occurs.
611 */
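/*
 * Usage sketch (illustrative; the registered pointer is assumed to stay
 * valid for as long as it is registered):
 *
 *   survive_dce_t *sd   = new_survive_dce();
 *   ir_node       *node = ...;                  // some node we want to track
 *   survive_dce_register_irn(sd, &node);
 *   dead_node_elimination(irg);                 // 'node' now points to the copy
 *   free_survive_dce(sd);
 */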
612
613 struct _survive_dce_t {
614         struct obstack obst;
615         pmap *places;
616         pmap *new_places;
617         hook_entry_t dead_node_elim;
618         hook_entry_t dead_node_elim_subst;
619 };
620
621 typedef struct _survive_dce_list_t {
622         struct _survive_dce_list_t *next;
623         ir_node **place;
624 } survive_dce_list_t;
625
626 static void dead_node_hook(void *context, ir_graph *irg, int start) {
627         survive_dce_t *sd = context;
628         (void) irg;
629
630         /* Create a new map before the dead node elimination is performed. */
631         if (start) {
632                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
633         } else {
634                 /* Patch back all nodes if dead node elimination is over and something is to be done. */
635                 pmap_destroy(sd->places);
636                 sd->places     = sd->new_places;
637                 sd->new_places = NULL;
638         }
639 }
640
641 /**
642  * Hook called when dead node elimination replaces old by nw.
643  */
644 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
645         survive_dce_t *sd = context;
646         survive_dce_list_t *list = pmap_get(sd->places, old);
647         (void) irg;
648
649         /* If the node is to be patched back, write the new address to all registered locations. */
650         if (list) {
651                 survive_dce_list_t *p;
652
653                 for (p = list; p; p = p->next)
654                         *(p->place) = nw;
655
656                 pmap_insert(sd->new_places, nw, list);
657         }
658 }
659
660 /**
661  * Make a new Survive DCE environment.
662  */
663 survive_dce_t *new_survive_dce(void) {
664         survive_dce_t *res = XMALLOC(survive_dce_t);
665         obstack_init(&res->obst);
666         res->places     = pmap_create();
667         res->new_places = NULL;
668
669         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
670         res->dead_node_elim.context                   = res;
671         res->dead_node_elim.next                      = NULL;
672
673         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
674         res->dead_node_elim_subst.context = res;
675         res->dead_node_elim_subst.next    = NULL;
676
677         register_hook(hook_dead_node_elim, &res->dead_node_elim);
678         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
679         return res;
680 }
681
682 /**
683  * Free a Survive DCE environment.
684  */
685 void free_survive_dce(survive_dce_t *sd) {
686         obstack_free(&sd->obst, NULL);
687         pmap_destroy(sd->places);
688         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
689         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
690         xfree(sd);
691 }
692
693 /**
694  * Register a node pointer to be patched upon DCE.
695  * When DCE occurs, the node pointer specified by @p place will be
696  * patched to the new address of the node it is pointing to.
697  *
698  * @param sd    The Survive DCE environment.
699  * @param place The address of the node pointer.
700  */
701 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
702         if (*place != NULL) {
703                 ir_node *irn      = *place;
704                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
705                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
706
707                 nw->next  = curr;
708                 nw->place = place;
709
710                 pmap_insert(sd->places, irn, nw);
711         }
712 }
713
714 /*--------------------------------------------------------------------*/
715 /*  Functionality for inlining                                         */
716 /*--------------------------------------------------------------------*/
717
718 /**
719  * Copy node for inlining.  Updates attributes that change when
720  * inlining but not for dead node elimination.
721  *
722  * Copies the node by calling copy_node() and then updates the entity if
723  * it's a local one.  env must be a pointer to the frame type of the
724  * inlined procedure. The new entities must be in the link field of
725  * the entities.
726  */
727 static void copy_node_inline(ir_node *n, void *env) {
728         ir_node *nn;
729         ir_type *frame_tp = (ir_type *)env;
730
731         copy_node(n, NULL);
732         if (is_Sel(n)) {
733                 nn = get_new_node (n);
734                 assert(is_Sel(nn));
735                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
736                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
737                 }
738         } else if (is_Block(n)) {
739                 nn = get_new_node (n);
740                 nn->attr.block.irg = current_ir_graph;
741         }
742 }
743
744 /**
745  * Copies new predecessors of old node and moves constants to
746  * the Start Block.
747  */
748 static void copy_preds_inline(ir_node *n, void *env) {
749         ir_node *nn;
750
751         copy_preds(n, env);
752         nn = skip_Id(get_new_node(n));
753         if (is_irn_constlike(nn)) {
754                 /* move Constants into the start block */
755                 set_nodes_block(nn, get_irg_start_block(current_ir_graph));
756
757                 n = identify_remember(current_ir_graph->value_table, nn);
758                 if (nn != n) {
759                         DBG_OPT_CSE(nn, n);
760                         exchange(nn, n);
761                 }
762         }
763 }
764
765 /**
766  * Walker: checks if P_value_arg_base is used.
767  */
768 static void find_addr(ir_node *node, void *env) {
769         int *allow_inline = env;
770         if (is_Proj(node) &&
771                         is_Start(get_Proj_pred(node)) &&
772                         get_Proj_proj(node) == pn_Start_P_value_arg_base) {
773                 *allow_inline = 0;
774         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
775                 /* From GCC:
776                  * Refuse to inline alloca call unless user explicitly forced so as this
777                  * may change program's memory overhead drastically when the function
778                  * using alloca is called in loop.  In GCC present in SPEC2000 inlining
779                  * into schedule_block cause it to require 2GB of ram instead of 256MB.
780                  *
781                  * Sadly, this is true for our implementation as well.
782                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
783                  * disables inlining of functions using VLA (which are completely safe).
784                  *
785                  * 2 Solutions:
786                  * - add a flag to the Alloc node for "real" alloca() calls
787                  * - add a new Stack-Restore node at the end of a function using alloca()
788                  */
789                 *allow_inline = 0;
790         }
791 }
792
793 /**
794  * Check if we can inline a given call.
795  * Currently, we cannot inline two cases:
796  * - call with compound arguments
797  * - graphs that take the address of a parameter
798  *
799  * check these conditions here
800  */
801 static int can_inline(ir_node *call, ir_graph *called_graph) {
802         ir_type *call_type = get_Call_type(call);
803         int params, ress, i, res;
804         assert(is_Method_type(call_type));
805
806         params = get_method_n_params(call_type);
807         ress   = get_method_n_ress(call_type);
808
809         /* check parameters for compound arguments */
810         for (i = 0; i < params; ++i) {
811                 ir_type *p_type = get_method_param_type(call_type, i);
812
813                 if (is_compound_type(p_type))
814                         return 0;
815         }
816
817         /* check results for compound arguments */
818         for (i = 0; i < ress; ++i) {
819                 ir_type *r_type = get_method_res_type(call_type, i);
820
821                 if (is_compound_type(r_type))
822                         return 0;
823         }
824
825         res = 1;
826         irg_walk_graph(called_graph, find_addr, NULL, &res);
827
828         return res;
829 }
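/*
 * Concrete cases rejected by can_inline()/find_addr() (illustrative, derived
 * from the checks above): a call passing or returning a struct by value
 * (compound parameter/result type), a callee that accesses
 * pn_Start_P_value_arg_base (the variadic argument base), and a callee that
 * performs a stack Alloc (alloca()/VLA).
 */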
830
831 enum exc_mode {
832         exc_handler    = 0, /**< There is a handler. */
833         exc_to_end     = 1, /**< Branches to End. */
834         exc_no_handler = 2  /**< Exception handling not represented. */
835 };
836
837 /* Inlines a method at the given call site. */
838 int inline_method(ir_node *call, ir_graph *called_graph) {
839         ir_node             *pre_call;
840         ir_node             *post_call, *post_bl;
841         ir_node             *in[pn_Start_max];
842         ir_node             *end, *end_bl, *block;
843         ir_node             **res_pred;
844         ir_node             **cf_pred;
845         ir_node             **args_in;
846         ir_node             *ret, *phi;
847         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
848         enum exc_mode       exc_handling;
849         ir_type             *called_frame, *curr_frame, *mtp, *ctp;
850         ir_entity           *ent;
851         ir_graph            *rem, *irg;
852         irg_inline_property prop = get_irg_inline_property(called_graph);
853         unsigned long       visited;
854
855         if (prop == irg_inline_forbidden)
856                 return 0;
857
858         ent = get_irg_entity(called_graph);
859
860         mtp = get_entity_type(ent);
861         ctp = get_Call_type(call);
862         if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
863                 /* this is a bad feature of C: without a prototype, we can call a function with fewer
864                 parameters than needed. Currently we don't support this, although it would be
865                 possible to use Unknown then. */
866                 return 0;
867         }
868
869         /* Argh, compiling C has some bad consequences:
870            the call type AND the method type might be different.
871            It is implementation dependent what happens in that case.
872            We support inlining if the bitsize of the types matches AND
873            the same arithmetic is used. */
874         n_params = get_method_n_params(mtp);
875         for (i = n_params - 1; i >= 0; --i) {
876                 ir_type *param_tp = get_method_param_type(mtp, i);
877                 ir_type *arg_tp   = get_method_param_type(ctp, i);
878
879                 if (param_tp != arg_tp) {
880                         ir_mode *pmode = get_type_mode(param_tp);
881                         ir_mode *amode = get_type_mode(arg_tp);
882
883                         if (pmode == NULL || amode == NULL)
884                                 return 0;
885                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
886                                 return 0;
887                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
888                                 return 0;
889                         /* otherwise we can simply "reinterpret" the bits */
890                 }
891         }
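        /* Illustrative example (not part of the original comment): on a target
         * where 'int' and 'long' are both 32-bit two's-complement integers, an
         * argument typed 'long' passed for a parameter typed 'int' has matching
         * bit size and arithmetic, so the bits are simply reinterpreted and the
         * check above still permits inlining. */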
892
893         irg = get_irn_irg(call);
894
895         /*
896          * We cannot inline a recursive call. The graph must be copied before
897          * the call to inline_method() using create_irg_copy().
898          */
899         if (called_graph == irg)
900                 return 0;
901
902         /*
903          * currently, we cannot inline two cases:
904          * - call with compound arguments
905          * - graphs that take the address of a parameter
906          */
907         if (! can_inline(call, called_graph))
908                 return 0;
909
910         rem = current_ir_graph;
911         current_ir_graph = irg;
912
913         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
914
915         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
916         rem_opt = get_opt_optimize();
917         set_optimize(0);
918
919         /* Handle graph state */
920         assert(get_irg_phase_state(irg) != phase_building);
921         assert(get_irg_pinned(irg) == op_pin_state_pinned);
922         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
923         set_irg_outs_inconsistent(irg);
924         set_irg_extblk_inconsistent(irg);
925         set_irg_doms_inconsistent(irg);
926         set_irg_loopinfo_inconsistent(irg);
927         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
928
929         /* -- Check preconditions -- */
930         assert(is_Call(call));
931
932         /* here we know we WILL inline, so inform the statistics */
933         hook_inline(call, called_graph);
934
935         /* -- Decide how to handle exception control flow: Is there a handler
936            for the Call node, or do we branch directly to End on an exception?
937            exc_handling:
938            0 There is a handler.
939            1 Branches to End.
940            2 Exception handling not represented in Firm. -- */
941         {
942                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
943                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
944                         long proj_nr = get_Proj_proj(proj);
945                         if (proj_nr == pn_Call_X_except) Xproj = proj;
946                         if (proj_nr == pn_Call_M_except) Mproj = proj;
947                 }
948                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
949                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
950                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
951         }
952
953         /* create the argument tuple */
954         NEW_ARR_A(ir_node *, args_in, n_params);
955
956         block = get_nodes_block(call);
957         for (i = n_params - 1; i >= 0; --i) {
958                 ir_node *arg      = get_Call_param(call, i);
959                 ir_type *param_tp = get_method_param_type(mtp, i);
960                 ir_mode *mode     = get_type_mode(param_tp);
961
962                 if (mode != get_irn_mode(arg)) {
963                         arg = new_r_Conv(irg, block, arg, mode);
964                 }
965                 args_in[i] = arg;
966         }
967
968         /* --
969            Pre_call is a Tuple that mirrors the outputs of the Start node of the
970            called graph and later replaces that Start node.  Post_call is the old Call
971            node and collects the results of the called graph.  Both will end up being a tuple.  -- */
972         post_bl = get_nodes_block(call);
973         set_irg_current_block(irg, post_bl);
974         /* XxMxPxPxPxT of Start + parameter of Call */
975         in[pn_Start_X_initial_exec]   = new_Jmp();
976         in[pn_Start_M]                = get_Call_mem(call);
977         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
978         in[pn_Start_P_tls]            = get_irg_tls(irg);
979         in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
980         /* in[pn_Start_P_value_arg_base] = ??? */
981         assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
982         pre_call = new_Tuple(pn_Start_max - 1, in);
983         post_call = call;
984
985         /* --
986            The new block gets the ins of the old block, pre_call and all its
987            predecessors and all Phi nodes. -- */
988         part_block(pre_call);
989
990         /* -- Prepare state for dead node elimination -- */
991         /* Visited flags in calling irg must be >= flag in called irg.
992            Else walker and arity computation will not work. */
993         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
994                 set_irg_visited(irg, get_irg_visited(called_graph) + 1);
995         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
996                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
997         visited = get_irg_visited(irg);
998
999         /* Set pre_call as new Start node in link field of the start node of
1000            the called graph and pre_call's block as new block for the start block
1001            of the called graph.
1002            Further mark these nodes so that they are not visited by the
1003            copying. */
1004         set_irn_link(get_irg_start(called_graph), pre_call);
1005         set_irn_visited(get_irg_start(called_graph), visited);
1006         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
1007         set_irn_visited(get_irg_start_block(called_graph), visited);
1008
1009         set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
1010         set_irn_visited(get_irg_bad(called_graph), visited);
1011
1012         set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
1013         set_irn_visited(get_irg_no_mem(called_graph), visited);
1014
1015         /* Initialize for compaction of in arrays */
1016         inc_irg_block_visited(irg);
1017
1018         /* -- Replicate local entities of the called_graph -- */
1019         /* copy the entities. */
1020         irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1021         called_frame = get_irg_frame_type(called_graph);
1022         curr_frame   = get_irg_frame_type(irg);
1023         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
1024                 ir_entity *new_ent, *old_ent;
1025                 old_ent = get_class_member(called_frame, i);
1026                 new_ent = copy_entity_own(old_ent, curr_frame);
1027                 set_entity_link(old_ent, new_ent);
1028         }
1029
1030         /* visited is greater than that of the called graph.  With this trick, visited will
1031            remain unchanged so that an outer walker (e.g. one searching for the call
1032            nodes to inline) will not visit the inlined nodes. */
1033         set_irg_visited(irg, get_irg_visited(irg)-1);
1034
1035         /* -- Performing dead node elimination inlines the graph -- */
1036         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
1037            entities. */
1038         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
1039                  get_irg_frame_type(called_graph));
1040
1041         irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1042
1043         /* Repair called_graph */
1044         set_irg_visited(called_graph, get_irg_visited(irg));
1045         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
1046         set_Block_block_visited(get_irg_start_block(called_graph), 0);
1047
1048         /* -- Merge the end of the inlined procedure with the call site -- */
1049         /* We will turn the old Call node into a Tuple with the following
1050            predecessors:
1051            -1:  Block of Tuple.
1052            0: Phi of all Memories of Return statements.
1053            1: Jmp from new Block that merges the control flow from all exception
1054            predecessors of the old end block.
1055            2: Tuple of all results of the method.
1056            3: Phi of Exception memories.
1057            In case the old Call directly branches to End on an exception we don't
1058            need the block merging all exceptions nor the Phi of the exception
1059            memories.
1060         */
1061
1062         /* -- Precompute some values -- */
1063         end_bl = get_new_node(get_irg_end_block(called_graph));
1064         end = get_new_node(get_irg_end(called_graph));
1065         arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
1066         n_res = get_method_n_ress(get_Call_type(call));
1067
1068         res_pred = XMALLOCN(ir_node*, n_res);
1069         cf_pred  = XMALLOCN(ir_node*, arity);
1070
1071         set_irg_current_block(irg, post_bl); /* just to make sure */
1072
1073         /* -- archive keepalives -- */
1074         irn_arity = get_irn_arity(end);
1075         for (i = 0; i < irn_arity; i++) {
1076                 ir_node *ka = get_End_keepalive(end, i);
1077                 if (! is_Bad(ka))
1078                         add_End_keepalive(get_irg_end(irg), ka);
1079         }
1080
1081         /* The new end node will die.  We need not free as the in array is on the obstack:
1082            copy_node() only generated 'D' arrays. */
1083
1084         /* -- Replace Return nodes by Jump nodes. -- */
1085         n_ret = 0;
1086         for (i = 0; i < arity; i++) {
1087                 ir_node *ret;
1088                 ret = get_Block_cfgpred(end_bl, i);
1089                 if (is_Return(ret)) {
1090                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1091                         n_ret++;
1092                 }
1093         }
1094         set_irn_in(post_bl, n_ret, cf_pred);
1095
1096         /* -- Build a Tuple for all results of the method.
1097            Add Phi node if there was more than one Return.  -- */
1098         turn_into_tuple(post_call, pn_Call_max);
1099         /* First the Memory-Phi */
1100         n_ret = 0;
1101         for (i = 0; i < arity; i++) {
1102                 ret = get_Block_cfgpred(end_bl, i);
1103                 if (is_Return(ret)) {
1104                         cf_pred[n_ret] = get_Return_mem(ret);
1105                         n_ret++;
1106                 }
1107         }
1108         phi = new_Phi(n_ret, cf_pred, mode_M);
1109         set_Tuple_pred(call, pn_Call_M_regular, phi);
1110         /* Conserve Phi-list for further inlinings -- but might be optimized */
1111         if (get_nodes_block(phi) == post_bl) {
1112                 set_irn_link(phi, get_irn_link(post_bl));
1113                 set_irn_link(post_bl, phi);
1114         }
1115         /* Now the real results */
1116         if (n_res > 0) {
1117                 for (j = 0; j < n_res; j++) {
1118                         n_ret = 0;
1119                         for (i = 0; i < arity; i++) {
1120                                 ret = get_Block_cfgpred(end_bl, i);
1121                                 if (is_Return(ret)) {
1122                                         cf_pred[n_ret] = get_Return_res(ret, j);
1123                                         n_ret++;
1124                                 }
1125                         }
1126                         if (n_ret > 0)
1127                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1128                         else
1129                                 phi = new_Bad();
1130                         res_pred[j] = phi;
1131                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1132                         if (get_nodes_block(phi) == post_bl) {
1133                                 set_Phi_next(phi, get_Block_phis(post_bl));
1134                                 set_Block_phis(post_bl, phi);
1135                         }
1136                 }
1137                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1138         } else {
1139                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1140         }
1141         /* handle the regular call */
1142         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1143
1144         /* For now, we cannot inline calls with value_base */
1145         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1146
1147         /* Finally the exception control flow.
1148            We have two (three) possible situations:
1149            First if the Call branches to an exception handler: We need to add a Phi node to
1150            collect the memory containing the exception objects.  Further we need
1151            to add another block to get a correct representation of this Phi.  To
1152            this block we add a Jmp that resolves into the X output of the Call
1153            when the Call is turned into a tuple.
1154            Second the Call branches to End, the exception is not handled.  Just
1155            add all inlined exception branches to the End node.
1156            Third: there is no Exception edge at all. Handle as case two. */
1157         if (exc_handling == exc_handler) {
1158                 n_exc = 0;
1159                 for (i = 0; i < arity; i++) {
1160                         ir_node *ret, *irn;
1161                         ret = get_Block_cfgpred(end_bl, i);
1162                         irn = skip_Proj(ret);
1163                         if (is_fragile_op(irn) || is_Raise(irn)) {
1164                                 cf_pred[n_exc] = ret;
1165                                 ++n_exc;
1166                         }
1167                 }
1168                 if (n_exc > 0) {
1169                         new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
1170                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1171                         /* The Phi for the memories with the exception objects */
1172                         n_exc = 0;
1173                         for (i = 0; i < arity; i++) {
1174                                 ir_node *ret;
1175                                 ret = skip_Proj(get_Block_cfgpred(end_bl, i));
1176                                 if (is_Call(ret)) {
1177                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1178                                         n_exc++;
1179                                 } else if (is_fragile_op(ret)) {
1180                                         /* We rely on all cfops having the memory output at the same position. */
1181                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1182                                         n_exc++;
1183                                 } else if (is_Raise(ret)) {
1184                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1185                                         n_exc++;
1186                                 }
1187                         }
1188                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1189                 } else {
1190                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1191                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1192                 }
1193         } else {
1194                 ir_node *main_end_bl;
1195                 int main_end_bl_arity;
1196                 ir_node **end_preds;
1197
1198                 /* assert(exc_handling == 1 || no exceptions. ) */
1199                 n_exc = 0;
1200                 for (i = 0; i < arity; i++) {
1201                         ir_node *ret = get_Block_cfgpred(end_bl, i);
1202                         ir_node *irn = skip_Proj(ret);
1203
1204                         if (is_fragile_op(irn) || is_Raise(irn)) {
1205                                 cf_pred[n_exc] = ret;
1206                                 n_exc++;
1207                         }
1208                 }
1209                 main_end_bl       = get_irg_end_block(irg);
1210                 main_end_bl_arity = get_irn_arity(main_end_bl);
1211                 end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
1212
1213                 for (i = 0; i < main_end_bl_arity; ++i)
1214                         end_preds[i] = get_irn_n(main_end_bl, i);
1215                 for (i = 0; i < n_exc; ++i)
1216                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1217                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1218                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1219                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1220                 free(end_preds);
1221         }
1222         free(res_pred);
1223         free(cf_pred);
1224
1225         /* --  Turn CSE back on. -- */
1226         set_optimize(rem_opt);
1227         current_ir_graph = rem;
1228
1229         return 1;
1230 }
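/*
 * Usage note (a sketch, not a prescription): callers must have collected the
 * Proj nodes of the Call beforehand, since inline_method() walks them via the
 * link field; inline_small_irgs() below does exactly that:
 *
 *   collect_phiprojs(irg);
 *   if (inline_method(call, callee))
 *           ...  // the Call was replaced by the callee's body
 */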
1231
1232 /********************************************************************/
1233 /* Apply inlining to small methods.                                 */
1234 /********************************************************************/
1235
1236 static struct obstack  temp_obst;
1237
1238 /** Represents a possible inlinable call in a graph. */
1239 typedef struct _call_entry {
1240         ir_node    *call;       /**< The Call node. */
1241         ir_graph   *callee;     /**< The callee IR-graph. */
1242         list_head  list;        /**< List head for linking the next one. */
1243         int        loop_depth;  /**< The loop depth of this call. */
1244         int        benefice;    /**< The calculated benefice of this call. */
1245         unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
1246         unsigned   all_const:1; /**< Set if this call has only constant parameters. */
1247 } call_entry;
1248
1249 /**
1250  * environment for inlining small irgs
1251  */
1252 typedef struct _inline_env_t {
1253         struct obstack obst;  /**< An obstack where call_entries are allocated on. */
1254         list_head      calls; /**< The call entry list. */
1255 } inline_env_t;
1256
1257 /**
1258  * Returns the irg called from a Call node. If the irg is not
1259  * known, NULL is returned.
1260  *
1261  * @param call  the call node
1262  */
1263 static ir_graph *get_call_called_irg(ir_node *call) {
1264         ir_node *addr;
1265
1266         addr = get_Call_ptr(call);
1267         if (is_Global(addr)) {
1268                 ir_entity *ent = get_Global_entity(addr);
1269                 return get_entity_irg(ent);
1270         }
1271
1272         return NULL;
1273 }
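/*
 * For example (illustrative): a direct call whose address operand references
 * the entity of a locally defined function yields that function's ir_graph
 * here, whereas an indirect call through a function pointer yields NULL and
 * is therefore never considered for inlining.
 */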
1274
1275 /**
1276  * Walker: Collect all calls to known graphs inside a graph.
1277  */
1278 static void collect_calls(ir_node *call, void *env) {
1279         (void) env;
1280         if (is_Call(call)) {
1281                 ir_graph *called_irg = get_call_called_irg(call);
1282
1283                 if (called_irg != NULL) {
1284                         /* The Call node calls a locally defined method.  Remember to inline. */
1285                         inline_env_t *ienv  = env;
1286                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1287                         entry->call       = call;
1288                         entry->callee     = called_irg;
1289                         entry->loop_depth = 0;
1290                         entry->benefice   = 0;
1291                         entry->local_adr  = 0;
1292                         entry->all_const  = 0;
1293
1294                         list_add_tail(&entry->list, &ienv->calls);
1295                 }
1296         }
1297 }
1298
1299 /**
1300  * Inlines all small methods at call sites where the called address comes
1301  * from a Const node that references the entity representing the called
1302  * method.
1303  * The size argument is a rough measure for the code size of the method:
1304  * Methods where the obstack containing the firm graph is smaller than
1305  * size are inlined.
1306  */
1307 void inline_small_irgs(ir_graph *irg, int size) {
1308         ir_graph *rem = current_ir_graph;
1309         inline_env_t env;
1310         call_entry *entry;
1311
1312         current_ir_graph = irg;
1313         /* Handle graph state */
1314         assert(get_irg_phase_state(irg) != phase_building);
1315         free_callee_info(irg);
1316
1317         /* Find Call nodes to inline.
1318            (We cannot inline during a walk of the graph, as inlining the same
1319            method several times changes the visited flag of the walked graph:
1320            after the first inlining the visited counter of the callee equals that
1321            of the caller; with the next inlining both are increased.) */
1322         obstack_init(&env.obst);
1323         INIT_LIST_HEAD(&env.calls);
1324         irg_walk_graph(irg, NULL, collect_calls, &env);
1325
1326         if (! list_empty(&env.calls)) {
1327                 /* There are calls to inline */
1328                 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1329                 collect_phiprojs(irg);
1330
1331                 list_for_each_entry(call_entry, entry, &env.calls, list) {
1332                         ir_graph            *callee = entry->callee;
1333                         irg_inline_property prop    = get_irg_inline_property(callee);
1334
1335                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1336                                 /* do not inline forbidden / weak graphs */
1337                                 continue;
1338                         }
1339
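                        /* Size heuristic: the obstack memory used by the callee's graph
                         * minus the room still free on its current chunk roughly gives
                         * the number of bytes the graph occupies; callees below `size'
                         * bytes (or forced ones) are inlined. */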
1340                         if (prop >= irg_inline_forced ||
1341                             _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
1342                                 inline_method(entry->call, callee);
1343                         }
1344                 }
1345                 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1346         }
1347         obstack_free(&env.obst, NULL);
1348         current_ir_graph = rem;
1349 }
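
/* A minimal usage sketch (the threshold value is purely illustrative, not a
 * tuned default): run the small-method inliner over every graph of the
 * current program:
 *
 *     int i;
 *     for (i = 0; i < get_irp_n_irgs(); ++i)
 *         inline_small_irgs(get_irp_irg(i), 2000);
 */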
1350
1351 /**
1352  * Environment for inlining irgs.
1353  */
1354 typedef struct {
1355         list_head calls;             /**< List of all call nodes in this graph. */
1356         unsigned  *local_weights;    /**< Once allocated, the beneficial weight for transmitting local addresses. */
1357         unsigned  n_nodes;           /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
1358         unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
1359         unsigned  n_nodes_orig;      /**< for statistics */
1360         unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
1361         unsigned  n_call_nodes_orig; /**< for statistics */
1362         unsigned  n_callers;         /**< Number of known graphs that call this graph. */
1363         unsigned  n_callers_orig;    /**< for statistics */
1364         unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
1365         unsigned  local_vars:1;      /**< Set, if an inlined function got the address of a local variable. */
1366         unsigned  recursive:1;       /**< Set, if this function is self recursive. */
1367 } inline_irg_env;
1368
1369 /**
1370  * Allocate a new environment for inlining.
1371  */
1372 static inline_irg_env *alloc_inline_irg_env(void) {
1373         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1374         INIT_LIST_HEAD(&env->calls);
1375         env->local_weights     = NULL;
1376         env->n_nodes           = -2; /* do not count Start, End */
1377         env->n_blocks          = -2; /* do not count the Start and End blocks */
1378         env->n_nodes_orig      = -2; /* do not count Start, End */
1379         env->n_call_nodes      = 0;
1380         env->n_call_nodes_orig = 0;
1381         env->n_callers         = 0;
1382         env->n_callers_orig    = 0;
1383         env->got_inline        = 0;
1384         env->local_vars        = 0;
1385         env->recursive         = 0;
1386         return env;
1387 }
1388
1389 typedef struct walker_env {
1390         inline_irg_env *x;     /**< the inline environment */
1391         char ignore_runtime;   /**< the ignore runtime flag */
1392         char ignore_callers;   /**< if set, do not update the callers' data */
1393 } wenv_t;
1394
1395 /**
1396  * post-walker: collect all calls in the inline-environment
1397  * of a graph and sum some statistics.
1398  */
1399 static void collect_calls2(ir_node *call, void *ctx) {
1400         wenv_t         *env = ctx;
1401         inline_irg_env *x = env->x;
1402         ir_opcode      code = get_irn_opcode(call);
1403         ir_graph       *callee;
1404         call_entry     *entry;
1405
1406         /* count meaningful nodes in irg */
1407         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1408                 if (code != iro_Block) {
1409                         ++x->n_nodes;
1410                         ++x->n_nodes_orig;
1411                 } else {
1412                         ++x->n_blocks;
1413                 }
1414         }
1415
1416         if (code != iro_Call) return;
1417
1418         /* check whether this is a runtime call */
1419         if (env->ignore_runtime) {
1420                 ir_node *symc = get_Call_ptr(call);
1421
1422                 if (is_Global(symc)) {
1423                         ir_entity *ent = get_Global_entity(symc);
1424
1425                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1426                                 return;
1427                 }
1428         }
1429
1430         /* collect all call nodes */
1431         ++x->n_call_nodes;
1432         ++x->n_call_nodes_orig;
1433
1434         callee = get_call_called_irg(call);
1435         if (callee != NULL) {
1436                 if (! env->ignore_callers) {
1437                         inline_irg_env *callee_env = get_irg_link(callee);
1438                         /* count all static callers */
1439                         ++callee_env->n_callers;
1440                         ++callee_env->n_callers_orig;
1441                 }
1442                 if (callee == current_ir_graph)
1443                         x->recursive = 1;
1444
1445                 /* link it into the list of possibly inlinable entries */
1446                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1447                 entry->call       = call;
1448                 entry->callee     = callee;
1449                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
1450                 entry->benefice   = 0;
1451                 entry->local_adr  = 0;
1452                 entry->all_const  = 0;
1453
1454                 list_add_tail(&entry->list, &x->calls);
1455         }
1456 }
1457
1458 /**
1459  * Returns TRUE if the call-node count in the irg's environment is 0,
1460  * hence this irg is a leave (it calls no other graph).
1461  */
1462 inline static int is_leave(ir_graph *irg) {
1463         inline_irg_env *env = get_irg_link(irg);
1464         return env->n_call_nodes == 0;
1465 }
1466
1467 /**
1468  * Returns TRUE if the number of nodes in the callee is
1469  * smaller than size in the irg's environment.
1470  */
1471 inline static int is_smaller(ir_graph *callee, unsigned size) {
1472         inline_irg_env *env = get_irg_link(callee);
1473         return env->n_nodes < size;
1474 }
1475
1476 /**
1477  * Duplicate a call entry.
1478  *
1479  * @param entry     the original entry to duplicate
1480  * @param new_call  the new call node
1481  * @param loop_depth_delta
1482  *                  delta value for the loop depth
1483  */
1484 static call_entry *duplicate_call_entry(const call_entry *entry,
1485                                         ir_node *new_call, int loop_depth_delta) {
1486         call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1487         nentry->call       = new_call;
1488         nentry->callee     = entry->callee;
1489         nentry->benefice   = entry->benefice;
1490         nentry->loop_depth = entry->loop_depth + loop_depth_delta;
1491         nentry->local_adr  = entry->local_adr;
1492         nentry->all_const  = entry->all_const;
1493
1494         return nentry;
1495 }
1496
1497 /**
1498  * Append all call nodes of the source environment to the call list of the destination
1499  * environment.
1500  *
1501  * @param dst         destination environment
1502  * @param src         source environment
1503  * @param loop_depth  the loop depth of the call that is replaced by the src list
1504  */
1505 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
1506         call_entry *entry, *nentry;
1507
1508         /* Note that the src list points to Call nodes in the inlined graph, but
1509            we need Call nodes in our graph. Luckily the inliner leaves this information
1510            in the link field. */
1511         list_for_each_entry(call_entry, entry, &src->calls, list) {
1512                 nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
1513                 list_add_tail(&nentry->list, &dst->calls);
1514         }
1515         dst->n_call_nodes += src->n_call_nodes;
1516         dst->n_nodes      += src->n_nodes;
1517 }
1518
1519 /*
1520  * Inlines small leave methods at call sites where the called address comes
1521  * from a Const node that references the entity representing the called
1522  * method.
1523  * The size argument is a rough measure for the code size of the method:
1524  * Methods where the obstack containing the firm graph is smaller than
1525  * size are inlined.
1526  */
1527 void inline_leave_functions(unsigned maxsize, unsigned leavesize,
1528                             unsigned size, int ignore_runtime)
1529 {
1530         inline_irg_env   *env;
1531         ir_graph         *irg;
1532         int              i, n_irgs;
1533         ir_graph         *rem;
1534         int              did_inline;
1535         wenv_t           wenv;
1536         call_entry       *entry, *next;
1537         const call_entry *centry;
1538         pmap             *copied_graphs;
1539         pmap_entry       *pm_entry;
1540
1541         rem = current_ir_graph;
1542         obstack_init(&temp_obst);
1543
1544         /* a map for the copied graphs, used to inline recursive calls */
1545         copied_graphs = pmap_create();
1546
1547         /* extend all irgs by a temporary data structure for inlining. */
1548         n_irgs = get_irp_n_irgs();
1549         for (i = 0; i < n_irgs; ++i)
1550                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1551
1552         /* Pre-compute information in temporary data structure. */
1553         wenv.ignore_runtime = ignore_runtime;
1554         wenv.ignore_callers = 0;
1555         for (i = 0; i < n_irgs; ++i) {
1556                 ir_graph *irg = get_irp_irg(i);
1557
1558                 assert(get_irg_phase_state(irg) != phase_building);
1559                 free_callee_info(irg);
1560
1561                 assure_cf_loop(irg);
1562                 wenv.x = get_irg_link(irg);
1563                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1564         }
1565
1566         /* -- and now inline. -- */
1567
1568         /* Inline leaves recursively -- we might construct new leaves. */
1569         do {
1570                 did_inline = 0;
1571
1572                 for (i = 0; i < n_irgs; ++i) {
1573                         ir_node *call;
1574                         int phiproj_computed = 0;
1575
1576                         current_ir_graph = get_irp_irg(i);
1577                         env              = get_irg_link(current_ir_graph);
1578
1579                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1580                         list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1581                                 ir_graph            *callee;
1582                                 irg_inline_property  prop;
1583
1584                                 if (env->n_nodes > maxsize)
1585                                         break;
1586
1587                                 call   = entry->call;
1588                                 callee = entry->callee;
1589
1590                                 prop = get_irg_inline_property(callee);
1591                                 if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1592                                         /* do not inline forbidden / weak graphs */
1593                                         continue;
1594                                 }
1595
1596                                 if (is_leave(callee) && (
1597                                     is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
1598                                         if (!phiproj_computed) {
1599                                                 phiproj_computed = 1;
1600                                                 collect_phiprojs(current_ir_graph);
1601                                         }
1602                                         did_inline = inline_method(call, callee);
1603
1604                                         if (did_inline) {
1605                                                 inline_irg_env *callee_env = get_irg_link(callee);
1606
1607                                                 /* call was inlined, Phi/Projs for current graph must be recomputed */
1608                                                 phiproj_computed = 0;
1609
1610                                                 /* Do some statistics */
1611                                                 env->got_inline = 1;
1612                                                 --env->n_call_nodes;
1613                                                 env->n_nodes += callee_env->n_nodes;
1614                                                 --callee_env->n_callers;
1615
1616                                                 /* remove this call from the list */
1617                                                 list_del(&entry->list);
1618                                                 continue;
1619                                         }
1620                                 }
1621                         }
1622                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1623                 }
1624         } while (did_inline);
1625
1626         /* inline other small functions. */
1627         for (i = 0; i < n_irgs; ++i) {
1628                 ir_node *call;
1629                 int phiproj_computed = 0;
1630
1631                 current_ir_graph = get_irp_irg(i);
1632                 env              = get_irg_link(current_ir_graph);
1633
1634                 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1635
1636                 /* note that the list of possible calls is updated during the process */
1637                 list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1638                         irg_inline_property prop;
1639                         ir_graph            *callee;
1640                         pmap_entry          *e;
1641
1642                         call   = entry->call;
1643                         callee = entry->callee;
1644
1645                         prop = get_irg_inline_property(callee);
1646                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1647                                 /* do not inline forbidden / weak graphs */
1648                                 continue;
1649                         }
1650
1651                         e = pmap_find(copied_graphs, callee);
1652                         if (e != NULL) {
1653                                 /*
1654                                  * Remap callee if we have a copy.
1655                                  * FIXME: Should we do this only for recursive Calls ?
1656                                  */
1657                                 callee = e->value;
1658                         }
1659
1660                         if (prop >= irg_inline_forced ||
1661                             (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
1662                                 if (current_ir_graph == callee) {
1663                                         /*
1664                                          * Recursive call: we cannot directly inline because we cannot walk
1665                                          * the graph and change it. So we have to make a copy of the graph
1666                                          * first.
1667                                          */
1668
1669                                         inline_irg_env *callee_env;
1670                                         ir_graph       *copy;
1671
1672                                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1673
1674                                         /*
1675                                          * No copy yet, create one.
1676                                          * Note that recursive methods are never leaves, so it is sufficient
1677                                          * to test this condition here.
1678                                          */
1679                                         copy = create_irg_copy(callee);
1680
1681                                         /* create_irg_copy() destroys the Proj links, recompute them */
1682                                         phiproj_computed = 0;
1683
1684                                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1685
1686                                         /* allocate new environment */
1687                                         callee_env = alloc_inline_irg_env();
1688                                         set_irg_link(copy, callee_env);
1689
1690                                         assure_cf_loop(copy);
1691                                         wenv.x              = callee_env;
1692                                         wenv.ignore_callers = 1;
1693                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1694
1695                                         /*
1696                                          * Enter the entity of the original graph. This is needed
1697                                          * for inline_method(). However, note that ent->irg still points
1698                                          * to callee, NOT to copy.
1699                                          */
1700                                         set_irg_entity(copy, get_irg_entity(callee));
1701
1702                                         pmap_insert(copied_graphs, callee, copy);
1703                                         callee = copy;
1704
1705                                         /* we have only one caller: the original graph */
1706                                         callee_env->n_callers      = 1;
1707                                         callee_env->n_callers_orig = 1;
1708                                 }
1709                                 if (! phiproj_computed) {
1710                                         phiproj_computed = 1;
1711                                         collect_phiprojs(current_ir_graph);
1712                                 }
1713                                 did_inline = inline_method(call, callee);
1714                                 if (did_inline) {
1715                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1716
1717                                         /* call was inlined, Phi/Projs for current graph must be recomputed */
1718                                         phiproj_computed = 0;
1719
1720                                         /* callee was inlined. Append its call list. */
1721                                         env->got_inline = 1;
1722                                         --env->n_call_nodes;
1723                                         append_call_list(env, callee_env, entry->loop_depth);
1724                                         --callee_env->n_callers;
1725
1726                                         /* after we have inlined callee, all called methods inside callee
1727                                            are now called once more */
1728                                         list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
1729                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1730                                                 ++penv->n_callers;
1731                                         }
1732
1733                                         /* remove this call from the list */
1734                                         list_del(&entry->list);
1735                                         continue;
1736                                 }
1737                         }
1738                 }
1739                 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1740         }
1741
1742         for (i = 0; i < n_irgs; ++i) {
1743                 irg = get_irp_irg(i);
1744                 env = get_irg_link(irg);
1745
1746                 if (env->got_inline) {
1747                         optimize_graph_df(irg);
1748                         optimize_cf(irg);
1749                 }
1750                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1751                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1752                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1753                         env->n_callers_orig, env->n_callers,
1754                         get_entity_name(get_irg_entity(irg))));
1755                 }
1756         }
1757
1758         /* kill the copied graphs: we don't need them anymore */
1759         foreach_pmap(copied_graphs, pm_entry) {
1760                 ir_graph *copy = pm_entry->value;
1761
1762                 /* reset the entity, otherwise it will be deleted in the next step ... */
1763                 set_irg_entity(copy, NULL);
1764                 free_ir_graph(copy);
1765         }
1766         pmap_destroy(copied_graphs);
1767
1768         obstack_free(&temp_obst, NULL);
1769         current_ir_graph = rem;
1770 }
1771
1772 /**
1773  * Calculate the parameter weights for transmitting the address of a local variable.
1774  */
1775 static unsigned calc_method_local_weight(ir_node *arg) {
1776         int      i, j, k;
1777         unsigned v, weight = 0;
1778
1779         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1780                 ir_node *succ = get_irn_out(arg, i);
1781
1782                 switch (get_irn_opcode(succ)) {
1783                 case iro_Load:
1784                 case iro_Store:
1785                         /* Loads and Stores can be removed */
1786                         weight += 3;
1787                         break;
1788                 case iro_Sel:
1789                         /* check if all args are constant */
1790                         /* check if all indexes are constant */
1791                                 ir_node *idx = get_Sel_index(succ, j);
1792                                 if (! is_Const(idx))
1793                                         return 0;
1794                         }
1795                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1796                            some unsupported node. */
1797                         v = calc_method_local_weight(succ);
1798                         if (v == 0)
1799                                 return 0;
1800                         /* we can kill one Sel with constant indexes, this is cheap */
1801                         weight += v + 1;
1802                         break;
1803                 case iro_Id:
1804                         /* when looking backward we might find Id nodes */
1805                         weight += calc_method_local_weight(succ);
1806                         break;
1807                 case iro_Tuple:
1808                         /* unoptimized tuple */
1809                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1810                                 ir_node *pred = get_Tuple_pred(succ, j);
1811                                 if (pred == arg) {
1812                                         /* look for Proj(j) */
1813                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1814                                                 ir_node *succ_succ = get_irn_out(succ, k);
1815                                                 if (is_Proj(succ_succ)) {
1816                                                         if (get_Proj_proj(succ_succ) == j) {
1817                                                                 /* found */
1818                                                                 weight += calc_method_local_weight(succ_succ);
1819                                                         }
1820                                                 } else {
1821                                                         /* this should NOT happen */
1822                                                         return 0;
1823                                                 }
1824                                         }
1825                                 }
1826                         }
1827                         break;
1828                 default:
1829                         /* any other node: unsupported yet or bad. */
1830                         return 0;
1831                 }
1832         }
1833         return weight;
1834 }
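
/* Illustrative example (hypothetical argument): if the only user of the
 * argument is a Sel with constant indexes, and that Sel in turn feeds one Load
 * and one Store, the recursion yields 3 + 3 = 6 for the memory accesses plus 1
 * for the removable Sel, i.e. a weight of 7.  Any unsupported user makes the
 * whole weight collapse to 0. */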
1835
1836 /**
1837  * Calculate the parameter weights for transmitting the address of a local variable.
1838  */
1839 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1840         ir_entity *ent = get_irg_entity(irg);
1841         ir_type  *mtp;
1842         int      nparams, i, proj_nr;
1843         ir_node  *irg_args, *arg;
1844
1845         mtp      = get_entity_type(ent);
1846         nparams  = get_method_n_params(mtp);
1847
1848         /* allocate a new array; its presence also serves as the 'analysed' flag */
1849         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1850
1851         /* If the method has no parameters we have nothing to do. */
1852         if (nparams <= 0)
1853                 return;
1854
1855         assure_irg_outs(irg);
1856         irg_args = get_irg_args(irg);
1857         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1858                 arg     = get_irn_out(irg_args, i);
1859                 proj_nr = get_Proj_proj(arg);
1860                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1861         }
1862 }
1863
1864 /**
1865  * Calculate the benefice for transmitting a local variable address.
1866  * After inlining, the local variable might be transformed into a
1867  * SSA variable by scalar_replacement().
1868  */
1869 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1870         inline_irg_env *env = get_irg_link(callee);
1871
1872         if (env->local_weights != NULL) {
1873                 if (pos < ARR_LEN(env->local_weights))
1874                         return env->local_weights[pos];
1875                 return 0;
1876         }
1877
1878         analyze_irg_local_weights(env, callee);
1879
1880         if (pos < ARR_LEN(env->local_weights))
1881                 return env->local_weights[pos];
1882         return 0;
1883 }
1884
1885 /**
1886  * Calculate a benefice value for inlining the given call.
1887  *
1888  * @param entry      the call entry whose Call node we inspect
1889  * @param callee     the called graph
1890  */
1891 static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
1892 {
1893         ir_node   *call = entry->call;
1894         ir_entity *ent  = get_irg_entity(callee);
1895         ir_node   *frame_ptr;
1896         ir_type   *mtp;
1897         int       weight = 0;
1898         int       i, n_params, all_const;
1899         unsigned  cc, v;
1900         irg_inline_property prop;
1901
1902         inline_irg_env *callee_env;
1903
1904         prop = get_irg_inline_property(callee);
1905         if (prop == irg_inline_forbidden) {
1906                 DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
1907                     call, callee));
1908                 return entry->benefice = INT_MIN;
1909         }
1910
1911         if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
1912                 DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
1913                     call, callee));
1914                 return entry->benefice = INT_MIN;
1915         }
1916
1917         /* costs for every passed parameter */
1918         n_params = get_Call_n_params(call);
1919         mtp      = get_entity_type(ent);
1920         cc       = get_method_calling_convention(mtp);
1921         if (cc & cc_reg_param) {
1922                 /* register parameter, smaller costs for register parameters */
1923                 int max_regs = cc & ~cc_bits;
1924
1925                 if (max_regs < n_params)
1926                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1927                 else
1928                         weight += n_params * 2;
1929         } else {
1930                 /* parameters are passed on the stack */
1931                 weight += 5 * n_params;
1932         }
1933
1934         /* constant parameters improve the benefice */
1935         frame_ptr = get_irg_frame(current_ir_graph);
1936         all_const = 1;
1937         for (i = 0; i < n_params; ++i) {
1938                 ir_node *param = get_Call_param(call, i);
1939
1940                 if (is_Const(param)) {
1941                         weight += get_method_param_weight(ent, i);
1942                 } else {
1943                         all_const = 0;
1944                         if (is_SymConst(param))
1945                                 weight += get_method_param_weight(ent, i);
1946                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1947                                 /*
1948                                  * An address of a local variable is transmitted. After
1949                                  * inlining, scalar_replacement might be able to remove the
1950                                  * local variable, so honor this.
1951                                  */
1952                                 v = get_method_local_adress_weight(callee, i);
1953                                 weight += v;
1954                                 if (v > 0)
1955                                         entry->local_adr = 1;
1956                         }
1957                 }
1958         }
1959         entry->all_const = all_const;
1960
1961         callee_env = get_irg_link(callee);
1962         if (callee_env->n_callers == 1 &&
1963             callee != current_ir_graph &&
1964                 get_entity_visibility(ent) == visibility_local) {
1965                 weight += 700;
1966         }
1967
1968         /* give a bonus for functions with one block */
1969         if (callee_env->n_blocks == 1)
1970                 weight = weight * 3 / 2;
1971
1972         /* and one for small non-recursive functions: we want them to be inlined in almost every case */
1973         if (callee_env->n_nodes < 30 && !callee_env->recursive)
1974                 weight += 2000;
1975
1976         /* and finally for leaves: they do not increase the register pressure
1977            because of callee-save registers */
1978         if (callee_env->n_call_nodes == 0)
1979                 weight += 400;
1980
1981         /* it is important to inline calls in inner loops first */
1982         if (entry->loop_depth > 30)
1983                 weight += 30 * 1024;
1984         else
1985                 weight += entry->loop_depth * 1024;
1986
1987         /*
1988          * All arguments constant is probably a good sign, give an extra bonus
1989          */
1990         if (all_const)
1991                 weight += 1024;
1992
1993         return entry->benefice = weight;
1994 }
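
/* Illustrative walk-through (hypothetical call; the get_method_param_weight()
 * contributions are assumed to be 0): a call with 3 constant register
 * parameters to a single-block, non-recursive leave of fewer than 30 nodes
 * that has several callers and sits at loop depth 1 accumulates
 *
 *     3 * 2           =    6   (register parameters)
 *     6 * 3 / 2       =    9   (single-block bonus)
 *     9 + 2000        = 2009   (small non-recursive function)
 *     2009 + 400      = 2409   (leave: no extra register pressure)
 *     2409 + 1 * 1024 = 3433   (loop depth)
 *     3433 + 1024     = 4457   (all parameters constant)
 *
 * i.e. a benefice of 4457. */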
1995
1996 static ir_graph **irgs;
1997 static int      last_irg;
1998
1999 /**
2000  * Callgraph walker, collect all visited graphs.
2001  */
2002 static void callgraph_walker(ir_graph *irg, void *data) {
2003         (void) data;
2004         irgs[last_irg++] = irg;
2005 }
2006
2007 /**
2008  * Creates an inline order for all graphs.
2009  *
2010  * @return the list of graphs.
2011  */
2012 static ir_graph **create_irg_list(void) {
2013         ir_entity **free_methods;
2014         int       arr_len;
2015         int       n_irgs = get_irp_n_irgs();
2016
2017         cgana(&arr_len, &free_methods);
2018         xfree(free_methods);
2019
2020         compute_callgraph();
2021
2022         last_irg = 0;
2023         irgs     = XMALLOCNZ(ir_graph*, n_irgs);
2024
2025         callgraph_walk(NULL, callgraph_walker, NULL);
2026         assert(n_irgs == last_irg);
2027
2028         return irgs;
2029 }
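
/* Note (an assumption about callgraph_walk() semantics, not stated here): the
 * walker above is passed as the post-visit function, so a graph is appended
 * only after its callees were visited; the resulting order therefore tends to
 * list callees before their callers, which suits bottom-up inlining. */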
2030
2031 /**
2032  * Push a call onto the priority queue if its benefice is big enough.
2033  *
2034  * @param pqueue   the priority queue of calls
2035  * @param call     the call entry
2036  * @param inline_threshold
2037  *                 the threshold value
2038  */
2039 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
2040                             int inline_threshold)
2041 {
2042         ir_graph            *callee  = call->callee;
2043         irg_inline_property prop     = get_irg_inline_property(callee);
2044         int                 benefice = calc_inline_benefice(call, callee);
2045
2046         DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
2047             get_irn_irg(call->call), call->call, callee, benefice));
2048
2049         if (prop < irg_inline_forced && benefice < inline_threshold) {
2050                 return;
2051         }
2052
2053         pqueue_put(pqueue, call, benefice);
2054 }
2055
2056 /**
2057  * Try to inline calls into a graph.
2058  *
2059  * @param irg      the graph into which we inline
2060  * @param maxsize  do NOT inline if the size of irg gets
2061  *                 bigger than this amount
2062  * @param inline_threshold
2063  *                 threshold value for inline decision
2064  * @param copied_graphs
2065  *                 map containing copies of recursive graphs
2066  */
2067 static void inline_into(ir_graph *irg, unsigned maxsize,
2068                         int inline_threshold, pmap *copied_graphs)
2069 {
2070         int            phiproj_computed = 0;
2071         inline_irg_env *env = get_irg_link(irg);
2072         call_entry     *curr_call;
2073         wenv_t         wenv;
2074         pqueue_t       *pqueue;
2075
2076         if (env->n_call_nodes == 0)
2077                 return;
2078
2079         if (env->n_nodes > maxsize) {
2080                 DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
2081                 return;
2082         }
2083
2084         current_ir_graph = irg;
2085         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2086
2087         /* put the calls of this graph into the priority queue */
2088         pqueue = new_pqueue();
2089
2090         list_for_each_entry(call_entry, curr_call, &env->calls, list) {
2091                 assert(is_Call(curr_call->call));
2092                 maybe_push_call(pqueue, curr_call, inline_threshold);
2093         }
2094
2095         /* note that the list of possible calls is updated during the process */
2096         while (!pqueue_empty(pqueue)) {
2097                 int                 did_inline;
2098                 call_entry          *curr_call  = pqueue_pop_front(pqueue);
2099                 ir_graph            *callee     = curr_call->callee;
2100                 ir_node             *call_node  = curr_call->call;
2101                 inline_irg_env      *callee_env = get_irg_link(callee);
2102                 irg_inline_property prop        = get_irg_inline_property(callee);
2103                 int                 loop_depth;
2104                 const call_entry    *centry;
2105                 pmap_entry          *e;
2106
2107                 if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
2108                         DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
2109                                                 env->n_nodes, callee, callee_env->n_nodes));
2110                         continue;
2111                 }
2112
2113                 e = pmap_find(copied_graphs, callee);
2114                 if (e != NULL) {
2115                         int benefice = curr_call->benefice;
2116                         /*
2117                          * Reduce the weight for a recursive function IFF not all arguments are constant.
2118                          * Inlining recursive functions is rarely good.
2119                          */
2120                         if (!curr_call->all_const)
2121                                 benefice -= 2000;
2122                         if (benefice < inline_threshold)
2123                                 continue;
2124
2125                         /*
2126                          * Remap callee if we have a copy.
2127                          */
2128                         callee     = e->value;
2129                         callee_env = get_irg_link(callee);
2130                 }
2131
2132                 if (current_ir_graph == callee) {
2133                         /*
2134                          * Recursive call: we cannot directly inline because we cannot
2135                          * walk the graph and change it. So we have to make a copy of
2136                          * the graph first.
2137                          */
2138                         int benefice = curr_call->benefice;
2139                         ir_graph *copy;
2140
2141                         /*
2142                          * Reduce the weight for a recursive function IFF not all arguments are constant.
2143                          * Inlining recursive functions is rarely good.
2144                          */
2145                         if (!curr_call->all_const)
2146                                 benefice -= 2000;
2147                         if (benefice < inline_threshold)
2148                                 continue;
2149
2150                         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2151
2152                         /*
2153                          * No copy yet, create one.
2154                          * Note that recursive methods are never leaves, so it is
2155                          * sufficient to test this condition here.
2156                          */
2157                         copy = create_irg_copy(callee);
2158
2159                         /* create_irg_copy() destroys the Proj links, recompute them */
2160                         phiproj_computed = 0;
2161
2162                         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2163
2164                         /* allocate a new environment */
2165                         callee_env = alloc_inline_irg_env();
2166                         set_irg_link(copy, callee_env);
2167
2168                         assure_cf_loop(copy);
2169                         wenv.x              = callee_env;
2170                         wenv.ignore_callers = 1;
2171                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2172
2173                         /*
2174                          * Enter the entity of the original graph. This is needed
2175                          * for inline_method(). However, note that ent->irg still points
2176                          * to callee, NOT to copy.
2177                          */
2178                         set_irg_entity(copy, get_irg_entity(callee));
2179
2180                         pmap_insert(copied_graphs, callee, copy);
2181                         callee = copy;
2182
2183                         /* we have only one caller: the original graph */
2184                         callee_env->n_callers      = 1;
2185                         callee_env->n_callers_orig = 1;
2186                 }
2187                 if (! phiproj_computed) {
2188                         phiproj_computed = 1;
2189                         collect_phiprojs(current_ir_graph);
2190                 }
2191                 did_inline = inline_method(call_node, callee);
2192                 if (!did_inline)
2193                         continue;
2194
2195                 /* call was inlined, Phi/Projs for current graph must be recomputed */
2196                 phiproj_computed = 0;
2197
2198                 /* remove it from the caller list */
2199                 list_del(&curr_call->list);
2200
2201                 /* callee was inlined. Append its call list. */
2202                 env->got_inline = 1;
2203                 if (curr_call->local_adr)
2204                         env->local_vars = 1;
2205                 --env->n_call_nodes;
2206
2207                 /* we just generate a bunch of new calls */
2208                 loop_depth = curr_call->loop_depth;
2209                 list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
2210                         inline_irg_env *penv = get_irg_link(centry->callee);
2211                         ir_node        *new_call;
2212                         call_entry     *new_entry;
2213
2214                         /* after we have inlined callee, all called methods inside
2215                          * callee are now called once more */
2216                         ++penv->n_callers;
2217
2218                         /* Note that the src list points to Call nodes in the inlined graph,
2219                          * but we need Call nodes in our graph. Luckily the inliner leaves
2220                          * this information in the link field. */
2221                         new_call = get_irn_link(centry->call);
2222                         assert(is_Call(new_call));
2223
2224                         new_entry = duplicate_call_entry(centry, new_call, loop_depth);
2225                         list_add_tail(&new_entry->list, &env->calls);
2226                         maybe_push_call(pqueue, new_entry, inline_threshold);
2227                 }
2228
2229                 env->n_call_nodes += callee_env->n_call_nodes;
2230                 env->n_nodes += callee_env->n_nodes;
2231                 --callee_env->n_callers;
2232         }
2233         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2234         del_pqueue(pqueue);
2235 }
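
/* Note on the loop above: every successful inline re-enqueues the call entries
 * duplicated from the callee via maybe_push_call(), so inlining proceeds
 * transitively until the queue drains or the maxsize limit stops it. */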
2236
2237 /*
2238  * Heuristic inliner. Calculates a benefice value for every call and inlines
2239  * those calls with a value higher than the threshold.
2240  */
2241 void inline_functions(unsigned maxsize, int inline_threshold) {
2242         inline_irg_env   *env;
2243         int              i, n_irgs;
2244         ir_graph         *rem;
2245         wenv_t           wenv;
2246         pmap             *copied_graphs;
2247         pmap_entry       *pm_entry;
2248         ir_graph         **irgs;
2249
2250         rem = current_ir_graph;
2251         obstack_init(&temp_obst);
2252
2253         irgs = create_irg_list();
2254
2255         /* a map for the copied graphs, used to inline recursive calls */
2256         copied_graphs = pmap_create();
2257
2258         /* extend all irgs by a temporary data structure for inlining. */
2259         n_irgs = get_irp_n_irgs();
2260         for (i = 0; i < n_irgs; ++i)
2261                 set_irg_link(irgs[i], alloc_inline_irg_env());
2262
2263         /* Pre-compute information in temporary data structure. */
2264         wenv.ignore_runtime = 0;
2265         wenv.ignore_callers = 0;
2266         for (i = 0; i < n_irgs; ++i) {
2267                 ir_graph *irg = irgs[i];
2268
2269                 free_callee_info(irg);
2270
2271                 wenv.x = get_irg_link(irg);
2272                 assure_cf_loop(irg);
2273                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2274         }
2275
2276         /* -- and now inline. -- */
2277         for (i = 0; i < n_irgs; ++i) {
2278                 ir_graph *irg = irgs[i];
2279
2280                 inline_into(irg, maxsize, inline_threshold, copied_graphs);
2281         }
2282
2283         for (i = 0; i < n_irgs; ++i) {
2284                 ir_graph *irg = irgs[i];
2285
2286                 env = get_irg_link(irg);
2287                 if (env->got_inline) {
2288                         /* this irg got calls inlined: optimize it */
2289                         if (get_opt_combo()) {
2290                                 if (env->local_vars) {
2291                                         scalar_replacement_opt(irg);
2292                                 }
2293                                 combo(irg);
2294                         } else {
2295                                 if (env->local_vars) {
2296                                         if (scalar_replacement_opt(irg)) {
2297                                                 optimize_graph_df(irg);
2298                                         }
2299                                 }
2300                                 optimize_cf(irg);
2301                         }
2302                 }
2303                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2304                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2305                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2306                         env->n_callers_orig, env->n_callers,
2307                         get_entity_name(get_irg_entity(irg))));
2308                 }
2309         }
2310
2311         /* kill the copied graphs: we don't need them anymore */
2312         foreach_pmap(copied_graphs, pm_entry) {
2313                 ir_graph *copy = pm_entry->value;
2314
2315                 /* reset the entity, otherwise it will be deleted in the next step ... */
2316                 set_irg_entity(copy, NULL);
2317                 free_ir_graph(copy);
2318         }
2319         pmap_destroy(copied_graphs);
2320
2321         xfree(irgs);
2322
2323         obstack_free(&temp_obst, NULL);
2324         current_ir_graph = rem;
2325 }
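
/* A minimal usage sketch (both values are illustrative assumptions, not tuned
 * defaults): limit every graph to roughly 750 nodes and inline all calls with
 * a non-negative benefice:
 *
 *     inline_functions(750, 0);
 */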
2326
2327 void firm_init_inline(void) {
2328         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2329 }