1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #include "config.h"
27
28 #include <limits.h>
29 #include <assert.h>
30
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irprog_t.h"
34
35 #include "iroptimize.h"
36 #include "ircons_t.h"
37 #include "iropt_t.h"
38 #include "irgopt.h"
39 #include "irgmod.h"
40 #include "irgwalk.h"
41
42 #include "array_t.h"
43 #include "list.h"
44 #include "pset.h"
45 #include "pmap.h"
46 #include "pdeq.h"
47 #include "xmalloc.h"
48 #include "pqueue.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63 #include "iropt_dbg.h"
64
65 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
66
67 /*------------------------------------------------------------------*/
68 /* Routines for dead node elimination / copying garbage collection  */
69 /* of the obstack.                                                  */
70 /*------------------------------------------------------------------*/
71
72 /**
73  * Remember the new node in the old node by using a field all nodes have.
74  */
75 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
76
77 /**
78  * Get this new node, before the old node is forgotten.
79  */
80 #define get_new_node(oldn) get_irn_link(oldn)
81
82 /**
83  * Check if a new node was set.
84  */
85 #define has_new_node(n) (get_new_node(n) != NULL)
86
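/*
 * Illustrative use (a sketch, not part of the original code): after
 * copy_node() ran on a node, its copy can be retrieved through the old
 * node's link field.
 *
 *     copy_node(n, NULL);
 *     if (has_new_node(n))
 *         use_copy(get_new_node(n));   // use_copy() is a placeholder
 */
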
87 /**
88  * We use the block_visited flag to mark that we have computed the
89  * number of useful predecessors for this block.
90  * Further we encode the new arity in this flag in the old blocks.
91  * Remembering the arity is useful, as it saves a lot of pointer
92  * accesses.  This function is called for all Phi and Block nodes
93  * in a Block.
94  */
95 static inline int
96 compute_new_arity(ir_node *b) {
97         int i, res, irn_arity;
98         int irg_v, block_v;
99
100         irg_v = get_irg_block_visited(current_ir_graph);
101         block_v = get_Block_block_visited(b);
102         if (block_v >= irg_v) {
103                 /* we computed the number of preds for this block and saved it in the
104                    block_v flag */
105                 return block_v - irg_v;
106         } else {
107                 /* compute the number of good predecessors */
108                 res = irn_arity = get_irn_arity(b);
109                 for (i = 0; i < irn_arity; i++)
110                         if (is_Bad(get_irn_n(b, i))) res--;
111                 /* save it in the flag. */
112                 set_Block_block_visited(b, irg_v + res);
113                 return res;
114         }
115 }
116
117 /**
118  * Copies the node to the new obstack. The Ins of the new node point to
119  * the predecessors on the old obstack.  For block/phi nodes not all
120  * predecessors might be copied.  n->link points to the new node.
121  * For Phi and Block nodes the function allocates in-arrays with an arity
122  * only for useful predecessors.  The arity is determined by counting
123  * the non-bad predecessors of the block.
124  *
125  * @param n    The node to be copied
126  * @param env  if non-NULL, the node number attribute will be copied to the new node
127  *
128  * Note: Also used for loop unrolling.
129  */
130 static void copy_node(ir_node *n, void *env) {
131         ir_node *nn, *block;
132         int new_arity;
133         ir_op *op = get_irn_op(n);
134         (void) env;
135
136         if (op == op_Bad) {
137                 /* node copied already */
138                 return;
139         } else if (op == op_Block) {
140                 block = NULL;
141                 new_arity = compute_new_arity(n);
142                 n->attr.block.graph_arr = NULL;
143         } else {
144                 block = get_nodes_block(n);
145                 if (op == op_Phi) {
146                         new_arity = compute_new_arity(block);
147                 } else {
148                         new_arity = get_irn_arity(n);
149                 }
150         }
151         nn = new_ir_node(get_irn_dbg_info(n),
152                 current_ir_graph,
153                 block,
154                 op,
155                 get_irn_mode(n),
156                 new_arity,
157                 get_irn_in(n) + 1);
158         /* Copy the attributes.  These might point to additional data.  If this
159            was allocated on the old obstack the pointers now are dangling.  This
160            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
161         if (op == op_Block) {
162                 /* we cannot allow blocks WITHOUT macroblock input */
163                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
164         }
165         copy_node_attr(n, nn);
166
167 #ifdef DEBUG_libfirm
168         {
169                 int copy_node_nr = env != NULL;
170                 if (copy_node_nr) {
171                         /* for easier debugging, we want to copy the node numbers too */
172                         nn->node_nr = n->node_nr;
173                 }
174         }
175 #endif
176
177         set_new_node(n, nn);
178         hook_dead_node_elim_subst(current_ir_graph, n, nn);
179 }
180
181 /**
182  * Copies new predecessors of old node to new node remembered in link.
183  * Spare the Bad predecessors of Phi and Block nodes.
184  */
185 static void copy_preds(ir_node *n, void *env) {
186         ir_node *nn, *block;
187         int i, j, irn_arity;
188         (void) env;
189
190         nn = get_new_node(n);
191
192         if (is_Block(n)) {
193                 /* copy the macro block header */
194                 ir_node *mbh = get_Block_MacroBlock(n);
195
196                 if (mbh == n) {
197                         /* this block is a macroblock header */
198                         set_Block_MacroBlock(nn, nn);
199                 } else {
200                         /* get the macro block header */
201                         ir_node *nmbh = get_new_node(mbh);
202                         assert(nmbh != NULL);
203                         set_Block_MacroBlock(nn, nmbh);
204                 }
205
206                 /* Don't copy Bad nodes. */
207                 j = 0;
208                 irn_arity = get_irn_arity(n);
209                 for (i = 0; i < irn_arity; i++) {
210                         if (! is_Bad(get_irn_n(n, i))) {
211                                 ir_node *pred = get_irn_n(n, i);
212                                 set_irn_n(nn, j, get_new_node(pred));
213                                 j++;
214                         }
215                 }
216                 /* repair the block visited flag from above misuse. Repair it in both
217                    graphs so that the old one can still be used. */
218                 set_Block_block_visited(nn, 0);
219                 set_Block_block_visited(n, 0);
220                 /* Local optimization could not merge two subsequent blocks if the
221                    in array contained Bads.  Now it is possible.
222                    We don't call optimize_in_place as it requires
223                    that the fields in ir_graph are set properly. */
224                 if (!has_Block_label(nn) &&
225                     get_opt_control_flow_straightening() &&
226                     get_Block_n_cfgpreds(nn) == 1 &&
227                     is_Jmp(get_Block_cfgpred(nn, 0))) {
228                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
229                         if (nn == old) {
230                                 /* The Jmp jumps into its own block -- a self cycle; replace it by Bad. */
231                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
232                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
233                         } else {
234                                 exchange(nn, old);
235                         }
236                 }
237         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
238                 /* Don't copy node if corresponding predecessor in block is Bad.
239                    The Block itself should not be Bad. */
240                 block = get_nodes_block(n);
241                 set_nodes_block(nn, get_new_node(block));
242                 j = 0;
243                 irn_arity = get_irn_arity(n);
244                 for (i = 0; i < irn_arity; i++) {
245                         if (! is_Bad(get_irn_n(block, i))) {
246                                 ir_node *pred = get_irn_n(n, i);
247                                 set_irn_n(nn, j, get_new_node(pred));
248                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
249                                 j++;
250                         }
251                 }
252                 /* If the pre walker reached this Phi after the post walker visited the
253                    block, block_visited is > 0. */
254                 set_Block_block_visited(get_nodes_block(n), 0);
255                 /* Compacting the Phi's ins might generate Phis with only one
256                    predecessor. */
257                 if (get_irn_arity(nn) == 1)
258                         exchange(nn, get_irn_n(nn, 0));
259         } else {
260                 irn_arity = get_irn_arity(n);
261                 for (i = -1; i < irn_arity; i++)
262                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
263         }
264         /* Now the new node is complete.  We can add it to the hash table for CSE.
265            @@@ inlining aborts if we identify End. Why? */
266         if (!is_End(nn))
267                 add_identities(current_ir_graph->value_table, nn);
268 }
269
270 /**
271  * Copies the graph recursively, compacts the keep-alives of the end node.
272  *
273  * @param irg           the graph to be copied
274  * @param copy_node_nr  If non-zero, the node number will be copied
275  */
276 static void copy_graph(ir_graph *irg, int copy_node_nr) {
277         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
278         ir_node *ka;      /* keep alive */
279         int i, irn_arity;
280         unsigned long vfl;
281
282         /* Some nodes must be copied by hand, sigh */
283         vfl = get_irg_visited(irg);
284         set_irg_visited(irg, vfl + 1);
285
286         oe = get_irg_end(irg);
287         mark_irn_visited(oe);
288         /* copy the end node by hand, allocate dynamic in array! */
289         ne = new_ir_node(get_irn_dbg_info(oe),
290                 irg,
291                 NULL,
292                 op_End,
293                 mode_X,
294                 -1,
295                 NULL);
296         /* Copy the attributes.  Well, there might be some in the future... */
297         copy_node_attr(oe, ne);
298         set_new_node(oe, ne);
299
300         /* copy the Bad node */
301         ob = get_irg_bad(irg);
302         mark_irn_visited(ob);
303         nb = new_ir_node(get_irn_dbg_info(ob),
304                 irg,
305                 NULL,
306                 op_Bad,
307                 mode_T,
308                 0,
309                 NULL);
310         copy_node_attr(ob, nb);
311         set_new_node(ob, nb);
312
313         /* copy the NoMem node */
314         om = get_irg_no_mem(irg);
315         mark_irn_visited(om);
316         nm = new_ir_node(get_irn_dbg_info(om),
317                 irg,
318                 NULL,
319                 op_NoMem,
320                 mode_M,
321                 0,
322                 NULL);
323         copy_node_attr(om, nm);
324         set_new_node(om, nm);
325
326         /* copy the live nodes */
327         set_irg_visited(irg, vfl);
328         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
329
330         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
331
332         /* visit the anchors as well */
333         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
334                 ir_node *n = get_irg_anchor(irg, i);
335
336                 if (n && (get_irn_visited(n) <= vfl)) {
337                         set_irg_visited(irg, vfl);
338                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
339                 }
340         }
341
342         /* copy_preds for the end node ... */
343         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
344
345         /*- ... and now the keep alives. -*/
346         /* First pick the unmarked Block nodes and walk them.  We must pick these
347            first, as otherwise we would overlook blocks reachable from Phis. */
348         irn_arity = get_End_n_keepalives(oe);
349         for (i = 0; i < irn_arity; i++) {
350                 ka = get_End_keepalive(oe, i);
351                 if (is_Block(ka)) {
352                         if (get_irn_visited(ka) <= vfl) {
353                                 /* We must keep the block alive and copy everything reachable */
354                                 set_irg_visited(irg, vfl);
355                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
356                         }
357                         add_End_keepalive(ne, get_new_node(ka));
358                 }
359         }
360
361         /* Now pick other nodes.  Here we will keep all! */
362         irn_arity = get_End_n_keepalives(oe);
363         for (i = 0; i < irn_arity; i++) {
364                 ka = get_End_keepalive(oe, i);
365                 if (!is_Block(ka)) {
366                         if (get_irn_visited(ka) <= vfl) {
367                                 /* We didn't copy the node yet.  */
368                                 set_irg_visited(irg, vfl);
369                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
370                         }
371                         add_End_keepalive(ne, get_new_node(ka));
372                 }
373         }
374
375         /* start block sometimes only reached after keep alives */
376         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
377         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
378 }
379
380 /**
381  * Copies the graph reachable from current_ir_graph->end to the obstack
382  * in current_ir_graph and fixes the environment.
383  * Then fixes the fields in current_ir_graph containing nodes of the
384  * graph.
385  *
386  * @param copy_node_nr  If non-zero, the node number will be copied
387  */
388 static void
389 copy_graph_env(int copy_node_nr) {
390         ir_graph *irg = current_ir_graph;
391         ir_node *old_end, *new_anchor;
392         int i;
393
394         /* remove end_except and end_reg nodes */
395         old_end = get_irg_end(irg);
396         set_irg_end_except (irg, old_end);
397         set_irg_end_reg    (irg, old_end);
398
399         /* Not all nodes remembered in irg might be reachable
400            from the end node.  Assure their link is set to NULL, so that
401            we can test whether new nodes have been computed. */
402         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
403                 ir_node *n = get_irg_anchor(irg, i);
404                 if (n != NULL)
405                         set_new_node(n, NULL);
406         }
407         /* we use the block walk flag for removing Bads from Blocks ins. */
408         inc_irg_block_visited(irg);
409
410         /* copy the graph */
411         copy_graph(irg, copy_node_nr);
412
413         /* fix the anchor */
414         old_end    = get_irg_end(irg);
415         new_anchor = new_Anchor(irg);
416
417         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
418                 ir_node *n = get_irg_anchor(irg, i);
419                 if (n)
420                         set_irn_n(new_anchor, i, get_new_node(n));
421         }
422         free_End(old_end);
423         irg->anchor = new_anchor;
424
425         /* ensure the new anchor is placed in the endblock */
426         set_nodes_block(new_anchor, get_irg_end_block(irg));
427 }
428
429 /**
430  * Copies all reachable nodes to a new obstack.  Removes bad inputs
431  * from block nodes and the corresponding inputs from Phi nodes.
432  * Merges single exit blocks with single entry blocks and removes
433  * 1-input Phis.
434  * Adds all new nodes to a new hash table for CSE.  Does not
435  * perform CSE, so the hash table might contain common subexpressions.
436  */
437 void dead_node_elimination(ir_graph *irg) {
438         ir_graph *rem;
439 #ifdef INTERPROCEDURAL_VIEW
440         int rem_ipview = get_interprocedural_view();
441 #endif
442         struct obstack *graveyard_obst = NULL;
443         struct obstack *rebirth_obst   = NULL;
444
445         edges_deactivate(irg);
446
447         /* inform statistics that we started a dead-node elimination run */
448         hook_dead_node_elim(irg, 1);
449
450         /* Remember external state of current_ir_graph. */
451         rem = current_ir_graph;
452         current_ir_graph = irg;
453 #ifdef INTERPROCEDURAL_VIEW
454         set_interprocedural_view(0);
455 #endif
456
457         assert(get_irg_phase_state(irg) != phase_building);
458
459         /* Handle graph state */
460         free_callee_info(irg);
461         free_irg_outs(irg);
462         free_trouts();
463
464         /* @@@ so far we lose loops when copying */
465         free_loop_information(irg);
466
467         set_irg_doms_inconsistent(irg);
468
469         /* A quiet place, where the old obstack can rest in peace,
470            until it will be cremated. */
471         graveyard_obst = irg->obst;
472
473         /* A new obstack, where the reachable nodes will be copied to. */
474         rebirth_obst = XMALLOC(struct obstack);
475         irg->obst = rebirth_obst;
476         obstack_init(irg->obst);
477         irg->last_node_idx = 0;
478
479         /* We also need a new value table for CSE */
480         del_identities(irg->value_table);
481         irg->value_table = new_identities();
482
483         /* Copy the graph from the old to the new obstack */
484         copy_graph_env(/*copy_node_nr=*/1);
485
486         /* Free memory from old unoptimized obstack */
487         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
488         xfree(graveyard_obst);            /* ... then free it.           */
489
490         /* inform statistics that the run is over */
491         hook_dead_node_elim(irg, 0);
492
493         current_ir_graph = rem;
494 #ifdef INTERPROCEDURAL_VIEW
495         set_interprocedural_view(rem_ipview);
496 #endif
497 }
498
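/*
 * Illustrative use (a sketch, not part of the original code): dead node
 * elimination is typically run after transformations that leave many
 * unreachable nodes on the obstack.
 *
 *     // ... some transformation passes on irg ...
 *     dead_node_elimination(irg);   // copies live nodes onto a fresh obstack
 */
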
499 /**
500  * Relink bad predecessors of a block and store the old in array to the
501  * link field. This function is called by relink_bad_predecessors().
502  * The array in the link field starts with the block operand at position 0.
503  * If the block has bad predecessors, create a new in array without bad preds.
504  * Otherwise leave the in array untouched.
505  */
506 static void relink_bad_block_predecessors(ir_node *n, void *env) {
507         ir_node **new_in, *irn;
508         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
509         (void) env;
510
511         /* if link field of block is NULL, look for bad predecessors otherwise
512            this is already done */
513         if (is_Block(n) && get_irn_link(n) == NULL) {
514                 /* save old predecessors in link field (position 0 is the block operand)*/
515                 set_irn_link(n, get_irn_in(n));
516
517                 /* count predecessors without bad nodes */
518                 old_irn_arity = get_irn_arity(n);
519                 for (i = 0; i < old_irn_arity; i++)
520                         if (!is_Bad(get_irn_n(n, i)))
521                                 ++new_irn_arity;
522
523                 /* arity changing: set new predecessors without bad nodes */
524                 if (new_irn_arity < old_irn_arity) {
525                         /* Get new predecessor array. We do not resize the array, as we must
526                            keep the old one to update Phis. */
527                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
528
529                         /* set new predecessors in array */
530                         new_in[0] = NULL;
531                         new_irn_n = 1;
532                         for (i = 0; i < old_irn_arity; i++) {
533                                 irn = get_irn_n(n, i);
534                                 if (!is_Bad(irn)) {
535                                         new_in[new_irn_n] = irn;
536                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
537                                         ++new_irn_n;
538                                 }
539                         }
540                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
541                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
542                         n->in = new_in;
543                 } /* ir node has bad predecessors */
544         } /* Block is not relinked */
545 }
546
547 /**
548  * Relinks Bad predecessors of Blocks and Phis, called by the walker
549  * remove_bad_predecessors(). If n is a Block, call
550  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
551  * function for the Phi's Block. If this block has bad predecessors, relink the
552  * predecessors of the Phi node.
553  */
554 static void relink_bad_predecessors(ir_node *n, void *env) {
555         ir_node *block, **old_in;
556         int i, old_irn_arity, new_irn_arity;
557
558         /* relink bad predecessors of a block */
559         if (is_Block(n))
560                 relink_bad_block_predecessors(n, env);
561
562         /* If Phi node relink its block and its predecessors */
563         if (is_Phi(n)) {
564                 /* Relink predecessors of phi's block */
565                 block = get_nodes_block(n);
566                 if (get_irn_link(block) == NULL)
567                         relink_bad_block_predecessors(block, env);
568
569                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
570                 old_irn_arity = ARR_LEN(old_in);
571
572                 /* Relink Phi predecessors if count of predecessors changed */
573                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
574                         /* set new predecessors in array
575                            n->in[0] remains the same block */
576                         new_irn_arity = 1;
577                         for (i = 1; i < old_irn_arity; i++)
578                                 if (!is_Bad(old_in[i])) {
579                                         n->in[new_irn_arity] = n->in[i];
580                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
581                                         ++new_irn_arity;
582                                 }
583
584                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
585                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
586                 }
587         } /* n is a Phi node */
588 }
589
590 /*
591  * Removes Bad predecessors from Blocks and the corresponding
592  * inputs to Phi nodes as in dead_node_elimination but without
593  * copying the graph.
594  * On walking up set the link field to NULL, on walking down call
595  * relink_bad_predecessors() (this function stores the old in array
596  * in the link field and sets a new in array if the arity of the
597  * predecessors changes).
598  */
599 void remove_bad_predecessors(ir_graph *irg) {
600         panic("Fix backedge handling first");
601         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
602 }
603
604
605 /*
606    __                      _  __ __
607   (_     __    o     _    | \/  |_
608   __)|_| | \_/ | \_/(/_   |_/\__|__
609
610   The following stuff implements a facility that automatically patches
611   registered ir_node pointers to the new node when a dead node elimination occurs.
612 */
613
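/*
 * Usage sketch (illustrative, not part of the original code): to keep a
 * node pointer valid across dead node elimination, register its address.
 *
 *     survive_dce_t *sd = new_survive_dce();
 *     survive_dce_register_irn(sd, &my_node);   // my_node is an ir_node*
 *     dead_node_elimination(irg);               // my_node now points to the copy
 *     free_survive_dce(sd);
 */
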
614 struct _survive_dce_t {
615         struct obstack obst;
616         pmap *places;
617         pmap *new_places;
618         hook_entry_t dead_node_elim;
619         hook_entry_t dead_node_elim_subst;
620 };
621
622 typedef struct _survive_dce_list_t {
623         struct _survive_dce_list_t *next;
624         ir_node **place;
625 } survive_dce_list_t;
626
627 static void dead_node_hook(void *context, ir_graph *irg, int start) {
628         survive_dce_t *sd = context;
629         (void) irg;
630
631         /* Create a new map before the dead node elimination is performed. */
632         if (start) {
633                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
634         } else {
635                 /* Dead node elimination is over: discard the old map and switch to the patched one. */
636                 pmap_destroy(sd->places);
637                 sd->places     = sd->new_places;
638                 sd->new_places = NULL;
639         }
640 }
641
642 /**
643  * Hook called when dead node elimination replaces old by nw.
644  */
645 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
646         survive_dce_t *sd = context;
647         survive_dce_list_t *list = pmap_get(sd->places, old);
648         (void) irg;
649
650         /* If the node is to be patched back, write the new address to all registered locations. */
651         if (list) {
652                 survive_dce_list_t *p;
653
654                 for (p = list; p; p = p->next)
655                         *(p->place) = nw;
656
657                 pmap_insert(sd->new_places, nw, list);
658         }
659 }
660
661 /**
662  * Make a new Survive DCE environment.
663  */
664 survive_dce_t *new_survive_dce(void) {
665         survive_dce_t *res = XMALLOC(survive_dce_t);
666         obstack_init(&res->obst);
667         res->places     = pmap_create();
668         res->new_places = NULL;
669
670         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
671         res->dead_node_elim.context                   = res;
672         res->dead_node_elim.next                      = NULL;
673
674         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
675         res->dead_node_elim_subst.context = res;
676         res->dead_node_elim_subst.next    = NULL;
677
678         register_hook(hook_dead_node_elim, &res->dead_node_elim);
679         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
680         return res;
681 }
682
683 /**
684  * Free a Survive DCE environment.
685  */
686 void free_survive_dce(survive_dce_t *sd) {
687         obstack_free(&sd->obst, NULL);
688         pmap_destroy(sd->places);
689         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
690         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
691         xfree(sd);
692 }
693
694 /**
695  * Register a node pointer to be patched upon DCE.
696  * When DCE occurs, the node pointer specified by @p place will be
697  * patched to the new address of the node it is pointing to.
698  *
699  * @param sd    The Survive DCE environment.
700  * @param place The address of the node pointer.
701  */
702 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
703         if (*place != NULL) {
704                 ir_node *irn      = *place;
705                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
706                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
707
708                 nw->next  = curr;
709                 nw->place = place;
710
711                 pmap_insert(sd->places, irn, nw);
712         }
713 }
714
715 /*--------------------------------------------------------------------*/
716 /*  Functionality for inlining                                         */
717 /*--------------------------------------------------------------------*/
718
719 /**
720  * Copy node for inlining.  Updates attributes that change when
721  * inlining but not for dead node elimination.
722  *
723  * Copies the node by calling copy_node() and then updates the entity if
724  * it's a local one.  env must be a pointer to the frame type of the
725  * inlined procedure. The new entities must be in the link fields of
726  * the old entities.
727  */
728 static void copy_node_inline(ir_node *n, void *env) {
729         ir_node *nn;
730         ir_type *frame_tp = (ir_type *)env;
731
732         copy_node(n, NULL);
733         if (is_Sel(n)) {
734                 nn = get_new_node (n);
735                 assert(is_Sel(nn));
736                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
737                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
738                 }
739         } else if (is_Block(n)) {
740                 nn = get_new_node (n);
741                 nn->attr.block.irg = current_ir_graph;
742         }
743 }
744
745 /**
746  * Copies new predecessors of the old node and moves constants to
747  * the Start Block.
748  */
749 static void copy_preds_inline(ir_node *n, void *env) {
750         ir_node *nn;
751
752         copy_preds(n, env);
753         nn = skip_Id(get_new_node(n));
754         if (is_irn_constlike(nn)) {
755                 /* move Constants into the start block */
756                 set_nodes_block(nn, get_irg_start_block(current_ir_graph));
757
758                 n = identify_remember(current_ir_graph->value_table, nn);
759                 if (nn != n) {
760                         DBG_OPT_CSE(nn, n);
761                         exchange(nn, n);
762                 }
763         }
764 }
765
766 /**
767  * Walker: checks if P_value_arg_base is used.
768  */
769 static void find_addr(ir_node *node, void *env) {
770         int *allow_inline = env;
771         if (is_Proj(node) &&
772                         is_Start(get_Proj_pred(node)) &&
773                         get_Proj_proj(node) == pn_Start_P_value_arg_base) {
774                 *allow_inline = 0;
775         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
776                 /* From GCC:
777                  * Refuse to inline alloca call unless user explicitly forced so as this
778                  * may change program's memory overhead drastically when the function
779                  * using alloca is called in loop.  In GCC present in SPEC2000 inlining
780                  * into schedule_block cause it to require 2GB of ram instead of 256MB.
781                  *
782                  * Sadly this is true for our implementation as well.
783                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
784                  * disables inlining of functions using VLA (which are completely safe).
785                  *
786                  * 2 Solutions:
787                  * - add a flag to the Alloc node for "real" alloca() calls
788                  * - add a new Stack-Restore node at the end of a function using alloca()
789                  */
790                 *allow_inline = 0;
791         }
792 }
793
794 /**
795  * Check if we can inline a given call.
796  * Currently, we cannot inline two cases:
797  * - calls with compound arguments
798  * - graphs that take the address of a parameter
799  *
800  * These conditions are checked here.
801  */
802 static int can_inline(ir_node *call, ir_graph *called_graph) {
803         ir_type *call_type = get_Call_type(call);
804         int params, ress, i, res;
805         assert(is_Method_type(call_type));
806
807         params = get_method_n_params(call_type);
808         ress   = get_method_n_ress(call_type);
809
810         /* check parameters for compound arguments */
811         for (i = 0; i < params; ++i) {
812                 ir_type *p_type = get_method_param_type(call_type, i);
813
814                 if (is_compound_type(p_type))
815                         return 0;
816         }
817
818         /* check results for compound arguments */
819         for (i = 0; i < ress; ++i) {
820                 ir_type *r_type = get_method_res_type(call_type, i);
821
822                 if (is_compound_type(r_type))
823                         return 0;
824         }
825
826         res = 1;
827         irg_walk_graph(called_graph, find_addr, NULL, &res);
828
829         return res;
830 }
831
832 enum exc_mode {
833         exc_handler    = 0, /**< There is a handler. */
834         exc_to_end     = 1, /**< Branches to End. */
835         exc_no_handler = 2  /**< Exception handling not represented. */
836 };
837
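/*
 * Illustrative call sequence (a sketch, not part of the original code;
 * irg is the calling graph, call a Call node in it, callee the called
 * graph). The Phi/Proj links of the caller must be collected first, as
 * done in inline_small_irgs() below:
 *
 *     ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
 *     collect_phiprojs(irg);
 *     if (inline_method(call, callee)) {
 *         // the Call was replaced by the inlined body
 *     }
 *     ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
 */
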
838 /* Inlines a method at the given call site. */
839 int inline_method(ir_node *call, ir_graph *called_graph) {
840         ir_node             *pre_call;
841         ir_node             *post_call, *post_bl;
842         ir_node             *in[pn_Start_max];
843         ir_node             *end, *end_bl, *block;
844         ir_node             **res_pred;
845         ir_node             **cf_pred;
846         ir_node             **args_in;
847         ir_node             *ret, *phi;
848         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
849         enum exc_mode       exc_handling;
850         ir_type             *called_frame, *curr_frame, *mtp, *ctp;
851         ir_entity           *ent;
852         ir_graph            *rem, *irg;
853         irg_inline_property prop = get_irg_inline_property(called_graph);
854         unsigned long       visited;
855
856         if (prop == irg_inline_forbidden)
857                 return 0;
858
859         ent = get_irg_entity(called_graph);
860
861         mtp = get_entity_type(ent);
862         ctp = get_Call_type(call);
863         if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
864                 /* this is a bad feature of C: without a prototype, we can call a function with fewer
865                 parameters than needed. Currently we don't support this, although it would be
866                 possible to use Unknown then. */
867                 return 0;
868         }
869
870         /* Argh, compiling C has some bad consequences:
871            the call type AND the method type might be different.
872            It is implementation dependent what happens in that case.
873            We support inlining if the bit sizes of the types match AND
874            the same arithmetic is used. */
875         n_params = get_method_n_params(mtp);
876         for (i = n_params - 1; i >= 0; --i) {
877                 ir_type *param_tp = get_method_param_type(mtp, i);
878                 ir_type *arg_tp   = get_method_param_type(ctp, i);
879
880                 if (param_tp != arg_tp) {
881                         ir_mode *pmode = get_type_mode(param_tp);
882                         ir_mode *amode = get_type_mode(arg_tp);
883
884                         if (pmode == NULL || amode == NULL)
885                                 return 0;
886                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
887                                 return 0;
888                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
889                                 return 0;
890                         /* otherwise we can simply "reinterpret" the bits */
891                 }
892         }
893
894         irg = get_irn_irg(call);
895
896         /*
897          * We cannot inline a recursive call. The graph must be copied before
898          * the call to inline_method() using create_irg_copy().
899          */
900         if (called_graph == irg)
901                 return 0;
902
903         /*
904          * currently, we cannot inline two cases:
905          * - call with compound arguments
906          * - graphs that take the address of a parameter
907          */
908         if (! can_inline(call, called_graph))
909                 return 0;
910
911         rem = current_ir_graph;
912         current_ir_graph = irg;
913
914         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
915
916         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
917         rem_opt = get_opt_optimize();
918         set_optimize(0);
919
920         /* Handle graph state */
921         assert(get_irg_phase_state(irg) != phase_building);
922         assert(get_irg_pinned(irg) == op_pin_state_pinned);
923         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
924         set_irg_outs_inconsistent(irg);
925         set_irg_extblk_inconsistent(irg);
926         set_irg_doms_inconsistent(irg);
927         set_irg_loopinfo_inconsistent(irg);
928         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
929
930         /* -- Check preconditions -- */
931         assert(is_Call(call));
932
933         /* here we know we WILL inline, so inform the statistics */
934         hook_inline(call, called_graph);
935
936         /* -- Decide how to handle exception control flow: Is there a handler
937            for the Call node, or do we branch directly to End on an exception?
938            exc_handling:
939            0 There is a handler.
940            1 Branches to End.
941            2 Exception handling not represented in Firm. -- */
942         {
943                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
944                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
945                         long proj_nr = get_Proj_proj(proj);
946                         if (proj_nr == pn_Call_X_except) Xproj = proj;
947                         if (proj_nr == pn_Call_M_except) Mproj = proj;
948                 }
949                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
950                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
951                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
952         }
953
954         /* create the argument tuple */
955         NEW_ARR_A(ir_node *, args_in, n_params);
956
957         block = get_nodes_block(call);
958         for (i = n_params - 1; i >= 0; --i) {
959                 ir_node *arg      = get_Call_param(call, i);
960                 ir_type *param_tp = get_method_param_type(mtp, i);
961                 ir_mode *mode     = get_type_mode(param_tp);
962
963                 if (mode != get_irn_mode(arg)) {
964                         arg = new_r_Conv(irg, block, arg, mode);
965                 }
966                 args_in[i] = arg;
967         }
968
969         /* -- Pre_call is a Tuple that collects the entry state of the called
970            procedure and later replaces the Start node of the called graph.
971            Post_call is the old Call node and collects the results of the called
972            graph. Both will end up being a tuple.  -- */
973         post_bl = get_nodes_block(call);
974         set_irg_current_block(irg, post_bl);
975         /* XxMxPxPxPxT of Start + parameter of Call */
976         in[pn_Start_X_initial_exec]   = new_Jmp();
977         in[pn_Start_M]                = get_Call_mem(call);
978         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
979         in[pn_Start_P_tls]            = get_irg_tls(irg);
980         in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
981         /* in[pn_Start_P_value_arg_base] = ??? */
982         assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
983         pre_call = new_Tuple(pn_Start_max - 1, in);
984         post_call = call;
985
986         /* --
987            The new block gets the ins of the old block, pre_call and all its
988            predecessors and all Phi nodes. -- */
989         part_block(pre_call);
990
991         /* -- Prepare state for dead node elimination -- */
992         /* Visited flags in calling irg must be >= flag in called irg.
993            Else walker and arity computation will not work. */
994         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
995                 set_irg_visited(irg, get_irg_visited(called_graph) + 1);
996         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
997                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
998         visited = get_irg_visited(irg);
999
1000         /* Set pre_call as new Start node in link field of the start node of
1001            calling graph and pre_calls block as new block for the start block
1002            of calling graph.
1003            Further mark these nodes so that they are not visited by the
1004            copying. */
1005         set_irn_link(get_irg_start(called_graph), pre_call);
1006         set_irn_visited(get_irg_start(called_graph), visited);
1007         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
1008         set_irn_visited(get_irg_start_block(called_graph), visited);
1009
1010         set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
1011         set_irn_visited(get_irg_bad(called_graph), visited);
1012
1013         set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
1014         set_irn_visited(get_irg_no_mem(called_graph), visited);
1015
1016         /* Initialize for compaction of in arrays */
1017         inc_irg_block_visited(irg);
1018
1019         /* -- Replicate local entities of the called_graph -- */
1020         /* copy the entities. */
1021         irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1022         called_frame = get_irg_frame_type(called_graph);
1023         curr_frame   = get_irg_frame_type(irg);
1024         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
1025                 ir_entity *new_ent, *old_ent;
1026                 old_ent = get_class_member(called_frame, i);
1027                 new_ent = copy_entity_own(old_ent, curr_frame);
1028                 set_entity_link(old_ent, new_ent);
1029         }
1030
1031         /* visited is greater than that of the called graph.  With this trick, visited
1032            will remain unchanged, so that an outer walker (e.g., one searching for Call
1033            nodes to inline) that triggers this inlining will not visit the inlined nodes. */
1034         set_irg_visited(irg, get_irg_visited(irg)-1);
1035
1036         /* -- Performing dead node elimination inlines the graph -- */
1037         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
1038            entities. */
1039         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
1040                  get_irg_frame_type(called_graph));
1041
1042         irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1043
1044         /* Repair called_graph */
1045         set_irg_visited(called_graph, get_irg_visited(irg));
1046         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
1047         set_Block_block_visited(get_irg_start_block(called_graph), 0);
1048
1049         /* -- Merge the end of the inlined procedure with the call site -- */
1050         /* We will turn the old Call node into a Tuple with the following
1051            predecessors:
1052            -1:  Block of Tuple.
1053            0: Phi of all Memories of Return statements.
1054            1: Jmp from new Block that merges the control flow from all exception
1055            predecessors of the old end block.
1056            2: Tuple of all arguments.
1057            3: Phi of Exception memories.
1058            In case the old Call directly branches to End on an exception we don't
1059            need the block merging all exceptions nor the Phi of the exception
1060            memories.
1061         */
1062
1063         /* -- Precompute some values -- */
1064         end_bl = get_new_node(get_irg_end_block(called_graph));
1065         end = get_new_node(get_irg_end(called_graph));
1066         arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
1067         n_res = get_method_n_ress(get_Call_type(call));
1068
1069         res_pred = XMALLOCN(ir_node*, n_res);
1070         cf_pred  = XMALLOCN(ir_node*, arity);
1071
1072         set_irg_current_block(irg, post_bl); /* just to make sure */
1073
1074         /* -- Preserve the keepalives of the inlined graph. -- */
1075         irn_arity = get_irn_arity(end);
1076         for (i = 0; i < irn_arity; i++) {
1077                 ir_node *ka = get_End_keepalive(end, i);
1078                 if (! is_Bad(ka))
1079                         add_End_keepalive(get_irg_end(irg), ka);
1080         }
1081
1082         /* The new end node will die.  We need not free as the in array is on the obstack:
1083            copy_node() only generated 'D' arrays. */
1084
1085         /* -- Replace Return nodes by Jump nodes. -- */
1086         n_ret = 0;
1087         for (i = 0; i < arity; i++) {
1088                 ir_node *ret;
1089                 ret = get_Block_cfgpred(end_bl, i);
1090                 if (is_Return(ret)) {
1091                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1092                         n_ret++;
1093                 }
1094         }
1095         set_irn_in(post_bl, n_ret, cf_pred);
1096
1097         /* -- Build a Tuple for all results of the method.
1098            Add Phi node if there was more than one Return.  -- */
1099         turn_into_tuple(post_call, pn_Call_max);
1100         /* First the Memory-Phi */
1101         n_ret = 0;
1102         for (i = 0; i < arity; i++) {
1103                 ret = get_Block_cfgpred(end_bl, i);
1104                 if (is_Return(ret)) {
1105                         cf_pred[n_ret] = get_Return_mem(ret);
1106                         n_ret++;
1107                 }
1108         }
1109         phi = new_Phi(n_ret, cf_pred, mode_M);
1110         set_Tuple_pred(call, pn_Call_M_regular, phi);
1111         /* Conserve Phi-list for further inlinings -- but might be optimized */
1112         if (get_nodes_block(phi) == post_bl) {
1113                 set_irn_link(phi, get_irn_link(post_bl));
1114                 set_irn_link(post_bl, phi);
1115         }
1116         /* Now the real results */
1117         if (n_res > 0) {
1118                 for (j = 0; j < n_res; j++) {
1119                         n_ret = 0;
1120                         for (i = 0; i < arity; i++) {
1121                                 ret = get_Block_cfgpred(end_bl, i);
1122                                 if (is_Return(ret)) {
1123                                         cf_pred[n_ret] = get_Return_res(ret, j);
1124                                         n_ret++;
1125                                 }
1126                         }
1127                         if (n_ret > 0)
1128                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1129                         else
1130                                 phi = new_Bad();
1131                         res_pred[j] = phi;
1132                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1133                         if (get_nodes_block(phi) == post_bl) {
1134                                 set_Phi_next(phi, get_Block_phis(post_bl));
1135                                 set_Block_phis(post_bl, phi);
1136                         }
1137                 }
1138                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1139         } else {
1140                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1141         }
1142         /* handle the regular call */
1143         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1144
1145         /* For now, we cannot inline calls with value_base */
1146         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1147
1148         /* Finally the exception control flow.
1149            We have two (three) possible situations:
1150            First if the Call branches to an exception handler: We need to add a Phi node to
1151            collect the memory containing the exception objects.  Further we need
1152            to add another block to get a correct representation of this Phi.  To
1153            this block we add a Jmp that resolves into the X output of the Call
1154            when the Call is turned into a tuple.
1155            Second the Call branches to End, the exception is not handled.  Just
1156            add all inlined exception branches to the End node.
1157            Third: there is no Exception edge at all. Handle as case two. */
1158         if (exc_handling == exc_handler) {
1159                 n_exc = 0;
1160                 for (i = 0; i < arity; i++) {
1161                         ir_node *ret, *irn;
1162                         ret = get_Block_cfgpred(end_bl, i);
1163                         irn = skip_Proj(ret);
1164                         if (is_fragile_op(irn) || is_Raise(irn)) {
1165                                 cf_pred[n_exc] = ret;
1166                                 ++n_exc;
1167                         }
1168                 }
1169                 if (n_exc > 0) {
1170                         ir_node *block = new_Block(n_exc, cf_pred);
1171                         set_cur_block(block);
1172
1173                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1174                         /* The Phi for the memories with the exception objects */
1175                         n_exc = 0;
1176                         for (i = 0; i < arity; i++) {
1177                                 ir_node *ret;
1178                                 ret = skip_Proj(get_Block_cfgpred(end_bl, i));
1179                                 if (is_Call(ret)) {
1180                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1181                                         n_exc++;
1182                                 } else if (is_fragile_op(ret)) {
1183                                         /* We rely on all fragile ops having the memory output at the same position. */
1184                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1185                                         n_exc++;
1186                                 } else if (is_Raise(ret)) {
1187                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1188                                         n_exc++;
1189                                 }
1190                         }
1191                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1192                 } else {
1193                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1194                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1195                 }
1196         } else {
1197                 ir_node *main_end_bl;
1198                 int main_end_bl_arity;
1199                 ir_node **end_preds;
1200
1201                 /* assert(exc_handling == 1 || no exceptions. ) */
1202                 n_exc = 0;
1203                 for (i = 0; i < arity; i++) {
1204                         ir_node *ret = get_Block_cfgpred(end_bl, i);
1205                         ir_node *irn = skip_Proj(ret);
1206
1207                         if (is_fragile_op(irn) || is_Raise(irn)) {
1208                                 cf_pred[n_exc] = ret;
1209                                 n_exc++;
1210                         }
1211                 }
1212                 main_end_bl       = get_irg_end_block(irg);
1213                 main_end_bl_arity = get_irn_arity(main_end_bl);
1214                 end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
1215
1216                 for (i = 0; i < main_end_bl_arity; ++i)
1217                         end_preds[i] = get_irn_n(main_end_bl, i);
1218                 for (i = 0; i < n_exc; ++i)
1219                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1220                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1221                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1222                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1223                 free(end_preds);
1224         }
1225         free(res_pred);
1226         free(cf_pred);
1227
1228         /* --  Turn optimizations back on. -- */
1229         set_optimize(rem_opt);
1230         current_ir_graph = rem;
1231
1232         return 1;
1233 }
1234
1235 /********************************************************************/
1236 /* Apply inlining to small methods.                                 */
1237 /********************************************************************/
1238
1239 static struct obstack  temp_obst;
1240
1241 /** Represents a possible inlinable call in a graph. */
1242 typedef struct _call_entry {
1243         ir_node    *call;       /**< The Call node. */
1244         ir_graph   *callee;     /**< The callee IR-graph. */
1245         list_head  list;        /**< List head for linking the next one. */
1246         int        loop_depth;  /**< The loop depth of this call. */
1247         int        benefice;    /**< The calculated benefice of this call. */
1248         unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
1249         unsigned   all_const:1; /**< Set if this call has only constant parameters. */
1250 } call_entry;
1251
1252 /**
1253  * environment for inlining small irgs
1254  */
1255 typedef struct _inline_env_t {
1256         struct obstack obst;  /**< An obstack where call_entries are allocated on. */
1257         list_head      calls; /**< The call entry list. */
1258 } inline_env_t;
1259
1260 /**
1261  * Returns the irg called from a Call node. If the irg is not
1262  * known, NULL is returned.
1263  *
1264  * @param call  the call node
1265  */
1266 static ir_graph *get_call_called_irg(ir_node *call) {
1267         ir_node *addr;
1268
1269         addr = get_Call_ptr(call);
1270         if (is_Global(addr)) {
1271                 ir_entity *ent = get_Global_entity(addr);
1272                 return get_entity_irg(ent);
1273         }
1274
1275         return NULL;
1276 }
1277
1278 /**
1279  * Walker: Collect all calls to known graphs inside a graph.
1280  */
1281 static void collect_calls(ir_node *call, void *env) {
1282         (void) env;
1283         if (is_Call(call)) {
1284                 ir_graph *called_irg = get_call_called_irg(call);
1285
1286                 if (called_irg != NULL) {
1287                         /* The Call node calls a locally defined method.  Remember to inline. */
1288                         inline_env_t *ienv  = env;
1289                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1290                         entry->call       = call;
1291                         entry->callee     = called_irg;
1292                         entry->loop_depth = 0;
1293                         entry->benefice   = 0;
1294                         entry->local_adr  = 0;
1295                         entry->all_const  = 0;
1296
1297                         list_add_tail(&entry->list, &ienv->calls);
1298                 }
1299         }
1300 }
1301
1302 /**
1303  * Inlines all small methods at call sites where the called address comes
1304  * from a SymConst node that references the entity representing the called
1305  * method.
1306  * The size argument is a rough measure for the code size of the method:
1307  * Methods where the obstack containing the firm graph is smaller than
1308  * size are inlined.
1309  */
1310 void inline_small_irgs(ir_graph *irg, int size) {
1311         ir_graph *rem = current_ir_graph;
1312         inline_env_t env;
1313         call_entry *entry;
1314
1315         current_ir_graph = irg;
1316         /* Handle graph state */
1317         assert(get_irg_phase_state(irg) != phase_building);
1318         free_callee_info(irg);
1319
1320         /* Find Call nodes to inline.
1321            (We cannot inline during a walk of the graph, as inlining the same
1322            method several times changes the visited flag of the walked graph:
1323            after the first inlining the visited flag of the callee equals that of
1324            the caller; with the next inlining both are increased.) */
1325         obstack_init(&env.obst);
1326         INIT_LIST_HEAD(&env.calls);
1327         irg_walk_graph(irg, NULL, collect_calls, &env);
1328
1329         if (! list_empty(&env.calls)) {
1330                 /* There are calls to inline */
1331                 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1332                 collect_phiprojs(irg);
1333
1334                 list_for_each_entry(call_entry, entry, &env.calls, list) {
1335                         ir_graph            *callee = entry->callee;
1336                         irg_inline_property prop    = get_irg_inline_property(callee);
1337
1338                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1339                                 /* do not inline forbidden / weak graphs */
1340                                 continue;
1341                         }
1342
1343                         if (prop >= irg_inline_forced ||
1344                             _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
1345                                 inline_method(entry->call, callee);
1346                         }
1347                 }
1348                 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1349         }
1350         obstack_free(&env.obst, NULL);
1351         current_ir_graph = rem;
1352 }
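/*
 * Usage sketch for the size heuristic above: a driver loop over all graphs
 * of the program might invoke this pass as below. The threshold 50 is only
 * an example value and is an obstack byte count, not a node count.
 *
 *     int i;
 *     for (i = 0; i < get_irp_n_irgs(); ++i)
 *         inline_small_irgs(get_irp_irg(i), 50);
 */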
1353
1354 /**
1355  * Environment for inlining irgs.
1356  */
1357 typedef struct {
1358         list_head calls;             /**< List of all call nodes in this graph. */
1359         unsigned  *local_weights;    /**< Once allocated, the beneficial weight for transmitting local addresses. */
1360         unsigned  n_nodes;           /**< Number of nodes in the graph, not counting Proj, Tuple, Sync and Block nodes and excluding Start and End. */
1361         unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
1362         unsigned  n_nodes_orig;      /**< for statistics */
1363         unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
1364         unsigned  n_call_nodes_orig; /**< for statistics */
1365         unsigned  n_callers;         /**< Number of known graphs that call this graph. */
1366         unsigned  n_callers_orig;    /**< for statistics */
1367         unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
1368         unsigned  local_vars:1;      /**< Set, if an inlined function got the address of a local variable. */
1369         unsigned  recursive:1;       /**< Set, if this function is self recursive. */
1370 } inline_irg_env;
1371
1372 /**
1373  * Allocate a new environment for inlining.
1374  */
1375 static inline_irg_env *alloc_inline_irg_env(void) {
1376         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1377         INIT_LIST_HEAD(&env->calls);
1378         env->local_weights     = NULL;
1379         env->n_nodes           = -2; /* do not count Start, End */
1380         env->n_blocks          = -2; /* do not count Start, End Block */
1381         env->n_nodes_orig      = -2; /* do not count Start, End */
1382         env->n_call_nodes      = 0;
1383         env->n_call_nodes_orig = 0;
1384         env->n_callers         = 0;
1385         env->n_callers_orig    = 0;
1386         env->got_inline        = 0;
1387         env->local_vars        = 0;
1388         env->recursive         = 0;
1389         return env;
1390 }
1391
1392 typedef struct walker_env {
1393         inline_irg_env *x;     /**< the inline environment */
1394         char ignore_runtime;   /**< the ignore runtime flag */
1395         char ignore_callers;   /**< if set, do not update the callers' statistics */
1396 } wenv_t;
1397
1398 /**
1399  * post-walker: collect all calls in the inline-environment
1400  * of a graph and sum some statistics.
1401  */
1402 static void collect_calls2(ir_node *call, void *ctx) {
1403         wenv_t         *env = ctx;
1404         inline_irg_env *x = env->x;
1405         ir_opcode      code = get_irn_opcode(call);
1406         ir_graph       *callee;
1407         call_entry     *entry;
1408
1409         /* count meaningful nodes in irg */
1410         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1411                 if (code != iro_Block) {
1412                         ++x->n_nodes;
1413                         ++x->n_nodes_orig;
1414                 } else {
1415                         ++x->n_blocks;
1416                 }
1417         }
1418
1419         if (code != iro_Call) return;
1420
1421         /* check, if it's a runtime call */
1422         if (env->ignore_runtime) {
1423                 ir_node *symc = get_Call_ptr(call);
1424
1425                 if (is_Global(symc)) {
1426                         ir_entity *ent = get_Global_entity(symc);
1427
1428                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1429                                 return;
1430                 }
1431         }
1432
1433         /* collect all call nodes */
1434         ++x->n_call_nodes;
1435         ++x->n_call_nodes_orig;
1436
1437         callee = get_call_called_irg(call);
1438         if (callee != NULL) {
1439                 if (! env->ignore_callers) {
1440                         inline_irg_env *callee_env = get_irg_link(callee);
1441                         /* count all static callers */
1442                         ++callee_env->n_callers;
1443                         ++callee_env->n_callers_orig;
1444                 }
1445                 if (callee == current_ir_graph)
1446                         x->recursive = 1;
1447
1448                 /* link it into the list of possibly inlinable entries */
1449                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1450                 entry->call       = call;
1451                 entry->callee     = callee;
1452                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
1453                 entry->benefice   = 0;
1454                 entry->local_adr  = 0;
1455                 entry->all_const  = 0;
1456
1457                 list_add_tail(&entry->list, &x->calls);
1458         }
1459 }
1460
1461 /**
1462  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
1463  * hence this irg is a leave (it calls no other graphs).
1464  */
1465 inline static int is_leave(ir_graph *irg) {
1466         inline_irg_env *env = get_irg_link(irg);
1467         return env->n_call_nodes == 0;
1468 }
1469
1470 /**
1471  * Returns TRUE if the number of nodes in the callee is
1472  * smaller than size in the irg's environment.
1473  */
1474 inline static int is_smaller(ir_graph *callee, unsigned size) {
1475         inline_irg_env *env = get_irg_link(callee);
1476         return env->n_nodes < size;
1477 }
1478
1479 /**
1480  * Duplicate a call entry.
1481  *
1482  * @param entry     the original entry to duplicate
1483  * @param new_call  the new call node
1484  * @param loop_depth_delta
1485  *                  delta value for the loop depth
1486  */
1487 static call_entry *duplicate_call_entry(const call_entry *entry,
1488                                         ir_node *new_call, int loop_depth_delta) {
1489         call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1490         nentry->call       = new_call;
1491         nentry->callee     = entry->callee;
1492         nentry->benefice   = entry->benefice;
1493         nentry->loop_depth = entry->loop_depth + loop_depth_delta;
1494         nentry->local_adr  = entry->local_adr;
1495         nentry->all_const  = entry->all_const;
1496
1497         return nentry;
1498 }
1499
1500 /**
1501  * Append all call nodes of the source environment to the call list of the destination
1502  * environment.
1503  *
1504  * @param dst         destination environment
1505  * @param src         source environment
1506  * @param loop_depth  the loop depth of the call that is replaced by the src list
1507  */
1508 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
1509         call_entry *entry, *nentry;
1510
1511         /* Note that the src list points to Call nodes in the inlined graph, but
1512            we need Call nodes in our graph. Luckily the inliner leaves this information
1513            in the link field. */
1514         list_for_each_entry(call_entry, entry, &src->calls, list) {
1515                 nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
1516                 list_add_tail(&nentry->list, &dst->calls);
1517         }
1518         dst->n_call_nodes += src->n_call_nodes;
1519         dst->n_nodes      += src->n_nodes;
1520 }
1521
1522 /*
1523  * Inlines small leave methods at call sites where the called address comes
1524  * from a SymConst node that references the entity representing the called
1525  * method.
1526  * The size arguments are rough measures for the code size, counted in IR nodes:
1527  * leave methods with fewer than leavesize nodes are inlined first; afterwards,
1528  * methods with fewer than size nodes are inlined while the caller stays below maxsize nodes.
1529  */
1530 void inline_leave_functions(unsigned maxsize, unsigned leavesize,
1531                             unsigned size, int ignore_runtime)
1532 {
1533         inline_irg_env   *env;
1534         ir_graph         *irg;
1535         int              i, n_irgs;
1536         ir_graph         *rem;
1537         int              did_inline;
1538         wenv_t           wenv;
1539         call_entry       *entry, *next;
1540         const call_entry *centry;
1541         pmap             *copied_graphs;
1542         pmap_entry       *pm_entry;
1543
1544         rem = current_ir_graph;
1545         obstack_init(&temp_obst);
1546
1547         /* a map for the copied graphs, used to inline recursive calls */
1548         copied_graphs = pmap_create();
1549
1550         /* extend all irgs by a temporary data structure for inlining. */
1551         n_irgs = get_irp_n_irgs();
1552         for (i = 0; i < n_irgs; ++i)
1553                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1554
1555         /* Pre-compute information in temporary data structure. */
1556         wenv.ignore_runtime = ignore_runtime;
1557         wenv.ignore_callers = 0;
1558         for (i = 0; i < n_irgs; ++i) {
1559                 ir_graph *irg = get_irp_irg(i);
1560
1561                 assert(get_irg_phase_state(irg) != phase_building);
1562                 free_callee_info(irg);
1563
1564                 assure_cf_loop(irg);
1565                 wenv.x = get_irg_link(irg);
1566                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1567         }
1568
1569         /* -- and now inline. -- */
1570
1571         /* Inline leaves repeatedly -- we might construct new leaves. */
1572         do {
1573                 did_inline = 0;
1574
1575                 for (i = 0; i < n_irgs; ++i) {
1576                         ir_node *call;
1577                         int phiproj_computed = 0;
1578
1579                         current_ir_graph = get_irp_irg(i);
1580                         env              = get_irg_link(current_ir_graph);
1581
1582                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1583                         list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1584                                 ir_graph            *callee;
1585                                 irg_inline_property  prop;
1586
1587                                 if (env->n_nodes > maxsize)
1588                                         break;
1589
1590                                 call   = entry->call;
1591                                 callee = entry->callee;
1592
1593                                 prop = get_irg_inline_property(callee);
1594                                 if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1595                                         /* do not inline forbidden / weak graphs */
1596                                         continue;
1597                                 }
1598
1599                                 if (is_leave(callee) && (
1600                                     is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
1601                                         if (!phiproj_computed) {
1602                                                 phiproj_computed = 1;
1603                                                 collect_phiprojs(current_ir_graph);
1604                                         }
1605                                         did_inline = inline_method(call, callee);
1606
1607                                         if (did_inline) {
1608                                                 inline_irg_env *callee_env = get_irg_link(callee);
1609
1610                                                 /* call was inlined, Phi/Projs for current graph must be recomputed */
1611                                                 phiproj_computed = 0;
1612
1613                                                 /* Do some statistics */
1614                                                 env->got_inline = 1;
1615                                                 --env->n_call_nodes;
1616                                                 env->n_nodes += callee_env->n_nodes;
1617                                                 --callee_env->n_callers;
1618
1619                                                 /* remove this call from the list */
1620                                                 list_del(&entry->list);
1621                                                 continue;
1622                                         }
1623                                 }
1624                         }
1625                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1626                 }
1627         } while (did_inline);
1628
1629         /* inline other small functions. */
1630         for (i = 0; i < n_irgs; ++i) {
1631                 ir_node *call;
1632                 int phiproj_computed = 0;
1633
1634                 current_ir_graph = get_irp_irg(i);
1635                 env              = get_irg_link(current_ir_graph);
1636
1637                 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1638
1639                 /* note that the list of possible calls is updated during the process */
1640                 list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1641                         irg_inline_property prop;
1642                         ir_graph            *callee;
1643                         pmap_entry          *e;
1644
1645                         call   = entry->call;
1646                         callee = entry->callee;
1647
1648                         prop = get_irg_inline_property(callee);
1649                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1650                                 /* do not inline forbidden / weak graphs */
1651                                 continue;
1652                         }
1653
1654                         e = pmap_find(copied_graphs, callee);
1655                         if (e != NULL) {
1656                                 /*
1657                                  * Remap callee if we have a copy.
1658                                  * FIXME: Should we do this only for recursive Calls ?
1659                                  */
1660                                 callee = e->value;
1661                         }
1662
1663                         if (prop >= irg_inline_forced ||
1664                             (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
1665                                 if (current_ir_graph == callee) {
1666                                         /*
1667                                          * Recursive call: we cannot directly inline because we cannot walk
1668                                          * the graph and change it. So we have to make a copy of the graph
1669                                          * first.
1670                                          */
1671
1672                                         inline_irg_env *callee_env;
1673                                         ir_graph       *copy;
1674
1675                                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1676
1677                                         /*
1678                                          * No copy yet, create one.
1679                                          * Note that recursive methods are never leaves, so it is sufficient
1680                                          * to test this condition here.
1681                                          */
1682                                         copy = create_irg_copy(callee);
1683
1684                                         /* create_irg_copy() destroys the Proj links, recompute them */
1685                                         phiproj_computed = 0;
1686
1687                                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1688
1689                                         /* allocate new environment */
1690                                         callee_env = alloc_inline_irg_env();
1691                                         set_irg_link(copy, callee_env);
1692
1693                                         assure_cf_loop(copy);
1694                                         wenv.x              = callee_env;
1695                                         wenv.ignore_callers = 1;
1696                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1697
1698                                         /*
1699                                          * Enter the entity of the original graph. This is needed
1700                                          * for inline_method(). However, note that ent->irg still points
1701                                          * to callee, NOT to copy.
1702                                          */
1703                                         set_irg_entity(copy, get_irg_entity(callee));
1704
1705                                         pmap_insert(copied_graphs, callee, copy);
1706                                         callee = copy;
1707
1708                                         /* we have only one caller: the original graph */
1709                                         callee_env->n_callers      = 1;
1710                                         callee_env->n_callers_orig = 1;
1711                                 }
1712                                 if (! phiproj_computed) {
1713                                         phiproj_computed = 1;
1714                                         collect_phiprojs(current_ir_graph);
1715                                 }
1716                                 did_inline = inline_method(call, callee);
1717                                 if (did_inline) {
1718                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1719
1720                                         /* call was inlined, Phi/Projs for current graph must be recomputed */
1721                                         phiproj_computed = 0;
1722
1723                                         /* callee was inlined. Append its call list. */
1724                                         env->got_inline = 1;
1725                                         --env->n_call_nodes;
1726                                         append_call_list(env, callee_env, entry->loop_depth);
1727                                         --callee_env->n_callers;
1728
1729                                         /* after we have inlined callee, all called methods inside callee
1730                                            are now called once more */
1731                                         list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
1732                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1733                                                 ++penv->n_callers;
1734                                         }
1735
1736                                         /* remove this call from the list */
1737                                         list_del(&entry->list);
1738                                         continue;
1739                                 }
1740                         }
1741                 }
1742                 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1743         }
1744
1745         for (i = 0; i < n_irgs; ++i) {
1746                 irg = get_irp_irg(i);
1747                 env = get_irg_link(irg);
1748
1749                 if (env->got_inline) {
1750                         optimize_graph_df(irg);
1751                         optimize_cf(irg);
1752                 }
1753                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1754                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1755                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1756                         env->n_callers_orig, env->n_callers,
1757                         get_entity_name(get_irg_entity(irg))));
1758                 }
1759         }
1760
1761         /* kill the copied graphs: we don't need them anymore */
1762         foreach_pmap(copied_graphs, pm_entry) {
1763                 ir_graph *copy = pm_entry->value;
1764
1765                 /* reset the entity, otherwise it will be deleted in the next step ... */
1766                 set_irg_entity(copy, NULL);
1767                 free_ir_graph(copy);
1768         }
1769         pmap_destroy(copied_graphs);
1770
1771         obstack_free(&temp_obst, NULL);
1772         current_ir_graph = rem;
1773 }
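/*
 * Usage sketch showing how the three size limits interact (the numbers are
 * example values only). With
 *
 *     inline_leave_functions(750, 16, 25, 0);
 *
 * leave methods with fewer than 16 nodes are inlined first (repeatedly,
 * since inlining can create new leaves); afterwards any callee with fewer
 * than 25 nodes is inlined. In both phases the caller is not processed
 * further once it exceeds 750 nodes, and passing 0 as the last argument
 * means calls to runtime entities are not skipped.
 */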
1774
1775 /**
1776  * Calculate the parameter weights for transmitting the address of a local variable.
1777  */
1778 static unsigned calc_method_local_weight(ir_node *arg) {
1779         int      i, j, k;
1780         unsigned v, weight = 0;
1781
1782         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1783                 ir_node *succ = get_irn_out(arg, i);
1784
1785                 switch (get_irn_opcode(succ)) {
1786                 case iro_Load:
1787                 case iro_Store:
1788                         /* Loads and Stores can be removed */
1789                         weight += 3;
1790                         break;
1791                 case iro_Sel:
1792                         /* check if all args are constant */
1793                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1794                                 ir_node *idx = get_Sel_index(succ, j);
1795                                 if (! is_Const(idx))
1796                                         return 0;
1797                         }
1798                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1799                            some unsupported node. */
1800                         v = calc_method_local_weight(succ);
1801                         if (v == 0)
1802                                 return 0;
1803                         /* we can kill one Sel with constant indexes, this is cheap */
1804                         weight += v + 1;
1805                         break;
1806                 case iro_Id:
1807                         /* when looking backward we might find Id nodes */
1808                         weight += calc_method_local_weight(succ);
1809                         break;
1810                 case iro_Tuple:
1811                         /* unoptimized tuple */
1812                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1813                                 ir_node *pred = get_Tuple_pred(succ, j);
1814                                 if (pred == arg) {
1815                                         /* look for Proj(j) */
1816                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1817                                                 ir_node *succ_succ = get_irn_out(succ, k);
1818                                                 if (is_Proj(succ_succ)) {
1819                                                         if (get_Proj_proj(succ_succ) == j) {
1820                                                                 /* found */
1821                                                                 weight += calc_method_local_weight(succ_succ);
1822                                                         }
1823                                                 } else {
1824                                                         /* this should NOT happen */
1825                                                         return 0;
1826                                                 }
1827                                         }
1828                                 }
1829                         }
1830                         break;
1831                 default:
1832                         /* any other node: unsupported yet or bad. */
1833                         return 0;
1834                 }
1835         }
1836         return weight;
1837 }
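/*
 * Worked example for the weights above: an address argument used by one
 * Load, one Store and one Sel with constant indexes whose only user is a
 * further Load yields 3 + 3 + (3 + 1) = 10. A single unsupported user makes
 * the whole chain non-scalarizable and the result drops to 0.
 */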
1838
1839 /**
1840  * Calculate the parameter weights for transmitting the address of a local variable.
1841  */
1842 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1843         ir_entity *ent = get_irg_entity(irg);
1844         ir_type  *mtp;
1845         int      nparams, i, proj_nr;
1846         ir_node  *irg_args, *arg;
1847
1848         mtp      = get_entity_type(ent);
1849         nparams  = get_method_n_params(mtp);
1850
1851         /* allocate a new array; its presence also serves as the 'analysed' flag */
1852         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1853
1854         /* If the method has no parameters, there is nothing to do. */
1855         if (nparams <= 0)
1856                 return;
1857
1858         assure_irg_outs(irg);
1859         irg_args = get_irg_args(irg);
1860         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1861                 arg     = get_irn_out(irg_args, i);
1862                 proj_nr = get_Proj_proj(arg);
1863                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1864         }
1865 }
1866
1867 /**
1868  * Calculate the benefice for transmitting a local variable address.
1869  * After inlining, the local variable might be transformed into an
1870  * SSA variable by scalar_replacement().
1871  */
1872 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1873         inline_irg_env *env = get_irg_link(callee);
1874
1875         if (env->local_weights != NULL) {
1876                 if (pos < ARR_LEN(env->local_weights))
1877                         return env->local_weights[pos];
1878                 return 0;
1879         }
1880
1881         analyze_irg_local_weights(env, callee);
1882
1883         if (pos < ARR_LEN(env->local_weights))
1884                 return env->local_weights[pos];
1885         return 0;
1886 }
1887
1888 /**
1889  * Calculate a benefice value for inlining the given call.
1890  *
1891  * @param entry      the call entry we have to inspect
1892  * @param callee     the called graph
1893  */
1894 static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
1895 {
1896         ir_node   *call = entry->call;
1897         ir_entity *ent  = get_irg_entity(callee);
1898         ir_node   *frame_ptr;
1899         ir_type   *mtp;
1900         int       weight = 0;
1901         int       i, n_params, all_const;
1902         unsigned  cc, v;
1903         irg_inline_property prop;
1904
1905         inline_irg_env *callee_env;
1906
1907         prop = get_irg_inline_property(callee);
1908         if (prop == irg_inline_forbidden) {
1909                 DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
1910                     call, callee));
1911                 return entry->benefice = INT_MIN;
1912         }
1913
1914         if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
1915                 DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
1916                     call, callee));
1917                 return entry->benefice = INT_MIN;
1918         }
1919
1920         /* costs for every passed parameter */
1921         n_params = get_Call_n_params(call);
1922         mtp      = get_entity_type(ent);
1923         cc       = get_method_calling_convention(mtp);
1924         if (cc & cc_reg_param) {
1925                 /* register parameter, smaller costs for register parameters */
1926                 int max_regs = cc & ~cc_bits;
1927
1928                 if (max_regs < n_params)
1929                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1930                 else
1931                         weight += n_params * 2;
1932         } else {
1933                 /* parameters are passed on the stack */
1934                 weight += 5 * n_params;
1935         }
1936
1937         /* constant parameters improve the benefice */
1938         frame_ptr = get_irg_frame(current_ir_graph);
1939         all_const = 1;
1940         for (i = 0; i < n_params; ++i) {
1941                 ir_node *param = get_Call_param(call, i);
1942
1943                 if (is_Const(param)) {
1944                         weight += get_method_param_weight(ent, i);
1945                 } else {
1946                         all_const = 0;
1947                         if (is_SymConst(param))
1948                                 weight += get_method_param_weight(ent, i);
1949                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1950                                 /*
1951                                  * An address of a local variable is transmitted. After
1952                                  * inlining, scalar_replacement might be able to remove the
1953                                  * local variable, so honor this.
1954                                  */
1955                                 v = get_method_local_adress_weight(callee, i);
1956                                 weight += v;
1957                                 if (v > 0)
1958                                         entry->local_adr = 1;
1959                         }
1960                 }
1961         }
1962         entry->all_const = all_const;
1963
1964         callee_env = get_irg_link(callee);
1965         if (callee_env->n_callers == 1 &&
1966             callee != current_ir_graph &&
1967                 get_entity_visibility(ent) == visibility_local) {
1968                 weight += 700;
1969         }
1970
1971         /* give a bonus for functions with one block */
1972         if (callee_env->n_blocks == 1)
1973                 weight = weight * 3 / 2;
1974
1975         /* and one for small non-recursive functions: we want them to be inlined in almost every case */
1976         if (callee_env->n_nodes < 30 && !callee_env->recursive)
1977                 weight += 2000;
1978
1979         /* and finally for leaves: they do not increase the register pressure
1980            because of callee-save registers */
1981         if (callee_env->n_call_nodes == 0)
1982                 weight += 400;
1983
1984         /* it's important to inline inner loops first */
1985         if (entry->loop_depth > 30)
1986                 weight += 30 * 1024;
1987         else
1988                 weight += entry->loop_depth * 1024;
1989
1990         /*
1991          * All arguments being constant is probably a good sign; give an extra bonus
1992          */
1993         if (all_const)
1994                 weight += 1024;
1995
1996         return entry->benefice = weight;
1997 }
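/*
 * Worked example for the benefice computation (example numbers, ignoring the
 * per-parameter weights from get_method_param_weight()): a call with two
 * constant register parameters to a local, single-block, non-recursive leaf
 * of 25 nodes that has exactly one caller, sitting at loop depth 1:
 *
 *     weight  = 2 * 2;           // two register parameters
 *     weight += 700;             // sole caller of a local entity
 *     weight  = weight * 3 / 2;  // single-block bonus        -> 1056
 *     weight += 2000;            // small non-recursive callee
 *     weight += 400;             // leaf, no extra register pressure
 *     weight += 1 * 1024;        // loop depth 1
 *     weight += 1024;            // all arguments constant    -> 5504
 */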
1998
1999 static ir_graph **irgs;
2000 static int      last_irg;
2001
2002 /**
2003  * Callgraph walker, collect all visited graphs.
2004  */
2005 static void callgraph_walker(ir_graph *irg, void *data) {
2006         (void) data;
2007         irgs[last_irg++] = irg;
2008 }
2009
2010 /**
2011  * Creates an inline order for all graphs.
2012  *
2013  * @return the list of graphs.
2014  */
2015 static ir_graph **create_irg_list(void) {
2016         ir_entity **free_methods;
2017         int       arr_len;
2018         int       n_irgs = get_irp_n_irgs();
2019
2020         cgana(&arr_len, &free_methods);
2021         xfree(free_methods);
2022
2023         compute_callgraph();
2024
2025         last_irg = 0;
2026         irgs     = XMALLOCNZ(ir_graph*, n_irgs);
2027
2028         callgraph_walk(NULL, callgraph_walker, NULL);
2029         assert(n_irgs == last_irg);
2030
2031         return irgs;
2032 }
2033
2034 /**
2035  * Push a call onto the priority queue if its benefice is big enough.
2036  *
2037  * @param pqueue   the priority queue of calls
2038  * @param call     the call entry
2039  * @param inline_threshold
2040  *                 the threshold value
2041  */
2042 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
2043                             int inline_threshold)
2044 {
2045         ir_graph            *callee  = call->callee;
2046         irg_inline_property prop     = get_irg_inline_property(callee);
2047         int                 benefice = calc_inline_benefice(call, callee);
2048
2049         DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
2050             get_irn_irg(call->call), call->call, callee, benefice));
2051
2052         if (prop < irg_inline_forced && benefice < inline_threshold) {
2053                 return;
2054         }
2055
2056         pqueue_put(pqueue, call, benefice);
2057 }
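/*
 * Example: with an inline_threshold of 0 a call whose benefice came out
 * negative is dropped here unless the callee is marked irg_inline_forced;
 * queued calls are later handed out with the highest benefice first.
 */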
2058
2059 /**
2060  * Try to inline calls into a graph.
2061  *
2062  * @param irg      the graph into which we inline
2063  * @param maxsize  do NOT inline if the size of irg gets
2064  *                 bigger than this amount
2065  * @param inline_threshold
2066  *                 threshold value for inline decision
2067  * @param copied_graphs
2068  *                 map containing copies of recursive graphs
2069  */
2070 static void inline_into(ir_graph *irg, unsigned maxsize,
2071                         int inline_threshold, pmap *copied_graphs)
2072 {
2073         int            phiproj_computed = 0;
2074         inline_irg_env *env = get_irg_link(irg);
2075         call_entry     *curr_call;
2076         wenv_t         wenv;
2077         pqueue_t       *pqueue;
2078
2079         if (env->n_call_nodes == 0)
2080                 return;
2081
2082         if (env->n_nodes > maxsize) {
2083                 DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
2084                 return;
2085         }
2086
2087         current_ir_graph = irg;
2088         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2089
2090         /* put the calls of this graph into the priority queue */
2091         pqueue = new_pqueue();
2092
2093         list_for_each_entry(call_entry, curr_call, &env->calls, list) {
2094                 assert(is_Call(curr_call->call));
2095                 maybe_push_call(pqueue, curr_call, inline_threshold);
2096         }
2097
2098         /* note that the list of possible calls is updated during the process */
2099         while (!pqueue_empty(pqueue)) {
2100                 int                 did_inline;
2101                 call_entry          *curr_call  = pqueue_pop_front(pqueue);
2102                 ir_graph            *callee     = curr_call->callee;
2103                 ir_node             *call_node  = curr_call->call;
2104                 inline_irg_env      *callee_env = get_irg_link(callee);
2105                 irg_inline_property prop        = get_irg_inline_property(callee);
2106                 int                 loop_depth;
2107                 const call_entry    *centry;
2108                 pmap_entry          *e;
2109
2110                 if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
2111                         DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
2112                                                 env->n_nodes, callee, callee_env->n_nodes));
2113                         continue;
2114                 }
2115
2116                 e = pmap_find(copied_graphs, callee);
2117                 if (e != NULL) {
2118                         int benefice = curr_call->benefice;
2119                         /*
2120                          * Reduce the weight for recursive functions IFF not all arguments are const:
2121                          * inlining recursive functions is rarely good.
2122                          */
2123                         if (!curr_call->all_const)
2124                                 benefice -= 2000;
2125                         if (benefice < inline_threshold)
2126                                 continue;
2127
2128                         /*
2129                          * Remap callee if we have a copy.
2130                          */
2131                         callee     = e->value;
2132                         callee_env = get_irg_link(callee);
2133                 }
2134
2135                 if (current_ir_graph == callee) {
2136                         /*
2137                          * Recursive call: we cannot directly inline because we cannot
2138                          * walk the graph and change it. So we have to make a copy of
2139                          * the graph first.
2140                          */
2141                         int benefice = curr_call->benefice;
2142                         ir_graph *copy;
2143
2144                         /*
2145                          * Reduce the weight for recursive functions IFF not all arguments are const:
2146                          * inlining recursive functions is rarely good.
2147                          */
2148                         if (!curr_call->all_const)
2149                                 benefice -= 2000;
2150                         if (benefice < inline_threshold)
2151                                 continue;
2152
2153                         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2154
2155                         /*
2156                          * No copy yet, create one.
2157                          * Note that recursive methods are never leaves, so it is
2158                          * sufficient to test this condition here.
2159                          */
2160                         copy = create_irg_copy(callee);
2161
2162                         /* create_irg_copy() destroys the Proj links, recompute them */
2163                         phiproj_computed = 0;
2164
2165                         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2166
2167                         /* allocate a new environment */
2168                         callee_env = alloc_inline_irg_env();
2169                         set_irg_link(copy, callee_env);
2170
2171                         assure_cf_loop(copy);
2172                         wenv.x              = callee_env;
2173                         wenv.ignore_callers = 1;
2174                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2175
2176                         /*
2177                          * Enter the entity of the original graph. This is needed
2178                          * for inline_method(). However, note that ent->irg still points
2179                          * to callee, NOT to copy.
2180                          */
2181                         set_irg_entity(copy, get_irg_entity(callee));
2182
2183                         pmap_insert(copied_graphs, callee, copy);
2184                         callee = copy;
2185
2186                         /* we have only one caller: the original graph */
2187                         callee_env->n_callers      = 1;
2188                         callee_env->n_callers_orig = 1;
2189                 }
2190                 if (! phiproj_computed) {
2191                         phiproj_computed = 1;
2192                         collect_phiprojs(current_ir_graph);
2193                 }
2194                 did_inline = inline_method(call_node, callee);
2195                 if (!did_inline)
2196                         continue;
2197
2198                 /* call was inlined, Phi/Projs for current graph must be recomputed */
2199                 phiproj_computed = 0;
2200
2201                 /* remove it from the caller list */
2202                 list_del(&curr_call->list);
2203
2204                 /* callee was inlined. Append its call list. */
2205                 env->got_inline = 1;
2206                 if (curr_call->local_adr)
2207                         env->local_vars = 1;
2208                 --env->n_call_nodes;
2209
2210                 /* we just generated a bunch of new calls */
2211                 loop_depth = curr_call->loop_depth;
2212                 list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
2213                         inline_irg_env *penv = get_irg_link(centry->callee);
2214                         ir_node        *new_call;
2215                         call_entry     *new_entry;
2216
2217                         /* after we have inlined callee, all called methods inside
2218                          * callee are now called once more */
2219                         ++penv->n_callers;
2220
2221                         /* Note that the src list points to Call nodes in the inlined graph,
2222                          * but we need Call nodes in our graph. Luckily the inliner leaves
2223                          * this information in the link field. */
2224                         new_call = get_irn_link(centry->call);
2225                         assert(is_Call(new_call));
2226
2227                         new_entry = duplicate_call_entry(centry, new_call, loop_depth);
2228                         list_add_tail(&new_entry->list, &env->calls);
2229                         maybe_push_call(pqueue, new_entry, inline_threshold);
2230                 }
2231
2232                 env->n_call_nodes += callee_env->n_call_nodes;
2233                 env->n_nodes += callee_env->n_nodes;
2234                 --callee_env->n_callers;
2235         }
2236         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2237         del_pqueue(pqueue);
2238 }
2239
2240 /*
2241  * Heuristic inliner. Calculates a benefice value for every call and inlines
2242  * those calls whose benefice is not below the threshold.
2243  */
2244 void inline_functions(unsigned maxsize, int inline_threshold) {
2245         inline_irg_env   *env;
2246         int              i, n_irgs;
2247         ir_graph         *rem;
2248         wenv_t           wenv;
2249         pmap             *copied_graphs;
2250         pmap_entry       *pm_entry;
2251         ir_graph         **irgs;
2252
2253         rem = current_ir_graph;
2254         obstack_init(&temp_obst);
2255
2256         irgs = create_irg_list();
2257
2258         /* a map for the copied graphs, used to inline recursive calls */
2259         copied_graphs = pmap_create();
2260
2261         /* extend all irgs by a temporary data structure for inlining. */
2262         n_irgs = get_irp_n_irgs();
2263         for (i = 0; i < n_irgs; ++i)
2264                 set_irg_link(irgs[i], alloc_inline_irg_env());
2265
2266         /* Pre-compute information in temporary data structure. */
2267         wenv.ignore_runtime = 0;
2268         wenv.ignore_callers = 0;
2269         for (i = 0; i < n_irgs; ++i) {
2270                 ir_graph *irg = irgs[i];
2271
2272                 free_callee_info(irg);
2273
2274                 wenv.x = get_irg_link(irg);
2275                 assure_cf_loop(irg);
2276                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2277         }
2278
2279         /* -- and now inline. -- */
2280         for (i = 0; i < n_irgs; ++i) {
2281                 ir_graph *irg = irgs[i];
2282
2283                 inline_into(irg, maxsize, inline_threshold, copied_graphs);
2284         }
2285
2286         for (i = 0; i < n_irgs; ++i) {
2287                 ir_graph *irg = irgs[i];
2288
2289                 env = get_irg_link(irg);
2290                 if (env->got_inline) {
2291                         /* this irg got calls inlined: optimize it */
2292                         if (get_opt_combo()) {
2293                                 if (env->local_vars) {
2294                                         scalar_replacement_opt(irg);
2295                                 }
2296                                 combo(irg);
2297                         } else {
2298                                 if (env->local_vars) {
2299                                         if (scalar_replacement_opt(irg)) {
2300                                                 optimize_graph_df(irg);
2301                                         }
2302                                 }
2303                                 optimize_cf(irg);
2304                         }
2305                 }
2306                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2307                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2308                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2309                         env->n_callers_orig, env->n_callers,
2310                         get_entity_name(get_irg_entity(irg))));
2311                 }
2312         }
2313
2314         /* kill the copied graphs: we don't need them anymore */
2315         foreach_pmap(copied_graphs, pm_entry) {
2316                 ir_graph *copy = pm_entry->value;
2317
2318                 /* reset the entity, otherwise it will be deleted in the next step ... */
2319                 set_irg_entity(copy, NULL);
2320                 free_ir_graph(copy);
2321         }
2322         pmap_destroy(copied_graphs);
2323
2324         xfree(irgs);
2325
2326         obstack_free(&temp_obst, NULL);
2327         current_ir_graph = rem;
2328 }
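/*
 * Usage sketch (example values, assuming firm_init_inline() was called
 * during library initialization):
 *
 *     inline_functions(750, 0);
 *
 * keeps every graph below roughly 750 nodes (except for irg_inline_forced
 * callees) and inlines every call whose benefice is at least 0.
 */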
2329
2330 void firm_init_inline(void) {
2331         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2332 }