1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #include "config.h"
27
28 #include <limits.h>
29 #include <assert.h>
30
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irprog_t.h"
34
35 #include "iroptimize.h"
36 #include "ircons_t.h"
37 #include "iropt_t.h"
38 #include "irgopt.h"
39 #include "irgmod.h"
40 #include "irgwalk.h"
41
42 #include "array_t.h"
43 #include "list.h"
44 #include "pset.h"
45 #include "pmap.h"
46 #include "pdeq.h"
47 #include "xmalloc.h"
48 #include "pqueue.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63 #include "iropt_dbg.h"
64
65 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
66
67 /*------------------------------------------------------------------*/
68 /* Routines for dead node elimination / copying garbage collection  */
69 /* of the obstack.                                                  */
70 /*------------------------------------------------------------------*/
71
72 /**
73  * Remember the new node in the old node by using a field all nodes have.
74  */
75 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
76
77 /**
78  * Get this new node, before the old node is forgotten.
79  */
80 #define get_new_node(oldn) get_irn_link(oldn)
81
82 /**
83  * Check if a new node was set.
84  */
85 #define has_new_node(n) (get_new_node(n) != NULL)
86
87 /**
88  * We use the block_visited flag to mark that we have computed the
89  * number of useful predecessors for this block.
90  * Further we encode the new arity in this flag in the old blocks.
91  * Remembering the arity is useful, as it saves a lot of pointer
92  * accesses (a worked example follows the function below).  This function
93  * is called for all Block nodes and for the blocks of all Phi nodes.
94  */
95 static inline int
96 compute_new_arity(ir_node *b) {
97         int i, res, irn_arity;
98         int irg_v, block_v;
99
100         irg_v = get_irg_block_visited(current_ir_graph);
101         block_v = get_Block_block_visited(b);
102         if (block_v >= irg_v) {
103                 /* we computed the number of preds for this block and saved it in the
104                    block_v flag */
105                 return block_v - irg_v;
106         } else {
107                 /* compute the number of good predecessors */
108                 res = irn_arity = get_irn_arity(b);
109                 for (i = 0; i < irn_arity; i++)
110                         if (is_Bad(get_irn_n(b, i))) res--;
111                 /* save it in the flag. */
112                 set_Block_block_visited(b, irg_v + res);
113                 return res;
114         }
115 }
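/*
 * Worked example of the encoding above (illustrative only, the numbers are
 * made up): if get_irg_block_visited(current_ir_graph) is 100 and a block has
 * 4 predecessors of which 1 is Bad, the first call stores 100 + 3 = 103 in the
 * block_visited flag and returns 3; every later call sees 103 >= 100 and
 * simply returns 103 - 100 = 3 without touching the predecessors again.
 */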
116
117 /**
118  * Copies the node to the new obstack. The Ins of the new node point to
119  * the predecessors on the old obstack.  For block/phi nodes not all
120  * predecessors might be copied.  n->link points to the new node.
121  * For Phi and Block nodes the function allocates in-arrays with an arity
122  * only for useful predecessors.  The arity is determined by counting
123  * the non-bad predecessors of the block.
124  *
125  * @param n    The node to be copied
126  * @param env  if non-NULL, the node number attribute will be copied to the new node
127  *
128  * Note: Also used for loop unrolling.
129  */
130 static void copy_node(ir_node *n, void *env) {
131         ir_node *nn, *block;
132         int new_arity;
133         ir_op *op = get_irn_op(n);
134         (void) env;
135
136         if (op == op_Bad) {
137                 /* node copied already */
138                 return;
139         } else if (op == op_Block) {
140                 block = NULL;
141                 new_arity = compute_new_arity(n);
142                 n->attr.block.graph_arr = NULL;
143         } else {
144                 block = get_nodes_block(n);
145                 if (op == op_Phi) {
146                         new_arity = compute_new_arity(block);
147                 } else {
148                         new_arity = get_irn_arity(n);
149                 }
150         }
151         nn = new_ir_node(get_irn_dbg_info(n),
152                 current_ir_graph,
153                 block,
154                 op,
155                 get_irn_mode(n),
156                 new_arity,
157                 get_irn_in(n) + 1);
158         /* Copy the attributes.  These might point to additional data.  If this
159            was allocated on the old obstack the pointers now are dangling.  This
160            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
161         if (op == op_Block) {
162                 /* we cannot allow blocks WITHOUT macroblock input */
163                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
164         }
165         copy_node_attr(n, nn);
166
167 #ifdef DEBUG_libfirm
168         {
169                 int copy_node_nr = env != NULL;
170                 if (copy_node_nr) {
171                         /* for easier debugging, we want to copy the node numbers too */
172                         nn->node_nr = n->node_nr;
173                 }
174         }
175 #endif
176
177         set_new_node(n, nn);
178         hook_dead_node_elim_subst(current_ir_graph, n, nn);
179 }
180
181 /**
182  * Copies the predecessors of the old node to the new node remembered in
183  * the link field; Bad predecessors of Phi and Block nodes are skipped.
184  */
185 static void copy_preds(ir_node *n, void *env) {
186         ir_node *nn, *block;
187         int i, j, irn_arity;
188         (void) env;
189
190         nn = get_new_node(n);
191
192         if (is_Block(n)) {
193                 /* copy the macro block header */
194                 ir_node *mbh = get_Block_MacroBlock(n);
195
196                 if (mbh == n) {
197                         /* this block is a macroblock header */
198                         set_Block_MacroBlock(nn, nn);
199                 } else {
200                         /* get the macro block header */
201                         ir_node *nmbh = get_new_node(mbh);
202                         assert(nmbh != NULL);
203                         set_Block_MacroBlock(nn, nmbh);
204                 }
205
206                 /* Don't copy Bad nodes. */
207                 j = 0;
208                 irn_arity = get_irn_arity(n);
209                 for (i = 0; i < irn_arity; i++) {
210                         if (! is_Bad(get_irn_n(n, i))) {
211                                 ir_node *pred = get_irn_n(n, i);
212                                 set_irn_n(nn, j, get_new_node(pred));
213                                 j++;
214                         }
215                 }
216                 /* repair the block visited flag from above misuse. Repair it in both
217                    graphs so that the old one can still be used. */
218                 set_Block_block_visited(nn, 0);
219                 set_Block_block_visited(n, 0);
220                 /* Local optimization could not merge two subsequent blocks while
221                    the in array contained Bads.  Now it's possible.
222                    We don't call optimize_in_place as it requires
223                    that the fields in ir_graph are set properly. */
224                 if (!has_Block_label(nn) &&
225                     get_opt_control_flow_straightening() &&
226                     get_Block_n_cfgpreds(nn) == 1 &&
227                     is_Jmp(get_Block_cfgpred(nn, 0))) {
228                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
229                         if (nn == old) {
230                                 /* Jmp jumps into the block it is in -- remove the self cycle. */
231                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
232                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
233                         } else {
234                                 exchange(nn, old);
235                         }
236                 }
237         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
238                 /* Don't copy a predecessor if the corresponding predecessor of the block is Bad.
239                    The Block itself should not be Bad. */
240                 block = get_nodes_block(n);
241                 set_nodes_block(nn, get_new_node(block));
242                 j = 0;
243                 irn_arity = get_irn_arity(n);
244                 for (i = 0; i < irn_arity; i++) {
245                         if (! is_Bad(get_irn_n(block, i))) {
246                                 ir_node *pred = get_irn_n(n, i);
247                                 set_irn_n(nn, j, get_new_node(pred));
248                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
249                                 j++;
250                         }
251                 }
252                 /* If the pre walker reached this Phi after the post walker visited the
253                    block, block_visited is > 0. */
254                 set_Block_block_visited(get_nodes_block(n), 0);
255                 /* Compacting the Phi's ins might generate Phis with only one
256                    predecessor. */
257                 if (get_irn_arity(nn) == 1)
258                         exchange(nn, get_irn_n(nn, 0));
259         } else {
260                 irn_arity = get_irn_arity(n);
261                 for (i = -1; i < irn_arity; i++)
262                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
263         }
264         /* Now the new node is complete.  We can add it to the hash table for CSE.
265            @@@ inlining aborts if we identify End. Why? */
266         if (!is_End(nn))
267                 add_identities(current_ir_graph->value_table, nn);
268 }
269
270 /**
271  * Copies the graph recursively, compacts the keep-alives of the end node.
272  *
273  * @param irg           the graph to be copied
274  * @param copy_node_nr  If non-zero, the node number will be copied
275  */
276 static void copy_graph(ir_graph *irg, int copy_node_nr) {
277         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
278         ir_node *ka;      /* keep alive */
279         int i, irn_arity;
280         unsigned long vfl;
281
282         /* Some nodes must be copied by hand, sigh */
283         vfl = get_irg_visited(irg);
284         set_irg_visited(irg, vfl + 1);
285
286         oe = get_irg_end(irg);
287         mark_irn_visited(oe);
288         /* copy the end node by hand, allocate dynamic in array! */
289         ne = new_ir_node(get_irn_dbg_info(oe),
290                 irg,
291                 NULL,
292                 op_End,
293                 mode_X,
294                 -1,
295                 NULL);
296         /* Copy the attributes.  Well, there might be some in the future... */
297         copy_node_attr(oe, ne);
298         set_new_node(oe, ne);
299
300         /* copy the Bad node */
301         ob = get_irg_bad(irg);
302         mark_irn_visited(ob);
303         nb = new_ir_node(get_irn_dbg_info(ob),
304                 irg,
305                 NULL,
306                 op_Bad,
307                 mode_T,
308                 0,
309                 NULL);
310         copy_node_attr(ob, nb);
311         set_new_node(ob, nb);
312
313         /* copy the NoMem node */
314         om = get_irg_no_mem(irg);
315         mark_irn_visited(om);
316         nm = new_ir_node(get_irn_dbg_info(om),
317                 irg,
318                 NULL,
319                 op_NoMem,
320                 mode_M,
321                 0,
322                 NULL);
323         copy_node_attr(om, nm);
324         set_new_node(om, nm);
325
326         /* copy the live nodes */
327         set_irg_visited(irg, vfl);
328         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
329
330         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
331
332         /* visit the anchors as well */
333         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
334                 ir_node *n = get_irg_anchor(irg, i);
335
336                 if (n && (get_irn_visited(n) <= vfl)) {
337                         set_irg_visited(irg, vfl);
338                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
339                 }
340         }
341
342         /* copy_preds for the end node ... */
343         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
344
345         /*- ... and now the keep alives. -*/
346         /* First pick the Block nodes that are not yet marked and walk them.  We must
347            pick these first, otherwise we would overlook blocks reachable only from Phis. */
348         irn_arity = get_End_n_keepalives(oe);
349         for (i = 0; i < irn_arity; i++) {
350                 ka = get_End_keepalive(oe, i);
351                 if (is_Block(ka)) {
352                         if (get_irn_visited(ka) <= vfl) {
353                                 /* We must keep the block alive and copy everything reachable */
354                                 set_irg_visited(irg, vfl);
355                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
356                         }
357                         add_End_keepalive(ne, get_new_node(ka));
358                 }
359         }
360
361         /* Now pick other nodes.  Here we will keep all! */
362         irn_arity = get_End_n_keepalives(oe);
363         for (i = 0; i < irn_arity; i++) {
364                 ka = get_End_keepalive(oe, i);
365                 if (!is_Block(ka)) {
366                         if (get_irn_visited(ka) <= vfl) {
367                                 /* We didn't copy the node yet.  */
368                                 set_irg_visited(irg, vfl);
369                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
370                         }
371                         add_End_keepalive(ne, get_new_node(ka));
372                 }
373         }
374
375         /* start block sometimes only reached after keep alives */
376         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
377         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
378 }
379
380 /**
381  * Copies the graph reachable from current_ir_graph->end to the obstack
382  * in current_ir_graph and fixes the environment.
383  * Then fixes the fields in current_ir_graph containing nodes of the
384  * graph.
385  *
386  * @param copy_node_nr  If non-zero, the node number will be copied
387  */
388 static void
389 copy_graph_env(int copy_node_nr) {
390         ir_graph *irg = current_ir_graph;
391         ir_node *old_end, *new_anchor;
392         int i;
393
394         /* remove end_except and end_reg nodes */
395         old_end = get_irg_end(irg);
396         set_irg_end_except (irg, old_end);
397         set_irg_end_reg    (irg, old_end);
398
399         /* Not all nodes remembered in irg might be reachable
400            from the end node.  Assure their link is set to NULL, so that
401            we can test whether new nodes have been computed. */
402         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
403                 ir_node *n = get_irg_anchor(irg, i);
404                 if (n != NULL)
405                         set_new_node(n, NULL);
406         }
407         /* we use the block walk flag for removing Bads from Blocks ins. */
408         inc_irg_block_visited(irg);
409
410         /* copy the graph */
411         copy_graph(irg, copy_node_nr);
412
413         /* fix the anchor */
414         old_end    = get_irg_end(irg);
415         new_anchor = new_Anchor(irg);
416
417         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
418                 ir_node *n = get_irg_anchor(irg, i);
419                 if (n)
420                         set_irn_n(new_anchor, i, get_new_node(n));
421         }
422         free_End(old_end);
423         irg->anchor = new_anchor;
424
425         /* ensure the new anchor is placed in the endblock */
426         set_nodes_block(new_anchor, get_irg_end_block(irg));
427 }
428
429 /**
430  * Copies all reachable nodes to a new obstack.  Removes bad inputs
431  * from block nodes and the corresponding inputs from Phi nodes.
432  * Merges single exit blocks with single entry blocks and removes
433  * 1-input Phis.
434  * Adds all new nodes to a new hash table for CSE.  Does not
435  * perform CSE, so the hash table might contain common subexpressions.
436  */
437 void dead_node_elimination(ir_graph *irg) {
438         ir_graph *rem;
439 #ifdef INTERPROCEDURAL_VIEW
440         int rem_ipview = get_interprocedural_view();
441 #endif
442         struct obstack *graveyard_obst = NULL;
443         struct obstack *rebirth_obst   = NULL;
444
445         edges_deactivate(irg);
446
447         /* inform statistics that we started a dead-node elimination run */
448         hook_dead_node_elim(irg, 1);
449
450         /* Remember external state of current_ir_graph. */
451         rem = current_ir_graph;
452         current_ir_graph = irg;
453 #ifdef INTERPROCEDURAL_VIEW
454         set_interprocedural_view(0);
455 #endif
456
457         assert(get_irg_phase_state(irg) != phase_building);
458
459         /* Handle graph state */
460         free_callee_info(irg);
461         free_irg_outs(irg);
462         free_trouts();
463
464         /* @@@ so far we lose loops when copying */
465         free_loop_information(irg);
466
467         set_irg_doms_inconsistent(irg);
468
469         /* A quiet place, where the old obstack can rest in peace,
470            until it will be cremated. */
471         graveyard_obst = irg->obst;
472
473         /* A new obstack, where the reachable nodes will be copied to. */
474         rebirth_obst = XMALLOC(struct obstack);
475         irg->obst = rebirth_obst;
476         obstack_init(irg->obst);
477         irg->last_node_idx = 0;
478
479         /* We also need a new value table for CSE */
480         del_identities(irg->value_table);
481         irg->value_table = new_identities();
482
483         /* Copy the graph from the old to the new obstack */
484         copy_graph_env(/*copy_node_nr=*/1);
485
486         /* Free memory from old unoptimized obstack */
487         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
488         xfree(graveyard_obst);            /* ... then free it.           */
489
490         /* inform statistics that the run is over */
491         hook_dead_node_elim(irg, 0);
492
493         current_ir_graph = rem;
494 #ifdef INTERPROCEDURAL_VIEW
495         set_interprocedural_view(rem_ipview);
496 #endif
497 }
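/*
 * Minimal usage sketch (illustrative only; assumes the program already
 * contains at least one graph):
 *
 *     ir_graph *irg = get_irp_irg(0);
 *     dead_node_elimination(irg);   /* live nodes are copied onto a fresh obstack */
 */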
498
499 /**
500  * Relink bad predecessors of a block and store the old in array in the
501  * link field. This function is called by relink_bad_predecessors().
502  * The array stored in the link field starts with the block operand at position 0.
503  * If the block has bad predecessors, create a new in array without the bad preds.
504  * Otherwise leave the in array untouched.
505  */
506 static void relink_bad_block_predecessors(ir_node *n, void *env) {
507         ir_node **new_in, *irn;
508         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
509         (void) env;
510
511         /* if link field of block is NULL, look for bad predecessors otherwise
512            this is already done */
513         if (is_Block(n) && get_irn_link(n) == NULL) {
514                 /* save old predecessors in link field (position 0 is the block operand)*/
515                 set_irn_link(n, get_irn_in(n));
516
517                 /* count predecessors without bad nodes */
518                 old_irn_arity = get_irn_arity(n);
519                 for (i = 0; i < old_irn_arity; i++)
520                         if (!is_Bad(get_irn_n(n, i)))
521                                 ++new_irn_arity;
522
523                 /* arity changing: set new predecessors without bad nodes */
524                 if (new_irn_arity < old_irn_arity) {
525                         /* Get new predecessor array. We do not resize the array, as we must
526                            keep the old one to update Phis. */
527                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
528
529                         /* set new predecessors in array */
530                         new_in[0] = NULL;
531                         new_irn_n = 1;
532                         for (i = 0; i < old_irn_arity; i++) {
533                                 irn = get_irn_n(n, i);
534                                 if (!is_Bad(irn)) {
535                                         new_in[new_irn_n] = irn;
536                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
537                                         ++new_irn_n;
538                                 }
539                         }
540                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
541                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
542                         n->in = new_in;
543                 } /* ir node has bad predecessors */
544         } /* Block is not relinked */
545 }
546
547 /**
548  * Relinks Bad predecessors of Blocks and Phis, called by the walker in
549  * remove_bad_predecessors(). If n is a Block, call
550  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
551  * function for the Phi's Block. If this block has bad predecessors, relink the
552  * predecessors of the Phi node as well.
553  */
554 static void relink_bad_predecessors(ir_node *n, void *env) {
555         ir_node *block, **old_in;
556         int i, old_irn_arity, new_irn_arity;
557
558         /* relink bad predecessors of a block */
559         if (is_Block(n))
560                 relink_bad_block_predecessors(n, env);
561
562         /* If n is a Phi node, relink its block and its predecessors */
563         if (is_Phi(n)) {
564                 /* Relink predecessors of phi's block */
565                 block = get_nodes_block(n);
566                 if (get_irn_link(block) == NULL)
567                         relink_bad_block_predecessors(block, env);
568
569                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
570                 old_irn_arity = ARR_LEN(old_in);
571
572                 /* Relink Phi predecessors if count of predecessors changed */
573                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
574                         /* set new predecessors in array
575                            n->in[0] remains the same block */
576                         new_irn_arity = 1;
577                         for(i = 1; i < old_irn_arity; i++)
578                                 if (!is_Bad(old_in[i])) {
579                                         n->in[new_irn_arity] = n->in[i];
580                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
581                                         ++new_irn_arity;
582                                 }
583
584                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
585                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
586                 }
587         } /* n is a Phi node */
588 }
589
590 /*
591  * Removes Bad predecessors from Blocks and the corresponding
592  * inputs to Phi nodes as in dead_node_elimination but without
593  * copying the graph.
594  * The pre-order walk clears the link field to NULL, the post-order walk calls
595  * relink_bad_predecessors() (this function stores the old in array
596  * in the link field and sets a new in array if the arity of the predecessors
597  * changes).
598  */
599 void remove_bad_predecessors(ir_graph *irg) {
600         panic("Fix backedge handling first");
601         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
602 }
603
604
605 /*
606    __                      _  __ __
607   (_     __    o     _    | \/  |_
608   __)|_| | \_/ | \_/(/_   |_/\__|__
609
610   The following stuff implements a facility that automatically patches
611   registered ir_node pointers to the new node when a dead node elimination occurs.
612 */
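/*
 * Minimal usage sketch of this facility (illustrative only; the node and graph
 * names are assumptions, not part of this file):
 *
 *     ir_graph      *irg       = ...;   /* the graph about to be optimized */
 *     ir_node       *important = ...;   /* some node we must keep track of */
 *     survive_dce_t *sd        = new_survive_dce();
 *     survive_dce_register_irn(sd, &important);
 *     dead_node_elimination(irg);       /* 'important' now points to the copied node */
 *     free_survive_dce(sd);
 */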
613
614 struct _survive_dce_t {
615         struct obstack obst;
616         pmap *places;
617         pmap *new_places;
618         hook_entry_t dead_node_elim;
619         hook_entry_t dead_node_elim_subst;
620 };
621
622 typedef struct _survive_dce_list_t {
623         struct _survive_dce_list_t *next;
624         ir_node **place;
625 } survive_dce_list_t;
626
627 static void dead_node_hook(void *context, ir_graph *irg, int start) {
628         survive_dce_t *sd = context;
629         (void) irg;
630
631         /* Create a new map before the dead node elimination is performed. */
632         if (start) {
633                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
634         } else {
635                 /* Dead node elimination is over: drop the old map and keep the one holding the patched-back places. */
636                 pmap_destroy(sd->places);
637                 sd->places     = sd->new_places;
638                 sd->new_places = NULL;
639         }
640 }
641
642 /**
643  * Hook called when dead node elimination replaces old by nw.
644  */
645 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
646         survive_dce_t *sd = context;
647         survive_dce_list_t *list = pmap_get(sd->places, old);
648         (void) irg;
649
650         /* If the node is to be patched back, write the new address to all registered locations. */
651         if (list) {
652                 survive_dce_list_t *p;
653
654                 for (p = list; p; p = p->next)
655                         *(p->place) = nw;
656
657                 pmap_insert(sd->new_places, nw, list);
658         }
659 }
660
661 /**
662  * Make a new Survive DCE environment.
663  */
664 survive_dce_t *new_survive_dce(void) {
665         survive_dce_t *res = XMALLOC(survive_dce_t);
666         obstack_init(&res->obst);
667         res->places     = pmap_create();
668         res->new_places = NULL;
669
670         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
671         res->dead_node_elim.context                   = res;
672         res->dead_node_elim.next                      = NULL;
673
674         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
675         res->dead_node_elim_subst.context = res;
676         res->dead_node_elim_subst.next    = NULL;
677
678         register_hook(hook_dead_node_elim, &res->dead_node_elim);
679         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
680         return res;
681 }
682
683 /**
684  * Free a Survive DCE environment.
685  */
686 void free_survive_dce(survive_dce_t *sd) {
687         obstack_free(&sd->obst, NULL);
688         pmap_destroy(sd->places);
689         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
690         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
691         xfree(sd);
692 }
693
694 /**
695  * Register a node pointer to be patched upon DCE.
696  * When DCE occurs, the node pointer specified by @p place will be
697  * patched to the new address of the node it is pointing to.
698  *
699  * @param sd    The Survive DCE environment.
700  * @param place The address of the node pointer.
701  */
702 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
703         if (*place != NULL) {
704                 ir_node *irn      = *place;
705                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
706                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
707
708                 nw->next  = curr;
709                 nw->place = place;
710
711                 pmap_insert(sd->places, irn, nw);
712         }
713 }
714
715 /*--------------------------------------------------------------------*/
716 /*  Functionality for inlining                                         */
717 /*--------------------------------------------------------------------*/
718
719 /**
720  * Copy node for inlining.  Updates attributes that change when
721  * inlining but not for dead node elimination.
722  *
723  * Copies the node by calling copy_node() and then updates the entity if
724  * it's a local one.  env must be a pointer to the frame type of the
725  * inlined procedure. The new entities must be in the link field of
726  * the entities.
727  */
728 static void copy_node_inline(ir_node *n, void *env) {
729         ir_node *nn;
730         ir_type *frame_tp = (ir_type *)env;
731
732         copy_node(n, NULL);
733         if (is_Sel(n)) {
734                 nn = get_new_node (n);
735                 assert(is_Sel(nn));
736                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
737                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
738                 }
739         } else if (is_Block(n)) {
740                 nn = get_new_node (n);
741                 nn->attr.block.irg = current_ir_graph;
742         }
743 }
744
745 /**
746  * Copies the predecessors of the old node and moves constants to
747  * the Start Block.
748  */
749 static void copy_preds_inline(ir_node *n, void *env) {
750         ir_node *nn;
751
752         copy_preds(n, env);
753         nn = skip_Id(get_new_node(n));
754         if (is_irn_constlike(nn)) {
755                 /* move Constants into the start block */
756                 set_nodes_block(nn, get_irg_start_block(current_ir_graph));
757
758                 n = identify_remember(current_ir_graph->value_table, nn);
759                 if (nn != n) {
760                         DBG_OPT_CSE(nn, n);
761                         exchange(nn, n);
762                 }
763         }
764 }
765
766 /**
767  * Walker: checks if P_value_arg_base is used.
768  */
769 static void find_addr(ir_node *node, void *env) {
770         int *allow_inline = env;
771         if (is_Sel(node)) {
772                 ir_graph *irg = current_ir_graph;
773                 if (get_Sel_ptr(node) == get_irg_frame(irg)) {
774                         /* access to frame */
775                         ir_entity *ent = get_Sel_entity(node);
776                         if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
777                                 /* access to value_type */
778                                 *allow_inline = 0;
779                         }
780                 }
781         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
782                 /* From GCC:
783                  * Refuse to inline an alloca call unless the user explicitly forced it, as this
784                  * may change the program's memory overhead drastically when the function
785                  * using alloca is called in a loop.  In the GCC benchmark present in SPEC2000,
786                  * inlining into schedule_block caused it to require 2GB of RAM instead of 256MB.
787                  *
788                  * Sadly this is true for our implementation as well.
789                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
790                  * disables inlining of functions using VLA (which are completely safe).
791                  *
792                  * 2 Solutions:
793                  * - add a flag to the Alloc node for "real" alloca() calls
794                  * - add a new Stack-Restore node at the end of a function using alloca()
795                  */
796                 *allow_inline = 0;
797         }
798 }
799
800 /**
801  * Check if we can inline a given call.
802  * Currently, we cannot inline two cases:
803  * - call with compound arguments
804  * - graphs that take the address of a parameter
805  *
806  * check these conditions here
807  */
808 static int can_inline(ir_node *call, ir_graph *called_graph) {
809         ir_type *call_type = get_Call_type(call);
810         int params, ress, i, res;
811         assert(is_Method_type(call_type));
812
813         params = get_method_n_params(call_type);
814         ress   = get_method_n_ress(call_type);
815
816         /* check parameters for compound arguments */
817         for (i = 0; i < params; ++i) {
818                 ir_type *p_type = get_method_param_type(call_type, i);
819
820                 if (is_compound_type(p_type))
821                         return 0;
822         }
823
824         /* check results for compound types */
825         for (i = 0; i < ress; ++i) {
826                 ir_type *r_type = get_method_res_type(call_type, i);
827
828                 if (is_compound_type(r_type))
829                         return 0;
830         }
831
832         res = 1;
833         irg_walk_graph(called_graph, find_addr, NULL, &res);
834
835         return res;
836 }
837
838 enum exc_mode {
839         exc_handler    = 0, /**< There is a handler. */
840         exc_to_end     = 1, /**< Branches to End. */
841         exc_no_handler = 2  /**< Exception handling not represented. */
842 };
843
844 /* Inlines a method at the given call site. */
845 int inline_method(ir_node *call, ir_graph *called_graph) {
846         ir_node             *pre_call;
847         ir_node             *post_call, *post_bl;
848         ir_node             *in[pn_Start_max];
849         ir_node             *end, *end_bl, *block;
850         ir_node             **res_pred;
851         ir_node             **cf_pred;
852         ir_node             **args_in;
853         ir_node             *ret, *phi;
854         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
855         enum exc_mode       exc_handling;
856         ir_type             *called_frame, *curr_frame, *mtp, *ctp;
857         ir_entity           *ent;
858         ir_graph            *rem, *irg;
859         irg_inline_property prop = get_irg_inline_property(called_graph);
860         unsigned long       visited;
861
862         if (prop == irg_inline_forbidden)
863                 return 0;
864
865         ent = get_irg_entity(called_graph);
866
867         mtp = get_entity_type(ent);
868         ctp = get_Call_type(call);
869         if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
870                 /* this is a bad feature of C: without a prototype, we can
871                  * call a function with fewer parameters than needed. Currently
872                  * we don't support this, although we could use Unknown then. */
873                 return 0;
874         }
875
876         /* Argh, compiling C has some bad consequences:
877          * It is implementation dependent what happens in that case.
878          * We support inlining if the bit size of the types matches AND
879          * the same arithmetic is used. */
880         n_params = get_method_n_params(mtp);
881         for (i = n_params - 1; i >= 0; --i) {
882                 ir_type *param_tp = get_method_param_type(mtp, i);
883                 ir_type *arg_tp   = get_method_param_type(ctp, i);
884
885                 if (param_tp != arg_tp) {
886                         ir_mode *pmode = get_type_mode(param_tp);
887                         ir_mode *amode = get_type_mode(arg_tp);
888
889                         if (pmode == NULL || amode == NULL)
890                                 return 0;
891                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
892                                 return 0;
893                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
894                                 return 0;
895                         /* otherwise we can simply "reinterpret" the bits */
896                 }
897         }
898
899         irg = get_irn_irg(call);
900
901         /*
902          * We cannot inline a recursive call. The graph must be copied before
903          * the call to inline_method() using create_irg_copy().
904          */
905         if (called_graph == irg)
906                 return 0;
907
908         /*
909          * currently, we cannot inline two cases:
910          * - call with compound arguments
911          * - graphs that take the address of a parameter
912          */
913         if (! can_inline(call, called_graph))
914                 return 0;
915
916         rem = current_ir_graph;
917         current_ir_graph = irg;
918
919         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
920
921         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
922         rem_opt = get_opt_optimize();
923         set_optimize(0);
924
925         /* Handle graph state */
926         assert(get_irg_phase_state(irg) != phase_building);
927         assert(get_irg_pinned(irg) == op_pin_state_pinned);
928         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
929         set_irg_outs_inconsistent(irg);
930         set_irg_extblk_inconsistent(irg);
931         set_irg_doms_inconsistent(irg);
932         set_irg_loopinfo_inconsistent(irg);
933         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
934         set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
935
936         /* -- Check preconditions -- */
937         assert(is_Call(call));
938
939         /* here we know we WILL inline, so inform the statistics */
940         hook_inline(call, called_graph);
941
942         /* -- Decide how to handle exception control flow: Is there a handler
943            for the Call node, or do we branch directly to End on an exception?
944            exc_handling:
945            0 There is a handler.
946            1 Branches to End.
947            2 Exception handling not represented in Firm. -- */
948         {
949                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
950                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
951                         long proj_nr = get_Proj_proj(proj);
952                         if (proj_nr == pn_Call_X_except) Xproj = proj;
953                         if (proj_nr == pn_Call_M_except) Mproj = proj;
954                 }
955                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
956                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
957                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
958         }
959
960         /* create the argument tuple */
961         NEW_ARR_A(ir_node *, args_in, n_params);
962
963         block = get_nodes_block(call);
964         for (i = n_params - 1; i >= 0; --i) {
965                 ir_node *arg      = get_Call_param(call, i);
966                 ir_type *param_tp = get_method_param_type(mtp, i);
967                 ir_mode *mode     = get_type_mode(param_tp);
968
969                 if (mode != get_irn_mode(arg)) {
970                         arg = new_r_Conv(irg, block, arg, mode, 0);
971                 }
972                 args_in[i] = arg;
973         }
974
975         /* -- Pre_call is a Tuple providing what the Start node delivered to
976            the procedure and later replaces the Start node of the called graph.
977            Post_call is the old Call node and collects the results of the called
978            graph. Both will end up being a tuple.  -- */
979         post_bl = get_nodes_block(call);
980         set_irg_current_block(irg, post_bl);
981         /* XxMxPxPxPxT of Start + parameter of Call */
982         in[pn_Start_X_initial_exec]   = new_Jmp();
983         in[pn_Start_M]                = get_Call_mem(call);
984         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
985         in[pn_Start_P_tls]            = get_irg_tls(irg);
986         in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
987         pre_call = new_Tuple(pn_Start_max, in);
988         post_call = call;
989
990         /* --
991            The new block gets the ins of the old block, pre_call and all its
992            predecessors and all Phi nodes. -- */
993         part_block(pre_call);
994
995         /* -- Prepare state for dead node elimination -- */
996         /* Visited flags in calling irg must be >= flag in called irg.
997            Else walker and arity computation will not work. */
998         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
999                 set_irg_visited(irg, get_irg_visited(called_graph) + 1);
1000         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
1001                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
1002         visited = get_irg_visited(irg);
1003
1004         /* Set pre_call as new Start node in link field of the start node of
1005            calling graph and pre_calls block as new block for the start block
1006            of calling graph.
1007            Further mark these nodes so that they are not visited by the
1008            copying. */
1009         set_irn_link(get_irg_start(called_graph), pre_call);
1010         set_irn_visited(get_irg_start(called_graph), visited);
1011         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
1012         set_irn_visited(get_irg_start_block(called_graph), visited);
1013
1014         set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
1015         set_irn_visited(get_irg_bad(called_graph), visited);
1016
1017         set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
1018         set_irn_visited(get_irg_no_mem(called_graph), visited);
1019
1020         /* Initialize for compaction of in arrays */
1021         inc_irg_block_visited(irg);
1022
1023         /* -- Replicate local entities of the called_graph -- */
1024         /* copy the entities. */
1025         irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1026         called_frame = get_irg_frame_type(called_graph);
1027         curr_frame   = get_irg_frame_type(irg);
1028         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
1029                 ir_entity *new_ent, *old_ent;
1030                 old_ent = get_class_member(called_frame, i);
1031                 new_ent = copy_entity_own(old_ent, curr_frame);
1032                 set_entity_link(old_ent, new_ent);
1033         }
1034
1035         /* visited is now greater than that of the called graph.  With this trick the
1036            visited flag will remain unchanged, so an outer walker, e.g. one searching
1037            the call nodes to inline and triggering this inlining, will not visit the inlined nodes. */
1038         set_irg_visited(irg, get_irg_visited(irg)-1);
1039
1040         /* -- Performing dead node elimination inlines the graph -- */
1041         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
1042            entities. */
1043         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
1044                  get_irg_frame_type(called_graph));
1045
1046         irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1047
1048         /* Repair called_graph */
1049         set_irg_visited(called_graph, get_irg_visited(irg));
1050         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
1051         set_Block_block_visited(get_irg_start_block(called_graph), 0);
1052
1053         /* -- Merge the end of the inlined procedure with the call site -- */
1054         /* We will turn the old Call node into a Tuple with the following
1055            predecessors:
1056            -1:  Block of Tuple.
1057            0: Phi of all Memories of Return statements.
1058            1: Jmp from new Block that merges the control flow from all exception
1059            predecessors of the old end block.
1060            2: Tuple of all arguments.
1061            3: Phi of Exception memories.
1062            In case the old Call directly branches to End on an exception we don't
1063            need the block merging all exceptions nor the Phi of the exception
1064            memories.
1065         */
1066
1067         /* -- Precompute some values -- */
1068         end_bl = get_new_node(get_irg_end_block(called_graph));
1069         end = get_new_node(get_irg_end(called_graph));
1070         arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
1071         n_res = get_method_n_ress(get_Call_type(call));
1072
1073         res_pred = XMALLOCN(ir_node*, n_res);
1074         cf_pred  = XMALLOCN(ir_node*, arity);
1075
1076         set_irg_current_block(irg, post_bl); /* just to make sure */
1077
1078         /* -- archive keepalives -- */
1079         irn_arity = get_irn_arity(end);
1080         for (i = 0; i < irn_arity; i++) {
1081                 ir_node *ka = get_End_keepalive(end, i);
1082                 if (! is_Bad(ka))
1083                         add_End_keepalive(get_irg_end(irg), ka);
1084         }
1085
1086         /* The new end node will die.  We need not free as the in array is on the obstack:
1087            copy_node() only generated 'D' arrays. */
1088
1089         /* -- Replace Return nodes by Jump nodes. -- */
1090         n_ret = 0;
1091         for (i = 0; i < arity; i++) {
1092                 ir_node *ret;
1093                 ret = get_Block_cfgpred(end_bl, i);
1094                 if (is_Return(ret)) {
1095                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1096                         n_ret++;
1097                 }
1098         }
1099         set_irn_in(post_bl, n_ret, cf_pred);
1100
1101         /* -- Build a Tuple for all results of the method.
1102            Add Phi node if there was more than one Return.  -- */
1103         turn_into_tuple(post_call, pn_Call_max);
1104         /* First the Memory-Phi */
1105         n_ret = 0;
1106         for (i = 0; i < arity; i++) {
1107                 ret = get_Block_cfgpred(end_bl, i);
1108                 if (is_Return(ret)) {
1109                         cf_pred[n_ret] = get_Return_mem(ret);
1110                         n_ret++;
1111                 }
1112         }
1113         phi = new_Phi(n_ret, cf_pred, mode_M);
1114         set_Tuple_pred(call, pn_Call_M_regular, phi);
1115         /* Conserve Phi-list for further inlinings -- but might be optimized */
1116         if (get_nodes_block(phi) == post_bl) {
1117                 set_irn_link(phi, get_irn_link(post_bl));
1118                 set_irn_link(post_bl, phi);
1119         }
1120         /* Now the real results */
1121         if (n_res > 0) {
1122                 for (j = 0; j < n_res; j++) {
1123                         n_ret = 0;
1124                         for (i = 0; i < arity; i++) {
1125                                 ret = get_Block_cfgpred(end_bl, i);
1126                                 if (is_Return(ret)) {
1127                                         cf_pred[n_ret] = get_Return_res(ret, j);
1128                                         n_ret++;
1129                                 }
1130                         }
1131                         if (n_ret > 0)
1132                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1133                         else
1134                                 phi = new_Bad();
1135                         res_pred[j] = phi;
1136                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1137                         if (get_nodes_block(phi) == post_bl) {
1138                                 set_Phi_next(phi, get_Block_phis(post_bl));
1139                                 set_Block_phis(post_bl, phi);
1140                         }
1141                 }
1142                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1143         } else {
1144                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1145         }
1146         /* handle the regular call */
1147         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1148
1149         /* For now, we cannot inline calls with value_base */
1150         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1151
1152         /* Finally the exception control flow.
1153            We have two (three) possible situations:
1154            First if the Call branches to an exception handler: We need to add a Phi node to
1155            collect the memory containing the exception objects.  Further we need
1156            to add another block to get a correct representation of this Phi.  To
1157            this block we add a Jmp that resolves into the X output of the Call
1158            when the Call is turned into a tuple.
1159            Second the Call branches to End, the exception is not handled.  Just
1160            add all inlined exception branches to the End node.
1161            Third: there is no Exception edge at all. Handle as case two. */
1162         if (exc_handling == exc_handler) {
1163                 n_exc = 0;
1164                 for (i = 0; i < arity; i++) {
1165                         ir_node *ret, *irn;
1166                         ret = get_Block_cfgpred(end_bl, i);
1167                         irn = skip_Proj(ret);
1168                         if (is_fragile_op(irn) || is_Raise(irn)) {
1169                                 cf_pred[n_exc] = ret;
1170                                 ++n_exc;
1171                         }
1172                 }
1173                 if (n_exc > 0) {
1174                         ir_node *block = new_Block(n_exc, cf_pred);
1175                         set_cur_block(block);
1176
1177                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1178                         /* The Phi for the memories with the exception objects */
1179                         n_exc = 0;
1180                         for (i = 0; i < arity; i++) {
1181                                 ir_node *ret;
1182                                 ret = skip_Proj(get_Block_cfgpred(end_bl, i));
1183                                 if (is_Call(ret)) {
1184                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1185                                         n_exc++;
1186                                 } else if (is_fragile_op(ret)) {
1187                                         /* We rely on all cfops having the memory output at the same position. */
1188                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1189                                         n_exc++;
1190                                 } else if (is_Raise(ret)) {
1191                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1192                                         n_exc++;
1193                                 }
1194                         }
1195                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1196                 } else {
1197                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1198                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1199                 }
1200         } else {
1201                 ir_node *main_end_bl;
1202                 int main_end_bl_arity;
1203                 ir_node **end_preds;
1204
1205                 /* assert(exc_handling == 1 || no exceptions. ) */
1206                 n_exc = 0;
1207                 for (i = 0; i < arity; i++) {
1208                         ir_node *ret = get_Block_cfgpred(end_bl, i);
1209                         ir_node *irn = skip_Proj(ret);
1210
1211                         if (is_fragile_op(irn) || is_Raise(irn)) {
1212                                 cf_pred[n_exc] = ret;
1213                                 n_exc++;
1214                         }
1215                 }
1216                 main_end_bl       = get_irg_end_block(irg);
1217                 main_end_bl_arity = get_irn_arity(main_end_bl);
1218                 end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
1219
1220                 for (i = 0; i < main_end_bl_arity; ++i)
1221                         end_preds[i] = get_irn_n(main_end_bl, i);
1222                 for (i = 0; i < n_exc; ++i)
1223                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1224                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1225                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1226                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1227                 free(end_preds);
1228         }
1229         free(res_pred);
1230         free(cf_pred);
1231
1232         /* -- Turn optimizations back on. -- */
1233         set_optimize(rem_opt);
1234         current_ir_graph = rem;
1235
1236         return 1;
1237 }
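/*
 * Illustrative call-site sketch (the names are assumptions, not part of this
 * file).  Note that the caller must have collected the Proj and Phi lists
 * beforehand, as inline_small_irgs() below does with collect_phiprojs():
 *
 *     ir_node  *call   = ...;                         /* a Call node found by a walker */
 *     ir_graph *callee = get_call_called_irg(call);   /* helper defined further below */
 *     if (callee != NULL && inline_method(call, callee)) {
 *             /* the Call was replaced by a Tuple holding the inlined results */
 *     }
 */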
1238
1239 /********************************************************************/
1240 /* Apply inlining to small methods.                                 */
1241 /********************************************************************/
1242
1243 static struct obstack  temp_obst;
1244
1245 /** Represents a possibly inlinable call in a graph. */
1246 typedef struct _call_entry {
1247         ir_node    *call;       /**< The Call node. */
1248         ir_graph   *callee;     /**< The callee IR-graph. */
1249         list_head  list;        /**< List head for linking the next one. */
1250         int        loop_depth;  /**< The loop depth of this call. */
1251         int        benefice;    /**< The calculated benefice of this call. */
1252         unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
1253         unsigned   all_const:1; /**< Set if this call has only constant parameters. */
1254 } call_entry;
1255
1256 /**
1257  * environment for inlining small irgs
1258  */
1259 typedef struct _inline_env_t {
1260         struct obstack obst;  /**< An obstack where call_entries are allocated on. */
1261         list_head      calls; /**< The call entry list. */
1262 } inline_env_t;
1263
1264 /**
1265  * Returns the irg called from a Call node. If the irg is not
1266  * known, NULL is returned.
1267  *
1268  * @param call  the call node
1269  */
1270 static ir_graph *get_call_called_irg(ir_node *call) {
1271         ir_node *addr;
1272
1273         addr = get_Call_ptr(call);
1274         if (is_Global(addr)) {
1275                 ir_entity *ent = get_Global_entity(addr);
1276                 return get_entity_irg(ent);
1277         }
1278
1279         return NULL;
1280 }
1281
1282 /**
1283  * Walker: Collect all calls to known graphs inside a graph.
1284  */
1285 static void collect_calls(ir_node *call, void *env) {
1286         (void) env;
1287         if (is_Call(call)) {
1288                 ir_graph *called_irg = get_call_called_irg(call);
1289
1290                 if (called_irg != NULL) {
1291                         /* The Call node calls a locally defined method.  Remember to inline. */
1292                         inline_env_t *ienv  = env;
1293                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1294                         entry->call       = call;
1295                         entry->callee     = called_irg;
1296                         entry->loop_depth = 0;
1297                         entry->benefice   = 0;
1298                         entry->local_adr  = 0;
1299                         entry->all_const  = 0;
1300
1301                         list_add_tail(&entry->list, &ienv->calls);
1302                 }
1303         }
1304 }
1305
1306 /**
1307  * Inlines all small methods at call sites where the called address comes
1308  * from a SymConst node referencing the entity that represents the called
1309  * method.
1310  * The size argument is a rough measure for the code size of the method:
1311  * Methods where the obstack containing the firm graph is smaller than
1312  * size are inlined.  A hypothetical usage sketch follows the function body.
1313  */
1314 void inline_small_irgs(ir_graph *irg, int size) {
1315         ir_graph *rem = current_ir_graph;
1316         inline_env_t env;
1317         call_entry *entry;
1318
1319         current_ir_graph = irg;
1320         /* Handle graph state */
1321         assert(get_irg_phase_state(irg) != phase_building);
1322         free_callee_info(irg);
1323
1324         /* Find Call nodes to inline.
1325            (We cannot inline during a walk of the graph, as inlining the same
1326            method several times changes the visited flag of the walked graph:
1327            after the first inlining the visited value of the callee equals that of
1328            the caller.  With the next inlining both are increased.) */
1329         obstack_init(&env.obst);
1330         INIT_LIST_HEAD(&env.calls);
1331         irg_walk_graph(irg, NULL, collect_calls, &env);
1332
1333         if (! list_empty(&env.calls)) {
1334                 /* There are calls to inline */
1335                 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1336                 collect_phiprojs(irg);
1337
1338                 list_for_each_entry(call_entry, entry, &env.calls, list) {
1339                         ir_graph            *callee = entry->callee;
1340                         irg_inline_property prop    = get_irg_inline_property(callee);
1341
1342                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1343                                 /* do not inline forbidden / weak graphs */
1344                                 continue;
1345                         }
1346
1347                         if (prop >= irg_inline_forced ||
1348                             _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
1349                                 inline_method(entry->call, callee);
1350                         }
1351                 }
1352                 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1353         }
1354         obstack_free(&env.obst, NULL);
1355         current_ir_graph = rem;
1356 }
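
/*
 * Hypothetical usage sketch (not part of libFirm): a driver could run this
 * pass over every graph of the current program.  The size threshold below is
 * an arbitrary example value, not a recommendation.
 *
 *     int i, n = get_irp_n_irgs();
 *     for (i = 0; i < n; ++i)
 *         inline_small_irgs(get_irp_irg(i), 5000);
 */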
1357
1358 /**
1359  * Environment for inlining irgs.
1360  */
1361 typedef struct {
1362         list_head calls;             /**< List of all call nodes in this graph. */
1363         unsigned  *local_weights;    /**< Once allocated, the benefice weights for transmitting local variable addresses. */
1364         unsigned  n_nodes;           /**< Number of nodes in graph, excluding Start, End, Block, Proj, Tuple and Sync nodes. */
1365         unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
1366         unsigned  n_nodes_orig;      /**< for statistics */
1367         unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
1368         unsigned  n_call_nodes_orig; /**< for statistics */
1369         unsigned  n_callers;         /**< Number of known graphs that call this graph. */
1370         unsigned  n_callers_orig;    /**< for statistics */
1371         unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
1372         unsigned  local_vars:1;      /**< Set, if an inlined function got the address of a local variable. */
1373         unsigned  recursive:1;       /**< Set, if this function is self recursive. */
1374 } inline_irg_env;
1375
1376 /**
1377  * Allocate a new environment for inlining.
1378  */
1379 static inline_irg_env *alloc_inline_irg_env(void) {
1380         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1381         INIT_LIST_HEAD(&env->calls);
1382         env->local_weights     = NULL;
1383         env->n_nodes           = -2; /* do not count Start, End */
1384         env->n_blocks          = -2; /* do not count Start and End blocks */
1385         env->n_nodes_orig      = -2; /* do not count Start, End */
1386         env->n_call_nodes      = 0;
1387         env->n_call_nodes_orig = 0;
1388         env->n_callers         = 0;
1389         env->n_callers_orig    = 0;
1390         env->got_inline        = 0;
1391         env->local_vars        = 0;
1392         env->recursive         = 0;
1393         return env;
1394 }
1395
1396 typedef struct walker_env {
1397         inline_irg_env *x;     /**< the inline environment */
1398         char ignore_runtime;   /**< the ignore runtime flag */
1399         char ignore_callers;   /**< if set, do NOT update the callers data */
1400 } wenv_t;
1401
1402 /**
1403  * post-walker: collect all calls in the inline-environment
1404  * of a graph and accumulate some statistics.
1405  */
1406 static void collect_calls2(ir_node *call, void *ctx) {
1407         wenv_t         *env = ctx;
1408         inline_irg_env *x = env->x;
1409         ir_opcode      code = get_irn_opcode(call);
1410         ir_graph       *callee;
1411         call_entry     *entry;
1412
1413         /* count meaningful nodes in irg */
1414         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1415                 if (code != iro_Block) {
1416                         ++x->n_nodes;
1417                         ++x->n_nodes_orig;
1418                 } else {
1419                         ++x->n_blocks;
1420                 }
1421         }
1422
1423         if (code != iro_Call) return;
1424
1425         /* check, if it's a runtime call */
1426         if (env->ignore_runtime) {
1427                 ir_node *symc = get_Call_ptr(call);
1428
1429                 if (is_Global(symc)) {
1430                         ir_entity *ent = get_Global_entity(symc);
1431
1432                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1433                                 return;
1434                 }
1435         }
1436
1437         /* collect all call nodes */
1438         ++x->n_call_nodes;
1439         ++x->n_call_nodes_orig;
1440
1441         callee = get_call_called_irg(call);
1442         if (callee != NULL) {
1443                 if (! env->ignore_callers) {
1444                         inline_irg_env *callee_env = get_irg_link(callee);
1445                         /* count all static callers */
1446                         ++callee_env->n_callers;
1447                         ++callee_env->n_callers_orig;
1448                 }
1449                 if (callee == current_ir_graph)
1450                         x->recursive = 1;
1451
1452                 /* link it into the list of possibly inlinable entries */
1453                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1454                 entry->call       = call;
1455                 entry->callee     = callee;
1456                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
1457                 entry->benefice   = 0;
1458                 entry->local_adr  = 0;
1459                 entry->all_const  = 0;
1460
1461                 list_add_tail(&entry->list, &x->calls);
1462         }
1463 }
1464
1465 /**
1466  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
1467  * hence this irg is a leaf (it does not call any other method).
1468  */
1469 inline static int is_leave(ir_graph *irg) {
1470         inline_irg_env *env = get_irg_link(irg);
1471         return env->n_call_nodes == 0;
1472 }
1473
1474 /**
1475  * Returns TRUE if the number of nodes in the callee is
1476  * smaller than size in the irg's environment.
1477  */
1478 inline static int is_smaller(ir_graph *callee, unsigned size) {
1479         inline_irg_env *env = get_irg_link(callee);
1480         return env->n_nodes < size;
1481 }
1482
1483 /**
1484  * Duplicate a call entry.
1485  *
1486  * @param entry     the original entry to duplicate
1487  * @param new_call  the new call node
1488  * @param loop_depth_delta
1489  *                  delta value for the loop depth
1490  */
1491 static call_entry *duplicate_call_entry(const call_entry *entry,
1492                                         ir_node *new_call, int loop_depth_delta) {
1493         call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1494         nentry->call       = new_call;
1495         nentry->callee     = entry->callee;
1496         nentry->benefice   = entry->benefice;
1497         nentry->loop_depth = entry->loop_depth + loop_depth_delta;
1498         nentry->local_adr  = entry->local_adr;
1499         nentry->all_const  = entry->all_const;
1500
1501         return nentry;
1502 }
1503
1504 /**
1505  * Append all call nodes of the source environment to the nodes of in the destination
1506  * environment.
1507  *
1508  * @param dst         destination environment
1509  * @param src         source environment
1510  * @param loop_depth  the loop depth of the call that is replaced by the src list
1511  */
1512 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
1513         call_entry *entry, *nentry;
1514
1515         /* Note that the src list points to Call nodes in the inlined graph, but
1516            we need Call nodes in our graph. Luckily the inliner leaves this information
1517            in the link field. */
1518         list_for_each_entry(call_entry, entry, &src->calls, list) {
1519                 nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
1520                 list_add_tail(&nentry->list, &dst->calls);
1521         }
1522         dst->n_call_nodes += src->n_call_nodes;
1523         dst->n_nodes      += src->n_nodes;
1524 }
1525
1526 /*
1527  * Inlines small leaf methods at call sites where the called address comes
1528  * from a SymConst node referencing the entity that represents the called
1529  * method.
1530  * The size arguments are rough code-size measures: leaf methods smaller than
1531  * leavesize nodes and other methods smaller than size nodes are inlined, as
1532  * long as the caller stays below maxsize nodes.  A usage sketch follows the function body.
1533  */
1534 void inline_leave_functions(unsigned maxsize, unsigned leavesize,
1535                             unsigned size, int ignore_runtime)
1536 {
1537         inline_irg_env   *env;
1538         ir_graph         *irg;
1539         int              i, n_irgs;
1540         ir_graph         *rem;
1541         int              did_inline;
1542         wenv_t           wenv;
1543         call_entry       *entry, *next;
1544         const call_entry *centry;
1545         pmap             *copied_graphs;
1546         pmap_entry       *pm_entry;
1547
1548         rem = current_ir_graph;
1549         obstack_init(&temp_obst);
1550
1551         /* a map for the copied graphs, used to inline recursive calls */
1552         copied_graphs = pmap_create();
1553
1554         /* extend all irgs by a temporary data structure for inlining. */
1555         n_irgs = get_irp_n_irgs();
1556         for (i = 0; i < n_irgs; ++i)
1557                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1558
1559         /* Pre-compute information in temporary data structure. */
1560         wenv.ignore_runtime = ignore_runtime;
1561         wenv.ignore_callers = 0;
1562         for (i = 0; i < n_irgs; ++i) {
1563                 ir_graph *irg = get_irp_irg(i);
1564
1565                 assert(get_irg_phase_state(irg) != phase_building);
1566                 free_callee_info(irg);
1567
1568                 assure_cf_loop(irg);
1569                 wenv.x = get_irg_link(irg);
1570                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1571         }
1572
1573         /* -- and now inline. -- */
1574
1575         /* Inline leaves recursively -- we might construct new leaves. */
1576         do {
1577                 did_inline = 0;
1578
1579                 for (i = 0; i < n_irgs; ++i) {
1580                         ir_node *call;
1581                         int phiproj_computed = 0;
1582
1583                         current_ir_graph = get_irp_irg(i);
1584                         env              = get_irg_link(current_ir_graph);
1585
1586                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1587                         list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1588                                 ir_graph            *callee;
1589                                 irg_inline_property  prop;
1590
1591                                 if (env->n_nodes > maxsize)
1592                                         break;
1593
1594                                 call   = entry->call;
1595                                 callee = entry->callee;
1596
1597                                 prop = get_irg_inline_property(callee);
1598                                 if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1599                                         /* do not inline forbidden / weak graphs */
1600                                         continue;
1601                                 }
1602
1603                                 if (is_leave(callee) && (
1604                                     is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
1605                                         if (!phiproj_computed) {
1606                                                 phiproj_computed = 1;
1607                                                 collect_phiprojs(current_ir_graph);
1608                                         }
1609                                         did_inline = inline_method(call, callee);
1610
1611                                         if (did_inline) {
1612                                                 inline_irg_env *callee_env = get_irg_link(callee);
1613
1614                                                 /* call was inlined, Phi/Projs for current graph must be recomputed */
1615                                                 phiproj_computed = 0;
1616
1617                                                 /* Do some statistics */
1618                                                 env->got_inline = 1;
1619                                                 --env->n_call_nodes;
1620                                                 env->n_nodes += callee_env->n_nodes;
1621                                                 --callee_env->n_callers;
1622
1623                                                 /* remove this call from the list */
1624                                                 list_del(&entry->list);
1625                                                 continue;
1626                                         }
1627                                 }
1628                         }
1629                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1630                 }
1631         } while (did_inline);
1632
1633         /* inline other small functions. */
1634         for (i = 0; i < n_irgs; ++i) {
1635                 ir_node *call;
1636                 int phiproj_computed = 0;
1637
1638                 current_ir_graph = get_irp_irg(i);
1639                 env              = get_irg_link(current_ir_graph);
1640
1641                 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1642
1643                 /* note that the list of possible calls is updated during the process */
1644                 list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1645                         irg_inline_property prop;
1646                         ir_graph            *callee;
1647                         pmap_entry          *e;
1648
1649                         call   = entry->call;
1650                         callee = entry->callee;
1651
1652                         prop = get_irg_inline_property(callee);
1653                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1654                                 /* do not inline forbidden / weak graphs */
1655                                 continue;
1656                         }
1657
1658                         e = pmap_find(copied_graphs, callee);
1659                         if (e != NULL) {
1660                                 /*
1661                                  * Remap callee if we have a copy.
1662                                  * FIXME: Should we do this only for recursive Calls ?
1663                                  */
1664                                 callee = e->value;
1665                         }
1666
1667                         if (prop >= irg_inline_forced ||
1668                             (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
1669                                 if (current_ir_graph == callee) {
1670                                         /*
1671                                          * Recursive call: we cannot directly inline because we cannot walk
1672                                          * the graph and change it. So we have to make a copy of the graph
1673                                          * first.
1674                                          */
1675
1676                                         inline_irg_env *callee_env;
1677                                         ir_graph       *copy;
1678
1679                                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1680
1681                                         /*
1682                                          * No copy yet, create one.
1683                                          * Note that recursive methods are never leaves, so it is sufficient
1684                                          * to test this condition here.
1685                                          */
1686                                         copy = create_irg_copy(callee);
1687
1688                                         /* create_irg_copy() destroys the Proj links, recompute them */
1689                                         phiproj_computed = 0;
1690
1691                                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1692
1693                                         /* allocate new environment */
1694                                         callee_env = alloc_inline_irg_env();
1695                                         set_irg_link(copy, callee_env);
1696
1697                                         assure_cf_loop(copy);
1698                                         wenv.x              = callee_env;
1699                                         wenv.ignore_callers = 1;
1700                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1701
1702                                         /*
1703                                          * Enter the entity of the original graph. This is needed
1704                                          * for inline_method(). However, note that ent->irg still points
1705                                          * to callee, NOT to copy.
1706                                          */
1707                                         set_irg_entity(copy, get_irg_entity(callee));
1708
1709                                         pmap_insert(copied_graphs, callee, copy);
1710                                         callee = copy;
1711
1712                                         /* we have only one caller: the original graph */
1713                                         callee_env->n_callers      = 1;
1714                                         callee_env->n_callers_orig = 1;
1715                                 }
1716                                 if (! phiproj_computed) {
1717                                         phiproj_computed = 1;
1718                                         collect_phiprojs(current_ir_graph);
1719                                 }
1720                                 did_inline = inline_method(call, callee);
1721                                 if (did_inline) {
1722                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1723
1724                                         /* call was inlined, Phi/Projs for current graph must be recomputed */
1725                                         phiproj_computed = 0;
1726
1727                                         /* callee was inlined. Append its call list. */
1728                                         env->got_inline = 1;
1729                                         --env->n_call_nodes;
1730                                         append_call_list(env, callee_env, entry->loop_depth);
1731                                         --callee_env->n_callers;
1732
1733                                         /* after we have inlined callee, all called methods inside callee
1734                                            are now called once more */
1735                                         list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
1736                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1737                                                 ++penv->n_callers;
1738                                         }
1739
1740                                         /* remove this call from the list */
1741                                         list_del(&entry->list);
1742                                         continue;
1743                                 }
1744                         }
1745                 }
1746                 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1747         }
1748
1749         for (i = 0; i < n_irgs; ++i) {
1750                 irg = get_irp_irg(i);
1751                 env = get_irg_link(irg);
1752
1753                 if (env->got_inline) {
1754                         optimize_graph_df(irg);
1755                         optimize_cf(irg);
1756                 }
1757                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1758                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1759                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1760                         env->n_callers_orig, env->n_callers,
1761                         get_entity_name(get_irg_entity(irg))));
1762                 }
1763         }
1764
1765         /* kill the copied graphs: we don't need them anymore */
1766         foreach_pmap(copied_graphs, pm_entry) {
1767                 ir_graph *copy = pm_entry->value;
1768
1769                 /* reset the entity, otherwise it will be deleted in the next step ... */
1770                 set_irg_entity(copy, NULL);
1771                 free_ir_graph(copy);
1772         }
1773         pmap_destroy(copied_graphs);
1774
1775         obstack_free(&temp_obst, NULL);
1776         current_ir_graph = rem;
1777 }
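
/*
 * Hypothetical usage sketch (not part of libFirm): how a driver might invoke
 * the leaf inliner.  The arguments are maxsize (caller growth limit),
 * leavesize (leaf size limit), size (general size limit) and ignore_runtime;
 * the numbers below are arbitrary example values.
 *
 *     inline_leave_functions(750, 16, 25, 0);
 */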
1778
1779 /**
1780  * Calculate the weight of one argument to which a local variable address is passed, based on its uses.  A worked example follows the function body.
1781  */
1782 static unsigned calc_method_local_weight(ir_node *arg) {
1783         int      i, j, k;
1784         unsigned v, weight = 0;
1785
1786         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1787                 ir_node *succ = get_irn_out(arg, i);
1788
1789                 switch (get_irn_opcode(succ)) {
1790                 case iro_Load:
1791                 case iro_Store:
1792                         /* Loads and Stores can be removed */
1793                         weight += 3;
1794                         break;
1795                 case iro_Sel:
1796                         /* check if all args are constant */
1797                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1798                                 ir_node *idx = get_Sel_index(succ, j);
1799                                 if (! is_Const(idx))
1800                                         return 0;
1801                         }
1802                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1803                            some unsupported node. */
1804                         v = calc_method_local_weight(succ);
1805                         if (v == 0)
1806                                 return 0;
1807                         /* we can kill one Sel with constant indexes, this is cheap */
1808                         weight += v + 1;
1809                         break;
1810                 case iro_Id:
1811                         /* when looking backward we might find Id nodes */
1812                         weight += calc_method_local_weight(succ);
1813                         break;
1814                 case iro_Tuple:
1815                         /* unoptimized tuple */
1816                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1817                                 ir_node *pred = get_Tuple_pred(succ, j);
1818                                 if (pred == arg) {
1819                                         /* look for Proj(j) */
1820                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1821                                                 ir_node *succ_succ = get_irn_out(succ, k);
1822                                                 if (is_Proj(succ_succ)) {
1823                                                         if (get_Proj_proj(succ_succ) == j) {
1824                                                                 /* found */
1825                                                                 weight += calc_method_local_weight(succ_succ);
1826                                                         }
1827                                                 } else {
1828                                                         /* this should NOT happen */
1829                                                         return 0;
1830                                                 }
1831                                         }
1832                                 }
1833                         }
1834                         break;
1835                 default:
1836                         /* any other node: unsupported yet or bad. */
1837                         return 0;
1838                 }
1839         }
1840         return weight;
1841 }
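
/*
 * Worked example for the weights above (illustrative only): if the address of
 * a local variable is used by two Loads and one Store, these uses contribute
 * 3 + 3 + 3 = 9.  If it additionally feeds a Sel with constant indexes whose
 * result is read by a single Load, that Sel contributes the weight of its
 * users plus one, i.e. 3 + 1 = 4, for a total weight of 13.  Any unsupported
 * user makes the whole weight collapse to 0.
 */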
1842
1843 /**
1844  * Calculate the parameter weights for transmitting the address of a local variable.
1845  */
1846 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1847         ir_entity *ent = get_irg_entity(irg);
1848         ir_type  *mtp;
1849         int      nparams, i, proj_nr;
1850         ir_node  *irg_args, *arg;
1851
1852         mtp      = get_entity_type(ent);
1853         nparams  = get_method_n_params(mtp);
1854
1855         /* allocate a new array. currently used as 'analysed' flag */
1856         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1857
1858         /* If the method has no parameters we have nothing to do. */
1859         if (nparams <= 0)
1860                 return;
1861
1862         assure_irg_outs(irg);
1863         irg_args = get_irg_args(irg);
1864         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1865                 arg     = get_irn_out(irg_args, i);
1866                 proj_nr = get_Proj_proj(arg);
1867                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1868         }
1869 }
1870
1871 /**
1872  * Calculate the benefice for transmitting a local variable address.
1873  * After inlining, the local variable might be transformed into a
1874  * SSA variable by scalar_replacement().
1875  */
1876 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1877         inline_irg_env *env = get_irg_link(callee);
1878
1879         if (env->local_weights != NULL) {
1880                 if (pos < ARR_LEN(env->local_weights))
1881                         return env->local_weights[pos];
1882                 return 0;
1883         }
1884
1885         analyze_irg_local_weights(env, callee);
1886
1887         if (pos < ARR_LEN(env->local_weights))
1888                 return env->local_weights[pos];
1889         return 0;
1890 }
1891
1892 /**
1893  * Calculate a benefice value for inlining the given call.  A worked numeric example follows the function body.
1894  *
1895  * @param entry      the call entry holding the Call node to inspect
1896  * @param callee     the called graph
1897  */
1898 static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
1899 {
1900         ir_node   *call = entry->call;
1901         ir_entity *ent  = get_irg_entity(callee);
1902         ir_node   *frame_ptr;
1903         ir_type   *mtp;
1904         int       weight = 0;
1905         int       i, n_params, all_const;
1906         unsigned  cc, v;
1907         irg_inline_property prop;
1908
1909         inline_irg_env *callee_env;
1910
1911         prop = get_irg_inline_property(callee);
1912         if (prop == irg_inline_forbidden) {
1913                 DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
1914                     call, callee));
1915                 return entry->benefice = INT_MIN;
1916         }
1917
1918         if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
1919                 DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
1920                     call, callee));
1921                 return entry->benefice = INT_MIN;
1922         }
1923
1924         /* costs for every passed parameter */
1925         n_params = get_Call_n_params(call);
1926         mtp      = get_entity_type(ent);
1927         cc       = get_method_calling_convention(mtp);
1928         if (cc & cc_reg_param) {
1929                 /* register parameter, smaller costs for register parameters */
1930                 int max_regs = cc & ~cc_bits;
1931
1932                 if (max_regs < n_params)
1933                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1934                 else
1935                         weight += n_params * 2;
1936         } else {
1937                 /* parameters are passed on the stack */
1938                 weight += 5 * n_params;
1939         }
1940
1941         /* constant parameters improve the benefice */
1942         frame_ptr = get_irg_frame(current_ir_graph);
1943         all_const = 1;
1944         for (i = 0; i < n_params; ++i) {
1945                 ir_node *param = get_Call_param(call, i);
1946
1947                 if (is_Const(param)) {
1948                         weight += get_method_param_weight(ent, i);
1949                 } else {
1950                         all_const = 0;
1951                         if (is_SymConst(param))
1952                                 weight += get_method_param_weight(ent, i);
1953                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1954                                 /*
1955                                  * An address of a local variable is transmitted. After
1956                                  * inlining, scalar_replacement might be able to remove the
1957                                  * local variable, so honor this.
1958                                  */
1959                                 v = get_method_local_adress_weight(callee, i);
1960                                 weight += v;
1961                                 if (v > 0)
1962                                         entry->local_adr = 1;
1963                         }
1964                 }
1965         }
1966         entry->all_const = all_const;
1967
1968         callee_env = get_irg_link(callee);
1969         if (callee_env->n_callers == 1 &&
1970             callee != current_ir_graph &&
1971                 get_entity_visibility(ent) == visibility_local) {
1972                 weight += 700;
1973         }
1974
1975         /* give a bonus for functions with one block */
1976         if (callee_env->n_blocks == 1)
1977                 weight = weight * 3 / 2;
1978
1979         /* and one for small non-recursive functions: we want them to be inlined in almost every case */
1980         if (callee_env->n_nodes < 30 && !callee_env->recursive)
1981                 weight += 2000;
1982
1983         /* and finally for leaves: they do not increase the register pressure
1984            because of callee-save registers */
1985         if (callee_env->n_call_nodes == 0)
1986                 weight += 400;
1987
1988         /* it's important to inline inner loops first */
1989         if (entry->loop_depth > 30)
1990                 weight += 30 * 1024;
1991         else
1992                 weight += entry->loop_depth * 1024;
1993
1994         /*
1995          * All arguments constant is probably a good sign, give an extra bonus
1996          */
1997         if (all_const)
1998                 weight += 1024;
1999
2000         return entry->benefice = weight;
2001 }
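
/*
 * Worked numeric example (illustrative only, following the rules above):
 * a call with two non-constant stack parameters to a local leaf function
 * that has a single caller, one basic block, fewer than 30 nodes and sits
 * at loop depth 2 is rated
 *
 *     2 * 5              =   10   (stack parameters)
 *     + 700              =  710   (single local caller)
 *     * 3 / 2            = 1065   (one block)
 *     + 2000             = 3065   (small, non-recursive)
 *     + 400              = 3465   (leaf, no Call nodes)
 *     + 2 * 1024         = 5513   (loop depth 2)
 *
 * so the benefice stored in the entry is 5513.
 */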
2002
2003 static ir_graph **irgs;
2004 static int      last_irg;
2005
2006 /**
2007  * Callgraph walker, collect all visited graphs.
2008  */
2009 static void callgraph_walker(ir_graph *irg, void *data) {
2010         (void) data;
2011         irgs[last_irg++] = irg;
2012 }
2013
2014 /**
2015  * Creates an inline order for all graphs.
2016  *
2017  * @return the list of graphs.
2018  */
2019 static ir_graph **create_irg_list(void) {
2020         ir_entity **free_methods;
2021         int       arr_len;
2022         int       n_irgs = get_irp_n_irgs();
2023
2024         cgana(&arr_len, &free_methods);
2025         xfree(free_methods);
2026
2027         compute_callgraph();
2028
2029         last_irg = 0;
2030         irgs     = XMALLOCNZ(ir_graph*, n_irgs);
2031
2032         callgraph_walk(NULL, callgraph_walker, NULL);
2033         assert(n_irgs == last_irg);
2034
2035         return irgs;
2036 }
2037
2038 /**
2039  * Push a call onto the priority queue if its benefice is big enough.
2040  *
2041  * @param pqueue   the priority queue of calls
2042  * @param call     the call entry
2043  * @param inline_threshold
2044  *                 the threshold value
2045  */
2046 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
2047                             int inline_threshold)
2048 {
2049         ir_graph            *callee  = call->callee;
2050         irg_inline_property prop     = get_irg_inline_property(callee);
2051         int                 benefice = calc_inline_benefice(call, callee);
2052
2053         DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
2054             get_irn_irg(call->call), call->call, callee, benefice));
2055
2056         if (prop < irg_inline_forced && benefice < inline_threshold) {
2057                 return;
2058         }
2059
2060         pqueue_put(pqueue, call, benefice);
2061 }
2062
2063 /**
2064  * Try to inline calls into a graph.
2065  *
2066  * @param irg      the graph into which we inline
2067  * @param maxsize  do NOT inline if the size of irg gets
2068  *                 bigger than this amount
2069  * @param inline_threshold
2070  *                 threshold value for inline decision
2071  * @param copied_graphs
2072  *                 map containing copies of recursive graphs
2073  */
2074 static void inline_into(ir_graph *irg, unsigned maxsize,
2075                         int inline_threshold, pmap *copied_graphs)
2076 {
2077         int            phiproj_computed = 0;
2078         inline_irg_env *env = get_irg_link(irg);
2079         call_entry     *curr_call;
2080         wenv_t         wenv;
2081         pqueue_t       *pqueue;
2082
2083         if (env->n_call_nodes == 0)
2084                 return;
2085
2086         if (env->n_nodes > maxsize) {
2087                 DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
2088                 return;
2089         }
2090
2091         current_ir_graph = irg;
2092         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2093
2094         /* put the calls of irg into the pqueue */
2095         pqueue = new_pqueue();
2096
2097         list_for_each_entry(call_entry, curr_call, &env->calls, list) {
2098                 assert(is_Call(curr_call->call));
2099                 maybe_push_call(pqueue, curr_call, inline_threshold);
2100         }
2101
2102         /* note that the list of possible calls is updated during the process */
2103         while (!pqueue_empty(pqueue)) {
2104                 int                 did_inline;
2105                 call_entry          *curr_call  = pqueue_pop_front(pqueue);
2106                 ir_graph            *callee     = curr_call->callee;
2107                 ir_node             *call_node  = curr_call->call;
2108                 inline_irg_env      *callee_env = get_irg_link(callee);
2109                 irg_inline_property prop        = get_irg_inline_property(callee);
2110                 int                 loop_depth;
2111                 const call_entry    *centry;
2112                 pmap_entry          *e;
2113
2114                 if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
2115                         DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
2116                                                 env->n_nodes, callee, callee_env->n_nodes));
2117                         continue;
2118                 }
2119
2120                 e = pmap_find(copied_graphs, callee);
2121                 if (e != NULL) {
2122                         int benefice = curr_call->benefice;
2123                         /*
2124                          * Reduce the weight for a recursive function iff not all arguments are constant;
2125                          * inlining recursive functions is rarely beneficial.
2126                          */
2127                         if (!curr_call->all_const)
2128                                 benefice -= 2000;
2129                         if (benefice < inline_threshold)
2130                                 continue;
2131
2132                         /*
2133                          * Remap callee if we have a copy.
2134                          */
2135                         callee     = e->value;
2136                         callee_env = get_irg_link(callee);
2137                 }
2138
2139                 if (current_ir_graph == callee) {
2140                         /*
2141                          * Recursive call: we cannot directly inline because we cannot
2142                          * walk the graph and change it. So we have to make a copy of
2143                          * the graph first.
2144                          */
2145                         int benefice = curr_call->benefice;
2146                         ir_graph *copy;
2147
2148                         /*
2149                          * Reduce the weight for a recursive function iff not all arguments are constant;
2150                          * inlining recursive functions is rarely beneficial.
2151                          */
2152                         if (!curr_call->all_const)
2153                                 benefice -= 2000;
2154                         if (benefice < inline_threshold)
2155                                 continue;
2156
2157                         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2158
2159                         /*
2160                          * No copy yet, create one.
2161                          * Note that recursive methods are never leaves, so it is
2162                          * sufficient to test this condition here.
2163                          */
2164                         copy = create_irg_copy(callee);
2165
2166                         /* create_irg_copy() destroys the Proj links, recompute them */
2167                         phiproj_computed = 0;
2168
2169                         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2170
2171                         /* allocate a new environment */
2172                         callee_env = alloc_inline_irg_env();
2173                         set_irg_link(copy, callee_env);
2174
2175                         assure_cf_loop(copy);
2176                         wenv.x              = callee_env;
2177                         wenv.ignore_callers = 1;
2178                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2179
2180                         /*
2181                          * Enter the entity of the original graph. This is needed
2182                          * for inline_method(). However, note that ent->irg still points
2183                          * to callee, NOT to copy.
2184                          */
2185                         set_irg_entity(copy, get_irg_entity(callee));
2186
2187                         pmap_insert(copied_graphs, callee, copy);
2188                         callee = copy;
2189
2190                         /* we have only one caller: the original graph */
2191                         callee_env->n_callers      = 1;
2192                         callee_env->n_callers_orig = 1;
2193                 }
2194                 if (! phiproj_computed) {
2195                         phiproj_computed = 1;
2196                         collect_phiprojs(current_ir_graph);
2197                 }
2198                 did_inline = inline_method(call_node, callee);
2199                 if (!did_inline)
2200                         continue;
2201
2202                 /* call was inlined, Phi/Projs for current graph must be recomputed */
2203                 phiproj_computed = 0;
2204
2205                 /* remove it from the caller list */
2206                 list_del(&curr_call->list);
2207
2208                 /* callee was inlined. Append its call list. */
2209                 env->got_inline = 1;
2210                 if (curr_call->local_adr)
2211                         env->local_vars = 1;
2212                 --env->n_call_nodes;
2213
2214                 /* we just generate a bunch of new calls */
2215                 loop_depth = curr_call->loop_depth;
2216                 list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
2217                         inline_irg_env *penv = get_irg_link(centry->callee);
2218                         ir_node        *new_call;
2219                         call_entry     *new_entry;
2220
2221                         /* after we have inlined callee, all called methods inside
2222                          * callee are now called once more */
2223                         ++penv->n_callers;
2224
2225                         /* Note that the src list points to Call nodes in the inlined graph,
2226                          * but we need Call nodes in our graph. Luckily the inliner leaves
2227                          * this information in the link field. */
2228                         new_call = get_irn_link(centry->call);
2229                         assert(is_Call(new_call));
2230
2231                         new_entry = duplicate_call_entry(centry, new_call, loop_depth);
2232                         list_add_tail(&new_entry->list, &env->calls);
2233                         maybe_push_call(pqueue, new_entry, inline_threshold);
2234                 }
2235
2236                 env->n_call_nodes += callee_env->n_call_nodes;
2237                 env->n_nodes += callee_env->n_nodes;
2238                 --callee_env->n_callers;
2239         }
2240         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2241         del_pqueue(pqueue);
2242 }
2243
2244 /*
2245  * Heuristic inliner. Calculates a benefice value for every call and inlines
2246  * those calls with a value higher than the threshold.  A hypothetical usage sketch follows the function body.
2247  */
2248 void inline_functions(unsigned maxsize, int inline_threshold) {
2249         inline_irg_env   *env;
2250         int              i, n_irgs;
2251         ir_graph         *rem;
2252         wenv_t           wenv;
2253         pmap             *copied_graphs;
2254         pmap_entry       *pm_entry;
2255         ir_graph         **irgs;
2256
2257         rem = current_ir_graph;
2258         obstack_init(&temp_obst);
2259
2260         irgs = create_irg_list();
2261
2262         /* a map for the copied graphs, used to inline recursive calls */
2263         copied_graphs = pmap_create();
2264
2265         /* extend all irgs by a temporary data structure for inlining. */
2266         n_irgs = get_irp_n_irgs();
2267         for (i = 0; i < n_irgs; ++i)
2268                 set_irg_link(irgs[i], alloc_inline_irg_env());
2269
2270         /* Pre-compute information in temporary data structure. */
2271         wenv.ignore_runtime = 0;
2272         wenv.ignore_callers = 0;
2273         for (i = 0; i < n_irgs; ++i) {
2274                 ir_graph *irg = irgs[i];
2275
2276                 free_callee_info(irg);
2277
2278                 wenv.x = get_irg_link(irg);
2279                 assure_cf_loop(irg);
2280                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2281         }
2282
2283         /* -- and now inline. -- */
2284         for (i = 0; i < n_irgs; ++i) {
2285                 ir_graph *irg = irgs[i];
2286
2287                 inline_into(irg, maxsize, inline_threshold, copied_graphs);
2288         }
2289
2290         for (i = 0; i < n_irgs; ++i) {
2291                 ir_graph *irg = irgs[i];
2292
2293                 env = get_irg_link(irg);
2294                 if (env->got_inline) {
2295                         /* this irg got calls inlined: optimize it */
2296                         if (get_opt_combo()) {
2297                                 if (env->local_vars) {
2298                                         scalar_replacement_opt(irg);
2299                                 }
2300                                 combo(irg);
2301                         } else {
2302                                 if (env->local_vars) {
2303                                         if (scalar_replacement_opt(irg)) {
2304                                                 optimize_graph_df(irg);
2305                                         }
2306                                 }
2307                                 optimize_cf(irg);
2308                         }
2309                 }
2310                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2311                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2312                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2313                         env->n_callers_orig, env->n_callers,
2314                         get_entity_name(get_irg_entity(irg))));
2315                 }
2316         }
2317
2318         /* kill the copied graphs: we don't need them anymore */
2319         foreach_pmap(copied_graphs, pm_entry) {
2320                 ir_graph *copy = pm_entry->value;
2321
2322                 /* reset the entity, otherwise it will be deleted in the next step ... */
2323                 set_irg_entity(copy, NULL);
2324                 free_ir_graph(copy);
2325         }
2326         pmap_destroy(copied_graphs);
2327
2328         xfree(irgs);
2329
2330         obstack_free(&temp_obst, NULL);
2331         current_ir_graph = rem;
2332 }
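
/*
 * Hypothetical usage sketch (not part of libFirm): running the heuristic
 * inliner over the whole program.  maxsize and inline_threshold below are
 * arbitrary example values; raising the threshold makes inlining more
 * selective.
 *
 *     inline_functions(750, 0);
 */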
2333
2334 void firm_init_inline(void) {
2335         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2336 }