1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  * @version  $Id$
25  */
26 #include "config.h"
27
28 #include <limits.h>
29 #include <assert.h>
30
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irprog_t.h"
34
35 #include "iroptimize.h"
36 #include "ircons_t.h"
37 #include "iropt_t.h"
38 #include "irgopt.h"
39 #include "irgmod.h"
40 #include "irgwalk.h"
41
42 #include "array_t.h"
43 #include "list.h"
44 #include "pset.h"
45 #include "pmap.h"
46 #include "pdeq.h"
47 #include "xmalloc.h"
48 #include "pqueue.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_inline_t.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63 #include "iropt_dbg.h"
64
65 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
66
67 /*------------------------------------------------------------------*/
68 /* Routines for dead node elimination / copying garbage collection  */
69 /* of the obstack.                                                  */
70 /*------------------------------------------------------------------*/
71
72 /**
73  * Remember the new node in the old node by using a field all nodes have.
74  */
75 #define set_new_node(oldn, newn)  set_irn_link(oldn, newn)
76
77 /**
78  * Get this new node, before the old node is forgotten.
79  */
80 #define get_new_node(oldn) get_irn_link(oldn)
81
82 /**
83  * Check if a new node was set.
84  */
85 #define has_new_node(n) (get_new_node(n) != NULL)
86
87 /**
88  * We use the block_visited flag to mark that we have computed the
89  * number of useful predecessors for this block.
90  * Further we encode the new arity in this flag in the old blocks.
91  * Remembering the arity is useful, as it saves a lot of pointer
92  * accesses.  This function is called when copying Block nodes and the
93  * Phi nodes inside a block.
94  */
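/*
 * Example of the encoding (derived from the code below): if the graph's
 * block_visited value is V and a block has 5 predecessors of which 2 are Bad,
 * the first call stores V + 3 in the block's visited flag and returns 3; any
 * later call simply returns block_visited - V == 3 without re-scanning the
 * predecessors.
 */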
95 static inline int
96 compute_new_arity(ir_node *b) {
97         int i, res, irn_arity;
98         int irg_v, block_v;
99
100         irg_v = get_irg_block_visited(current_ir_graph);
101         block_v = get_Block_block_visited(b);
102         if (block_v >= irg_v) {
103                 /* we computed the number of preds for this block and saved it in the
104                    block_v flag */
105                 return block_v - irg_v;
106         } else {
107                 /* compute the number of good predecessors */
108                 res = irn_arity = get_irn_arity(b);
109                 for (i = 0; i < irn_arity; i++)
110                         if (is_Bad(get_irn_n(b, i))) res--;
111                 /* save it in the flag. */
112                 set_Block_block_visited(b, irg_v + res);
113                 return res;
114         }
115 }
116
117 /**
118  * Copies the node to the new obstack. The Ins of the new node point to
119  * the predecessors on the old obstack.  For block/phi nodes not all
120  * predecessors might be copied.  n->link points to the new node.
121  * For Phi and Block nodes the function allocates in-arrays with an arity
122  * only for useful predecessors.  The arity is determined by counting
123  * the non-bad predecessors of the block.
124  *
125  * @param n    The node to be copied
126  * @param env  if non-NULL, the node number attribute will be copied to the new node
127  *
128  * Note: Also used for loop unrolling.
129  */
130 static void copy_node(ir_node *n, void *env) {
131         ir_node *nn, *block;
132         int new_arity;
133         ir_op *op = get_irn_op(n);
134         (void) env;
135
136         if (op == op_Bad) {
137                 /* node copied already */
138                 return;
139         } else if (op == op_Block) {
140                 block = NULL;
141                 new_arity = compute_new_arity(n);
142                 n->attr.block.graph_arr = NULL;
143         } else {
144                 block = get_nodes_block(n);
145                 if (op == op_Phi) {
146                         new_arity = compute_new_arity(block);
147                 } else {
148                         new_arity = get_irn_arity(n);
149                 }
150         }
151         nn = new_ir_node(get_irn_dbg_info(n),
152                 current_ir_graph,
153                 block,
154                 op,
155                 get_irn_mode(n),
156                 new_arity,
157                 get_irn_in(n) + 1);
158         /* Copy the attributes.  These might point to additional data.  If this
159            was allocated on the old obstack the pointers now are dangling.  This
160            frees e.g. the memory of the graph_arr allocated in new_immBlock. */
161         if (op == op_Block) {
162                 /* we cannot allow blocks WITHOUT macroblock input */
163                 set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
164         }
165         copy_node_attr(n, nn);
166
167         if (env != NULL) {
168                 /* for easier debugging, we want to copy the node numbers too */
169                 nn->node_nr = n->node_nr;
170         }
171
172         set_new_node(n, nn);
173         hook_dead_node_elim_subst(current_ir_graph, n, nn);
174 }
175
176 /**
177  * Copies new predecessors of old node to new node remembered in link.
178  * Spare the Bad predecessors of Phi and Block nodes.
179  */
180 static void copy_preds(ir_node *n, void *env) {
181         ir_node *nn, *block;
182         int i, j, irn_arity;
183         (void) env;
184
185         nn = get_new_node(n);
186
187         if (is_Block(n)) {
188                 /* copy the macro block header */
189                 ir_node *mbh = get_Block_MacroBlock(n);
190
191                 if (mbh == n) {
192                         /* this block is a macroblock header */
193                         set_Block_MacroBlock(nn, nn);
194                 } else {
195                         /* get the macro block header */
196                         ir_node *nmbh = get_new_node(mbh);
197                         assert(nmbh != NULL);
198                         set_Block_MacroBlock(nn, nmbh);
199                 }
200
201                 /* Don't copy Bad nodes. */
202                 j = 0;
203                 irn_arity = get_irn_arity(n);
204                 for (i = 0; i < irn_arity; i++) {
205                         if (! is_Bad(get_irn_n(n, i))) {
206                                 ir_node *pred = get_irn_n(n, i);
207                                 set_irn_n(nn, j, get_new_node(pred));
208                                 j++;
209                         }
210                 }
211                 /* repair the block visited flag from above misuse. Repair it in both
212                    graphs so that the old one can still be used. */
213                 set_Block_block_visited(nn, 0);
214                 set_Block_block_visited(n, 0);
215                 /* Local optimization could not merge two subsequent blocks if the
216                    in array contained Bads.  Now it's possible.
217                    We don't call optimize_in_place as it requires
218                    that the fields in ir_graph are set properly. */
219                 if (!has_Block_label(nn) &&
220                     get_opt_control_flow_straightening() &&
221                     get_Block_n_cfgpreds(nn) == 1 &&
222                     is_Jmp(get_Block_cfgpred(nn, 0))) {
223                         ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
224                         if (nn == old) {
225                                 /* Jmp jumps into the block it is in -- deal with the self cycle. */
226                                 assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
227                                 exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
228                         } else {
229                                 exchange(nn, old);
230                         }
231                 }
232         } else if (is_Phi(n) && get_irn_arity(n) > 0) {
233                 /* Don't copy node if corresponding predecessor in block is Bad.
234                    The Block itself should not be Bad. */
235                 block = get_nodes_block(n);
236                 set_nodes_block(nn, get_new_node(block));
237                 j = 0;
238                 irn_arity = get_irn_arity(n);
239                 for (i = 0; i < irn_arity; i++) {
240                         if (! is_Bad(get_irn_n(block, i))) {
241                                 ir_node *pred = get_irn_n(n, i);
242                                 set_irn_n(nn, j, get_new_node(pred));
243                                 /*if (is_backedge(n, i)) set_backedge(nn, j);*/
244                                 j++;
245                         }
246                 }
247                 /* If the pre walker reached this Phi after the post walker visited the
248                    block, block_visited is > 0.
249                 set_Block_block_visited(get_nodes_block(n), 0);
250                 /* Compacting the Phi's ins might generate Phis with only one
251                    predecessor. */
252                 if (get_irn_arity(nn) == 1)
253                         exchange(nn, get_irn_n(nn, 0));
254         } else {
255                 irn_arity = get_irn_arity(n);
256                 for (i = -1; i < irn_arity; i++)
257                         set_irn_n(nn, i, get_new_node(get_irn_n(n, i)));
258         }
259         /* Now the new node is complete.  We can add it to the hash table for CSE.
260            @@@ inlining aborts if we identify End. Why? */
261         if (!is_End(nn))
262                 add_identities(current_ir_graph->value_table, nn);
263 }
264
265 /**
266  * Copies the graph recursively, compacts the keep-alives of the end node.
267  *
268  * @param irg           the graph to be copied
269  * @param copy_node_nr  If non-zero, the node number will be copied
270  */
271 static void copy_graph(ir_graph *irg, int copy_node_nr) {
272         ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
273         ir_node *ka;      /* keep alive */
274         int i, irn_arity;
275         unsigned long vfl;
276
277         /* Some nodes must be copied by hand, sigh */
278         vfl = get_irg_visited(irg);
279         set_irg_visited(irg, vfl + 1);
280
281         oe = get_irg_end(irg);
282         mark_irn_visited(oe);
283         /* copy the end node by hand, allocate dynamic in array! */
284         ne = new_ir_node(get_irn_dbg_info(oe),
285                 irg,
286                 NULL,
287                 op_End,
288                 mode_X,
289                 -1,
290                 NULL);
291         /* Copy the attributes.  Well, there might be some in the future... */
292         copy_node_attr(oe, ne);
293         set_new_node(oe, ne);
294
295         /* copy the Bad node */
296         ob = get_irg_bad(irg);
297         mark_irn_visited(ob);
298         nb = new_ir_node(get_irn_dbg_info(ob),
299                 irg,
300                 NULL,
301                 op_Bad,
302                 mode_T,
303                 0,
304                 NULL);
305         copy_node_attr(ob, nb);
306         set_new_node(ob, nb);
307
308         /* copy the NoMem node */
309         om = get_irg_no_mem(irg);
310         mark_irn_visited(om);
311         nm = new_ir_node(get_irn_dbg_info(om),
312                 irg,
313                 NULL,
314                 op_NoMem,
315                 mode_M,
316                 0,
317                 NULL);
318         copy_node_attr(om, nm);
319         set_new_node(om, nm);
320
321         /* copy the live nodes */
322         set_irg_visited(irg, vfl);
323         irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
324
325         /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
326
327         /* visit the anchors as well */
328         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
329                 ir_node *n = get_irg_anchor(irg, i);
330
331                 if (n && (get_irn_visited(n) <= vfl)) {
332                         set_irg_visited(irg, vfl);
333                         irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
334                 }
335         }
336
337         /* copy_preds for the end node ... */
338         set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
339
340         /*- ... and now the keep alives. -*/
341         /* First pick the unmarked block nodes and walk them.  We must pick these
342            first, as otherwise we would overlook blocks reachable from Phis. */
343         irn_arity = get_End_n_keepalives(oe);
344         for (i = 0; i < irn_arity; i++) {
345                 ka = get_End_keepalive(oe, i);
346                 if (is_Block(ka)) {
347                         if (get_irn_visited(ka) <= vfl) {
348                                 /* We must keep the block alive and copy everything reachable */
349                                 set_irg_visited(irg, vfl);
350                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
351                         }
352                         add_End_keepalive(ne, get_new_node(ka));
353                 }
354         }
355
356         /* Now pick other nodes.  Here we will keep all! */
357         irn_arity = get_End_n_keepalives(oe);
358         for (i = 0; i < irn_arity; i++) {
359                 ka = get_End_keepalive(oe, i);
360                 if (!is_Block(ka)) {
361                         if (get_irn_visited(ka) <= vfl) {
362                                 /* We didn't copy the node yet.  */
363                                 set_irg_visited(irg, vfl);
364                                 irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
365                         }
366                         add_End_keepalive(ne, get_new_node(ka));
367                 }
368         }
369
370         /* start block sometimes only reached after keep alives */
371         set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
372         set_nodes_block(nm, get_new_node(get_nodes_block(om)));
373 }
374
375 /**
376  * Copies the graph reachable from current_ir_graph->end to the obstack
377  * in current_ir_graph and fixes the environment.
378  * Then fixes the fields in current_ir_graph containing nodes of the
379  * graph.
380  *
381  * @param copy_node_nr  If non-zero, the node number will be copied
382  */
383 static void
384 copy_graph_env(int copy_node_nr) {
385         ir_graph *irg = current_ir_graph;
386         ir_node *old_end, *new_anchor;
387         int i;
388
389         /* remove end_except and end_reg nodes */
390         old_end = get_irg_end(irg);
391         set_irg_end_except (irg, old_end);
392         set_irg_end_reg    (irg, old_end);
393
394         /* Not all nodes remembered in irg might be reachable
395            from the end node.  Assure their link is set to NULL, so that
396            we can test whether new nodes have been computed. */
397         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
398                 ir_node *n = get_irg_anchor(irg, i);
399                 if (n != NULL)
400                         set_new_node(n, NULL);
401         }
402         /* we use the block walk flag for removing Bads from Blocks ins. */
403         inc_irg_block_visited(irg);
404
405         /* copy the graph */
406         copy_graph(irg, copy_node_nr);
407
408         /* fix the anchor */
409         old_end    = get_irg_end(irg);
410         new_anchor = new_Anchor(irg);
411
412         for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
413                 ir_node *n = get_irg_anchor(irg, i);
414                 if (n)
415                         set_irn_n(new_anchor, i, get_new_node(n));
416         }
417         free_End(old_end);
418         irg->anchor = new_anchor;
419
420         /* ensure the new anchor is placed in the endblock */
421         set_nodes_block(new_anchor, get_irg_end_block(irg));
422 }
423
424 /**
425  * Copies all reachable nodes to a new obstack.  Removes bad inputs
426  * from block nodes and the corresponding inputs from Phi nodes.
427  * Merges single exit blocks with single entry blocks and removes
428  * 1-input Phis.
429  * Adds all new nodes to a new hash table for CSE.  Does not
430  * perform CSE, so the hash table might contain common subexpressions.
431  */
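/*
 * Note: the graph is copied onto a fresh obstack and the old obstack is freed
 * afterwards, so any ir_node pointers the caller still holds become stale
 * unless they were registered with the Survive DCE facility further down in
 * this file (survive_dce_register_irn()).
 */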
432 void dead_node_elimination(ir_graph *irg) {
433         ir_graph *rem;
434 #ifdef INTERPROCEDURAL_VIEW
435         int rem_ipview = get_interprocedural_view();
436 #endif
437         struct obstack *graveyard_obst = NULL;
438         struct obstack *rebirth_obst   = NULL;
439
440         edges_deactivate(irg);
441
442         /* inform statistics that we started a dead-node elimination run */
443         hook_dead_node_elim(irg, 1);
444
445         /* Remember external state of current_ir_graph. */
446         rem = current_ir_graph;
447         current_ir_graph = irg;
448 #ifdef INTERPROCEDURAL_VIEW
449         set_interprocedural_view(0);
450 #endif
451
452         assert(get_irg_phase_state(irg) != phase_building);
453
454         /* Handle graph state */
455         free_callee_info(irg);
456         free_irg_outs(irg);
457         free_trouts();
458
459         /* @@@ so far we lose loops when copying */
460         free_loop_information(irg);
461
462         set_irg_doms_inconsistent(irg);
463
464         /* A quiet place, where the old obstack can rest in peace,
465            until it will be cremated. */
466         graveyard_obst = irg->obst;
467
468         /* A new obstack, where the reachable nodes will be copied to. */
469         rebirth_obst = XMALLOC(struct obstack);
470         irg->obst = rebirth_obst;
471         obstack_init(irg->obst);
472         irg->last_node_idx = 0;
473
474         /* We also need a new value table for CSE */
475         del_identities(irg->value_table);
476         irg->value_table = new_identities();
477
478         /* Copy the graph from the old to the new obstack */
479         copy_graph_env(/*copy_node_nr=*/1);
480
481         /* Free memory from old unoptimized obstack */
482         obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
483         xfree(graveyard_obst);            /* ... then free it.           */
484
485         /* inform statistics that the run is over */
486         hook_dead_node_elim(irg, 0);
487
488         current_ir_graph = rem;
489 #ifdef INTERPROCEDURAL_VIEW
490         set_interprocedural_view(rem_ipview);
491 #endif
492 }
493
494 /**
495  * Relink bad predecessors of a block and store the old in array to the
496  * link field. This function is called by relink_bad_predecessors().
497  * The array in the link field starts with the block operand at position 0.
498  * If the block has bad predecessors, create a new in array without bad preds.
499  * Otherwise the in array is left untouched.
500  */
501 static void relink_bad_block_predecessors(ir_node *n, void *env) {
502         ir_node **new_in, *irn;
503         int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
504         (void) env;
505
506         /* if link field of block is NULL, look for bad predecessors otherwise
507            this is already done */
508         if (is_Block(n) && get_irn_link(n) == NULL) {
509                 /* save old predecessors in link field (position 0 is the block operand)*/
510                 set_irn_link(n, get_irn_in(n));
511
512                 /* count predecessors without bad nodes */
513                 old_irn_arity = get_irn_arity(n);
514                 for (i = 0; i < old_irn_arity; i++)
515                         if (!is_Bad(get_irn_n(n, i)))
516                                 ++new_irn_arity;
517
518                 /* arity changing: set new predecessors without bad nodes */
519                 if (new_irn_arity < old_irn_arity) {
520                         /* Get new predecessor array. We do not resize the array, as we must
521                            keep the old one to update Phis. */
522                         new_in = NEW_ARR_D(ir_node *, current_ir_graph->obst, (new_irn_arity+1));
523
524                         /* set new predecessors in array */
525                         new_in[0] = NULL;
526                         new_irn_n = 1;
527                         for (i = 0; i < old_irn_arity; i++) {
528                                 irn = get_irn_n(n, i);
529                                 if (!is_Bad(irn)) {
530                                         new_in[new_irn_n] = irn;
531                                         is_backedge(n, i) ? set_backedge(n, new_irn_n-1) : set_not_backedge(n, new_irn_n-1);
532                                         ++new_irn_n;
533                                 }
534                         }
535                         /* ARR_SETLEN(int, n->attr.block.backedge, new_irn_arity); */
536                         ARR_SHRINKLEN(n->attr.block.backedge, new_irn_arity);
537                         n->in = new_in;
538                 } /* ir node has bad predecessors */
539         } /* Block is not relinked */
540 }
541
542 /**
543  * Relinks Bad predecessors of Blocks and Phis; called by the walker in
544  * remove_bad_predecessors(). If n is a Block, call
545  * relink_bad_block_predecessors(). If n is a Phi node, also call the relinking
546  * function for the Phi's Block. If this block has bad predecessors, relink the
547  * predecessors of the Phi node.
548  */
549 static void relink_bad_predecessors(ir_node *n, void *env) {
550         ir_node *block, **old_in;
551         int i, old_irn_arity, new_irn_arity;
552
553         /* relink bad predecessors of a block */
554         if (is_Block(n))
555                 relink_bad_block_predecessors(n, env);
556
557         /* If Phi node relink its block and its predecessors */
558         if (is_Phi(n)) {
559                 /* Relink predecessors of phi's block */
560                 block = get_nodes_block(n);
561                 if (get_irn_link(block) == NULL)
562                         relink_bad_block_predecessors(block, env);
563
564                 old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
565                 old_irn_arity = ARR_LEN(old_in);
566
567                 /* Relink Phi predecessors if count of predecessors changed */
568                 if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
569                         /* set new predecessors in array
570                            n->in[0] remains the same block */
571                         new_irn_arity = 1;
572                         for(i = 1; i < old_irn_arity; i++)
573                                 if (!is_Bad(old_in[i])) {
574                                         n->in[new_irn_arity] = n->in[i];
575                                         is_backedge(n, i) ? set_backedge(n, new_irn_arity) : set_not_backedge(n, new_irn_arity);
576                                         ++new_irn_arity;
577                                 }
578
579                         ARR_SETLEN(ir_node *, n->in, new_irn_arity);
580                         ARR_SETLEN(int, n->attr.phi.u.backedge, new_irn_arity);
581                 }
582         } /* n is a Phi node */
583 }
584
585 /*
586  * Removes Bad predecessors from Blocks and the corresponding
587  * inputs to Phi nodes as in dead_node_elimination but without
588  * copying the graph.
589  * On walking up set the link field to NULL, on walking down call
590  * relink_bad_predecessors() (This function stores the old in array
591  * to the link field and sets a new in array if arity of predecessors
592  * changes).
593  */
594 void remove_bad_predecessors(ir_graph *irg) {
595         panic("Fix backedge handling first");
596         irg_walk_graph(irg, firm_clear_link, relink_bad_predecessors, NULL);
597 }
598
599
600 /*
601    __                      _  __ __
602   (_     __    o     _    | \/  |_
603   __)|_| | \_/ | \_/(/_   |_/\__|__
604
605   The following stuff implements a facility that automatically patches
606   registered ir_node pointers to the new node when a dead node elimination occurs.
607 */
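
/*
 * A hypothetical usage sketch of this facility, built only from the functions
 * defined below:
 *
 *     survive_dce_t *sd = new_survive_dce();
 *     ir_node *important = ...;                  // a node we must keep track of
 *     survive_dce_register_irn(sd, &important);  // patched automatically on DCE
 *     dead_node_elimination(irg);
 *     // 'important' now points to the copied node (assuming it was reachable)
 *     free_survive_dce(sd);
 */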
608
609 struct _survive_dce_t {
610         struct obstack obst;
611         pmap *places;
612         pmap *new_places;
613         hook_entry_t dead_node_elim;
614         hook_entry_t dead_node_elim_subst;
615 };
616
617 typedef struct _survive_dce_list_t {
618         struct _survive_dce_list_t *next;
619         ir_node **place;
620 } survive_dce_list_t;
621
622 static void dead_node_hook(void *context, ir_graph *irg, int start) {
623         survive_dce_t *sd = context;
624         (void) irg;
625
626         /* Create a new map before the dead node elimination is performed. */
627         if (start) {
628                 sd->new_places = pmap_create_ex(pmap_count(sd->places));
629         } else {
630                 /* Patch back all nodes if dead node elimination is over and something is to be done. */
631                 pmap_destroy(sd->places);
632                 sd->places     = sd->new_places;
633                 sd->new_places = NULL;
634         }
635 }
636
637 /**
638  * Hook called when dead node elimination replaces old by nw.
639  */
640 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
641         survive_dce_t *sd = context;
642         survive_dce_list_t *list = pmap_get(sd->places, old);
643         (void) irg;
644
645         /* If the node is to be patched back, write the new address to all registered locations. */
646         if (list) {
647                 survive_dce_list_t *p;
648
649                 for (p = list; p; p = p->next)
650                         *(p->place) = nw;
651
652                 pmap_insert(sd->new_places, nw, list);
653         }
654 }
655
656 /**
657  * Make a new Survive DCE environment.
658  */
659 survive_dce_t *new_survive_dce(void) {
660         survive_dce_t *res = XMALLOC(survive_dce_t);
661         obstack_init(&res->obst);
662         res->places     = pmap_create();
663         res->new_places = NULL;
664
665         res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
666         res->dead_node_elim.context                   = res;
667         res->dead_node_elim.next                      = NULL;
668
669         res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
670         res->dead_node_elim_subst.context = res;
671         res->dead_node_elim_subst.next    = NULL;
672
673         register_hook(hook_dead_node_elim, &res->dead_node_elim);
674         register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
675         return res;
676 }
677
678 /**
679  * Free a Survive DCE environment.
680  */
681 void free_survive_dce(survive_dce_t *sd) {
682         obstack_free(&sd->obst, NULL);
683         pmap_destroy(sd->places);
684         unregister_hook(hook_dead_node_elim, &sd->dead_node_elim);
685         unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
686         xfree(sd);
687 }
688
689 /**
690  * Register a node pointer to be patched upon DCE.
691  * When DCE occurs, the node pointer specified by @p place will be
692  * patched to the new address of the node it is pointing to.
693  *
694  * @param sd    The Survive DCE environment.
695  * @param place The address of the node pointer.
696  */
697 void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
698         if (*place != NULL) {
699                 ir_node *irn      = *place;
700                 survive_dce_list_t *curr = pmap_get(sd->places, irn);
701                 survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
702
703                 nw->next  = curr;
704                 nw->place = place;
705
706                 pmap_insert(sd->places, irn, nw);
707         }
708 }
709
710 /*--------------------------------------------------------------------*/
711 /*  Functionality for inlining                                         */
712 /*--------------------------------------------------------------------*/
713
714 /**
715  * Copy node for inlining.  Updates attributes that change when
716  * inlining but not for dead node elimination.
717  *
718  * Copies the node by calling copy_node() and then updates the entity if
719  * it's a local one.  env must be a pointer to the frame type of the
720  * inlined procedure. The new entities must be in the link field of
721  * the entities.
722  */
723 static void copy_node_inline(ir_node *n, void *env) {
724         ir_node *nn;
725         ir_type *frame_tp = (ir_type *)env;
726
727         copy_node(n, NULL);
728         if (is_Sel(n)) {
729                 nn = get_new_node (n);
730                 assert(is_Sel(nn));
731                 if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
732                         set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
733                 }
734         } else if (is_Block(n)) {
735                 nn = get_new_node (n);
736                 nn->attr.block.irg = current_ir_graph;
737         }
738 }
739
740 /**
741  * Copies new predecessors of the old node and moves constants to
742  * the Start Block.
743  */
744 static void copy_preds_inline(ir_node *n, void *env) {
745         ir_node *nn;
746
747         copy_preds(n, env);
748         nn = skip_Id(get_new_node(n));
749         if (is_irn_constlike(nn)) {
750                 /* move Constants into the start block */
751                 set_nodes_block(nn, get_irg_start_block(current_ir_graph));
752
753                 n = identify_remember(current_ir_graph->value_table, nn);
754                 if (nn != n) {
755                         DBG_OPT_CSE(nn, n);
756                         exchange(nn, n);
757                 }
758         }
759 }
760
761 /**
762  * Walker: checks if P_value_arg_base is used.
763  */
764 static void find_addr(ir_node *node, void *env) {
765         int *allow_inline = env;
766         if (is_Sel(node)) {
767                 ir_graph *irg = current_ir_graph;
768                 if (get_Sel_ptr(node) == get_irg_frame(irg)) {
769                         /* access to frame */
770                         ir_entity *ent = get_Sel_entity(node);
771                         if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
772                                 /* access to value_type */
773                                 *allow_inline = 0;
774                         }
775                 }
776         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
777                 /* From GCC:
778                  * Refuse to inline alloca call unless user explicitly forced so as this
779                  * may change program's memory overhead drastically when the function
780                  * using alloca is called in loop.  In GCC present in SPEC2000 inlining
781                  * into schedule_block cause it to require 2GB of ram instead of 256MB.
782                  *
783                  * Unfortunately this is true for our implementation as well.
784                  * Moreover, we cannot differentiate between alloca() and VLA yet, so this
785                  * disables inlining of functions using VLA (which are completely safe).
786                  *
787                  * 2 Solutions:
788                  * - add a flag to the Alloc node for "real" alloca() calls
789                  * - add a new Stack-Restore node at the end of a function using alloca()
790                  */
791                 *allow_inline = 0;
792         }
793 }
794
795 /**
796  * Check if we can inline a given call.
797  * Currently, we cannot inline two cases:
798  * - call with compound arguments
799  * - graphs that take the address of a parameter
800  *
801  * check these conditions here
802  */
803 static int can_inline(ir_node *call, ir_graph *called_graph) {
804         ir_type *call_type = get_Call_type(call);
805         int params, ress, i, res;
806         assert(is_Method_type(call_type));
807
808         params = get_method_n_params(call_type);
809         ress   = get_method_n_ress(call_type);
810
811         /* check parameters for compound arguments */
812         for (i = 0; i < params; ++i) {
813                 ir_type *p_type = get_method_param_type(call_type, i);
814
815                 if (is_compound_type(p_type))
816                         return 0;
817         }
818
819         /* check results for compound arguments */
820         for (i = 0; i < ress; ++i) {
821                 ir_type *r_type = get_method_res_type(call_type, i);
822
823                 if (is_compound_type(r_type))
824                         return 0;
825         }
826
827         res = 1;
828         irg_walk_graph(called_graph, find_addr, NULL, &res);
829
830         return res;
831 }
832
833 enum exc_mode {
834         exc_handler    = 0, /**< There is a handler. */
835         exc_to_end     = 1, /**< Branches to End. */
836         exc_no_handler = 2  /**< Exception handling not represented. */
837 };
838
839 /* Inlines a method at the given call site. */
840 int inline_method(ir_node *call, ir_graph *called_graph) {
841         ir_node             *pre_call;
842         ir_node             *post_call, *post_bl;
843         ir_node             *in[pn_Start_max];
844         ir_node             *end, *end_bl, *block;
845         ir_node             **res_pred;
846         ir_node             **cf_pred;
847         ir_node             **args_in;
848         ir_node             *ret, *phi;
849         int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
850         enum exc_mode       exc_handling;
851         ir_type             *called_frame, *curr_frame, *mtp, *ctp;
852         ir_entity           *ent;
853         ir_graph            *rem, *irg;
854         irg_inline_property prop = get_irg_inline_property(called_graph);
855         unsigned long       visited;
856
857         if (prop == irg_inline_forbidden)
858                 return 0;
859
860         ent = get_irg_entity(called_graph);
861
862         mtp = get_entity_type(ent);
863         ctp = get_Call_type(call);
864         if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
865                 /* this is a bad feature of C: without a prototype, we can
866                  * call a function with fewer parameters than needed. Currently
867                  * we don't support this, although we could use Unknown then. */
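                /* A hypothetical C example of such a call: f is declared without a
                 * prototype as "int f();", defined as "int f(int x)", and then
                 * called as "f()": the call type has fewer parameters than mtp. */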
868                 return 0;
869         }
870
871         /* Argh, compiling C has some bad consequences:
872          * It is implementation-dependent what happens in that case.
873          * We support inlining if the bit size of the types matches AND
874          * the same arithmetic is used. */
875         n_params = get_method_n_params(mtp);
876         for (i = n_params - 1; i >= 0; --i) {
877                 ir_type *param_tp = get_method_param_type(mtp, i);
878                 ir_type *arg_tp   = get_method_param_type(ctp, i);
879
880                 if (param_tp != arg_tp) {
881                         ir_mode *pmode = get_type_mode(param_tp);
882                         ir_mode *amode = get_type_mode(arg_tp);
883
884                         if (pmode == NULL || amode == NULL)
885                                 return 0;
886                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
887                                 return 0;
888                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
889                                 return 0;
890                         /* otherwise we can simply "reinterpret" the bits */
891                 }
892         }
893
894         irg = get_irn_irg(call);
895
896         /*
897          * We cannot inline a recursive call. The graph must be copied before
898          * the call to inline_method() using create_irg_copy().
899          */
900         if (called_graph == irg)
901                 return 0;
902
903         /*
904          * currently, we cannot inline two cases:
905          * - call with compound arguments
906          * - graphs that take the address of a parameter
907          */
908         if (! can_inline(call, called_graph))
909                 return 0;
910
911         rem = current_ir_graph;
912         current_ir_graph = irg;
913
914         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
915
916         /* --  Turn off optimizations, this can cause problems when allocating new nodes. -- */
917         rem_opt = get_opt_optimize();
918         set_optimize(0);
919
920         /* Handle graph state */
921         assert(get_irg_phase_state(irg) != phase_building);
922         assert(get_irg_pinned(irg) == op_pin_state_pinned);
923         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
924         set_irg_outs_inconsistent(irg);
925         set_irg_extblk_inconsistent(irg);
926         set_irg_doms_inconsistent(irg);
927         set_irg_loopinfo_inconsistent(irg);
928         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
929         set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
930
931         /* -- Check preconditions -- */
932         assert(is_Call(call));
933
934         /* here we know we WILL inline, so inform the statistics */
935         hook_inline(call, called_graph);
936
937         /* -- Decide how to handle exception control flow: Is there a handler
938            for the Call node, or do we branch directly to End on an exception?
939            exc_handling:
940            0 There is a handler.
941            1 Branches to End.
942            2 Exception handling not represented in Firm. -- */
943         {
944                 ir_node *proj, *Mproj = NULL, *Xproj = NULL;
945                 for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
946                         long proj_nr = get_Proj_proj(proj);
947                         if (proj_nr == pn_Call_X_except) Xproj = proj;
948                         if (proj_nr == pn_Call_M_except) Mproj = proj;
949                 }
950                 if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
951                 else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
952                 else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
953         }
954
955         /* create the argument tuple */
956         NEW_ARR_A(ir_node *, args_in, n_params);
957
958         block = get_nodes_block(call);
959         for (i = n_params - 1; i >= 0; --i) {
960                 ir_node *arg      = get_Call_param(call, i);
961                 ir_type *param_tp = get_method_param_type(mtp, i);
962                 ir_mode *mode     = get_type_mode(param_tp);
963
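                /* If the declared parameter mode differs from the argument's mode
                   (e.g. signed vs. unsigned integers of the same size), insert a Conv
                   so the inlined body sees the mode it expects; the bit-size and
                   arithmetic checks above make this reinterpretation safe. */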
964                 if (mode != get_irn_mode(arg)) {
965                         arg = new_r_Conv(irg, block, arg, mode, 0);
966                 }
967                 args_in[i] = arg;
968         }
969
970         /* -- Pre_call is a Tuple that provides the initial control flow, memory,
971            frame base, TLS and the arguments to the procedure; it later replaces the
972            Start node of the called graph. Post_call is the old Call node and collects
973            the results of the called graph. Both will end up being a tuple.  -- */
974         post_bl = get_nodes_block(call);
975         set_irg_current_block(irg, post_bl);
976         /* XxMxPxPxPxT of Start + parameter of Call */
977         in[pn_Start_X_initial_exec]   = new_Jmp();
978         in[pn_Start_M]                = get_Call_mem(call);
979         in[pn_Start_P_frame_base]     = get_irg_frame(irg);
980         in[pn_Start_P_tls]            = get_irg_tls(irg);
981         in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
982         pre_call = new_Tuple(pn_Start_max, in);
983         post_call = call;
984
985         /* --
986            The new block gets the ins of the old block, pre_call and all its
987            predecessors and all Phi nodes. -- */
988         part_block(pre_call);
989
990         /* -- Prepare state for dead node elimination -- */
991         /* Visited flags in calling irg must be >= flag in called irg.
992            Else walker and arity computation will not work. */
993         if (get_irg_visited(irg) <= get_irg_visited(called_graph))
994                 set_irg_visited(irg, get_irg_visited(called_graph) + 1);
995         if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
996                 set_irg_block_visited(irg, get_irg_block_visited(called_graph));
997         visited = get_irg_visited(irg);
998
999         /* Set pre_call as new Start node in link field of the start node of
1000            calling graph and pre_calls block as new block for the start block
1001            of calling graph.
1002            Further mark these nodes so that they are not visited by the
1003            copying. */
1004         set_irn_link(get_irg_start(called_graph), pre_call);
1005         set_irn_visited(get_irg_start(called_graph), visited);
1006         set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
1007         set_irn_visited(get_irg_start_block(called_graph), visited);
1008
1009         set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
1010         set_irn_visited(get_irg_bad(called_graph), visited);
1011
1012         set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
1013         set_irn_visited(get_irg_no_mem(called_graph), visited);
1014
1015         /* Initialize for compaction of in arrays */
1016         inc_irg_block_visited(irg);
1017
1018         /* -- Replicate local entities of the called_graph -- */
1019         /* copy the entities. */
1020         irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
1021         called_frame = get_irg_frame_type(called_graph);
1022         curr_frame   = get_irg_frame_type(irg);
1023         for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
1024                 ir_entity *new_ent, *old_ent;
1025                 old_ent = get_class_member(called_frame, i);
1026                 new_ent = copy_entity_own(old_ent, curr_frame);
1027                 set_entity_link(old_ent, new_ent);
1028         }
1029
1030         /* visited is > than that of the called graph.  With this trick visited will
1031            remain unchanged, so that an outer walker, e.g. one searching for the call
1032            nodes to inline and invoking this inlining, will not visit the inlined nodes. */
1033         set_irg_visited(irg, get_irg_visited(irg)-1);
1034
1035         /* -- Performing dead node elimination inlines the graph -- */
1036         /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
1037            entities. */
1038         irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
1039                  get_irg_frame_type(called_graph));
1040
1041         irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
1042
1043         /* Repair called_graph */
1044         set_irg_visited(called_graph, get_irg_visited(irg));
1045         set_irg_block_visited(called_graph, get_irg_block_visited(irg));
1046         set_Block_block_visited(get_irg_start_block(called_graph), 0);
1047
1048         /* -- Merge the end of the inlined procedure with the call site -- */
1049         /* We will turn the old Call node into a Tuple with the following
1050            predecessors:
1051            -1:  Block of Tuple.
1052            0: Phi of all Memories of Return statements.
1053            1: Jmp from new Block that merges the control flow from all exception
1054            predecessors of the old end block.
1055            2: Tuple of all arguments.
1056            3: Phi of Exception memories.
1057            In case the old Call directly branches to End on an exception we don't
1058            need the block merging all exceptions nor the Phi of the exception
1059            memories.
1060         */
1061
1062         /* -- Precompute some values -- */
1063         end_bl = get_new_node(get_irg_end_block(called_graph));
1064         end = get_new_node(get_irg_end(called_graph));
1065         arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
1066         n_res = get_method_n_ress(get_Call_type(call));
1067
1068         res_pred = XMALLOCN(ir_node*, n_res);
1069         cf_pred  = XMALLOCN(ir_node*, arity);
1070
1071         set_irg_current_block(irg, post_bl); /* just to make sure */
1072
1073         /* -- archive keepalives -- */
1074         irn_arity = get_irn_arity(end);
1075         for (i = 0; i < irn_arity; i++) {
1076                 ir_node *ka = get_End_keepalive(end, i);
1077                 if (! is_Bad(ka))
1078                         add_End_keepalive(get_irg_end(irg), ka);
1079         }
1080
1081         /* The new end node will die.  We need not free as the in array is on the obstack:
1082            copy_node() only generated 'D' arrays. */
1083
1084         /* -- Replace Return nodes by Jump nodes. -- */
1085         n_ret = 0;
1086         for (i = 0; i < arity; i++) {
1087                 ir_node *ret;
1088                 ret = get_Block_cfgpred(end_bl, i);
1089                 if (is_Return(ret)) {
1090                         cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
1091                         n_ret++;
1092                 }
1093         }
1094         set_irn_in(post_bl, n_ret, cf_pred);
1095
1096         /* -- Build a Tuple for all results of the method.
1097            Add Phi node if there was more than one Return.  -- */
1098         turn_into_tuple(post_call, pn_Call_max);
1099         /* First the Memory-Phi */
1100         n_ret = 0;
1101         for (i = 0; i < arity; i++) {
1102                 ret = get_Block_cfgpred(end_bl, i);
1103                 if (is_Return(ret)) {
1104                         cf_pred[n_ret] = get_Return_mem(ret);
1105                         n_ret++;
1106                 }
1107         }
1108         phi = new_Phi(n_ret, cf_pred, mode_M);
1109         set_Tuple_pred(call, pn_Call_M_regular, phi);
1110         /* Conserve Phi-list for further inlinings -- but might be optimized */
1111         if (get_nodes_block(phi) == post_bl) {
1112                 set_irn_link(phi, get_irn_link(post_bl));
1113                 set_irn_link(post_bl, phi);
1114         }
1115         /* Now the real results */
1116         if (n_res > 0) {
1117                 for (j = 0; j < n_res; j++) {
1118                         n_ret = 0;
1119                         for (i = 0; i < arity; i++) {
1120                                 ret = get_Block_cfgpred(end_bl, i);
1121                                 if (is_Return(ret)) {
1122                                         cf_pred[n_ret] = get_Return_res(ret, j);
1123                                         n_ret++;
1124                                 }
1125                         }
1126                         if (n_ret > 0)
1127                                 phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
1128                         else
1129                                 phi = new_Bad();
1130                         res_pred[j] = phi;
1131                         /* Conserve Phi-list for further inlinings -- but might be optimized */
1132                         if (get_nodes_block(phi) == post_bl) {
1133                                 set_Phi_next(phi, get_Block_phis(post_bl));
1134                                 set_Block_phis(post_bl, phi);
1135                         }
1136                 }
1137                 set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
1138         } else {
1139                 set_Tuple_pred(call, pn_Call_T_result, new_Bad());
1140         }
1141         /* handle the regular call */
1142         set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
1143
1144         /* For now, we cannot inline calls with value_base */
1145         set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
1146
1147         /* Finally the exception control flow.
1148            We have two (three) possible situations:
1149            First if the Call branches to an exception handler: We need to add a Phi node to
1150            collect the memory containing the exception objects.  Further we need
1151            to add another block to get a correct representation of this Phi.  To
1152            this block we add a Jmp that resolves into the X output of the Call
1153            when the Call is turned into a tuple.
1154            Second the Call branches to End, the exception is not handled.  Just
1155            add all inlined exception branches to the End node.
1156            Third: there is no Exception edge at all. Handle as case two. */
1157         if (exc_handling == exc_handler) {
1158                 n_exc = 0;
1159                 for (i = 0; i < arity; i++) {
1160                         ir_node *ret, *irn;
1161                         ret = get_Block_cfgpred(end_bl, i);
1162                         irn = skip_Proj(ret);
1163                         if (is_fragile_op(irn) || is_Raise(irn)) {
1164                                 cf_pred[n_exc] = ret;
1165                                 ++n_exc;
1166                         }
1167                 }
1168                 if (n_exc > 0) {
1169                         ir_node *block = new_Block(n_exc, cf_pred);
1170                         set_cur_block(block);
1171
1172                         set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
1173                         /* The Phi for the memories with the exception objects */
1174                         n_exc = 0;
1175                         for (i = 0; i < arity; i++) {
1176                                 ir_node *ret;
1177                                 ret = skip_Proj(get_Block_cfgpred(end_bl, i));
1178                                 if (is_Call(ret)) {
1179                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
1180                                         n_exc++;
1181                                 } else if (is_fragile_op(ret)) {
1182                                         /* We rely on all cfops having the memory output at the same position. */
1183                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
1184                                         n_exc++;
1185                                 } else if (is_Raise(ret)) {
1186                                         cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
1187                                         n_exc++;
1188                                 }
1189                         }
1190                         set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
1191                 } else {
1192                         set_Tuple_pred(call, pn_Call_X_except, new_Bad());
1193                         set_Tuple_pred(call, pn_Call_M_except, new_Bad());
1194                 }
1195         } else {
1196                 ir_node *main_end_bl;
1197                 int main_end_bl_arity;
1198                 ir_node **end_preds;
1199
1200                 /* assert(exc_handling == 1 || no exceptions. ) */
1201                 n_exc = 0;
1202                 for (i = 0; i < arity; i++) {
1203                         ir_node *ret = get_Block_cfgpred(end_bl, i);
1204                         ir_node *irn = skip_Proj(ret);
1205
1206                         if (is_fragile_op(irn) || is_Raise(irn)) {
1207                                 cf_pred[n_exc] = ret;
1208                                 n_exc++;
1209                         }
1210                 }
1211                 main_end_bl       = get_irg_end_block(irg);
1212                 main_end_bl_arity = get_irn_arity(main_end_bl);
1213                 end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
1214
1215                 for (i = 0; i < main_end_bl_arity; ++i)
1216                         end_preds[i] = get_irn_n(main_end_bl, i);
1217                 for (i = 0; i < n_exc; ++i)
1218                         end_preds[main_end_bl_arity + i] = cf_pred[i];
1219                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
1220                 set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
1221                 set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
1222                 free(end_preds);
1223         }
1224         free(res_pred);
1225         free(cf_pred);
1226
1227         /* --  Turn CSE back on. -- */
1228         set_optimize(rem_opt);
1229         current_ir_graph = rem;
1230
1231         return 1;
1232 }
1233
1234 /********************************************************************/
1235 /* Apply inlining to small methods.                                 */
1236 /********************************************************************/
1237
1238 static struct obstack  temp_obst;
1239
1240 /** Represents a possible inlinable call in a graph. */
1241 typedef struct _call_entry {
1242         ir_node    *call;       /**< The Call node. */
1243         ir_graph   *callee;     /**< The callee IR-graph. */
1244         list_head  list;        /**< List head for linking the next one. */
1245         int        loop_depth;  /**< The loop depth of this call. */
1246         int        benefice;    /**< The calculated benefice of this call. */
1247         unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
1248         unsigned   all_const:1; /**< Set if this call has only constant parameters. */
1249 } call_entry;
1250
1251 /**
1252  * environment for inlining small irgs
1253  */
1254 typedef struct _inline_env_t {
1255         struct obstack obst;  /**< An obstack where call_entries are allocated on. */
1256         list_head      calls; /**< The call entry list. */
1257 } inline_env_t;
1258
1259 /**
1260  * Returns the irg called from a Call node. If the irg is not
1261  * known, NULL is returned.
1262  *
1263  * @param call  the call node
1264  */
1265 static ir_graph *get_call_called_irg(ir_node *call) {
1266         ir_node *addr;
1267
1268         addr = get_Call_ptr(call);
1269         if (is_Global(addr)) {
1270                 ir_entity *ent = get_Global_entity(addr);
1271                 return get_entity_irg(ent);
1272         }
1273
1274         return NULL;
1275 }
1276
1277 /**
1278  * Walker: Collect all calls to known graphs inside a graph.
1279  */
1280 static void collect_calls(ir_node *call, void *env) {
1281         (void) env;
1282         if (is_Call(call)) {
1283                 ir_graph *called_irg = get_call_called_irg(call);
1284
1285                 if (called_irg != NULL) {
1286                         /* The Call node calls a locally defined method.  Remember to inline. */
1287                         inline_env_t *ienv  = env;
1288                         call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
1289                         entry->call       = call;
1290                         entry->callee     = called_irg;
1291                         entry->loop_depth = 0;
1292                         entry->benefice   = 0;
1293                         entry->local_adr  = 0;
1294                         entry->all_const  = 0;
1295
1296                         list_add_tail(&entry->list, &ienv->calls);
1297                 }
1298         }
1299 }
1300
1301 /**
1302  * Inlines all small methods at call sites where the called address comes
1303  * from a Const node that references the entity representing the called
1304  * method.
1305  * The size argument is a rough measure for the code size of the method:
1306  * Methods where the obstack containing the firm graph is smaller than
1307  * size are inlined.
1308  */
1309 void inline_small_irgs(ir_graph *irg, int size) {
1310         ir_graph *rem = current_ir_graph;
1311         inline_env_t env;
1312         call_entry *entry;
1313
1314         current_ir_graph = irg;
1315         /* Handle graph state */
1316         assert(get_irg_phase_state(irg) != phase_building);
1317         free_callee_info(irg);
1318
1319         /* Find Call nodes to inline.
1320            (We cannot inline during a walk of the graph, as inlining the same
1321            method several times changes the visited flag of the walked graph:
1322            after the first inlining visited of the callee equals visited of
1323            the caller.  With the next inlining both are increased.) */
1324         obstack_init(&env.obst);
1325         INIT_LIST_HEAD(&env.calls);
1326         irg_walk_graph(irg, NULL, collect_calls, &env);
1327
1328         if (! list_empty(&env.calls)) {
1329                 /* There are calls to inline */
1330                 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1331                 collect_phiprojs(irg);
1332
1333                 list_for_each_entry(call_entry, entry, &env.calls, list) {
1334                         ir_graph            *callee = entry->callee;
1335                         irg_inline_property prop    = get_irg_inline_property(callee);
1336
1337                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1338                                 /* do not inline forbidden / weak graphs */
1339                                 continue;
1340                         }
1341
1342                         if (prop >= irg_inline_forced ||
1343                             _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
1344                                 inline_method(entry->call, callee);
1345                         }
1346                 }
1347                 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1348         }
1349         obstack_free(&env.obst, NULL);
1350         current_ir_graph = rem;
1351 }
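
/*
 * Usage sketch (illustrative only; the size threshold 750 is an assumed
 * example value, not a libFirm default): a pass driver could run the
 * size-based inliner over every graph of the program like this:
 *
 *     int i, n = get_irp_n_irgs();
 *     for (i = 0; i < n; ++i)
 *             inline_small_irgs(get_irp_irg(i), 750);
 */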
1352
1353 /**
1354  * Environment for inlining irgs.
1355  */
1356 typedef struct {
1357         list_head calls;             /**< List of all call nodes in this graph. */
1358         unsigned  *local_weights;    /**< Once allocated, the beneficial weight for transmitting local addresses. */
1359         unsigned  n_nodes;           /**< Number of nodes in graph except Proj, Tuple, Sync, Block, Start, End. */
1360         unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
1361         unsigned  n_nodes_orig;      /**< for statistics */
1362         unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
1363         unsigned  n_call_nodes_orig; /**< for statistics */
1364         unsigned  n_callers;         /**< Number of known graphs that call this graph. */
1365         unsigned  n_callers_orig;    /**< for statistics */
1366         unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
1367         unsigned  local_vars:1;      /**< Set, if an inlined function got the address of a local variable. */
1368         unsigned  recursive:1;       /**< Set, if this function is self recursive. */
1369 } inline_irg_env;
1370
1371 /**
1372  * Allocate a new environment for inlining.
1373  */
1374 static inline_irg_env *alloc_inline_irg_env(void) {
1375         inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
1376         INIT_LIST_HEAD(&env->calls);
1377         env->local_weights     = NULL;
1378         env->n_nodes           = -2; /* do not count Start, End */
1379         env->n_blocks          = -2; /* do not count Start, End Block */
1380         env->n_nodes_orig      = -2; /* do not count Start, End */
1381         env->n_call_nodes      = 0;
1382         env->n_call_nodes_orig = 0;
1383         env->n_callers         = 0;
1384         env->n_callers_orig    = 0;
1385         env->got_inline        = 0;
1386         env->local_vars        = 0;
1387         env->recursive         = 0;
1388         return env;
1389 }
1390
1391 typedef struct walker_env {
1392         inline_irg_env *x;     /**< the inline environment */
1393         char ignore_runtime;   /**< the ignore runtime flag */
1394         char ignore_callers;   /**< if set, do not update caller counts */
1395 } wenv_t;
1396
1397 /**
1398  * post-walker: collect all calls in the inline-environment
1399  * of a graph and gather some statistics.
1400  */
1401 static void collect_calls2(ir_node *call, void *ctx) {
1402         wenv_t         *env = ctx;
1403         inline_irg_env *x = env->x;
1404         ir_opcode      code = get_irn_opcode(call);
1405         ir_graph       *callee;
1406         call_entry     *entry;
1407
1408         /* count meaningful nodes in irg */
1409         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
1410                 if (code != iro_Block) {
1411                         ++x->n_nodes;
1412                         ++x->n_nodes_orig;
1413                 } else {
1414                         ++x->n_blocks;
1415                 }
1416         }
1417
1418         if (code != iro_Call) return;
1419
1420         /* check, if it's a runtime call */
1421         if (env->ignore_runtime) {
1422                 ir_node *symc = get_Call_ptr(call);
1423
1424                 if (is_Global(symc)) {
1425                         ir_entity *ent = get_Global_entity(symc);
1426
1427                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
1428                                 return;
1429                 }
1430         }
1431
1432         /* collect all call nodes */
1433         ++x->n_call_nodes;
1434         ++x->n_call_nodes_orig;
1435
1436         callee = get_call_called_irg(call);
1437         if (callee != NULL) {
1438                 if (! env->ignore_callers) {
1439                         inline_irg_env *callee_env = get_irg_link(callee);
1440                         /* count all static callers */
1441                         ++callee_env->n_callers;
1442                         ++callee_env->n_callers_orig;
1443                 }
1444                 if (callee == current_ir_graph)
1445                         x->recursive = 1;
1446
1447                 /* link it in the list of possible inlinable entries */
1448                 entry = obstack_alloc(&temp_obst, sizeof(*entry));
1449                 entry->call       = call;
1450                 entry->callee     = callee;
1451                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
1452                 entry->benefice   = 0;
1453                 entry->local_adr  = 0;
1454                 entry->all_const  = 0;
1455
1456                 list_add_tail(&entry->list, &x->calls);
1457         }
1458 }
1459
1460 /**
1461  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
1462  * hence this irg is a leaf (it calls no other graphs).
1463  */
1464 inline static int is_leave(ir_graph *irg) {
1465         inline_irg_env *env = get_irg_link(irg);
1466         return env->n_call_nodes == 0;
1467 }
1468
1469 /**
1470  * Returns TRUE if the number of nodes in the callee is
1471  * smaller than size in the irg's environment.
1472  */
1473 inline static int is_smaller(ir_graph *callee, unsigned size) {
1474         inline_irg_env *env = get_irg_link(callee);
1475         return env->n_nodes < size;
1476 }
1477
1478 /**
1479  * Duplicate a call entry.
1480  *
1481  * @param entry     the original entry to duplicate
1482  * @param new_call  the new call node
1483  * @param loop_depth_delta
1484  *                  delta value for the loop depth
1485  */
1486 static call_entry *duplicate_call_entry(const call_entry *entry,
1487                                         ir_node *new_call, int loop_depth_delta) {
1488         call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
1489         nentry->call       = new_call;
1490         nentry->callee     = entry->callee;
1491         nentry->benefice   = entry->benefice;
1492         nentry->loop_depth = entry->loop_depth + loop_depth_delta;
1493         nentry->local_adr  = entry->local_adr;
1494         nentry->all_const  = entry->all_const;
1495
1496         return nentry;
1497 }
1498
1499 /**
1500  * Append all call nodes of the source environment to the call list of the
1501  * destination environment.
1502  *
1503  * @param dst         destination environment
1504  * @param src         source environment
1505  * @param loop_depth  the loop depth of the call that is replaced by the src list
1506  */
1507 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
1508         call_entry *entry, *nentry;
1509
1510         /* Note that the src list points to Call nodes in the inlined graph, but
1511            we need Call nodes in our graph. Luckily the inliner leaves this information
1512            in the link field. */
1513         list_for_each_entry(call_entry, entry, &src->calls, list) {
1514                 nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
1515                 list_add_tail(&nentry->list, &dst->calls);
1516         }
1517         dst->n_call_nodes += src->n_call_nodes;
1518         dst->n_nodes      += src->n_nodes;
1519 }
1520
1521 /*
1522  * Inlines small leaf methods at call sites where the called address comes
1523  * from a SymConst node that references the entity representing the called
1524  * method.
1525  * The size argument is a rough measure for the code size of the method:
1526  * Methods where the obstack containing the firm graph is smaller than
1527  * size are inlined.  (A usage sketch follows the function body below.)
1528  */
1529 void inline_leave_functions(unsigned maxsize, unsigned leavesize,
1530                             unsigned size, int ignore_runtime)
1531 {
1532         inline_irg_env   *env;
1533         ir_graph         *irg;
1534         int              i, n_irgs;
1535         ir_graph         *rem;
1536         int              did_inline;
1537         wenv_t           wenv;
1538         call_entry       *entry, *next;
1539         const call_entry *centry;
1540         pmap             *copied_graphs;
1541         pmap_entry       *pm_entry;
1542
1543         rem = current_ir_graph;
1544         obstack_init(&temp_obst);
1545
1546         /* a map for the copied graphs, used to inline recursive calls */
1547         copied_graphs = pmap_create();
1548
1549         /* extend all irgs by a temporary data structure for inlining. */
1550         n_irgs = get_irp_n_irgs();
1551         for (i = 0; i < n_irgs; ++i)
1552                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
1553
1554         /* Pre-compute information in temporary data structure. */
1555         wenv.ignore_runtime = ignore_runtime;
1556         wenv.ignore_callers = 0;
1557         for (i = 0; i < n_irgs; ++i) {
1558                 ir_graph *irg = get_irp_irg(i);
1559
1560                 assert(get_irg_phase_state(irg) != phase_building);
1561                 free_callee_info(irg);
1562
1563                 assure_cf_loop(irg);
1564                 wenv.x = get_irg_link(irg);
1565                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1566         }
1567
1568         /* -- and now inline. -- */
1569
1570         /* Inline leaf functions iteratively -- inlining may create new leaves. */
1571         do {
1572                 did_inline = 0;
1573
1574                 for (i = 0; i < n_irgs; ++i) {
1575                         ir_node *call;
1576                         int phiproj_computed = 0;
1577
1578                         current_ir_graph = get_irp_irg(i);
1579                         env              = get_irg_link(current_ir_graph);
1580
1581                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1582                         list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1583                                 ir_graph            *callee;
1584                                 irg_inline_property  prop;
1585
1586                                 if (env->n_nodes > maxsize)
1587                                         break;
1588
1589                                 call   = entry->call;
1590                                 callee = entry->callee;
1591
1592                                 prop = get_irg_inline_property(callee);
1593                                 if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1594                                         /* do not inline forbidden / weak graphs */
1595                                         continue;
1596                                 }
1597
1598                                 if (is_leave(callee) && (
1599                                     is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
1600                                         if (!phiproj_computed) {
1601                                                 phiproj_computed = 1;
1602                                                 collect_phiprojs(current_ir_graph);
1603                                         }
1604                                         did_inline = inline_method(call, callee);
1605
1606                                         if (did_inline) {
1607                                                 inline_irg_env *callee_env = get_irg_link(callee);
1608
1609                                                 /* call was inlined, Phi/Projs for current graph must be recomputed */
1610                                                 phiproj_computed = 0;
1611
1612                                                 /* Do some statistics */
1613                                                 env->got_inline = 1;
1614                                                 --env->n_call_nodes;
1615                                                 env->n_nodes += callee_env->n_nodes;
1616                                                 --callee_env->n_callers;
1617
1618                                                 /* remove this call from the list */
1619                                                 list_del(&entry->list);
1620                                                 continue;
1621                                         }
1622                                 }
1623                         }
1624                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1625                 }
1626         } while (did_inline);
1627
1628         /* inline other small functions. */
1629         for (i = 0; i < n_irgs; ++i) {
1630                 ir_node *call;
1631                 int phiproj_computed = 0;
1632
1633                 current_ir_graph = get_irp_irg(i);
1634                 env              = get_irg_link(current_ir_graph);
1635
1636                 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1637
1638                 /* note that the list of possible calls is updated during the process */
1639                 list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1640                         irg_inline_property prop;
1641                         ir_graph            *callee;
1642                         pmap_entry          *e;
1643
1644                         call   = entry->call;
1645                         callee = entry->callee;
1646
1647                         prop = get_irg_inline_property(callee);
1648                         if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
1649                                 /* do not inline forbidden / weak graphs */
1650                                 continue;
1651                         }
1652
1653                         e = pmap_find(copied_graphs, callee);
1654                         if (e != NULL) {
1655                                 /*
1656                                  * Remap callee if we have a copy.
1657                                  * FIXME: Should we do this only for recursive Calls ?
1658                                  */
1659                                 callee = e->value;
1660                         }
1661
1662                         if (prop >= irg_inline_forced ||
1663                             (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
1664                                 if (current_ir_graph == callee) {
1665                                         /*
1666                                          * Recursive call: we cannot directly inline because we cannot walk
1667                                          * the graph and change it. So we have to make a copy of the graph
1668                                          * first.
1669                                          */
1670
1671                                         inline_irg_env *callee_env;
1672                                         ir_graph       *copy;
1673
1674                                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1675
1676                                         /*
1677                                          * No copy yet, create one.
1678                                          * Note that recursive methods are never leaves, so it is sufficient
1679                                          * to test this condition here.
1680                                          */
1681                                         copy = create_irg_copy(callee);
1682
1683                                         /* create_irg_copy() destroys the Proj links, recompute them */
1684                                         phiproj_computed = 0;
1685
1686                                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1687
1688                                         /* allocate new environment */
1689                                         callee_env = alloc_inline_irg_env();
1690                                         set_irg_link(copy, callee_env);
1691
1692                                         assure_cf_loop(copy);
1693                                         wenv.x              = callee_env;
1694                                         wenv.ignore_callers = 1;
1695                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1696
1697                                         /*
1698                                          * Enter the entity of the original graph. This is needed
1699                                          * for inline_method(). However, note that ent->irg still points
1700                                          * to callee, NOT to copy.
1701                                          */
1702                                         set_irg_entity(copy, get_irg_entity(callee));
1703
1704                                         pmap_insert(copied_graphs, callee, copy);
1705                                         callee = copy;
1706
1707                                         /* we have only one caller: the original graph */
1708                                         callee_env->n_callers      = 1;
1709                                         callee_env->n_callers_orig = 1;
1710                                 }
1711                                 if (! phiproj_computed) {
1712                                         phiproj_computed = 1;
1713                                         collect_phiprojs(current_ir_graph);
1714                                 }
1715                                 did_inline = inline_method(call, callee);
1716                                 if (did_inline) {
1717                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1718
1719                                         /* call was inlined, Phi/Projs for current graph must be recomputed */
1720                                         phiproj_computed = 0;
1721
1722                                         /* callee was inlined. Append its call list. */
1723                                         env->got_inline = 1;
1724                                         --env->n_call_nodes;
1725                                         append_call_list(env, callee_env, entry->loop_depth);
1726                                         --callee_env->n_callers;
1727
1728                                         /* after we have inlined callee, all called methods inside callee
1729                                            are now called once more */
1730                                         list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
1731                                                 inline_irg_env *penv = get_irg_link(centry->callee);
1732                                                 ++penv->n_callers;
1733                                         }
1734
1735                                         /* remove this call from the list */
1736                                         list_del(&entry->list);
1737                                         continue;
1738                                 }
1739                         }
1740                 }
1741                 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1742         }
1743
1744         for (i = 0; i < n_irgs; ++i) {
1745                 irg = get_irp_irg(i);
1746                 env = get_irg_link(irg);
1747
1748                 if (env->got_inline) {
1749                         optimize_graph_df(irg);
1750                         optimize_cf(irg);
1751                 }
1752                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1753                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1754                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1755                         env->n_callers_orig, env->n_callers,
1756                         get_entity_name(get_irg_entity(irg))));
1757                 }
1758         }
1759
1760         /* kill the copied graphs: we don't need them anymore */
1761         foreach_pmap(copied_graphs, pm_entry) {
1762                 ir_graph *copy = pm_entry->value;
1763
1764                 /* reset the entity, otherwise it will be deleted in the next step ... */
1765                 set_irg_entity(copy, NULL);
1766                 free_ir_graph(copy);
1767         }
1768         pmap_destroy(copied_graphs);
1769
1770         obstack_free(&temp_obst, NULL);
1771         current_ir_graph = rem;
1772 }
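
/*
 * Usage sketch (illustrative only; all three size limits are assumed example
 * values, not recommended defaults): inline_leave_functions() is meant to be
 * called once for the whole program, e.g.
 *
 *     inline_leave_functions(750, 100, 50, 0);
 *
 * i.e. let caller graphs grow up to roughly maxsize = 750 nodes, always
 * consider leaf callees up to leavesize = 100 nodes, consider other callees
 * up to size = 50 nodes, and do not skip runtime calls (ignore_runtime = 0).
 */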
1773
1774 /**
1775  * Calculate the weight of all uses of one argument that receives the address of a local variable (worked example below the function).
1776  */
1777 static unsigned calc_method_local_weight(ir_node *arg) {
1778         int      i, j, k;
1779         unsigned v, weight = 0;
1780
1781         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1782                 ir_node *succ = get_irn_out(arg, i);
1783
1784                 switch (get_irn_opcode(succ)) {
1785                 case iro_Load:
1786                 case iro_Store:
1787                         /* Loads and Stores can be removed */
1788                         weight += 3;
1789                         break;
1790                 case iro_Sel:
1791                         /* check if all args are constant */
1792                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1793                                 ir_node *idx = get_Sel_index(succ, j);
1794                                 if (! is_Const(idx))
1795                                         return 0;
1796                         }
1797                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1798                            some unsupported node. */
1799                         v = calc_method_local_weight(succ);
1800                         if (v == 0)
1801                                 return 0;
1802                         /* we can kill one Sel with constant indexes, this is cheap */
1803                         weight += v + 1;
1804                         break;
1805                 case iro_Id:
1806                         /* when looking backward we might find Id nodes */
1807                         weight += calc_method_local_weight(succ);
1808                         break;
1809                 case iro_Tuple:
1810                         /* unoptimized tuple */
1811                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1812                                 ir_node *pred = get_Tuple_pred(succ, j);
1813                                 if (pred == arg) {
1814                                         /* look for Proj(j) */
1815                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1816                                                 ir_node *succ_succ = get_irn_out(succ, k);
1817                                                 if (is_Proj(succ_succ)) {
1818                                                         if (get_Proj_proj(succ_succ) == j) {
1819                                                                 /* found */
1820                                                                 weight += calc_method_local_weight(succ_succ);
1821                                                         }
1822                                                 } else {
1823                                                         /* this should NOT happen */
1824                                                         return 0;
1825                                                 }
1826                                         }
1827                                 }
1828                         }
1829                         break;
1830                 default:
1831                         /* any other node: unsupported yet or bad. */
1832                         return 0;
1833                 }
1834         }
1835         return weight;
1836 }
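
/*
 * Worked example (assumed IR shape, for illustration): an argument used by
 * one Load and one Store contributes 3 + 3 = 6.  If it is additionally used
 * by a Sel with only constant indexes whose sole user is a Load, that Sel
 * contributes its user's weight plus one, i.e. 3 + 1 = 4, giving a total
 * weight of 10.  Any unsupported user makes the whole weight collapse to 0.
 */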
1837
1838 /**
1839  * Calculate the parameter weights for transmitting the address of a local variable.
1840  */
1841 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg) {
1842         ir_entity *ent = get_irg_entity(irg);
1843         ir_type  *mtp;
1844         int      nparams, i, proj_nr;
1845         ir_node  *irg_args, *arg;
1846
1847         mtp      = get_entity_type(ent);
1848         nparams  = get_method_n_params(mtp);
1849
1850         /* allocate a new array. currently used as 'analysed' flag */
1851         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1852
1853         /* If the method has no parameters, there is nothing to do. */
1854         if (nparams <= 0)
1855                 return;
1856
1857         assure_irg_outs(irg);
1858         irg_args = get_irg_args(irg);
1859         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1860                 arg     = get_irn_out(irg_args, i);
1861                 proj_nr = get_Proj_proj(arg);
1862                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1863         }
1864 }
1865
1866 /**
1867  * Calculate the benefice for transmitting a local variable address.
1868  * After inlining, the local variable might be transformed into a
1869  * SSA variable by scalar_replacement().
1870  */
1871 static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
1872         inline_irg_env *env = get_irg_link(callee);
1873
1874         if (env->local_weights != NULL) {
1875                 if (pos < ARR_LEN(env->local_weights))
1876                         return env->local_weights[pos];
1877                 return 0;
1878         }
1879
1880         analyze_irg_local_weights(env, callee);
1881
1882         if (pos < ARR_LEN(env->local_weights))
1883                 return env->local_weights[pos];
1884         return 0;
1885 }
1886
1887 /**
1888  * Calculate a benefice value for inlining the given call (worked example below).
1889  *
1890  * @param entry      the call entry whose Call node we inspect
1891  * @param callee     the called graph
1892  */
1893 static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
1894 {
1895         ir_node   *call = entry->call;
1896         ir_entity *ent  = get_irg_entity(callee);
1897         ir_node   *frame_ptr;
1898         ir_type   *mtp;
1899         int       weight = 0;
1900         int       i, n_params, all_const;
1901         unsigned  cc, v;
1902         irg_inline_property prop;
1903
1904         inline_irg_env *callee_env;
1905
1906         prop = get_irg_inline_property(callee);
1907         if (prop == irg_inline_forbidden) {
1908                 DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
1909                     call, callee));
1910                 return entry->benefice = INT_MIN;
1911         }
1912
1913         if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
1914                 DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
1915                     call, callee));
1916                 return entry->benefice = INT_MIN;
1917         }
1918
1919         /* costs for every passed parameter */
1920         n_params = get_Call_n_params(call);
1921         mtp      = get_entity_type(ent);
1922         cc       = get_method_calling_convention(mtp);
1923         if (cc & cc_reg_param) {
1924                 /* register parameter, smaller costs for register parameters */
1925                 int max_regs = cc & ~cc_bits;
1926
1927                 if (max_regs < n_params)
1928                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1929                 else
1930                         weight += n_params * 2;
1931         } else {
1932                 /* parameters are passed on the stack */
1933                 weight += 5 * n_params;
1934         }
1935
1936         /* constant parameters improve the benefice */
1937         frame_ptr = get_irg_frame(current_ir_graph);
1938         all_const = 1;
1939         for (i = 0; i < n_params; ++i) {
1940                 ir_node *param = get_Call_param(call, i);
1941
1942                 if (is_Const(param)) {
1943                         weight += get_method_param_weight(ent, i);
1944                 } else {
1945                         all_const = 0;
1946                         if (is_SymConst(param))
1947                                 weight += get_method_param_weight(ent, i);
1948                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1949                                 /*
1950                                  * An address of a local variable is transmitted. After
1951                                  * inlining, scalar_replacement might be able to remove the
1952                                  * local variable, so honor this.
1953                                  */
1954                                 v = get_method_local_adress_weight(callee, i);
1955                                 weight += v;
1956                                 if (v > 0)
1957                                         entry->local_adr = 1;
1958                         }
1959                 }
1960         }
1961         entry->all_const = all_const;
1962
1963         callee_env = get_irg_link(callee);
1964         if (callee_env->n_callers == 1 &&
1965             callee != current_ir_graph &&
1966             get_entity_visibility(ent) == visibility_local) {
1967                 weight += 700;
1968         }
1969
1970         /* give a bonus for functions with one block */
1971         if (callee_env->n_blocks == 1)
1972                 weight = weight * 3 / 2;
1973
1974         /* and one for small non-recursive functions: we want them inlined in almost every case */
1975         if (callee_env->n_nodes < 30 && !callee_env->recursive)
1976                 weight += 2000;
1977
1978         /* and finally for leaves: they do not increase the register pressure
1979            because of callee-save registers */
1980         if (callee_env->n_call_nodes == 0)
1981                 weight += 400;
1982
1983         /* it is important to inline inner loops first */
1984         if (entry->loop_depth > 30)
1985                 weight += 30 * 1024;
1986         else
1987                 weight += entry->loop_depth * 1024;
1988
1989         /*
1990          * All arguments constant is probably a good sign, give an extra bonus
1991          */
1992         if (all_const)
1993                 weight += 1024;
1994
1995         return entry->benefice = weight;
1996 }
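
/*
 * Worked example (numbers follow the weights above; get_method_param_weight()
 * is assumed to return 0 here): a call at loop depth 2 passing two constant
 * parameters on the stack to a non-recursive, single-block leaf callee with
 * fewer than 30 nodes and more than one caller:
 *
 *     2 stack parameters            weight = 2 * 5            =   10
 *     single-block bonus            weight = 10 * 3 / 2       =   15
 *     small non-recursive callee    weight = 15 + 2000        = 2015
 *     leaf callee                   weight = 2015 + 400       = 2415
 *     loop depth 2                  weight = 2415 + 2 * 1024  = 4463
 *     all parameters constant       weight = 4463 + 1024      = 5487
 */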
1997
1998 static ir_graph **irgs;
1999 static int      last_irg;
2000
2001 /**
2002  * Callgraph walker, collect all visited graphs.
2003  */
2004 static void callgraph_walker(ir_graph *irg, void *data) {
2005         (void) data;
2006         irgs[last_irg++] = irg;
2007 }
2008
2009 /**
2010  * Creates an inline order for all graphs.
2011  *
2012  * @return the list of graphs.
2013  */
2014 static ir_graph **create_irg_list(void) {
2015         ir_entity **free_methods;
2016         int       arr_len;
2017         int       n_irgs = get_irp_n_irgs();
2018
2019         cgana(&arr_len, &free_methods);
2020         xfree(free_methods);
2021
2022         compute_callgraph();
2023
2024         last_irg = 0;
2025         irgs     = XMALLOCNZ(ir_graph*, n_irgs);
2026
2027         callgraph_walk(NULL, callgraph_walker, NULL);
2028         assert(n_irgs == last_irg);
2029
2030         return irgs;
2031 }
2032
2033 /**
2034  * Push a call onto the priority queue if its benefice is big enough.
2035  *
2036  * @param pqueue   the priority queue of calls
2037  * @param call     the call entry
2038  * @param inline_threshold
2039  *                 the threshold value
2040  */
2041 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
2042                             int inline_threshold)
2043 {
2044         ir_graph            *callee  = call->callee;
2045         irg_inline_property prop     = get_irg_inline_property(callee);
2046         int                 benefice = calc_inline_benefice(call, callee);
2047
2048         DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
2049             get_irn_irg(call->call), call->call, callee, benefice));
2050
2051         if (prop < irg_inline_forced && benefice < inline_threshold) {
2052                 return;
2053         }
2054
2055         pqueue_put(pqueue, call, benefice);
2056 }
2057
2058 /**
2059  * Try to inline calls into a graph.
2060  *
2061  * @param irg      the graph into which we inline
2062  * @param maxsize  do NOT inline if the size of irg gets
2063  *                 bigger than this amount
2064  * @param inline_threshold
2065  *                 threshold value for inline decision
2066  * @param copied_graphs
2067  *                 map containing copies of recursive graphs
2068  */
2069 static void inline_into(ir_graph *irg, unsigned maxsize,
2070                         int inline_threshold, pmap *copied_graphs)
2071 {
2072         int            phiproj_computed = 0;
2073         inline_irg_env *env = get_irg_link(irg);
2074         call_entry     *curr_call;
2075         wenv_t         wenv;
2076         pqueue_t       *pqueue;
2077
2078         if (env->n_call_nodes == 0)
2079                 return;
2080
2081         if (env->n_nodes > maxsize) {
2082                 DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
2083                 return;
2084         }
2085
2086         current_ir_graph = irg;
2087         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2088
2089         /* put all calls of this irg into the priority queue */
2090         pqueue = new_pqueue();
2091
2092         list_for_each_entry(call_entry, curr_call, &env->calls, list) {
2093                 assert(is_Call(curr_call->call));
2094                 maybe_push_call(pqueue, curr_call, inline_threshold);
2095         }
2096
2097         /* note that the list of possible calls is updated during the process */
2098         while (!pqueue_empty(pqueue)) {
2099                 int                 did_inline;
2100                 call_entry          *curr_call  = pqueue_pop_front(pqueue);
2101                 ir_graph            *callee     = curr_call->callee;
2102                 ir_node             *call_node  = curr_call->call;
2103                 inline_irg_env      *callee_env = get_irg_link(callee);
2104                 irg_inline_property prop        = get_irg_inline_property(callee);
2105                 int                 loop_depth;
2106                 const call_entry    *centry;
2107                 pmap_entry          *e;
2108
2109                 if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
2110                         DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
2111                                                 env->n_nodes, callee, callee_env->n_nodes));
2112                         continue;
2113                 }
2114
2115                 e = pmap_find(copied_graphs, callee);
2116                 if (e != NULL) {
2117                         int benefice = curr_call->benefice;
2118                         /*
2119                          * Reduce the weight for recursive functions IFF not all arguments are const:
2120                          * inlining recursive functions is rarely good.
2121                          */
2122                         if (!curr_call->all_const)
2123                                 benefice -= 2000;
2124                         if (benefice < inline_threshold)
2125                                 continue;
2126
2127                         /*
2128                          * Remap callee if we have a copy.
2129                          */
2130                         callee     = e->value;
2131                         callee_env = get_irg_link(callee);
2132                 }
2133
2134                 if (current_ir_graph == callee) {
2135                         /*
2136                          * Recursive call: we cannot directly inline because we cannot
2137                          * walk the graph and change it. So we have to make a copy of
2138                          * the graph first.
2139                          */
2140                         int benefice = curr_call->benefice;
2141                         ir_graph *copy;
2142
2143                         /*
2144                          * Reduce the weight for recursive functions IFF not all arguments are const:
2145                          * inlining recursive functions is rarely good.
2146                          */
2147                         if (!curr_call->all_const)
2148                                 benefice -= 2000;
2149                         if (benefice < inline_threshold)
2150                                 continue;
2151
2152                         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2153
2154                         /*
2155                          * No copy yet, create one.
2156                          * Note that recursive methods are never leaves, so it is
2157                          * sufficient to test this condition here.
2158                          */
2159                         copy = create_irg_copy(callee);
2160
2161                         /* create_irg_copy() destroys the Proj links, recompute them */
2162                         phiproj_computed = 0;
2163
2164                         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2165
2166                         /* allocate a new environment */
2167                         callee_env = alloc_inline_irg_env();
2168                         set_irg_link(copy, callee_env);
2169
2170                         assure_cf_loop(copy);
2171                         wenv.x              = callee_env;
2172                         wenv.ignore_callers = 1;
2173                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
2174
2175                         /*
2176                          * Enter the entity of the original graph. This is needed
2177                          * for inline_method(). However, note that ent->irg still points
2178                          * to callee, NOT to copy.
2179                          */
2180                         set_irg_entity(copy, get_irg_entity(callee));
2181
2182                         pmap_insert(copied_graphs, callee, copy);
2183                         callee = copy;
2184
2185                         /* we have only one caller: the original graph */
2186                         callee_env->n_callers      = 1;
2187                         callee_env->n_callers_orig = 1;
2188                 }
2189                 if (! phiproj_computed) {
2190                         phiproj_computed = 1;
2191                         collect_phiprojs(current_ir_graph);
2192                 }
2193                 did_inline = inline_method(call_node, callee);
2194                 if (!did_inline)
2195                         continue;
2196
2197                 /* call was inlined, Phi/Projs for current graph must be recomputed */
2198                 phiproj_computed = 0;
2199
2200                 /* remove it from the caller list */
2201                 list_del(&curr_call->list);
2202
2203         /* callee was inlined. Append its call list. */
2204                 env->got_inline = 1;
2205                 if (curr_call->local_adr)
2206                         env->local_vars = 1;
2207                 --env->n_call_nodes;
2208
2209                 /* we just generate a bunch of new calls */
2210                 loop_depth = curr_call->loop_depth;
2211                 list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
2212                         inline_irg_env *penv = get_irg_link(centry->callee);
2213                         ir_node        *new_call;
2214                         call_entry     *new_entry;
2215
2216                         /* after we have inlined callee, all called methods inside
2217                          * callee are now called once more */
2218                         ++penv->n_callers;
2219
2220                         /* Note that the src list points to Call nodes in the inlined graph,
2221                          * but we need Call nodes in our graph. Luckily the inliner leaves
2222                          * this information in the link field. */
2223                         new_call = get_irn_link(centry->call);
2224                         assert(is_Call(new_call));
2225
2226                         new_entry = duplicate_call_entry(centry, new_call, loop_depth);
2227                         list_add_tail(&new_entry->list, &env->calls);
2228                         maybe_push_call(pqueue, new_entry, inline_threshold);
2229                 }
2230
2231                 env->n_call_nodes += callee_env->n_call_nodes;
2232                 env->n_nodes += callee_env->n_nodes;
2233                 --callee_env->n_callers;
2234         }
2235         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
2236         del_pqueue(pqueue);
2237 }
2238
2239 /*
2240  * Heuristic inliner. Calculates a benefice value for every call and inlines
2241  * those calls whose benefice reaches the threshold (usage sketch below).
2242  */
2243 void inline_functions(unsigned maxsize, int inline_threshold) {
2244         inline_irg_env   *env;
2245         int              i, n_irgs;
2246         ir_graph         *rem;
2247         wenv_t           wenv;
2248         pmap             *copied_graphs;
2249         pmap_entry       *pm_entry;
2250         ir_graph         **irgs;
2251
2252         rem = current_ir_graph;
2253         obstack_init(&temp_obst);
2254
2255         irgs = create_irg_list();
2256
2257         /* a map for the copied graphs, used to inline recursive calls */
2258         copied_graphs = pmap_create();
2259
2260         /* extend all irgs by a temporary data structure for inlining. */
2261         n_irgs = get_irp_n_irgs();
2262         for (i = 0; i < n_irgs; ++i)
2263                 set_irg_link(irgs[i], alloc_inline_irg_env());
2264
2265         /* Pre-compute information in temporary data structure. */
2266         wenv.ignore_runtime = 0;
2267         wenv.ignore_callers = 0;
2268         for (i = 0; i < n_irgs; ++i) {
2269                 ir_graph *irg = irgs[i];
2270
2271                 free_callee_info(irg);
2272
2273                 wenv.x = get_irg_link(irg);
2274                 assure_cf_loop(irg);
2275                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
2276         }
2277
2278         /* -- and now inline. -- */
2279         for (i = 0; i < n_irgs; ++i) {
2280                 ir_graph *irg = irgs[i];
2281
2282                 inline_into(irg, maxsize, inline_threshold, copied_graphs);
2283         }
2284
2285         for (i = 0; i < n_irgs; ++i) {
2286                 ir_graph *irg = irgs[i];
2287
2288                 env = get_irg_link(irg);
2289                 if (env->got_inline) {
2290                         /* this irg got calls inlined: optimize it */
2291                         if (get_opt_combo()) {
2292                                 if (env->local_vars) {
2293                                         scalar_replacement_opt(irg);
2294                                 }
2295                                 combo(irg);
2296                         } else {
2297                                 if (env->local_vars) {
2298                                         if (scalar_replacement_opt(irg)) {
2299                                                 optimize_graph_df(irg);
2300                                         }
2301                                 }
2302                                 optimize_cf(irg);
2303                         }
2304                 }
2305                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
2306                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
2307                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
2308                         env->n_callers_orig, env->n_callers,
2309                         get_entity_name(get_irg_entity(irg))));
2310                 }
2311         }
2312
2313         /* kill the copied graphs: we don't need them anymore */
2314         foreach_pmap(copied_graphs, pm_entry) {
2315                 ir_graph *copy = pm_entry->value;
2316
2317                 /* reset the entity, otherwise it will be deleted in the next step ... */
2318                 set_irg_entity(copy, NULL);
2319                 free_ir_graph(copy);
2320         }
2321         pmap_destroy(copied_graphs);
2322
2323         xfree(irgs);
2324
2325         obstack_free(&temp_obst, NULL);
2326         current_ir_graph = rem;
2327 }
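
/*
 * Usage sketch (illustrative only; maxsize = 750 and inline_threshold = 0 are
 * assumed example values, not mandated defaults): the heuristic inliner is
 * typically invoked once per program after all graphs are constructed, e.g.
 *
 *     inline_functions(750, 0);
 */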
2328
2329 void firm_init_inline(void) {
2330         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
2331 }