[libfirm] / ir / opt / opt_inline.c
1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19
20 /**
21  * @file
22  * @brief    Dead node elimination and Procedure Inlining.
23  * @author   Michael Beck, Goetz Lindenmaier
24  */
25 #include "config.h"
26
27 #include <limits.h>
28 #include <stdbool.h>
29 #include <assert.h>
30
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irprog_t.h"
34
35 #include "iroptimize.h"
36 #include "ircons_t.h"
37 #include "iropt_t.h"
38 #include "irgopt.h"
39 #include "irgmod.h"
40 #include "irgwalk.h"
41
42 #include "array_t.h"
43 #include "list.h"
44 #include "pset.h"
45 #include "pmap.h"
46 #include "pdeq.h"
47 #include "xmalloc.h"
48 #include "pqueue.h"
49
50 #include "irouts.h"
51 #include "irloop_t.h"
52 #include "irbackedge_t.h"
53 #include "opt_init.h"
54 #include "cgana.h"
55 #include "trouts.h"
56 #include "error.h"
57
58 #include "analyze_irg_args.h"
59 #include "iredges_t.h"
60 #include "irflag_t.h"
61 #include "irhooks.h"
62 #include "irtools.h"
63 #include "iropt_dbg.h"
64 #include "irpass_t.h"
65 #include "irnodemap.h"
66
67 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
68
69 /*------------------------------------------------------------------*/
70 /* Routines for dead node elimination / copying garbage collection  */
71 /* of the obstack.                                                  */
72 /*------------------------------------------------------------------*/
73
74 /**
75  * Remember the new node in the old node by using a field all nodes have.
76  */
77 static void set_new_node(ir_node *node, ir_node *new_node)
78 {
79         set_irn_link(node, new_node);
80 }
81
82 /**
83  * Get this new node, before the old node is forgotten.
84  */
85 static inline ir_node *get_new_node(ir_node *old_node)
86 {
87         assert(irn_visited(old_node));
88         return (ir_node*) get_irn_link(old_node);
89 }
90
91 /*--------------------------------------------------------------------*/
92 /*  Functionality for inlining                                         */
93 /*--------------------------------------------------------------------*/
94
95 /**
96  * Copy node for inlining.  Updates attributes that change when
97  * inlining but not for dead node elimination.
98  *
99  * Copies the node by calling irn_copy_into_irg() and then updates the
100  * entity if it's a frame-local one.  env must be the new graph the node
101  * is copied into.  The new entities must be in the link field of
102  * the old entities.
103  */
104 static void copy_node_inline(ir_node *node, void *env)
105 {
106         ir_graph *new_irg  = (ir_graph*) env;
107         ir_node  *new_node = irn_copy_into_irg(node, new_irg);
108
109         set_new_node(node, new_node);
110         if (is_Sel(node)) {
111                 ir_graph  *old_irg        = get_irn_irg(node);
112                 ir_type   *old_frame_type = get_irg_frame_type(old_irg);
113                 ir_entity *old_entity     = get_Sel_entity(node);
114                 assert(is_Sel(new_node));
115                 /* use copied entities from the new frame */
116                 if (get_entity_owner(old_entity) == old_frame_type) {
117                         ir_entity *new_entity = (ir_entity*)get_entity_link(old_entity);
118                         assert(new_entity != NULL);
119                         set_Sel_entity(new_node, new_entity);
120                 }
121         } else if (is_Block(new_node)) {
122                 new_node->attr.block.irg.irg = new_irg;
123         }
124 }
125
126 static void set_preds_inline(ir_node *node, void *env)
127 {
128         ir_node *new_node;
129
130         irn_rewire_inputs(node);
131
132         /* move constants into start block */
133         new_node = get_new_node(node);
134         if (is_irn_constlike(new_node)) {
135                 ir_graph *new_irg     = (ir_graph *) env;
136                 ir_node  *start_block = get_irg_start_block(new_irg);
137                 set_nodes_block(new_node, start_block);
138         }
139 }
140
141 /**
142  * Walker: checks for constructs that prevent inlining (address-taken blocks, accesses to parameter entities on the frame, stack Allocs).
143  */
144 static void find_addr(ir_node *node, void *env)
145 {
146         bool *allow_inline = (bool*)env;
147
148         if (is_Block(node) && get_Block_entity(node)) {
149                 /*
150                  * Currently we cannot correctly handle blocks whose address
151                  * was taken when inlining.
152                  */
153                 *allow_inline = false;
154         } else if (is_Sel(node)) {
155                 ir_graph *irg = current_ir_graph;
156                 if (get_Sel_ptr(node) == get_irg_frame(irg)) {
157                         /* access to frame */
158                         ir_entity *ent = get_Sel_entity(node);
159                         if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
160                                 /* access to value_type */
161                                 *allow_inline = false;
162                         }
163                         if (is_parameter_entity(ent)) {
164                                 *allow_inline = false;
165                         }
166                 }
167         } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
168                 /* From GCC:
169                  * Refuse to inline an alloca call unless the user explicitly forces it,
170                  * as this may drastically increase the program's memory overhead when
171                  * the function using alloca is called in a loop.  In GCC, inlining into
172                  * schedule_block in SPEC2000 caused it to require 2GB of RAM instead of 256MB.
173                  *
174                  * Sadly this is true for our implementation as well.
175                  * Moreover, we cannot differentiate between alloca() and VLAs yet, so
176                  * this disables inlining of functions using VLAs (which would be completely
177                  * safe).
178                  *
179                  * Two solutions:
180                  * - add a flag to the Alloc node for "real" alloca() calls
181                  * - add a new Stack-Restore node at the end of a function using
182                  *   alloca()
183                  */
184                 *allow_inline = false;
185         }
186 }
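
/* Illustrative sketch (hypothetical C code, not from the original sources):
 * constructs that make find_addr() veto inlining of the function containing
 * them.  The names f, g and h are assumptions for illustration only.
 *
 *     int f(unsigned n) {
 *             char *buf = alloca(n);   // stack Alloc: f will not be inlined
 *             int   vla[n];            // a VLA is lowered to a stack Alloc too
 *             return buf[0] + vla[0];
 *     }
 *
 *     int g(int x) { return h(&x); }   // address of a parameter entity
 */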
187
188 /**
189  * Check if we can inline a given call.
190  * Currently, two cases cannot be inlined:
191  * - calls with compound arguments or results
192  * - graphs that take the address of a parameter
193  *
194  * These conditions are checked here.
195  */
196 static bool can_inline(ir_node *call, ir_graph *called_graph)
197 {
198         ir_entity          *called      = get_irg_entity(called_graph);
199         ir_type            *called_type = get_entity_type(called);
200         ir_type            *call_type   = get_Call_type(call);
201         size_t              n_params    = get_method_n_params(called_type);
202         size_t              n_arguments = get_method_n_params(call_type);
203         size_t              n_res       = get_method_n_ress(called_type);
204         irg_inline_property prop        = get_irg_inline_property(called_graph);
205         size_t              i;
206         bool                res;
207
208         if (prop == irg_inline_forbidden)
209                 return false;
210
211         if (n_arguments != n_params) {
212                 /* this is a bad feature of C: without a prototype, we can
213                  * call a function with fewer parameters than needed. Currently
214                  * we don't support this, although we could use Unknown then. */
215                 return false;
216         }
217         if (n_res != get_method_n_ress(call_type)) {
218                 return false;
219         }
220
221         /* Argh, compiling C has some bad consequences: if argument and parameter
222          * types differ, it is implementation-defined what happens.
223          * We support inlining if the bit size of the types matches AND
224          * the same arithmetic is used. */
225         for (i = 0; i < n_params; ++i) {
226                 ir_type *param_tp = get_method_param_type(called_type, i);
227                 ir_type *arg_tp   = get_method_param_type(call_type, i);
228
229                 if (param_tp != arg_tp) {
230                         ir_mode *pmode = get_type_mode(param_tp);
231                         ir_mode *amode = get_type_mode(arg_tp);
232
233                         if (pmode == NULL || amode == NULL)
234                                 return false;
235                         if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
236                                 return false;
237                         if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
238                                 return false;
239                         /* otherwise we can simply "reinterpret" the bits */
240                 }
241         }
242         for (i = 0; i < n_res; ++i) {
243                 ir_type *decl_res_tp = get_method_res_type(called_type, i);
244                 ir_type *used_res_tp = get_method_res_type(call_type, i);
245
246                 if (decl_res_tp != used_res_tp) {
247                         ir_mode *decl_mode = get_type_mode(decl_res_tp);
248                         ir_mode *used_mode = get_type_mode(used_res_tp);
249                         if (decl_mode == NULL || used_mode == NULL)
250                                 return false;
251                         if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
252                                 return false;
253                         if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
254                                 return false;
255                         /* otherwise we can "reinterpret" the bits */
256                 }
257         }
258
259         /* check parameters for compound arguments */
260         for (i = 0; i < n_params; ++i) {
261                 ir_type *p_type = get_method_param_type(call_type, i);
262
263                 if (is_compound_type(p_type))
264                         return false;
265         }
266
267         /* check results for compound types */
268         for (i = 0; i < n_res; ++i) {
269                 ir_type *r_type = get_method_res_type(call_type, i);
270
271                 if (is_compound_type(r_type))
272                         return false;
273         }
274
275         res = true;
276         irg_walk_graph(called_graph, find_addr, NULL, &res);
277
278         return res;
279 }
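
/* Illustrative sketch (hypothetical C code, not from the original sources):
 * a call that can_inline() rejects.  Passing or returning a struct by value
 * gives the call type compound parameter/result types, so the compound-type
 * checks above return false and the call is left alone.
 *
 *     struct pair { int a, b; };
 *     struct pair make_pair(int a, int b);
 *
 *     struct pair p = make_pair(1, 2);   // compound result type: not inlined
 */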
280
281 enum exc_mode {
282         exc_handler,    /**< There is a handler. */
283         exc_no_handler  /**< Exception handling not represented. */
284 };
285
286 /**
287  * Copy all entities on the stack frame of one irg to the stack frame of another.
288  * Sets the entity links of the old entities to the copies.
289  */
290 static void copy_frame_entities(ir_graph *from, ir_graph *to)
291 {
292         ir_type *from_frame = get_irg_frame_type(from);
293         ir_type *to_frame   = get_irg_frame_type(to);
294         size_t   n_members  = get_class_n_members(from_frame);
295         size_t   i;
296         assert(from_frame != to_frame);
297
298         for (i = 0; i < n_members; ++i) {
299                 ir_entity *old_ent = get_class_member(from_frame, i);
300                 ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
301                 set_entity_link(old_ent, new_ent);
302                 assert (!is_parameter_entity(old_ent));
303         }
304 }
305
306 /* Inlines a method at the given call site. */
307 int inline_method(ir_node *call, ir_graph *called_graph)
308 {
309         ir_node       *pre_call;
310         ir_node       *post_call, *post_bl;
311         ir_node       *in[pn_Start_max+1];
312         ir_node       *end, *end_bl, *block;
313         ir_node       **res_pred;
314         ir_node       **cf_pred;
315         ir_node       **args_in;
316         ir_node       *ret, *phi;
317         int           arity, n_ret, n_exc, n_res, i, j, rem_opt;
318         int           irn_arity, n_params;
319         int           n_mem_phi;
320         enum exc_mode exc_handling;
321         ir_type       *mtp;
322         ir_type       *ctp;
323         ir_entity     *ent;
324         ir_graph      *rem;
325         ir_graph      *irg = get_irn_irg(call);
326
327         /* we cannot inline some types of calls */
328         if (! can_inline(call, called_graph))
329                 return 0;
330
331         /* We cannot inline a recursive call. The graph must be copied before
332          * the call to inline_method() using create_irg_copy(). */
333         if (called_graph == irg)
334                 return 0;
335
336         ent      = get_irg_entity(called_graph);
337         mtp      = get_entity_type(ent);
338         ctp      = get_Call_type(call);
339         n_params = get_method_n_params(mtp);
340         n_res    = get_method_n_ress(mtp);
341
342         rem = current_ir_graph;
343         current_ir_graph = irg;
344
345         DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
346
347         /* optimizations can cause problems when allocating new nodes */
348         rem_opt = get_opt_optimize();
349         set_optimize(0);
350
351         /* Handle graph state */
352         assert(get_irg_phase_state(irg) != phase_building);
353         assert(get_irg_pinned(irg) == op_pin_state_pinned);
354         assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
355         clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
356                            | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
357         set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
358         clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
359         edges_deactivate(irg);
360
361         /* here we know we WILL inline, so inform the statistics */
362         hook_inline(call, called_graph);
363
364         /* -- Decide how to handle exception control flow: Is there a handler
365            for the Call node, or do we branch directly to End on an exception?
366            exc_handling:
367            exc_handler:    there is a handler.
368            exc_no_handler: exception handling not represented in Firm. -- */
369         {
370                 ir_node *Xproj = NULL;
371                 ir_node *proj;
372                 for (proj = (ir_node*)get_irn_link(call); proj != NULL;
373                      proj = (ir_node*)get_irn_link(proj)) {
374                         long proj_nr = get_Proj_proj(proj);
375                         if (proj_nr == pn_Call_X_except) Xproj = proj;
376                 }
377                 exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
378         }
379
380         /* create the argument tuple */
381         args_in = ALLOCAN(ir_node*, n_params);
382
383         block = get_nodes_block(call);
384         for (i = n_params - 1; i >= 0; --i) {
385                 ir_node *arg      = get_Call_param(call, i);
386                 ir_type *param_tp = get_method_param_type(mtp, i);
387                 ir_mode *mode     = get_type_mode(param_tp);
388
389                 if (mode != get_irn_mode(arg)) {
390                         arg = new_r_Conv(block, arg, mode);
391                 }
392                 args_in[i] = arg;
393         }
394
395         /* pre_call is a Tuple of memory, frame and arguments of the call; it later
396          * replaces the Start node of the called graph.  post_call is the old Call
397          * node and collects the results of the called graph.  Both end up as tuples. */
398         post_bl = get_nodes_block(call);
399         /* XxMxPxPxPxT of Start + parameter of Call */
400         in[pn_Start_M]              = get_Call_mem(call);
401         in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
402         in[pn_Start_P_frame_base]   = get_irg_frame(irg);
403         in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
404         pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
405         post_call = call;
406
407         /* --
408            The new block gets the ins of the old block, pre_call and all its
409            predecessors and all Phi nodes. -- */
410         part_block(pre_call);
411
412         /* increment visited flag for later walk */
413         inc_irg_visited(called_graph);
414
415         /* link some nodes to nodes in the current graph, so the linked nodes are
416          * used instead of being copied.
417          * This way the copier uses the created Tuple instead of copying the Start
418          * node, and similarly for singleton nodes like NoMem and Bad.
419          * Note: this prevents the predecessors from being copied - only do it for
420          *       nodes without predecessors */
421         {
422                 ir_node *start_block;
423                 ir_node *start;
424                 ir_node *nomem;
425
426                 start_block = get_irg_start_block(called_graph);
427                 set_new_node(start_block, get_nodes_block(pre_call));
428                 mark_irn_visited(start_block);
429
430                 start = get_irg_start(called_graph);
431                 set_new_node(start, pre_call);
432                 mark_irn_visited(start);
433
434                 nomem = get_irg_no_mem(called_graph);
435                 set_new_node(nomem, get_irg_no_mem(irg));
436                 mark_irn_visited(nomem);
437         }
438
439         /* the entity link is used to link entities on the old stack frame to the
440          * new stack frame */
441         irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
442
443         /* copy entities and nodes */
444         assert(!irn_visited(get_irg_end(called_graph)));
445         copy_frame_entities(called_graph, irg);
446         irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
447                       irg);
448
449         irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
450
451         /* -- Merge the end of the inlined procedure with the call site -- */
452         /* We will turn the old Call node into a Tuple with the following
453            predecessors:
454            -1:  Block of Tuple.
455            0: Phi of all Memories of Return statements.
456            1: Jmp from new Block that merges the control flow from all exception
457            predecessors of the old end block.
458            2: Tuple of all results.
459            3: Phi of Exception memories.
460            In case the old Call directly branches to End on an exception we don't
461            need the block merging all exceptions nor the Phi of the exception
462            memories.
463         */
464
465         /* Precompute some values */
466         end_bl = get_new_node(get_irg_end_block(called_graph));
467         end    = get_new_node(get_irg_end(called_graph));
468         arity  = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
469         n_res  = get_method_n_ress(get_Call_type(call));
470
471         res_pred = XMALLOCN(ir_node*, n_res);
472         cf_pred  = XMALLOCN(ir_node*, arity);
473
474         /* take over keepalives of the inlined graph */
475         irn_arity = get_irn_arity(end);
476         for (i = 0; i < irn_arity; i++) {
477                 ir_node *ka = get_End_keepalive(end, i);
478                 if (! is_Bad(ka))
479                         add_End_keepalive(get_irg_end(irg), ka);
480         }
481
482         /* replace Return nodes by Jmp nodes */
483         n_ret = 0;
484         for (i = 0; i < arity; i++) {
485                 ir_node *ret;
486                 ret = get_Block_cfgpred(end_bl, i);
487                 if (is_Return(ret)) {
488                         ir_node *block = get_nodes_block(ret);
489                         cf_pred[n_ret] = new_r_Jmp(block);
490                         n_ret++;
491                 }
492         }
493         set_irn_in(post_bl, n_ret, cf_pred);
494
495         /* build a Tuple for all results of the method.
496          * add Phi node if there was more than one Return. */
497         turn_into_tuple(post_call, pn_Call_max+1);
498         /* First the Memory-Phi */
499         n_mem_phi = 0;
500         for (i = 0; i < arity; i++) {
501                 ret = get_Block_cfgpred(end_bl, i);
502                 if (is_Return(ret)) {
503                         cf_pred[n_mem_phi++] = get_Return_mem(ret);
504                 }
505                 /* memory output for some exceptions is directly connected to End */
506                 if (is_Call(ret)) {
507                         cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 3);
508                 } else if (is_fragile_op(ret)) {
509                         /* We rely on all fragile ops having their memory output at the same position. */
510                         cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 0);
511                 } else if (is_Raise(ret)) {
512                         cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 1);
513                 }
514         }
515         phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
516         set_Tuple_pred(call, pn_Call_M, phi);
517         /* Conserve Phi-list for further inlinings -- but might be optimized */
518         if (get_nodes_block(phi) == post_bl) {
519                 set_irn_link(phi, get_irn_link(post_bl));
520                 set_irn_link(post_bl, phi);
521         }
522         /* Now the real results */
523         if (n_res > 0) {
524                 ir_node *result_tuple;
525                 for (j = 0; j < n_res; j++) {
526                         ir_type *res_type = get_method_res_type(ctp, j);
527                         ir_mode *res_mode = get_type_mode(res_type);
528                         n_ret = 0;
529                         for (i = 0; i < arity; i++) {
530                                 ret = get_Block_cfgpred(end_bl, i);
531                                 if (is_Return(ret)) {
532                                         ir_node *res = get_Return_res(ret, j);
533                                         if (get_irn_mode(res) != res_mode) {
534                                                 ir_node *block = get_nodes_block(res);
535                                                 res = new_r_Conv(block, res, res_mode);
536                                         }
537                                         cf_pred[n_ret] = res;
538                                         n_ret++;
539                                 }
540                         }
541                         if (n_ret > 0) {
542                                 phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
543                         } else {
544                                 phi = new_r_Bad(irg, res_mode);
545                         }
546                         res_pred[j] = phi;
547                         /* Conserve Phi-list for further inlinings -- but might be optimized */
548                         if (get_nodes_block(phi) == post_bl) {
549                                 set_Phi_next(phi, get_Block_phis(post_bl));
550                                 set_Block_phis(post_bl, phi);
551                         }
552                 }
553                 result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
554                 set_Tuple_pred(call, pn_Call_T_result, result_tuple);
555         } else {
556                 set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
557         }
558         /* handle the regular call */
559         set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
560
561         /* Finally the exception control flow.
562            We have two possible situations:
563            First if the Call branches to an exception handler:
564            We need to add a Phi node to
565            collect the memory containing the exception objects.  Further we need
566            to add another block to get a correct representation of this Phi.  To
567            this block we add a Jmp that resolves into the X output of the Call
568            when the Call is turned into a tuple.
569            Second: There is no exception edge. Just add all inlined exception
570            branches to the End node.
571          */
572         if (exc_handling == exc_handler) {
573                 n_exc = 0;
574                 for (i = 0; i < arity; i++) {
575                         ir_node *ret, *irn;
576                         ret = get_Block_cfgpred(end_bl, i);
577                         irn = skip_Proj(ret);
578                         if (is_fragile_op(irn) || is_Raise(irn)) {
579                                 cf_pred[n_exc] = ret;
580                                 ++n_exc;
581                         }
582                 }
583                 if (n_exc > 0) {
584                         if (n_exc == 1) {
585                                 /* simple fix */
586                                 set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
587                         } else {
588                                 ir_node *block = new_r_Block(irg, n_exc, cf_pred);
589                                 set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
590                         }
591                 } else {
592                         set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
593                 }
594         } else {
595                 ir_node *main_end_bl;
596                 int main_end_bl_arity;
597                 ir_node **end_preds;
598
599                 /* assert(exc_handling == exc_no_handler || there are no exceptions) */
600                 n_exc = 0;
601                 for (i = 0; i < arity; i++) {
602                         ir_node *ret = get_Block_cfgpred(end_bl, i);
603                         ir_node *irn = skip_Proj(ret);
604
605                         if (is_fragile_op(irn) || is_Raise(irn)) {
606                                 cf_pred[n_exc] = ret;
607                                 n_exc++;
608                         }
609                 }
610                 main_end_bl       = get_irg_end_block(irg);
611                 main_end_bl_arity = get_irn_arity(main_end_bl);
612                 end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
613
614                 for (i = 0; i < main_end_bl_arity; ++i)
615                         end_preds[i] = get_irn_n(main_end_bl, i);
616                 for (i = 0; i < n_exc; ++i)
617                         end_preds[main_end_bl_arity + i] = cf_pred[i];
618                 set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
619                 set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
620                 free(end_preds);
621         }
622         free(res_pred);
623         free(cf_pred);
624
625         /* --  Turn optimizations back on. -- */
626         set_optimize(rem_opt);
627         current_ir_graph = rem;
628
629         return 1;
630 }
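
/* Minimal usage sketch (not part of the original sources; the list head
 * "calls" filled with call_entry items is an assumption, as collect_calls()
 * below produces).  inline_method() relies on Phi lists and Proj links of the
 * caller, so they have to be collected first:
 *
 *     ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
 *     collect_phiprojs(irg);
 *     list_for_each_entry(call_entry, entry, &calls, list) {
 *             inline_method(entry->call, entry->callee);
 *     }
 *     ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_PHI_LIST);
 */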
631
632 /********************************************************************/
633 /* Apply inlining to small methods.                                 */
634 /********************************************************************/
635
636 static struct obstack  temp_obst;
637
638 /** Represents a possibly inlinable call in a graph. */
639 typedef struct call_entry {
640         ir_node    *call;       /**< The Call node. */
641         ir_graph   *callee;     /**< The callee IR-graph. */
642         list_head  list;        /**< List head for linking the next one. */
643         int        loop_depth;  /**< The loop depth of this call. */
644         int        benefice;    /**< The calculated benefice of this call. */
645         unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
646         unsigned   all_const:1; /**< Set if this call has only constant parameters. */
647 } call_entry;
648
649 /**
650  * environment for inlining small irgs
651  */
652 typedef struct inline_env_t {
653         struct obstack obst;  /**< An obstack where call_entries are allocated on. */
654         list_head      calls; /**< The call entry list. */
655 } inline_env_t;
656
657 /**
658  * Returns the irg called from a Call node. If the irg is not
659  * known, NULL is returned.
660  *
661  * @param call  the call node
662  */
663 static ir_graph *get_call_called_irg(ir_node *call)
664 {
665         ir_node *addr;
666
667         addr = get_Call_ptr(call);
668         if (is_SymConst_addr_ent(addr)) {
669                 ir_entity *ent = get_SymConst_entity(addr);
670                 /* we don't know which function gets finally bound to a weak symbol */
671                 if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
672                         return NULL;
673
674                 return get_entity_irg(ent);
675         }
676
677         return NULL;
678 }
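
/* Illustrative sketch (not part of the original sources): a direct call such
 * as foo(x) is represented roughly as
 *
 *     Call(SymConst(&foo), x)
 *
 * so get_Call_ptr() yields the SymConst and get_entity_irg() the graph of foo,
 * provided foo is defined in this compilation unit and is not a weak symbol. */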
679
680 /**
681  * Walker: Collect all calls to known graphs inside a graph.
682  */
683 static void collect_calls(ir_node *call, void *env)
684 {
685         (void) env;
686         if (is_Call(call)) {
687                 ir_graph *called_irg = get_call_called_irg(call);
688
689                 if (called_irg != NULL) {
690                         /* The Call node calls a locally defined method.  Remember to inline. */
691                         inline_env_t *ienv  = (inline_env_t*)env;
692                         call_entry   *entry = OALLOC(&ienv->obst, call_entry);
693                         entry->call       = call;
694                         entry->callee     = called_irg;
695                         entry->loop_depth = 0;
696                         entry->benefice   = 0;
697                         entry->local_adr  = 0;
698                         entry->all_const  = 0;
699
700                         list_add_tail(&entry->list, &ienv->calls);
701                 }
702         }
703 }
704
705 /**
706  * Inlines all small methods at call sites where the called address comes
707  * from a SymConst node that references the entity representing the called
708  * method.
709  * The size argument is a rough measure for the code size of the method:
710  * Methods where the obstack containing the firm graph is smaller than
711  * size are inlined.
712  */
713 void inline_small_irgs(ir_graph *irg, int size)
714 {
715         ir_graph *rem = current_ir_graph;
716         inline_env_t env;
717
718         current_ir_graph = irg;
719         /* Handle graph state */
720         assert(get_irg_phase_state(irg) != phase_building);
721         free_callee_info(irg);
722
723         /* Find Call nodes to inline.
724            (We cannot inline during a walk of the graph, as inlining the same
725            method several times changes the visited flag of the walked graph:
726            after the first inlining the visited flag of the callee equals that of
727            the caller.  With the next inlining both are increased.) */
728         obstack_init(&env.obst);
729         INIT_LIST_HEAD(&env.calls);
730         irg_walk_graph(irg, NULL, collect_calls, &env);
731
732         if (! list_empty(&env.calls)) {
733                 /* There are calls to inline */
734                 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
735                 collect_phiprojs(irg);
736
737                 list_for_each_entry(call_entry, entry, &env.calls, list) {
738                         ir_graph            *callee = entry->callee;
739                         irg_inline_property prop    = get_irg_inline_property(callee);
740
741                         if (prop == irg_inline_forbidden) {
742                                 continue;
743                         }
744
745                         if (prop >= irg_inline_forced ||
746                             _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
747                                 inline_method(entry->call, callee);
748                         }
749                 }
750                 ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
751         }
752         obstack_free(&env.obst, NULL);
753         current_ir_graph = rem;
754 }
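
/* Usage sketch (not part of the original sources; the threshold 50 is purely
 * illustrative): applying inline_small_irgs() to every graph of the program.
 *
 *     size_t n_irgs = get_irp_n_irgs();
 *     for (size_t i = 0; i < n_irgs; ++i)
 *             inline_small_irgs(get_irp_irg(i), 50);
 */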
755
756 typedef struct inline_small_irgs_pass_t {
757         ir_graph_pass_t pass;
758         int            size;
759 } inline_small_irgs_pass_t;
760
761 /**
762  * Wrapper to run inline_small_irgs() as a pass.
763  */
764 static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
765 {
766         inline_small_irgs_pass_t *pass = (inline_small_irgs_pass_t*)context;
767
768         inline_small_irgs(irg, pass->size);
769         return 0;
770 }
771
772 /* create a pass for inline_small_irgs() */
773 ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size)
774 {
775         inline_small_irgs_pass_t *pass = XMALLOCZ(inline_small_irgs_pass_t);
776
777         pass->size = size;
778         return def_graph_pass_constructor(
779                 &pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
780 }
781
782 /**
783  * Environment for inlining irgs.
784  */
785 typedef struct {
786         list_head calls;             /**< List of all call nodes in this graph. */
787         unsigned  *local_weights;    /**< Once allocated, the beneficial weight for transmitting local addresses. */
788         unsigned  n_nodes;           /**< Number of nodes in graph except Proj, Tuple, Sync, Block, Start, End. */
789         unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
790         unsigned  n_nodes_orig;      /**< for statistics */
791         unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
792         unsigned  n_call_nodes_orig; /**< for statistics */
793         unsigned  n_callers;         /**< Number of known graphs that call this graph. */
794         unsigned  n_callers_orig;    /**< for statistics */
795         unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
796         unsigned  recursive:1;       /**< Set, if this function is self recursive. */
797 } inline_irg_env;
798
799 /**
800  * Allocate a new environment for inlining.
801  */
802 static inline_irg_env *alloc_inline_irg_env(void)
803 {
804         inline_irg_env *env    = OALLOC(&temp_obst, inline_irg_env);
805         INIT_LIST_HEAD(&env->calls);
806         env->local_weights     = NULL;
807         env->n_nodes           = -2; /* do not count Start, End */
808         env->n_blocks          = -2; /* do not count Start and End blocks */
809         env->n_nodes_orig      = -2; /* do not count Start, End */
810         env->n_call_nodes      = 0;
811         env->n_call_nodes_orig = 0;
812         env->n_callers         = 0;
813         env->n_callers_orig    = 0;
814         env->got_inline        = 0;
815         env->recursive         = 0;
816         return env;
817 }
818
819 typedef struct walker_env {
820         inline_irg_env *x;     /**< the inline environment */
821         char ignore_runtime;   /**< the ignore runtime flag */
822         char ignore_callers;   /**< if set, do not update caller statistics */
823 } wenv_t;
824
825 /**
826  * post-walker: collect all calls in the inline-environment
827  * of a graph and sum some statistics.
828  */
829 static void collect_calls2(ir_node *call, void *ctx)
830 {
831         wenv_t         *env = (wenv_t*)ctx;
832         inline_irg_env *x = env->x;
833         unsigned        code = get_irn_opcode(call);
834         ir_graph       *callee;
835         call_entry     *entry;
836
837         /* count meaningful nodes in irg */
838         if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
839                 if (code != iro_Block) {
840                         ++x->n_nodes;
841                         ++x->n_nodes_orig;
842                 } else {
843                         ++x->n_blocks;
844                 }
845         }
846
847         if (code != iro_Call) return;
848
849         /* check if it's a runtime call */
850         if (env->ignore_runtime) {
851                 ir_node *symc = get_Call_ptr(call);
852
853                 if (is_SymConst_addr_ent(symc)) {
854                         ir_entity *ent = get_SymConst_entity(symc);
855
856                         if (get_entity_additional_properties(ent) & mtp_property_runtime)
857                                 return;
858                 }
859         }
860
861         /* collect all call nodes */
862         ++x->n_call_nodes;
863         ++x->n_call_nodes_orig;
864
865         callee = get_call_called_irg(call);
866         if (callee != NULL) {
867                 if (! env->ignore_callers) {
868                         inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
869                         /* count all static callers */
870                         ++callee_env->n_callers;
871                         ++callee_env->n_callers_orig;
872                 }
873                 if (callee == current_ir_graph)
874                         x->recursive = 1;
875
876                 /* link it in the list of possible inlinable entries */
877                 entry = OALLOC(&temp_obst, call_entry);
878                 entry->call       = call;
879                 entry->callee     = callee;
880                 entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
881                 entry->benefice   = 0;
882                 entry->local_adr  = 0;
883                 entry->all_const  = 0;
884
885                 list_add_tail(&entry->list, &x->calls);
886         }
887 }
888
889 /**
890  * Returns TRUE if the number of call nodes is 0 in the irg's environment,
891  * hence this irg is a leaf (it calls no other graphs).
892  */
893 inline static int is_leaf(ir_graph *irg)
894 {
895         inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
896         return env->n_call_nodes == 0;
897 }
898
899 /**
900  * Returns TRUE if the number of nodes in the callee is
901  * smaller than size in the irg's environment.
902  */
903 inline static int is_smaller(ir_graph *callee, unsigned size)
904 {
905         inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
906         return env->n_nodes < size;
907 }
908
909 /**
910  * Duplicate a call entry.
911  *
912  * @param entry     the original entry to duplicate
913  * @param new_call  the new call node
914  * @param loop_depth_delta
915  *                  delta value for the loop depth
916  */
917 static call_entry *duplicate_call_entry(const call_entry *entry,
918                                         ir_node *new_call, int loop_depth_delta)
919 {
920         call_entry *nentry = OALLOC(&temp_obst, call_entry);
921         nentry->call       = new_call;
922         nentry->callee     = entry->callee;
923         nentry->benefice   = entry->benefice;
924         nentry->loop_depth = entry->loop_depth + loop_depth_delta;
925         nentry->local_adr  = entry->local_adr;
926         nentry->all_const  = entry->all_const;
927
928         return nentry;
929 }
930
931 /**
932  * Append all call nodes of the source environment to the call list of the destination
933  * environment.
934  *
935  * @param dst         destination environment
936  * @param src         source environment
937  * @param loop_depth  the loop depth of the call that is replaced by the src list
938  */
939 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
940 {
941         call_entry *nentry;
942
943         /* Note that the src list points to Call nodes in the inlined graph, but
944            we need Call nodes in our graph. Luckily the inliner leaves this information
945            in the link field. */
946         list_for_each_entry(call_entry, entry, &src->calls, list) {
947                 nentry = duplicate_call_entry(entry, (ir_node*)get_irn_link(entry->call), loop_depth);
948                 list_add_tail(&nentry->list, &dst->calls);
949         }
950         dst->n_call_nodes += src->n_call_nodes;
951         dst->n_nodes      += src->n_nodes;
952 }
953
954 /*
955  * Inlines small leaf methods at call sites where the called address comes
956  * from a SymConst node that references the entity representing the called
957  * method.
958  * The size argument is a rough measure for the code size of the method:
959  * Methods where the obstack containing the firm graph is smaller than
960  * size are inlined.
961  */
962 void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
963                            unsigned size, int ignore_runtime)
964 {
965         inline_irg_env   *env;
966         ir_graph         *irg;
967         size_t           i, n_irgs;
968         ir_graph         *rem;
969         int              did_inline;
970         wenv_t           wenv;
971         pmap             *copied_graphs;
972         pmap_entry       *pm_entry;
973
974         rem = current_ir_graph;
975         obstack_init(&temp_obst);
976
977         /* a map for the copied graphs, used to inline recursive calls */
978         copied_graphs = pmap_create();
979
980         /* extend all irgs by a temporary data structure for inlining. */
981         n_irgs = get_irp_n_irgs();
982         for (i = 0; i < n_irgs; ++i)
983                 set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
984
985         /* Pre-compute information in temporary data structure. */
986         wenv.ignore_runtime = ignore_runtime;
987         wenv.ignore_callers = 0;
988         for (i = 0; i < n_irgs; ++i) {
989                 ir_graph *irg = get_irp_irg(i);
990
991                 assert(get_irg_phase_state(irg) != phase_building);
992                 free_callee_info(irg);
993
994                 assure_irg_properties(irg,
995                         IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
996                 wenv.x = (inline_irg_env*)get_irg_link(irg);
997                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
998                 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
999         }
1000
1001         /* -- and now inline. -- */
1002
1003         /* Inline leaves recursively -- we might construct new leaves. */
1004         do {
1005                 did_inline = 0;
1006
1007                 for (i = 0; i < n_irgs; ++i) {
1008                         ir_node *call;
1009                         int phiproj_computed = 0;
1010
1011                         current_ir_graph = get_irp_irg(i);
1012                         env              = (inline_irg_env*)get_irg_link(current_ir_graph);
1013
1014                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1015                         list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1016                                 ir_graph            *callee;
1017                                 irg_inline_property  prop;
1018
1019                                 if (env->n_nodes > maxsize)
1020                                         break;
1021
1022                                 call   = entry->call;
1023                                 callee = entry->callee;
1024
1025                                 prop = get_irg_inline_property(callee);
1026                                 if (prop == irg_inline_forbidden) {
1027                                         continue;
1028                                 }
1029
1030                                 if (is_leaf(callee) && (
1031                                     is_smaller(callee, leafsize) || prop >= irg_inline_forced)) {
1032                                         if (!phiproj_computed) {
1033                                                 phiproj_computed = 1;
1034                                                 collect_phiprojs(current_ir_graph);
1035                                         }
1036                                         did_inline = inline_method(call, callee);
1037
1038                                         if (did_inline) {
1039                                                 inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
1040
1041                                                 /* call was inlined, Phi/Projs for current graph must be recomputed */
1042                                                 phiproj_computed = 0;
1043
1044                                                 /* Do some statistics */
1045                                                 env->got_inline = 1;
1046                                                 --env->n_call_nodes;
1047                                                 env->n_nodes += callee_env->n_nodes;
1048                                                 --callee_env->n_callers;
1049
1050                                                 /* remove this call from the list */
1051                                                 list_del(&entry->list);
1052                                                 continue;
1053                                         }
1054                                 }
1055                         }
1056                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1057                 }
1058         } while (did_inline);
1059
1060         /* inline other small functions. */
1061         for (i = 0; i < n_irgs; ++i) {
1062                 ir_node *call;
1063                 int phiproj_computed = 0;
1064
1065                 current_ir_graph = get_irp_irg(i);
1066                 env              = (inline_irg_env*)get_irg_link(current_ir_graph);
1067
1068                 ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1069
1070                 /* note that the list of possible calls is updated during the process */
1071                 list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
1072                         irg_inline_property prop;
1073                         ir_graph            *callee;
1074                         ir_graph            *calleee;
1075
1076                         call   = entry->call;
1077                         callee = entry->callee;
1078
1079                         prop = get_irg_inline_property(callee);
1080                         if (prop == irg_inline_forbidden) {
1081                                 continue;
1082                         }
1083
1084                         calleee = pmap_get(ir_graph, copied_graphs, callee);
1085                         if (calleee != NULL) {
1086                                 /*
1087                                  * Remap callee if we have a copy.
1088                                  * FIXME: Should we do this only for recursive Calls ?
1089                                  */
1090                                 callee = calleee;
1091                         }
1092
1093                         if (prop >= irg_inline_forced ||
1094                             (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
1095                                 if (current_ir_graph == callee) {
1096                                         /*
1097                                          * Recursive call: we cannot directly inline because we cannot walk
1098                                          * the graph and change it. So we have to make a copy of the graph
1099                                          * first.
1100                                          */
1101
1102                                         inline_irg_env *callee_env;
1103                                         ir_graph       *copy;
1104
1105                                         ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1106
1107                                         /*
1108                                          * No copy yet, create one.
1109                                          * Note that recursive methods are never leaves, so it is sufficient
1110                                          * to test this condition here.
1111                                          */
1112                                         copy = create_irg_copy(callee);
1113
1114                                         /* create_irg_copy() destroys the Proj links, recompute them */
1115                                         phiproj_computed = 0;
1116
1117                                         ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1118
1119                                         /* allocate new environment */
1120                                         callee_env = alloc_inline_irg_env();
1121                                         set_irg_link(copy, callee_env);
1122
1123                                         assure_irg_properties(copy,
1124                                                 IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
1125                                         wenv.x              = callee_env;
1126                                         wenv.ignore_callers = 1;
1127                                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1128
1129                                         /*
1130                                          * Enter the entity of the original graph. This is needed
1131                                          * for inline_method(). However, note that ent->irg still points
1132                                          * to callee, NOT to copy.
1133                                          */
1134                                         set_irg_entity(copy, get_irg_entity(callee));
1135
1136                                         pmap_insert(copied_graphs, callee, copy);
1137                                         callee = copy;
1138
1139                                         /* we have only one caller: the original graph */
1140                                         callee_env->n_callers      = 1;
1141                                         callee_env->n_callers_orig = 1;
1142                                 }
1143                                 if (! phiproj_computed) {
1144                                         phiproj_computed = 1;
1145                                         collect_phiprojs(current_ir_graph);
1146                                 }
1147                                 did_inline = inline_method(call, callee);
1148                                 if (did_inline) {
1149                                         inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
1150
1151                                         /* call was inlined, Phi/Projs for current graph must be recomputed */
1152                                         phiproj_computed = 0;
1153
1154                                         /* callee was inlined. Append its call list. */
1155                                         env->got_inline = 1;
1156                                         --env->n_call_nodes;
1157                                         append_call_list(env, callee_env, entry->loop_depth);
1158                                         --callee_env->n_callers;
1159
1160                                         /* after we have inlined callee, all called methods inside callee
1161                                            are now called once more */
1162                                         list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
1163                                                 inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
1164                                                 ++penv->n_callers;
1165                                         }
1166
1167                                         /* remove this call from the list */
1168                                         list_del(&entry->list);
1169                                         continue;
1170                                 }
1171                         }
1172                 }
1173                 ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1174         }
1175
1176         for (i = 0; i < n_irgs; ++i) {
1177                 irg = get_irp_irg(i);
1178                 env = (inline_irg_env*)get_irg_link(irg);
1179
1180                 if (env->got_inline) {
1181                         optimize_graph_df(irg);
1182                         optimize_cf(irg);
1183                 }
1184                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1185                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1186                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1187                         env->n_callers_orig, env->n_callers,
1188                         get_entity_name(get_irg_entity(irg))));
1189                 }
1190         }
1191
1192         /* kill the copied graphs: we don't need them anymore */
1193         foreach_pmap(copied_graphs, pm_entry) {
1194                 ir_graph *copy = (ir_graph*)pm_entry->value;
1195
1196                 /* reset the entity, otherwise it will be deleted in the next step ... */
1197                 set_irg_entity(copy, NULL);
1198                 free_ir_graph(copy);
1199         }
1200         pmap_destroy(copied_graphs);
1201
1202         obstack_free(&temp_obst, NULL);
1203         current_ir_graph = rem;
1204 }
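
/* Usage sketch (not part of the original sources; the concrete limits are
 * illustrative, not recommended defaults).  maxsize bounds the node growth of
 * a caller, leafsize limits leaf callees in the first phase, size limits
 * other callees, and a non-zero last argument skips runtime calls:
 *
 *     inline_leaf_functions(750, 16, 25, 0);
 */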
1205
1206 typedef struct inline_leaf_functions_pass_t {
1207         ir_prog_pass_t pass;
1208         unsigned       maxsize;
1209         unsigned       leafsize;
1210         unsigned       size;
1211         int            ignore_runtime;
1212 } inline_leaf_functions_pass_t;
1213
1214 /**
1215  * Wrapper to run inline_leaf_functions() as a ir_prog pass.
1216  */
1217 static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
1218 {
1219         inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
1220
1221         (void)irp;
1222         inline_leaf_functions(
1223                 pass->maxsize, pass->leafsize,
1224                 pass->size, pass->ignore_runtime);
1225         return 0;
1226 }
1227
1228 /* create a pass for inline_leaf_functions() */
1229 ir_prog_pass_t *inline_leaf_functions_pass(
1230         const char *name, unsigned maxsize, unsigned leafsize,
1231         unsigned size, int ignore_runtime)
1232 {
1233         inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
1234
1235         pass->maxsize        = maxsize;
1236         pass->leafsize       = leafsize;
1237         pass->size           = size;
1238         pass->ignore_runtime = ignore_runtime;
1239
1240         return def_prog_pass_constructor(
1241                 &pass->pass,
1242                 name ? name : "inline_leaf_functions",
1243                 inline_leaf_functions_wrapper);
1244 }
1245
1246 /**
1247  * Calculate the parameter weights for transmitting the address of a local variable.
1248  */
1249 static unsigned calc_method_local_weight(ir_node *arg)
1250 {
1251         int      i, j, k;
1252         unsigned v, weight = 0;
1253
1254         for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
1255                 ir_node *succ = get_irn_out(arg, i);
1256
1257                 switch (get_irn_opcode(succ)) {
1258                 case iro_Load:
1259                 case iro_Store:
1260                         /* Loads and Stores can be removed */
1261                         weight += 3;
1262                         break;
1263                 case iro_Sel:
1264                         /* check if all indexes are constant */
1265                         for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
1266                                 ir_node *idx = get_Sel_index(succ, j);
1267                                 if (! is_Const(idx))
1268                                         return 0;
1269                         }
1270                         /* Check users on this Sel. Note: if a 0 is returned here, there was
1271                            some unsupported node. */
1272                         v = calc_method_local_weight(succ);
1273                         if (v == 0)
1274                                 return 0;
1275                         /* we can kill one Sel with constant indexes; this is cheap */
1276                         weight += v + 1;
1277                         break;
1278                 case iro_Id:
1279                         /* when looking backward we might find Id nodes */
1280                         weight += calc_method_local_weight(succ);
1281                         break;
1282                 case iro_Tuple:
1283                         /* unoptimized tuple */
1284                         for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
1285                                 ir_node *pred = get_Tuple_pred(succ, j);
1286                                 if (pred == arg) {
1287                                         /* look for Proj(j) */
1288                                         for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
1289                                                 ir_node *succ_succ = get_irn_out(succ, k);
1290                                                 if (is_Proj(succ_succ)) {
1291                                                         if (get_Proj_proj(succ_succ) == j) {
1292                                                                 /* found */
1293                                                                 weight += calc_method_local_weight(succ_succ);
1294                                                         }
1295                                                 } else {
1296                                                         /* this should NOT happen */
1297                                                         return 0;
1298                                                 }
1299                                         }
1300                                 }
1301                         }
1302                         break;
1303                 default:
1304                         /* any other node: not supported yet, or unsafe. */
1305                         return 0;
1306                 }
1307         }
1308         return weight;
1309 }
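/*
 * Worked example (derived from the cases above): if the address argument is
 * used by one Load, one Store and one Sel with constant indexes whose result
 * feeds another Load, the weight is 3 (Load) + 3 (Store) + (3 + 1) (Sel) = 10.
 * Any user that falls into the default case makes the whole access chain
 * unsupported and the result drops to 0.
 */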
1310
1311 /**
1312  * Calculate the local-variable weights for all parameters of the given graph.
1313  */
1314 static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
1315 {
1316         ir_entity *ent = get_irg_entity(irg);
1317         ir_type  *mtp;
1318         size_t   nparams;
1319         int      i;
1320         long     proj_nr;
1321         ir_node  *irg_args, *arg;
1322
1323         mtp      = get_entity_type(ent);
1324         nparams  = get_method_n_params(mtp);
1325
1326         /* allocate a new array; its existence also serves as the 'analysed' flag */
1327         env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);
1328
1329         /* If the method has no parameters, we have nothing to do. */
1330         if (nparams <= 0)
1331                 return;
1332
1333         assure_irg_outs(irg);
1334         irg_args = get_irg_args(irg);
1335         for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
1336                 arg     = get_irn_out(irg_args, i);
1337                 proj_nr = get_Proj_proj(arg);
1338                 env->local_weights[proj_nr] = calc_method_local_weight(arg);
1339         }
1340 }
1341
1342 /**
1343  * Calculate the benefice for transmitting a local variable address.
1344  * After inlining, the local variable might be transformed into an
1345  * SSA variable by scalar_replacement().
1346  */
1347 static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
1348 {
1349         inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
1350
1351         if (env->local_weights == NULL)
1352                 analyze_irg_local_weights(env, callee);
1353
1354         if (pos < ARR_LEN(env->local_weights))
1355                 return env->local_weights[pos];
1356         return 0;
1357 }
1358
1359 /**
1360  * Calculate a benefice value for inlining the given call.
1361  *
1362  * @param call       the call node we have to inspect
1363  * @param callee     the called graph
1364  */
1365 static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
1366 {
1367         ir_node   *call = entry->call;
1368         ir_entity *ent  = get_irg_entity(callee);
1369         ir_type   *callee_frame;
1370         size_t    i, n_members, n_params;
1371         ir_node   *frame_ptr;
1372         ir_type   *mtp;
1373         int       weight = 0;
1374         int       all_const;
1375         unsigned  cc, v;
1376         irg_inline_property prop;
1377
1378         inline_irg_env *callee_env;
1379
1380         prop = get_irg_inline_property(callee);
1381         if (prop == irg_inline_forbidden) {
1382                 DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
1383                     call, callee));
1384                 return entry->benefice = INT_MIN;
1385         }
1386
1387         callee_frame = get_irg_frame_type(callee);
1388         n_members = get_class_n_members(callee_frame);
1389         for (i = 0; i < n_members; ++i) {
1390                 ir_entity *frame_ent = get_class_member(callee_frame, i);
1391                 if (is_parameter_entity(frame_ent)) {
1392                         // TODO inliner should handle parameter entities by inserting Store operations
1393                         DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden due to parameter entity\n", call, callee));
1394                         set_irg_inline_property(callee, irg_inline_forbidden);
1395                         return entry->benefice = INT_MIN;
1396                 }
1397         }
1398
1399         if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
1400                 DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn function\n",
1401                     call, callee));
1402                 return entry->benefice = INT_MIN;
1403         }
1404
1405         /* costs for every passed parameter */
1406         n_params = get_Call_n_params(call);
1407         mtp      = get_entity_type(ent);
1408         cc       = get_method_calling_convention(mtp);
1409         if (cc & cc_reg_param) {
1410                 /* register calling convention: register parameters are cheaper */
1411                 size_t max_regs = cc & ~cc_bits;
1412
1413                 if (max_regs < n_params)
1414                         weight += max_regs * 2 + (n_params - max_regs) * 5;
1415                 else
1416                         weight += n_params * 2;
1417         } else {
1418                 /* parameters are passed on the stack */
1419                 weight += 5 * n_params;
1420         }
1421
1422         /* constant parameters improve the benefice */
1423         frame_ptr = get_irg_frame(current_ir_graph);
1424         all_const = 1;
1425         for (i = 0; i < n_params; ++i) {
1426                 ir_node *param = get_Call_param(call, i);
1427
1428                 if (is_Const(param)) {
1429                         weight += get_method_param_weight(ent, i);
1430                 } else {
1431                         all_const = 0;
1432                         if (is_SymConst(param))
1433                                 weight += get_method_param_weight(ent, i);
1434                         else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
1435                                 /*
1436                                  * An address of a local variable is transmitted. After
1437                                  * inlining, scalar_replacement might be able to remove the
1438                                  * local variable, so honor this.
1439                                  */
1440                                 v = get_method_local_adress_weight(callee, i);
1441                                 weight += v;
1442                                 if (v > 0)
1443                                         entry->local_adr = 1;
1444                         }
1445                 }
1446         }
1447         entry->all_const = all_const;
1448
1449         callee_env = (inline_irg_env*)get_irg_link(callee);
1450         if (callee_env->n_callers == 1 &&
1451             callee != current_ir_graph &&
1452             !entity_is_externally_visible(ent)) {
1453                 weight += 700;
1454         }
1455
1456         /* give a bonus for functions with one block */
1457         if (callee_env->n_blocks == 1)
1458                 weight = weight * 3 / 2;
1459
1460         /* and one for small non-recursive functions: we want them to be inlined in almost every case */
1461         if (callee_env->n_nodes < 30 && !callee_env->recursive)
1462                 weight += 2000;
1463
1464         /* and finally for leaf functions: they do not increase the register pressure
1465            because of callee-save registers */
1466         if (callee_env->n_call_nodes == 0)
1467                 weight += 400;
1468
1469         /* it is important to inline inner loops first */
1470         if (entry->loop_depth > 30)
1471                 weight += 30 * 1024;
1472         else
1473                 weight += entry->loop_depth * 1024;
1474
1475         /*
1476          * All arguments constant is probably a good sign, give an extra bonus
1477          */
1478         if (all_const)
1479                 weight += 1024;
1480
1481         return entry->benefice = weight;
1482 }
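/*
 * Worked example (illustrative, assuming get_method_param_weight() adds
 * nothing): a static callee with a single caller, one block, no calls,
 * fewer than 30 nodes, reached at loop depth 2 with two constant stack
 * parameters accumulates
 *
 *     2 * 5 (stack parameters) + 700 (single static caller) =  710
 *     710 * 3 / 2 (single block)                            = 1065
 *     + 2000 (small, non-recursive) + 400 (leaf)            = 3465
 *     + 2 * 1024 (loop depth) + 1024 (all const)            = 6537
 */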
1483
1484 typedef struct walk_env_t {
1485         ir_graph **irgs;
1486         size_t   last_irg;
1487 } walk_env_t;
1488
1489 /**
1490  * Callgraph walker, collect all visited graphs.
1491  */
1492 static void callgraph_walker(ir_graph *irg, void *data)
1493 {
1494         walk_env_t *env = (walk_env_t *)data;
1495         env->irgs[env->last_irg++] = irg;
1496 }
1497
1498 /**
1499  * Creates an inline order for all graphs.
1500  *
1501  * @return the list of graphs.
1502  */
1503 static ir_graph **create_irg_list(void)
1504 {
1505         ir_entity  **free_methods;
1506         size_t     n_irgs = get_irp_n_irgs();
1507         walk_env_t env;
1508
1509         cgana(&free_methods);
1510         xfree(free_methods);
1511
1512         compute_callgraph();
1513
1514         env.irgs     = XMALLOCNZ(ir_graph*, n_irgs);
1515         env.last_irg = 0;
1516
1517         callgraph_walk(NULL, callgraph_walker, &env);
1518         assert(n_irgs == env.last_irg);
1519
1520         free_callgraph();
1521
1522         return env.irgs;
1523 }
1524
1525 /**
1526  * Push a call onto the priority queue if its benefice is big enough.
1527  *
1528  * @param pqueue   the priority queue of calls
1529  * @param call     the call entry
1530  * @param inline_threshold
1531  *                 the threshold value
1532  */
1533 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
1534                             int inline_threshold)
1535 {
1536         ir_graph            *callee  = call->callee;
1537         irg_inline_property prop     = get_irg_inline_property(callee);
1538         int                 benefice = calc_inline_benefice(call, callee);
1539
1540         DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
1541             get_irn_irg(call->call), call->call, callee, benefice));
1542
1543         if (prop < irg_inline_forced && benefice < inline_threshold) {
1544                 return;
1545         }
1546
1547         pqueue_put(pqueue, call, benefice);
1548 }
1549
1550 /**
1551  * Try to inline calls into a graph.
1552  *
1553  * @param irg      the graph into which we inline
1554  * @param maxsize  do NOT inline if the size of irg gets
1555  *                 bigger than this amount
1556  * @param inline_threshold
1557  *                 threshold value for inline decision
1558  * @param copied_graphs
1559  *                 map containing copies of recursive graphs
1560  */
1561 static void inline_into(ir_graph *irg, unsigned maxsize,
1562                         int inline_threshold, pmap *copied_graphs)
1563 {
1564         int            phiproj_computed = 0;
1565         inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
1566         wenv_t         wenv;
1567         pqueue_t       *pqueue;
1568
1569         if (env->n_call_nodes == 0)
1570                 return;
1571
1572         if (env->n_nodes > maxsize) {
1573                 DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
1574                 return;
1575         }
1576
1577         current_ir_graph = irg;
1578         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1579
1580         /* put irgs into the pqueue */
1581         pqueue = new_pqueue();
1582
1583         list_for_each_entry(call_entry, curr_call, &env->calls, list) {
1584                 assert(is_Call(curr_call->call));
1585                 maybe_push_call(pqueue, curr_call, inline_threshold);
1586         }
1587
1588         /* note that the list of possible calls is updated during the process */
1589         while (!pqueue_empty(pqueue)) {
1590                 int                 did_inline;
1591                 call_entry          *curr_call  = (call_entry*)pqueue_pop_front(pqueue);
1592                 ir_graph            *callee     = curr_call->callee;
1593                 ir_node             *call_node  = curr_call->call;
1594                 inline_irg_env      *callee_env = (inline_irg_env*)get_irg_link(callee);
1595                 irg_inline_property prop        = get_irg_inline_property(callee);
1596                 ir_graph            *callee_copy;
1597                 int                 loop_depth;
1598
1599                 if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
1600                         DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
1601                                                 env->n_nodes, callee, callee_env->n_nodes));
1602                         continue;
1603                 }
1604
1605                 callee_copy = pmap_get(ir_graph, copied_graphs, callee);
1606                 if (callee_copy != NULL) {
1607                         int benefice = curr_call->benefice;
1608                         /*
1609                          * Reduce the benefice for recursive functions if not all arguments
1610                          * are constant: inlining recursive functions is rarely profitable.
1611                          */
1612                         if (!curr_call->all_const)
1613                                 benefice -= 2000;
1614                         if (benefice < inline_threshold)
1615                                 continue;
1616
1617                         /*
1618                          * Remap callee if we have a copy.
1619                          */
1620                         callee     = callee_copy;
1621                         callee_env = (inline_irg_env*)get_irg_link(callee);
1622                 }
1623
1624                 if (current_ir_graph == callee) {
1625                         /*
1626                          * Recursive call: we cannot directly inline because we cannot
1627                          * walk the graph and change it. So we have to make a copy of
1628                          * the graph first.
1629                          */
1630                         int benefice = curr_call->benefice;
1631                         ir_graph *copy;
1632
1633                         /*
1634                          * Reduce the benefice for recursive functions if not all arguments
1635                          * are constant: inlining recursive functions is rarely profitable.
1636                          */
1637                         if (!curr_call->all_const)
1638                                 benefice -= 2000;
1639                         if (benefice < inline_threshold)
1640                                 continue;
1641
1642                         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1643
1644                         /*
1645                          * No copy yet, create one.
1646                          * Note that recursive methods are never leaf functions, so it is
1647                          * sufficient to test this condition here.
1648                          */
1649                         copy = create_irg_copy(callee);
1650
1651                         /* create_irg_copy() destroys the Proj links, recompute them */
1652                         phiproj_computed = 0;
1653
1654                         ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1655
1656                         /* allocate a new environment */
1657                         callee_env = alloc_inline_irg_env();
1658                         set_irg_link(copy, callee_env);
1659
1660                         assure_irg_properties(copy, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
1661                         memset(&wenv, 0, sizeof(wenv));
1662                         wenv.x              = callee_env;
1663                         wenv.ignore_callers = 1;
1664                         irg_walk_graph(copy, NULL, collect_calls2, &wenv);
1665
1666                         /*
1667                          * Enter the entity of the original graph. This is needed
1668                          * for inline_method(). However, note that ent->irg still points
1669                          * to callee, NOT to copy.
1670                          */
1671                         set_irg_entity(copy, get_irg_entity(callee));
1672
1673                         pmap_insert(copied_graphs, callee, copy);
1674                         callee = copy;
1675
1676                         /* we have only one caller: the original graph */
1677                         callee_env->n_callers      = 1;
1678                         callee_env->n_callers_orig = 1;
1679                 }
1680                 if (! phiproj_computed) {
1681                         phiproj_computed = 1;
1682                         collect_phiprojs(current_ir_graph);
1683                 }
1684                 did_inline = inline_method(call_node, callee);
1685                 if (!did_inline)
1686                         continue;
1687
1688                 /* call was inlined, Phi/Projs for current graph must be recomputed */
1689                 phiproj_computed = 0;
1690
1691                 /* remove it from the caller list */
1692                 list_del(&curr_call->list);
1693
1694                 /* callee was inlined; append its call list. */
1695                 env->got_inline = 1;
1696                 --env->n_call_nodes;
1697
1698                 /* the inlined body may contain further calls; register them as well */
1699                 loop_depth = curr_call->loop_depth;
1700                 list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
1701                         inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
1702                         ir_node        *new_call;
1703                         call_entry     *new_entry;
1704
1705                         /* after we have inlined callee, all called methods inside
1706                          * callee are now called once more */
1707                         ++penv->n_callers;
1708
1709                         /* Note that the src list points to Call nodes in the inlined graph,
1710                          * but we need Call nodes in our graph. Luckily the inliner leaves
1711                          * this information in the link field. */
1712                         new_call = (ir_node*)get_irn_link(centry->call);
1713                         if (get_irn_irg(new_call) != irg) {
1714                                 /* centry->call has not been copied, which means it is dead.
1715                                  * This can happen during inlining if a const function that
1716                                  * cannot be inlined is only used as an unused argument of
1717                                  * another function that is inlined. */
1718                                 continue;
1719                         }
1720                         assert(is_Call(new_call));
1721
1722                         new_entry = duplicate_call_entry(centry, new_call, loop_depth);
1723                         list_add_tail(&new_entry->list, &env->calls);
1724                         maybe_push_call(pqueue, new_entry, inline_threshold);
1725                 }
1726
1727                 env->n_call_nodes += callee_env->n_call_nodes;
1728                 env->n_nodes += callee_env->n_nodes;
1729                 --callee_env->n_callers;
1730         }
1731         ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
1732         del_pqueue(pqueue);
1733 }
1734
1735 /*
1736  * Heuristic inliner. Calculates a benefice value for every call and inlines
1737  * those calls with a value higher than the threshold.
1738  */
1739 void inline_functions(unsigned maxsize, int inline_threshold,
1740                       opt_ptr after_inline_opt)
1741 {
1742         inline_irg_env   *env;
1743         size_t           i, n_irgs;
1744         ir_graph         *rem;
1745         wenv_t           wenv;
1746         pmap             *copied_graphs;
1747         pmap_entry       *pm_entry;
1748         ir_graph         **irgs;
1749
1750         rem = current_ir_graph;
1751         obstack_init(&temp_obst);
1752
1753         irgs = create_irg_list();
1754
1755         /* a map for the copied graphs, used to inline recursive calls */
1756         copied_graphs = pmap_create();
1757
1758         /* extend all irgs by a temporary data structure for inlining. */
1759         n_irgs = get_irp_n_irgs();
1760         for (i = 0; i < n_irgs; ++i)
1761                 set_irg_link(irgs[i], alloc_inline_irg_env());
1762
1763         /* Pre-compute information in temporary data structure. */
1764         wenv.ignore_runtime = 0;
1765         wenv.ignore_callers = 0;
1766         for (i = 0; i < n_irgs; ++i) {
1767                 ir_graph *irg = irgs[i];
1768
1769                 free_callee_info(irg);
1770
1771                 wenv.x = (inline_irg_env*)get_irg_link(irg);
1772                 assure_loopinfo(irg);
1773                 irg_walk_graph(irg, NULL, collect_calls2, &wenv);
1774         }
1775
1776         /* -- and now inline. -- */
1777         for (i = 0; i < n_irgs; ++i) {
1778                 ir_graph *irg = irgs[i];
1779
1780                 inline_into(irg, maxsize, inline_threshold, copied_graphs);
1781         }
1782
1783         for (i = 0; i < n_irgs; ++i) {
1784                 ir_graph *irg = irgs[i];
1785
1786                 env = (inline_irg_env*)get_irg_link(irg);
1787                 if (env->got_inline && after_inline_opt != NULL) {
1788                         /* this irg got calls inlined: optimize it */
1789                         after_inline_opt(irg);
1790                 }
1791                 if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
1792                         DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
1793                         env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
1794                         env->n_callers_orig, env->n_callers,
1795                         get_entity_name(get_irg_entity(irg))));
1796                 }
1797         }
1798
1799         /* kill the copied graphs: we don't need them anymore */
1800         foreach_pmap(copied_graphs, pm_entry) {
1801                 ir_graph *copy = (ir_graph*)pm_entry->value;
1802
1803                 /* reset the entity, otherwise it will be deleted in the next step ... */
1804                 set_irg_entity(copy, NULL);
1805                 free_ir_graph(copy);
1806         }
1807         pmap_destroy(copied_graphs);
1808
1809         xfree(irgs);
1810
1811         obstack_free(&temp_obst, NULL);
1812         current_ir_graph = rem;
1813 }
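/*
 * Usage sketch (illustrative only, not part of libFirm): a driver could run
 * the heuristic inliner directly.  The size limit, threshold and the cleanup
 * callback are made-up example values/names; the callback only has to match
 * opt_ptr, i.e. it receives each graph that had calls inlined into it.
 *
 *     static void cleanup_after_inline(ir_graph *irg)
 *     {
 *             // run whatever per-graph cleanup the driver wants here
 *             (void)irg;
 *     }
 *
 *     inline_functions(750, 0, cleanup_after_inline);
 */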
1814
1815 typedef struct inline_functions_pass_t {
1816         ir_prog_pass_t pass;
1817         unsigned       maxsize;
1818         int            inline_threshold;
1819         opt_ptr        after_inline_opt;
1820 } inline_functions_pass_t;
1821
1822 /**
1823  * Wrapper to run inline_functions() as an ir_prog pass.
1824  */
1825 static int inline_functions_wrapper(ir_prog *irp, void *context)
1826 {
1827         inline_functions_pass_t *pass = (inline_functions_pass_t*)context;
1828
1829         (void)irp;
1830         inline_functions(pass->maxsize, pass->inline_threshold,
1831                          pass->after_inline_opt);
1832         return 0;
1833 }
1834
1835 /* create an ir_prog pass for inline_functions() */
1836 ir_prog_pass_t *inline_functions_pass(
1837           const char *name, unsigned maxsize, int inline_threshold,
1838           opt_ptr after_inline_opt)
1839 {
1840         inline_functions_pass_t *pass = XMALLOCZ(inline_functions_pass_t);
1841
1842         pass->maxsize          = maxsize;
1843         pass->inline_threshold = inline_threshold;
1844         pass->after_inline_opt = after_inline_opt;
1845
1846         return def_prog_pass_constructor(
1847                 &pass->pass, name ? name : "inline_functions",
1848                 inline_functions_wrapper);
1849 }
1850
1851 void firm_init_inline(void)
1852 {
1853         FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
1854 }