 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 *
 * @brief   Dead node elimination and Procedure Inlining.
 * @author  Michael Beck, Goetz Lindenmaier
#include "irgraph_t.h"
#include "iroptimize.h"
#include "irbackedge_t.h"
#include "analyze_irg_args.h"
#include "iredges_t.h"
#include "iropt_dbg.h"
#include "irphase_t.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/*------------------------------------------------------------------*/
/* Routines for dead node elimination / copying garbage collection  */
/*------------------------------------------------------------------*/

/**
 * Remember the new node in the old node by using a field all nodes have.
 */
static void set_new_node(ir_node *node, ir_node *new_node)
{
    set_irn_link(node, new_node);
}

/**
 * Get this new node, before the old node is forgotten.
 */
static inline ir_node *get_new_node(ir_node *old_node)
{
    assert(irn_visited(old_node));
    return (ir_node*) get_irn_link(old_node);
}

/** pointers to the new phases */
static ir_phase *new_phases[PHASE_LAST];
/**
 * Copies a node to the new obstack.  The ins of the new node point to
 * the predecessors on the old obstack.  For block/phi nodes not all
 * predecessors might be copied.  n->link points to the new node.
 *
 * @param n    The node to be copied
 * @param irg  The irg onto which the node should be copied
 */
static ir_node *copy_node(ir_node *n, ir_graph *irg)
{
    ir_op    *op    = get_irn_op(n);
    int       arity = get_irn_arity(n);
    dbg_info *dbgi  = get_irn_dbg_info(n);
    ir_mode  *mode  = get_irn_mode(n);
    ir_node  *nn;
    ir_node  *block;
    int       i;
    if (op == op_Block) {
        block = NULL;
        n->attr.block.graph_arr = NULL;
    } else {
        block = get_nodes_block(n);
    }
    if (op->opar == oparity_dynamic) {
        nn = new_ir_node(dbgi, irg, block, op, mode, -1, NULL);
        for (i = 0; i < arity; ++i) {
            ir_node *in = get_irn_n(n, i);
            add_irn_n(nn, in);
        }
    } else {
        ir_node **ins = get_irn_in(n)+1;  /* skip the block input */
        nn = new_ir_node(dbgi, irg, block, op, mode, arity, ins);
    }
    /* Copy the attributes.  These might point to additional data.  If this
       was allocated on the old obstack the pointers are now dangling. */
    copy_node_attr(irg, n, nn);
    if (op == op_Block) {
        /* we cannot allow blocks WITHOUT a macroblock input */
        set_Block_MacroBlock(nn, get_Block_MacroBlock(n));
    }

    /* copy the phase information for this node */
    for (i = PHASE_NOT_IRG_MANAGED+1; i < PHASE_LAST; i++) {
        ir_graph *old_irg = get_irn_irg(n);
        ir_phase *old_ph  = get_irg_phase(old_irg, i);
        if (old_ph == NULL)
            continue;
        if (!phase_get_irn_data(old_ph, n))
            continue;
        phase_set_irn_data(new_phases[i], nn, phase_get_irn_data(old_ph, n));
    }

    hook_dead_node_elim_subst(current_ir_graph, n, nn);
    return nn;
}
static void copy_node_with_number(ir_node *node, void *env)
{
    ir_graph *new_irg  = (ir_graph*) env;
    ir_node  *new_node = copy_node(node, new_irg);

    /* preserve the node numbers for easier debugging */
    new_node->node_nr = node->node_nr;
}
/**
 * Reroute the inputs of a node from nodes in the old graph to copied nodes in
 * the new graph.
 */
static void set_preds(ir_node *node, void *env)
{
    ir_graph *new_irg = (ir_graph*) env;
    ir_node  *new_node;
    int       arity, i;

    new_node = get_new_node(node);

    if (is_Block(node)) {
        /* copy the macro block header */
        ir_node *mbh = get_Block_MacroBlock(node);
        /* get the copied macro block header */
        ir_node *nmbh = get_new_node(mbh);
        assert(nmbh != NULL);
        set_Block_MacroBlock(new_node, nmbh);
    } else {
        ir_node *block     = get_nodes_block(node);
        ir_node *new_block = get_new_node(block);
        set_nodes_block(new_node, new_block);
    }

    arity = get_irn_arity(new_node);
    for (i = 0; i < arity; ++i) {
        ir_node *in     = get_irn_n(node, i);
        ir_node *new_in = get_new_node(in);
        set_irn_n(new_node, i, new_in);
    }

    /* Now the new node is complete.  We can add it to the hash table for CSE. */
    add_identities(new_irg->value_table, new_node);
}
/**
 * Copies the graph reachable from current_ir_graph->end to the obstack
 * in current_ir_graph and fixes the environment.
 * Then fixes the fields in current_ir_graph containing nodes of the
 * graph.
 */
static void copy_graph_env(ir_graph *irg)
{
    ir_node  *old_end, *new_anchor;
    ir_phase *old_ph;
    int       i;

    /* init the new_phases array */
    for (i = PHASE_NOT_IRG_MANAGED+1; i < PHASE_LAST; i++) {
        old_ph = get_irg_phase(irg, i);
        if (old_ph == NULL) {
            new_phases[i] = NULL;
        } else {
            new_phases[i] = xmalloc(sizeof(ir_phase));
            phase_init(new_phases[i], "", irg, old_ph->growth_factor,
                       old_ph->data_init, old_ph->priv);
        }
    }

    /* remove end_except and end_reg nodes */
    old_end = get_irg_end(irg);
    set_irg_end_except(irg, old_end);
    set_irg_end_reg   (irg, old_end);

    /* copy all nodes and rewire their predecessors */
    irg_walk(irg->anchor, copy_node_with_number, set_preds, irg);

    /* fix the anchor */
    new_anchor = get_new_node(irg->anchor);
    assert(new_anchor != NULL);
    irg->anchor = new_anchor;

    /* copy the new phases into the irg */
    for (i = PHASE_NOT_IRG_MANAGED+1; i < PHASE_LAST; i++) {
        old_ph = get_irg_phase(irg, i);
        if (old_ph == NULL)
            continue;

        free_irg_phase(irg, i);
        irg->phases[i] = new_phases[i];
    }
}
/**
 * Copies all reachable nodes to a new obstack.  Removes bad inputs
 * from block nodes and the corresponding inputs from Phi nodes.
 * Merges single exit blocks with single entry blocks.
 * Adds all new nodes to a new hash table for CSE.  Does not
 * perform CSE, so the hash table might contain common subexpressions.
 */
void dead_node_elimination(ir_graph *irg)
{
    ir_graph *rem;
#ifdef INTERPROCEDURAL_VIEW
    int rem_ipview = get_interprocedural_view();
#endif
    struct obstack *graveyard_obst = NULL;
    struct obstack *rebirth_obst   = NULL;

    edges_deactivate(irg);

    /* inform statistics that we started a dead-node elimination run */
    hook_dead_node_elim(irg, 1);

    /* Remember external state of current_ir_graph. */
    rem = current_ir_graph;
    current_ir_graph = irg;
#ifdef INTERPROCEDURAL_VIEW
    set_interprocedural_view(0);
#endif

    assert(get_irg_phase_state(irg) != phase_building);

    /* Handle graph state */
    free_callee_info(irg);
    free_loop_information(irg);
    set_irg_doms_inconsistent(irg);

    /* A quiet place, where the old obstack can rest in peace,
       until it will be cremated. */
    graveyard_obst = irg->obst;

    /* A new obstack, where the reachable nodes will be copied to. */
    rebirth_obst = XMALLOC(struct obstack);
    irg->obst = rebirth_obst;
    obstack_init(irg->obst);
    irg->last_node_idx = 0;

    /* We also need a new value table for CSE */
    del_identities(irg->value_table);
    irg->value_table = new_identities();

    /* Copy the graph from the old to the new obstack */
    copy_graph_env(irg);

    /* Free memory from old unoptimized obstack */
    obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
    xfree(graveyard_obst);           /* ... then free it.           */

    /* inform statistics that the run is over */
    hook_dead_node_elim(irg, 0);

    current_ir_graph = rem;
#ifdef INTERPROCEDURAL_VIEW
    set_interprocedural_view(rem_ipview);
#endif
}
ir_graph_pass_t *dead_node_elimination_pass(const char *name)
{
    return def_graph_pass(name ? name : "dce", dead_node_elimination);
}
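
/* A minimal usage sketch (illustrative only, therefore disabled): run dead
 * node elimination over every graph of the program.  The helper name is
 * made up; the API calls are the ones used in this file. */
#if 0
static void example_run_dce_everywhere(void)
{
    int i, n = get_irp_n_irgs();

    for (i = 0; i < n; ++i)
        dead_node_elimination(get_irp_irg(i));
}
#endif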
/*
   The following stuff implements a facility that automatically patches
   registered ir_node pointers to the new node when a dead node elimination
   occurs.
*/
struct _survive_dce_t {
    struct obstack obst;       /**< obstack for allocating the list entries */
    pmap          *places;
    pmap          *new_places;
    hook_entry_t   dead_node_elim;
    hook_entry_t   dead_node_elim_subst;
};

typedef struct _survive_dce_list_t {
    struct _survive_dce_list_t *next;
    ir_node                   **place;
} survive_dce_list_t;
static void dead_node_hook(void *context, ir_graph *irg, int start)
{
    survive_dce_t *sd = context;
    (void) irg;

    if (start) {
        /* Create a new map before the dead node elimination is performed. */
        sd->new_places = pmap_create_ex(pmap_count(sd->places));
    } else {
        /* Patch back all nodes if dead node elimination is over and something is to be done. */
        pmap_destroy(sd->places);
        sd->places     = sd->new_places;
        sd->new_places = NULL;
    }
}
/**
 * Hook called when dead node elimination replaces old by nw.
 */
static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw)
{
    survive_dce_t      *sd   = context;
    survive_dce_list_t *list = pmap_get(sd->places, old);
    (void) irg;

    /* If the node is to be patched back, write the new address to all registered locations. */
    if (list != NULL) {
        survive_dce_list_t *p;

        for (p = list; p; p = p->next)
            *(p->place) = nw;

        pmap_insert(sd->new_places, nw, list);
    }
}
/**
 * Make a new Survive DCE environment.
 */
survive_dce_t *new_survive_dce(void)
{
    survive_dce_t *res = XMALLOC(survive_dce_t);
    obstack_init(&res->obst);
    res->places     = pmap_create();
    res->new_places = NULL;

    res->dead_node_elim.hook._hook_dead_node_elim = dead_node_hook;
    res->dead_node_elim.context                   = res;
    res->dead_node_elim.next                      = NULL;

    res->dead_node_elim_subst.hook._hook_dead_node_elim_subst = dead_node_subst_hook;
    res->dead_node_elim_subst.context                         = res;
    res->dead_node_elim_subst.next                            = NULL;

    register_hook(hook_dead_node_elim,       &res->dead_node_elim);
    register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
    return res;
}
/**
 * Free a Survive DCE environment.
 */
void free_survive_dce(survive_dce_t *sd)
{
    obstack_free(&sd->obst, NULL);
    pmap_destroy(sd->places);
    unregister_hook(hook_dead_node_elim,       &sd->dead_node_elim);
    unregister_hook(hook_dead_node_elim_subst, &sd->dead_node_elim_subst);
    xfree(sd);
}
/**
 * Register a node pointer to be patched upon DCE.
 * When DCE occurs, the node pointer specified by @p place will be
 * patched to the new address of the node it is pointing to.
 *
 * @param sd     The Survive DCE environment.
 * @param place  The address of the node pointer.
 */
void survive_dce_register_irn(survive_dce_t *sd, ir_node **place)
{
    if (*place != NULL) {
        ir_node            *irn  = *place;
        survive_dce_list_t *curr = pmap_get(sd->places, irn);
        survive_dce_list_t *nw   = OALLOC(&sd->obst, survive_dce_list_t);

        nw->next  = curr;
        nw->place = place;

        pmap_insert(sd->places, irn, nw);
    }
}
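
/* A minimal usage sketch (illustrative only, therefore disabled): keep a
 * node pointer valid across dead_node_elimination().  The helper name is
 * made up; the survive-DCE calls are the ones defined above. */
#if 0
static void example_keep_pointer_valid(ir_graph *irg, ir_node **place)
{
    survive_dce_t *sd = new_survive_dce();

    survive_dce_register_irn(sd, place);
    dead_node_elimination(irg);
    /* *place now points to the copy of the node on the new obstack */

    free_survive_dce(sd);
}
#endif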
/*--------------------------------------------------------------------*/
/*  Functionality for inlining                                        */
/*--------------------------------------------------------------------*/

/**
 * Copy node for inlining.  Updates attributes that change when
 * inlining but not for dead node elimination.
 *
 * Copies the node by calling copy_node() and then updates the entity if
 * it's a local one.  env must be the new ir_graph.  The new entities must
 * already be in the link fields of the old frame entities (see
 * copy_frame_entities()).
 */
static void copy_node_inline(ir_node *node, void *env)
{
    ir_graph *new_irg  = (ir_graph*) env;
    ir_node  *new_node = copy_node(node, new_irg);

    if (is_Sel(node)) {
        ir_graph  *old_irg        = get_irn_irg(node);
        ir_type   *old_frame_type = get_irg_frame_type(old_irg);
        ir_entity *old_entity     = get_Sel_entity(node);
        assert(is_Sel(new_node));

        /* use copied entities from the new frame */
        if (get_entity_owner(old_entity) == old_frame_type) {
            ir_entity *new_entity = get_entity_link(old_entity);
            assert(new_entity != NULL);
            set_Sel_entity(new_node, new_entity);
        }
    } else if (is_Block(new_node)) {
        new_node->attr.block.irg.irg = new_irg;
    }
}
static void set_preds_inline(ir_node *node, void *env)
{
    ir_node *new_node;

    set_preds(node, env);

    /* move constants into the start block */
    new_node = get_new_node(node);
    if (is_irn_constlike(new_node)) {
        ir_graph *new_irg     = (ir_graph *) env;
        ir_node  *start_block = get_irg_start_block(new_irg);
        set_nodes_block(new_node, start_block);
    }
}
/**
 * Walker: checks if P_value_arg_base is used.
 */
static void find_addr(ir_node *node, void *env)
{
    bool *allow_inline = env;

    if (is_Sel(node)) {
        ir_graph *irg = current_ir_graph;
        if (get_Sel_ptr(node) == get_irg_frame(irg)) {
            /* access to the frame */
            ir_entity *ent = get_Sel_entity(node);
            if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
                /* access to the value type */
                *allow_inline = false;
            }
        }
    } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
        /* From GCC:
         * Refuse to inline an alloca call unless the user explicitly forced
         * it, as this may change the program's memory overhead drastically
         * when the function using alloca is called in a loop.  In the GCC
         * present in SPEC2000, inlining into schedule_block caused it to
         * require 2 GB of RAM instead of 256 MB.
         *
         * Sadly this is true for our implementation as well.
         * Moreover, we cannot differentiate between alloca() and VLA yet, so
         * this disables inlining of functions using VLA (which is completely
         * legal and "safe").
         *
         * Two solutions:
         * - add a flag to the Alloc node for "real" alloca() calls
         * - add a new Stack-Restore node at the end of a function using
         *   alloca()
         */
        *allow_inline = false;
    }
}
/**
 * Check if we can inline a given call.
 * Currently, we cannot inline two cases:
 * - calls with compound arguments
 * - graphs that take the address of a parameter
 *
 * Check these conditions here.
 */
static bool can_inline(ir_node *call, ir_graph *called_graph)
{
    ir_entity          *called      = get_irg_entity(called_graph);
    ir_type            *called_type = get_entity_type(called);
    ir_type            *call_type   = get_Call_type(call);
    int                 n_params    = get_method_n_params(called_type);
    int                 n_arguments = get_method_n_params(call_type);
    int                 n_res       = get_method_n_ress(called_type);
    irg_inline_property prop        = get_irg_inline_property(called_graph);
    int                 i;
    bool                res;
    if (prop == irg_inline_forbidden)
        return false;

    if (n_arguments != n_params) {
        /* this is a bad feature of C: without a prototype, we can
         * call a function with fewer parameters than needed.  Currently
         * we don't support this, although we could use Unknown then. */
        return false;
    }
    if (n_res != get_method_n_ress(call_type)) {
        return false;
    }

    /* Argh, compiling C has some bad consequences:
     * It is implementation dependent what happens in that case.
     * We support inlining, if the bitsize of the types matches AND
     * the same arithmetic is used. */
    for (i = n_params - 1; i >= 0; --i) {
        ir_type *param_tp = get_method_param_type(called_type, i);
        ir_type *arg_tp   = get_method_param_type(call_type, i);

        if (param_tp != arg_tp) {
            ir_mode *pmode = get_type_mode(param_tp);
            ir_mode *amode = get_type_mode(arg_tp);

            if (pmode == NULL || amode == NULL)
                return false;
            if (get_mode_size_bits(pmode) != get_mode_size_bits(amode))
                return false;
            if (get_mode_arithmetic(pmode) != get_mode_arithmetic(amode))
                return false;
            /* otherwise we can simply "reinterpret" the bits */
        }
    }
    for (i = n_res - 1; i >= 0; --i) {
        ir_type *decl_res_tp = get_method_res_type(called_type, i);
        ir_type *used_res_tp = get_method_res_type(call_type, i);

        if (decl_res_tp != used_res_tp) {
            ir_mode *decl_mode = get_type_mode(decl_res_tp);
            ir_mode *used_mode = get_type_mode(used_res_tp);
            if (decl_mode == NULL || used_mode == NULL)
                return false;
            if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
                return false;
            if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
                return false;
            /* otherwise we can "reinterpret" the bits */
        }
    }

    /* check parameters for compound arguments */
    for (i = 0; i < n_params; ++i) {
        ir_type *p_type = get_method_param_type(call_type, i);

        if (is_compound_type(p_type))
            return false;
    }

    /* check results for compound arguments */
    for (i = 0; i < n_res; ++i) {
        ir_type *r_type = get_method_res_type(call_type, i);

        if (is_compound_type(r_type))
            return false;
    }

    res = true;
    irg_walk_graph(called_graph, find_addr, NULL, &res);
    return res;
}
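
/* An illustrative (disabled) source-level example of what can_inline()
 * rejects: the callee takes a compound (struct) argument, and it also takes
 * the address of a parameter, which find_addr() flags as an access outside
 * the frame type. */
#if 0
struct pair { int a, b; };

static int sum_pair(struct pair p)
{
    int *q = &p.a; /* address of a parameter: *allow_inline becomes false */
    return q[0] + p.b;
}
#endif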
/** The different exception handling situations for a Call. */
enum exc_mode {
    exc_handler,   /**< There is a handler. */
    exc_no_handler /**< Exception handling not represented. */
};
/**
 * Copy all entities on the stack frame of one irg to the stack frame of
 * another.  Sets the entity links of the old entities to the copies.
 */
static void copy_frame_entities(ir_graph *from, ir_graph *to)
{
    ir_type *from_frame = get_irg_frame_type(from);
    ir_type *to_frame   = get_irg_frame_type(to);
    int      n_members  = get_class_n_members(from_frame);
    int      i;

    assert(from_frame != to_frame);

    for (i = 0; i < n_members; ++i) {
        ir_entity *old_ent = get_class_member(from_frame, i);
        ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
        set_entity_link(old_ent, new_ent);
    }
}
/* Inlines a method at the given call site. */
int inline_method(ir_node *call, ir_graph *called_graph)
{
    ir_node       *pre_call;
    ir_node       *post_call, *post_bl;
    ir_node       *in[pn_Start_max];
    ir_node       *end, *end_bl, *block;
    ir_node      **res_pred;
    ir_node      **cf_pred;
    ir_node      **args_in;
    ir_node       *ret, *phi;
    int            arity, n_ret, n_exc, n_res, i, j, rem_opt;
    int            irn_arity, n_params;
    int            n_mem_phi;
    enum exc_mode  exc_handling;
    ir_type       *mtp, *ctp;
    ir_entity     *ent;
    ir_graph      *rem;
    ir_graph      *irg = get_irn_irg(call);

    /* we cannot inline some types of calls */
    if (! can_inline(call, called_graph))
        return 0;

    /* We cannot inline a recursive call.  The graph must be copied before
     * the call to inline_method() using create_irg_copy(). */
    if (called_graph == irg)
        return 0;

    ent      = get_irg_entity(called_graph);
    mtp      = get_entity_type(ent);
    ctp      = get_Call_type(call);
    n_params = get_method_n_params(mtp);
    n_res    = get_method_n_ress(mtp);

    rem = current_ir_graph;
    current_ir_graph = irg;

    DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
    /* optimizations can cause problems when allocating new nodes */
    rem_opt = get_opt_optimize();
    set_optimize(0);

    /* Handle graph state */
    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) == op_pin_state_pinned);
    assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
    set_irg_outs_inconsistent(irg);
    set_irg_extblk_inconsistent(irg);
    set_irg_doms_inconsistent(irg);
    set_irg_loopinfo_inconsistent(irg);
    set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
    set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
    edges_deactivate(irg);

    /* here we know we WILL inline, so inform the statistics */
    hook_inline(call, called_graph);

    /* -- Decide how to handle exception control flow: Is there a handler
       for the Call node, or do we branch directly to End on an exception?
       exc_handler:    There is a handler.
       exc_no_handler: Exception handling not represented in Firm. -- */
    {
        ir_node *Xproj = NULL;
        ir_node *proj;

        for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
            long proj_nr = get_Proj_proj(proj);
            if (proj_nr == pn_Call_X_except) Xproj = proj;
        }
        exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
    }
    /* create the argument tuple */
    args_in = ALLOCAN(ir_node*, n_params);

    block = get_nodes_block(call);
    for (i = n_params - 1; i >= 0; --i) {
        ir_node *arg      = get_Call_param(call, i);
        ir_type *param_tp = get_method_param_type(mtp, i);
        ir_mode *mode     = get_type_mode(param_tp);

        if (mode != get_irn_mode(arg)) {
            arg = new_r_Conv(block, arg, mode);
        }
        args_in[i] = arg;
    }

    /* -- Prepare the procedure call: pre_call collects the data entering
     * the procedure and later replaces the Start node of the called graph.
     * Post_call is the old Call node and collects the results of the called
     * graph.  Both will end up being a tuple. -- */
    post_bl = get_nodes_block(call);
    /* XxMxPxPxPxT of Start + parameter of Call */
    in[pn_Start_M]              = get_Call_mem(call);
    in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
    in[pn_Start_P_frame_base]   = get_irg_frame(irg);
    in[pn_Start_P_tls]          = get_irg_tls(irg);
    in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
    pre_call  = new_r_Tuple(post_bl, pn_Start_max, in);
    post_call = call;

    /* -- Split the block of the Call into two blocks.
       The new block gets the ins of the old block, pre_call and all its
       predecessors and all Phi nodes. -- */
    part_block(pre_call);
    /* increment visited flag for later walk */
    inc_irg_visited(called_graph);

    /* link some nodes to nodes in the current graph, so that instead of
     * being copied the linked nodes will be used.
     * So the copier will use the created Tuple instead of copying the start
     * node, similar for singleton nodes like NoMem and Bad.
     * Note: this will prohibit predecessors to be copied - only do it for
     * nodes without predecessors */
    {
        ir_node *start_block;
        ir_node *start;
        ir_node *bad;
        ir_node *nomem;

        start_block = get_irg_start_block(called_graph);
        set_new_node(start_block, get_nodes_block(pre_call));
        mark_irn_visited(start_block);

        start = get_irg_start(called_graph);
        set_new_node(start, pre_call);
        mark_irn_visited(start);

        bad = get_irg_bad(called_graph);
        set_new_node(bad, get_irg_bad(irg));
        mark_irn_visited(bad);

        nomem = get_irg_no_mem(called_graph);
        set_new_node(nomem, get_irg_no_mem(irg));
        mark_irn_visited(nomem);
    }

    /* the entity link is used to link entities on the old stack frame to
     * their copies on the new stack frame */
    irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);

    /* copy entities and nodes */
    assert(!irn_visited(get_irg_end(called_graph)));
    copy_frame_entities(called_graph, irg);
    irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
                  irg);

    irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
    /* -- Merge the end of the inlined procedure with the call site -- */
    /* We will turn the old Call node into a Tuple with the following
       predecessors:
       0: Phi of all Memories of Return statements.
       1: Jmp from new Block that merges the control flow from all exception
          predecessors of the old end block.
       2: Tuple of all arguments.
       3: Phi of Exception memories.
       In case the old Call directly branches to End on an exception we don't
       need the block merging all exceptions nor the Phi of the exception
       memories.
    */

    /* Precompute some values */
    end_bl = get_new_node(get_irg_end_block(called_graph));
    end    = get_new_node(get_irg_end(called_graph));
    arity  = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret  */
    n_res  = get_method_n_ress(get_Call_type(call));

    res_pred = XMALLOCN(ir_node*, n_res);
    cf_pred  = XMALLOCN(ir_node*, arity);

    /* archive keepalives */
    irn_arity = get_irn_arity(end);
    for (i = 0; i < irn_arity; i++) {
        ir_node *ka = get_End_keepalive(end, i);
        if (! is_Bad(ka))
            add_End_keepalive(get_irg_end(irg), ka);
    }

    /* replace Return nodes by Jump nodes */
    n_ret = 0;
    for (i = 0; i < arity; i++) {
        ret = get_Block_cfgpred(end_bl, i);
        if (is_Return(ret)) {
            ir_node *block = get_nodes_block(ret);
            cf_pred[n_ret] = new_r_Jmp(block);
            n_ret++;
        }
    }
    set_irn_in(post_bl, n_ret, cf_pred);
    /* build a Tuple for all results of the method.
     * Add a Phi node if there was more than one Return. */
    turn_into_tuple(post_call, pn_Call_max);
    /* First the Memory-Phi */
    n_mem_phi = 0;
    for (i = 0; i < arity; i++) {
        ret = get_Block_cfgpred(end_bl, i);
        if (is_Return(ret)) {
            cf_pred[n_mem_phi++] = get_Return_mem(ret);
        } else if (is_Call(ret)) {
            /* memory output for some exceptions is directly connected to End */
            cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 3);
        } else if (is_fragile_op(ret)) {
            /* We rely on all cfops having the memory output at the same position. */
            cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 0);
        } else if (is_Raise(ret)) {
            cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 1);
        }
    }
    phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
    set_Tuple_pred(call, pn_Call_M, phi);
    /* Conserve Phi-list for further inlinings -- but might be optimized */
    if (get_nodes_block(phi) == post_bl) {
        set_irn_link(phi, get_irn_link(post_bl));
        set_irn_link(post_bl, phi);
    }
    /* Now the real results */
    if (n_res > 0) {
        ir_node *result_tuple;
        for (j = 0; j < n_res; j++) {
            ir_type *res_type = get_method_res_type(ctp, j);
            ir_mode *res_mode = get_type_mode(res_type);
            n_ret = 0;
            for (i = 0; i < arity; i++) {
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
                    ir_node *res = get_Return_res(ret, j);
                    if (get_irn_mode(res) != res_mode) {
                        ir_node *block = get_nodes_block(res);
                        res = new_r_Conv(block, res, res_mode);
                    }
                    cf_pred[n_ret] = res;
                    n_ret++;
                }
            }
            if (n_ret > 0) {
                ir_mode *mode = get_irn_mode(cf_pred[0]);
                phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
            } else {
                phi = new_r_Bad(irg);
            }
            res_pred[j] = phi;
            /* Conserve Phi-list for further inlinings -- but might be optimized */
            if (get_nodes_block(phi) == post_bl) {
                set_Phi_next(phi, get_Block_phis(post_bl));
                set_Block_phis(post_bl, phi);
            }
        }
        result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
        set_Tuple_pred(call, pn_Call_T_result, result_tuple);
    } else {
        set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
    }
    /* handle the regular call */
    set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));

    /* For now, we cannot inline calls with value_base */
    set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));

    /* Finally the exception control flow.
       We have two possible situations:
       First if the Call branches to an exception handler:
       We need to add a Phi node to
       collect the memory containing the exception objects.  Further we need
       to add another block to get a correct representation of this Phi.  To
       this block we add a Jmp that resolves into the X output of the Call
       when the Call is turned into a tuple.
       Second: There is no exception edge.  Just add all inlined exception
       branches to the End node.
     */
    if (exc_handling == exc_handler) {
        n_exc = 0;
        for (i = 0; i < arity; i++) {
            ir_node *irn;

            ret = get_Block_cfgpred(end_bl, i);
            irn = skip_Proj(ret);
            if (is_fragile_op(irn) || is_Raise(irn)) {
                cf_pred[n_exc] = ret;
                ++n_exc;
            }
        }
        if (n_exc == 1) {
            /* simple fix */
            set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
        } else if (n_exc > 1) {
            ir_node *block = new_r_Block(irg, n_exc, cf_pred);
            set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
        } else {
            set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
        }
    } else {
        ir_node  *main_end_bl;
        int       main_end_bl_arity;
        ir_node **end_preds;

        /* assert(exc_handling == exc_no_handler || there are no exceptions) */
        n_exc = 0;
        for (i = 0; i < arity; i++) {
            ir_node *ret = get_Block_cfgpred(end_bl, i);
            ir_node *irn = skip_Proj(ret);

            if (is_fragile_op(irn) || is_Raise(irn)) {
                cf_pred[n_exc] = ret;
                n_exc++;
            }
        }
        main_end_bl       = get_irg_end_block(irg);
        main_end_bl_arity = get_irn_arity(main_end_bl);
        end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);

        for (i = 0; i < main_end_bl_arity; ++i)
            end_preds[i] = get_irn_n(main_end_bl, i);
        for (i = 0; i < n_exc; ++i)
            end_preds[main_end_bl_arity + i] = cf_pred[i];
        set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
        set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
        xfree(end_preds);
    }
    xfree(res_pred);
    xfree(cf_pred);

    /* -- Turn CSE back on. -- */
    set_optimize(rem_opt);
    current_ir_graph = rem;

    return 1;
}
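
/* A minimal usage sketch (illustrative only, therefore disabled): inline a
 * single call site by hand.  inline_method() relies on the Phi/Proj lists,
 * so collect_phiprojs() must run first; the helper name is made up. */
#if 0
static void example_inline_one_call(ir_graph *irg, ir_node *call, ir_graph *callee)
{
    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
    collect_phiprojs(irg);

    if (inline_method(call, callee)) {
        /* the Call node was turned into a Tuple delivering the results */
    }
    ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
#endif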
/********************************************************************/
/*  Apply inlining to small methods.                                */
/********************************************************************/

static struct obstack temp_obst;

/** Represents a possible inlinable call in a graph. */
typedef struct _call_entry {
    ir_node   *call;        /**< The Call node. */
    ir_graph  *callee;      /**< The callee IR-graph. */
    list_head  list;        /**< List head for linking the next one. */
    int        loop_depth;  /**< The loop depth of this call. */
    int        benefice;    /**< The calculated benefice of this call. */
    unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
    unsigned   all_const:1; /**< Set if this call has only constant parameters. */
} call_entry;
/**
 * Environment for inlining small irgs.
 */
typedef struct _inline_env_t {
    struct obstack obst;  /**< An obstack where call_entries are allocated on. */
    list_head      calls; /**< The call entry list. */
} inline_env_t;

/**
 * Returns the irg called from a Call node.  If the irg is not
 * known, NULL is returned.
 *
 * @param call  the call node
 */
static ir_graph *get_call_called_irg(ir_node *call)
{
    ir_node *addr;

    addr = get_Call_ptr(call);
    if (is_Global(addr)) {
        ir_entity *ent = get_Global_entity(addr);
        /* we don't know which function finally gets bound to a weak symbol */
        if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
            return NULL;
        return get_entity_irg(ent);
    }
    return NULL;
}
/**
 * Walker: Collect all calls to known graphs inside a graph.
 */
static void collect_calls(ir_node *call, void *env)
{
    if (is_Call(call)) {
        ir_graph *called_irg = get_call_called_irg(call);

        if (called_irg != NULL) {
            /* The Call node calls a locally defined method.  Remember to inline. */
            inline_env_t *ienv  = env;
            call_entry   *entry = OALLOC(&ienv->obst, call_entry);
            entry->call       = call;
            entry->callee     = called_irg;
            entry->loop_depth = 0;
            entry->benefice   = 0;
            entry->local_adr  = 0;
            entry->all_const  = 0;

            list_add_tail(&entry->list, &ienv->calls);
        }
    }
}
/**
 * Inlines all small methods at call sites where the called address comes
 * from a Const node that references the entity representing the called
 * method.
 * The size argument is a rough measure for the code size of the method:
 * Methods where the obstack containing the firm graph is smaller than
 * size are inlined.
 */
void inline_small_irgs(ir_graph *irg, int size)
{
    ir_graph     *rem = current_ir_graph;
    inline_env_t  env;
    call_entry   *entry;

    current_ir_graph = irg;
    /* Handle graph state */
    assert(get_irg_phase_state(irg) != phase_building);
    free_callee_info(irg);

    /* Find Call nodes to inline.
       (We cannot inline during a walk of the graph, as inlining the same
       method several times changes the visited flag of the walked graph:
       after the first inlining, visited of the callee equals visited of
       the caller.  With the next inlining both are increased.) */
    obstack_init(&env.obst);
    INIT_LIST_HEAD(&env.calls);
    irg_walk_graph(irg, NULL, collect_calls, &env);

    if (! list_empty(&env.calls)) {
        /* There are calls to inline */
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        collect_phiprojs(irg);

        list_for_each_entry(call_entry, entry, &env.calls, list) {
            ir_graph            *callee = entry->callee;
            irg_inline_property  prop   = get_irg_inline_property(callee);

            if (prop == irg_inline_forbidden) {
                continue;
            }

            if (prop >= irg_inline_forced ||
                _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
                inline_method(entry->call, callee);
            }
        }
        ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
    }
    obstack_free(&env.obst, NULL);
    current_ir_graph = rem;
}
struct inline_small_irgs_pass_t {
    ir_graph_pass_t pass;
    int             size;
};

/**
 * Wrapper to run inline_small_irgs() as a pass.
 */
static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
{
    struct inline_small_irgs_pass_t *pass = context;

    inline_small_irgs(irg, pass->size);
    return 0;
}

/* create a pass for inline_small_irgs() */
ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size)
{
    struct inline_small_irgs_pass_t *pass =
        XMALLOCZ(struct inline_small_irgs_pass_t);

    pass->size = size;
    return def_graph_pass_constructor(
        &pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
}
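
/* A minimal usage sketch (illustrative only, therefore disabled): inline
 * every callee whose graph obstack is smaller than a threshold.  The value
 * 800 is an arbitrary example, not a recommendation. */
#if 0
static void example_inline_small(void)
{
    int i, n = get_irp_n_irgs();

    for (i = 0; i < n; ++i)
        inline_small_irgs(get_irp_irg(i), 800);
}
#endif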
/**
 * Environment for inlining irgs.
 */
typedef struct {
    list_head calls;             /**< List of all call nodes in this graph. */
    unsigned *local_weights;     /**< Once allocated, the beneficial weight for transmitting local addresses. */
    unsigned  n_nodes;           /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
    unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
    unsigned  n_nodes_orig;      /**< for statistics */
    unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
    unsigned  n_call_nodes_orig; /**< for statistics */
    unsigned  n_callers;         /**< Number of known graphs that call this graph. */
    unsigned  n_callers_orig;    /**< for statistics */
    unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
    unsigned  recursive:1;       /**< Set, if this function is self recursive. */
} inline_irg_env;

/**
 * Allocate a new environment for inlining.
 */
static inline_irg_env *alloc_inline_irg_env(void)
{
    inline_irg_env *env = OALLOC(&temp_obst, inline_irg_env);
    INIT_LIST_HEAD(&env->calls);
    env->local_weights     = NULL;
    env->n_nodes           = -2; /* do not count Start, End */
    env->n_blocks          = -2; /* do not count Start, End blocks */
    env->n_nodes_orig      = -2; /* do not count Start, End */
    env->n_call_nodes      = 0;
    env->n_call_nodes_orig = 0;
    env->n_callers         = 0;
    env->n_callers_orig    = 0;
    env->got_inline        = 0;
    env->recursive         = 0;
    return env;
}
typedef struct walker_env {
    inline_irg_env *x;              /**< the inline environment */
    char            ignore_runtime; /**< the ignore runtime flag */
    char            ignore_callers; /**< if set, do NOT update the callers data */
} wenv_t;
/**
 * post-walker: collect all calls in the inline-environment
 * of a graph and sum some statistics.
 */
static void collect_calls2(ir_node *call, void *ctx)
{
    wenv_t         *env  = ctx;
    inline_irg_env *x    = env->x;
    ir_opcode       code = get_irn_opcode(call);
    ir_graph       *callee;
    call_entry     *entry;

    /* count meaningful nodes in irg */
    if (code != iro_Proj && code != iro_Tuple && code != iro_Sync) {
        if (code != iro_Block) {
            ++x->n_nodes;
            ++x->n_nodes_orig;
        } else {
            ++x->n_blocks;
        }
    }

    if (code != iro_Call) return;

    /* check, if it's a runtime call */
    if (env->ignore_runtime) {
        ir_node *symc = get_Call_ptr(call);

        if (is_Global(symc)) {
            ir_entity *ent = get_Global_entity(symc);

            if (get_entity_additional_properties(ent) & mtp_property_runtime)
                return;
        }
    }

    /* collect all call nodes */
    ++x->n_call_nodes;
    ++x->n_call_nodes_orig;

    callee = get_call_called_irg(call);
    if (callee != NULL) {
        if (! env->ignore_callers) {
            inline_irg_env *callee_env = get_irg_link(callee);
            /* count all static callers */
            ++callee_env->n_callers;
            ++callee_env->n_callers_orig;
        }
        if (callee == current_ir_graph)
            x->recursive = 1;

        /* link it in the list of possible inlinable entries */
        entry = OALLOC(&temp_obst, call_entry);
        entry->call       = call;
        entry->callee     = callee;
        entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
        entry->benefice   = 0;
        entry->local_adr  = 0;
        entry->all_const  = 0;

        list_add_tail(&entry->list, &x->calls);
    }
}
/**
 * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
 * hence this irg is a leaf (it calls no other methods).
 */
inline static int is_leave(ir_graph *irg)
{
    inline_irg_env *env = get_irg_link(irg);
    return env->n_call_nodes == 0;
}

/**
 * Returns TRUE if the number of nodes in the callee is
 * smaller than size in the irg's environment.
 */
inline static int is_smaller(ir_graph *callee, unsigned size)
{
    inline_irg_env *env = get_irg_link(callee);
    return env->n_nodes < size;
}
/**
 * Duplicate a call entry.
 *
 * @param entry             the original entry to duplicate
 * @param new_call          the new call node
 * @param loop_depth_delta  delta value for the loop depth
 */
static call_entry *duplicate_call_entry(const call_entry *entry,
                                        ir_node *new_call, int loop_depth_delta)
{
    call_entry *nentry = OALLOC(&temp_obst, call_entry);
    nentry->call       = new_call;
    nentry->callee     = entry->callee;
    nentry->benefice   = entry->benefice;
    nentry->loop_depth = entry->loop_depth + loop_depth_delta;
    nentry->local_adr  = entry->local_adr;
    nentry->all_const  = entry->all_const;
    return nentry;
}

/**
 * Append all call nodes of the source environment to the nodes of the
 * destination environment.
 *
 * @param dst         destination environment
 * @param src         source environment
 * @param loop_depth  the loop depth of the call that is replaced by the src list
 */
static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
{
    call_entry *entry, *nentry;

    /* Note that the src list points to Call nodes in the inlined graph, but
       we need Call nodes in our graph.  Luckily the inliner leaves this
       information in the link field. */
    list_for_each_entry(call_entry, entry, &src->calls, list) {
        nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
        list_add_tail(&nentry->list, &dst->calls);
    }
    dst->n_call_nodes += src->n_call_nodes;
    dst->n_nodes      += src->n_nodes;
}
/**
 * Inlines small leaf methods at call sites where the called address comes
 * from a Const node that references the entity representing the called
 * method.
 * The size argument is a rough measure for the code size of the method:
 * Methods where the obstack containing the firm graph is smaller than
 * size are inlined.
 */
void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                            unsigned size, int ignore_runtime)
{
    inline_irg_env   *env;
    ir_graph         *irg;
    ir_graph         *rem;
    int               did_inline;
    wenv_t            wenv;
    int               i, n_irgs;
    call_entry       *entry, *next;
    const call_entry *centry;
    pmap             *copied_graphs;
    pmap_entry       *pm_entry;

    rem = current_ir_graph;
    obstack_init(&temp_obst);

    /* a map for the copied graphs, used to inline recursive calls */
    copied_graphs = pmap_create();

    /* extend all irgs by a temporary data structure for inlining. */
    n_irgs = get_irp_n_irgs();
    for (i = 0; i < n_irgs; ++i)
        set_irg_link(get_irp_irg(i), alloc_inline_irg_env());

    /* Pre-compute information in temporary data structure. */
    wenv.ignore_runtime = ignore_runtime;
    wenv.ignore_callers = 0;
    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = get_irp_irg(i);

        assert(get_irg_phase_state(irg) != phase_building);
        free_callee_info(irg);

        assure_cf_loop(irg);
        wenv.x = get_irg_link(irg);
        irg_walk_graph(irg, NULL, collect_calls2, &wenv);
    }
    /* -- and now inline. -- */

    /* Inline leaves recursively -- we might construct new leaves. */
    do {
        did_inline = 0;

        for (i = 0; i < n_irgs; ++i) {
            ir_node *call;
            int phiproj_computed = 0;

            current_ir_graph = get_irp_irg(i);
            env = get_irg_link(current_ir_graph);

            ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
            list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
                ir_graph            *callee;
                irg_inline_property  prop;

                if (env->n_nodes > maxsize)
                    break;

                call   = entry->call;
                callee = entry->callee;

                prop = get_irg_inline_property(callee);
                if (prop == irg_inline_forbidden) {
                    continue;
                }

                if (is_leave(callee) && (
                    is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
                    if (!phiproj_computed) {
                        phiproj_computed = 1;
                        collect_phiprojs(current_ir_graph);
                    }
                    did_inline = inline_method(call, callee);

                    if (did_inline) {
                        inline_irg_env *callee_env = get_irg_link(callee);

                        /* call was inlined, Phi/Projs for current graph must be recomputed */
                        phiproj_computed = 0;

                        /* Do some statistics */
                        env->got_inline = 1;
                        --env->n_call_nodes;
                        env->n_nodes += callee_env->n_nodes;
                        --callee_env->n_callers;

                        /* remove this call from the list */
                        list_del(&entry->list);
                    }
                }
            }
            ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        }
    } while (did_inline);
    /* inline other small functions. */
    for (i = 0; i < n_irgs; ++i) {
        ir_node *call;
        int phiproj_computed = 0;

        current_ir_graph = get_irp_irg(i);
        env = get_irg_link(current_ir_graph);

        ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

        /* note that the list of possible calls is updated during the process */
        list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
            irg_inline_property  prop;
            ir_graph            *callee;
            pmap_entry          *e;

            call   = entry->call;
            callee = entry->callee;

            prop = get_irg_inline_property(callee);
            if (prop == irg_inline_forbidden) {
                continue;
            }

            e = pmap_find(copied_graphs, callee);
            if (e != NULL) {
                /*
                 * Remap callee if we have a copy.
                 * FIXME: Should we do this only for recursive Calls ?
                 */
                callee = e->value;
            }

            if (prop >= irg_inline_forced ||
                (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
                if (current_ir_graph == callee) {
                    /*
                     * Recursive call: we cannot directly inline because we cannot walk
                     * the graph and change it.  So we have to make a copy of the graph
                     * first.
                     */
                    ir_graph       *copy;
                    inline_irg_env *callee_env;

                    ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

                    /*
                     * No copy yet, create one.
                     * Note that recursive methods are never leaves, so it is sufficient
                     * to test this condition here.
                     */
                    copy = create_irg_copy(callee);

                    /* create_irg_copy() destroys the Proj links, recompute them */
                    phiproj_computed = 0;

                    ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

                    /* allocate new environment */
                    callee_env = alloc_inline_irg_env();
                    set_irg_link(copy, callee_env);

                    assure_cf_loop(copy);
                    wenv.x              = callee_env;
                    wenv.ignore_callers = 1;
                    irg_walk_graph(copy, NULL, collect_calls2, &wenv);

                    /*
                     * Enter the entity of the original graph.  This is needed
                     * for inline_method().  However, note that ent->irg still points
                     * to callee, NOT to copy.
                     */
                    set_irg_entity(copy, get_irg_entity(callee));

                    pmap_insert(copied_graphs, callee, copy);
                    callee = copy;

                    /* we have only one caller: the original graph */
                    callee_env->n_callers      = 1;
                    callee_env->n_callers_orig = 1;
                }
                if (! phiproj_computed) {
                    phiproj_computed = 1;
                    collect_phiprojs(current_ir_graph);
                }
                did_inline = inline_method(call, callee);
                if (did_inline) {
                    inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);

                    /* call was inlined, Phi/Projs for current graph must be recomputed */
                    phiproj_computed = 0;

                    /* callee was inlined.  Append its call list. */
                    env->got_inline = 1;
                    --env->n_call_nodes;
                    append_call_list(env, callee_env, entry->loop_depth);
                    --callee_env->n_callers;

                    /* after we have inlined callee, all called methods inside callee
                       are now called once more */
                    list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
                        inline_irg_env *penv = get_irg_link(centry->callee);
                        ++penv->n_callers;
                    }

                    /* remove this call from the list */
                    list_del(&entry->list);
                }
            }
        }
        ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
    }
    for (i = 0; i < n_irgs; ++i) {
        irg = get_irp_irg(i);
        env = get_irg_link(irg);

        if (env->got_inline) {
            optimize_graph_df(irg);
        }
        if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
            DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
                env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
                env->n_callers_orig, env->n_callers,
                get_entity_name(get_irg_entity(irg))));
        }
    }

    /* kill the copied graphs: we don't need them anymore */
    foreach_pmap(copied_graphs, pm_entry) {
        ir_graph *copy = pm_entry->value;

        /* reset the entity, otherwise it will be deleted in the next step ... */
        set_irg_entity(copy, NULL);
        free_ir_graph(copy);
    }
    pmap_destroy(copied_graphs);

    obstack_free(&temp_obst, NULL);
    current_ir_graph = rem;
}
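
/* A minimal usage sketch (illustrative only, therefore disabled).  The
 * numbers are example values: maxsize caps the caller's node count at 500,
 * leaf callees below 128 nodes and other callees below 32 nodes are
 * candidates; with ignore_runtime == 0 runtime calls are treated like any
 * other call. */
#if 0
static void example_inline_leaves(void)
{
    inline_leave_functions(500, 128, 32, /* ignore_runtime = */ 0);
}
#endif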
struct inline_leave_functions_pass_t {
    ir_prog_pass_t pass;
    unsigned       maxsize;
    unsigned       leavesize;
    unsigned       size;
    int            ignore_runtime;
};

/**
 * Wrapper to run inline_leave_functions() as an ir_prog pass.
 */
static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
{
    struct inline_leave_functions_pass_t *pass = context;
    (void) irp;

    inline_leave_functions(
        pass->maxsize, pass->leavesize,
        pass->size, pass->ignore_runtime);
    return 0;
}

/* create a pass for inline_leave_functions() */
ir_prog_pass_t *inline_leave_functions_pass(
    const char *name, unsigned maxsize, unsigned leavesize,
    unsigned size, int ignore_runtime)
{
    struct inline_leave_functions_pass_t *pass =
        XMALLOCZ(struct inline_leave_functions_pass_t);

    pass->maxsize        = maxsize;
    pass->leavesize      = leavesize;
    pass->size           = size;
    pass->ignore_runtime = ignore_runtime;

    return def_prog_pass_constructor(
        &pass->pass,
        name ? name : "inline_leave_functions",
        inline_leave_functions_wrapper);
}
/**
 * Calculate the parameter weights for transmitting the address of a local variable.
 */
static unsigned calc_method_local_weight(ir_node *arg)
{
    int      i, j, k;
    unsigned v, weight = 0;

    for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
        ir_node *succ = get_irn_out(arg, i);

        switch (get_irn_opcode(succ)) {
        case iro_Load:
        case iro_Store:
            /* Loads and Stores can be removed */
            weight += 3;
            break;
        case iro_Sel:
            /* check if all args are constant */
            for (j = get_Sel_n_indexs(succ) - 1; j >= 0; --j) {
                ir_node *idx = get_Sel_index(succ, j);
                if (! is_Const(idx))
                    return 0;
            }
            /* Check users on this Sel.  Note: if a 0 is returned here, there was
               some unsupported node. */
            v = calc_method_local_weight(succ);
            if (v == 0)
                return 0;
            /* we can kill one Sel with constant indexes, this is cheap */
            weight += v + 1;
            break;
        case iro_Id:
            /* when looking backward we might find Id nodes */
            weight += calc_method_local_weight(succ);
            break;
        case iro_Tuple:
            /* unoptimized tuple */
            for (j = get_Tuple_n_preds(succ) - 1; j >= 0; --j) {
                ir_node *pred = get_Tuple_pred(succ, j);
                if (pred == arg) {
                    /* look for Proj(j) */
                    for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
                        ir_node *succ_succ = get_irn_out(succ, k);
                        if (is_Proj(succ_succ)) {
                            if (get_Proj_proj(succ_succ) == j) {
                                /* found */
                                weight += calc_method_local_weight(succ_succ);
                            }
                        } else {
                            /* this should NOT happen */
                            return 0;
                        }
                    }
                }
            }
            break;
        default:
            /* any other node: unsupported yet or bad. */
            return 0;
        }
    }
    return weight;
}
/**
 * Calculate the parameter weights for transmitting the address of a local variable.
 */
static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
{
    ir_entity *ent = get_irg_entity(irg);
    ir_type   *mtp;
    int        nparams, i, proj_nr;
    ir_node   *irg_args, *arg;

    mtp     = get_entity_type(ent);
    nparams = get_method_n_params(mtp);

    /* allocate a new array.  currently used as 'analysed' flag */
    env->local_weights = NEW_ARR_D(unsigned, &temp_obst, nparams);

    /* If the method has no parameters we have nothing to do. */
    if (nparams <= 0)
        return;

    assure_irg_outs(irg);
    irg_args = get_irg_args(irg);
    for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
        arg     = get_irn_out(irg_args, i);
        proj_nr = get_Proj_proj(arg);
        env->local_weights[proj_nr] = calc_method_local_weight(arg);
    }
}
/**
 * Calculate the benefice for transmitting a local variable address.
 * After inlining, the local variable might be transformed into an
 * SSA variable by scalar_replacement().
 */
static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
{
    inline_irg_env *env = get_irg_link(callee);

    if (env->local_weights != NULL) {
        if (pos < ARR_LEN(env->local_weights))
            return env->local_weights[pos];
        return 0;
    }

    analyze_irg_local_weights(env, callee);

    if (pos < ARR_LEN(env->local_weights))
        return env->local_weights[pos];
    return 0;
}
/**
 * Calculate a benefice value for inlining the given call.
 *
 * @param call    the call node we have to inspect
 * @param callee  the called graph
 */
static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
{
    ir_node   *call = entry->call;
    ir_entity *ent  = get_irg_entity(callee);
    ir_node   *frame_ptr;
    ir_type   *mtp;
    int        weight = 0;
    int        i, n_params, all_const;
    unsigned   cc, v;
    irg_inline_property prop;
    inline_irg_env *callee_env;

    prop = get_irg_inline_property(callee);
    if (prop == irg_inline_forbidden) {
        DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
            call, callee));
        return entry->benefice = INT_MIN;
    }

    if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
        DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
            call, callee));
        return entry->benefice = INT_MIN;
    }

    /* costs for every passed parameter */
    n_params = get_Call_n_params(call);
    mtp      = get_entity_type(ent);
    cc       = get_method_calling_convention(mtp);
    if (cc & cc_reg_param) {
        /* register parameter, smaller costs for register parameters */
        int max_regs = cc & ~cc_bits;

        if (max_regs < n_params)
            weight += max_regs * 2 + (n_params - max_regs) * 5;
        else
            weight += n_params * 2;
    } else {
        /* parameters are passed on the stack */
        weight += 5 * n_params;
    }

    /* constant parameters improve the benefice */
    frame_ptr = get_irg_frame(current_ir_graph);
    all_const = 1;
    for (i = 0; i < n_params; ++i) {
        ir_node *param = get_Call_param(call, i);

        if (is_Const(param)) {
            weight += get_method_param_weight(ent, i);
        } else {
            all_const = 0;
            if (is_SymConst(param))
                weight += get_method_param_weight(ent, i);
            else if (is_Sel(param) && get_Sel_ptr(param) == frame_ptr) {
                /*
                 * An address of a local variable is transmitted.  After
                 * inlining, scalar_replacement might be able to remove the
                 * local variable, so honor this.
                 */
                v = get_method_local_adress_weight(callee, i);
                weight += v;
                if (v > 0)
                    entry->local_adr = 1;
            }
        }
    }
    entry->all_const = all_const;

    callee_env = get_irg_link(callee);
    if (callee_env->n_callers == 1 &&
        callee != current_ir_graph &&
        !entity_is_externally_visible(ent)) {
        weight += 700;
    }

    /* give a bonus for functions with one block */
    if (callee_env->n_blocks == 1)
        weight = weight * 3 / 2;

    /* and one for small non-recursive functions: we want them to be inlined in mostly every case */
    if (callee_env->n_nodes < 30 && !callee_env->recursive)
        weight += 2000;

    /* and finally for leaves: they do not increase the register pressure
       because of callee-save registers */
    if (callee_env->n_call_nodes == 0)
        weight += 400;

    /* it's important to inline inner loops first */
    if (entry->loop_depth > 30)
        weight += 30 * 1024;
    else
        weight += entry->loop_depth * 1024;

    /*
     * All arguments constant is probably a good sign, give an extra bonus
     */
    if (all_const)
        weight += 100;

    return entry->benefice = weight;
}
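
/* A worked example of the parameter and loop terms in the formula above: a
 * call passing three parameters under a register calling convention with at
 * least three register slots starts at 3 * 2 = 6; sitting at loop depth 2 it
 * gains another 2 * 1024 = 2048, so loop depth dominates parameter costs. */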
static ir_graph **irgs;
static int        last_irg;

/**
 * Callgraph walker, collect all visited graphs.
 */
static void callgraph_walker(ir_graph *irg, void *data)
{
    (void) data;
    irgs[last_irg++] = irg;
}

/**
 * Creates an inline order for all graphs.
 *
 * @return the list of graphs.
 */
static ir_graph **create_irg_list(void)
{
    ir_entity **free_methods;
    int         arr_len;
    int         n_irgs = get_irp_n_irgs();

    cgana(&arr_len, &free_methods);
    xfree(free_methods);

    compute_callgraph();

    last_irg = 0;
    irgs     = XMALLOCNZ(ir_graph*, n_irgs);

    callgraph_walk(NULL, callgraph_walker, NULL);
    assert(n_irgs == last_irg);

    return irgs;
}
/**
 * Push a call onto the priority list if its benefice is big enough.
 *
 * @param pqueue            the priority queue of calls
 * @param call              the call entry
 * @param inline_threshold  the threshold value
 */
static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
                            int inline_threshold)
{
    ir_graph            *callee   = call->callee;
    irg_inline_property  prop     = get_irg_inline_property(callee);
    int                  benefice = calc_inline_benefice(call, callee);

    DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
        get_irn_irg(call->call), call->call, callee, benefice));

    if (prop < irg_inline_forced && benefice < inline_threshold) {
        return;
    }

    pqueue_put(pqueue, call, benefice);
}
/**
 * Try to inline calls into a graph.
 *
 * @param irg               the graph into which we inline
 * @param maxsize           do NOT inline if the size of irg gets
 *                          bigger than this amount
 * @param inline_threshold  threshold value for inline decision
 * @param copied_graphs     map containing copies of recursive graphs
 */
static void inline_into(ir_graph *irg, unsigned maxsize,
                        int inline_threshold, pmap *copied_graphs)
{
    int             phiproj_computed = 0;
    inline_irg_env *env = get_irg_link(irg);
    call_entry     *curr_call;
    wenv_t          wenv;
    pqueue_t       *pqueue;

    if (env->n_call_nodes == 0)
        return;

    if (env->n_nodes > maxsize) {
        DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
        return;
    }

    current_ir_graph = irg;
    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

    /* put irgs into the pqueue */
    pqueue = new_pqueue();

    list_for_each_entry(call_entry, curr_call, &env->calls, list) {
        assert(is_Call(curr_call->call));
        maybe_push_call(pqueue, curr_call, inline_threshold);
    }
    /* note that the list of possible calls is updated during the process */
    while (!pqueue_empty(pqueue)) {
        int                  did_inline;
        call_entry          *curr_call  = pqueue_pop_front(pqueue);
        ir_graph            *callee     = curr_call->callee;
        ir_node             *call_node  = curr_call->call;
        inline_irg_env      *callee_env = get_irg_link(callee);
        irg_inline_property  prop       = get_irg_inline_property(callee);
        pmap_entry          *e;
        const call_entry    *centry;
        int                  loop_depth;

        if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
            DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
                env->n_nodes, callee, callee_env->n_nodes));
            continue;
        }

        e = pmap_find(copied_graphs, callee);
        if (e != NULL) {
            int benefice = curr_call->benefice;

            /*
             * Reduce the weight for recursive function IFF not all arguments are const.
             * Inlining recursive functions is rarely good.
             */
            if (!curr_call->all_const)
                benefice -= 2000;
            if (benefice < inline_threshold)
                continue;

            /*
             * Remap callee if we have a copy.
             */
            callee     = e->value;
            callee_env = get_irg_link(callee);
        }

        if (current_ir_graph == callee) {
            /*
             * Recursive call: we cannot directly inline because we cannot
             * walk the graph and change it.  So we have to make a copy of
             * the graph first.
             */
            int       benefice = curr_call->benefice;
            ir_graph *copy;

            /*
             * Reduce the weight for recursive function IFF not all arguments are const.
             * Inlining recursive functions is rarely good.
             */
            if (!curr_call->all_const)
                benefice -= 2000;
            if (benefice < inline_threshold)
                continue;

            ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

            /*
             * No copy yet, create one.
             * Note that recursive methods are never leaves, so it is
             * sufficient to test this condition here.
             */
            copy = create_irg_copy(callee);

            /* create_irg_copy() destroys the Proj links, recompute them */
            phiproj_computed = 0;

            ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);

            /* allocate a new environment */
            callee_env = alloc_inline_irg_env();
            set_irg_link(copy, callee_env);

            assure_cf_loop(copy);
            wenv.x              = callee_env;
            wenv.ignore_callers = 1;
            irg_walk_graph(copy, NULL, collect_calls2, &wenv);

            /*
             * Enter the entity of the original graph.  This is needed
             * for inline_method().  However, note that ent->irg still points
             * to callee, NOT to copy.
             */
            set_irg_entity(copy, get_irg_entity(callee));

            pmap_insert(copied_graphs, callee, copy);
            callee = copy;

            /* we have only one caller: the original graph */
            callee_env->n_callers      = 1;
            callee_env->n_callers_orig = 1;
        }
        if (! phiproj_computed) {
            phiproj_computed = 1;
            collect_phiprojs(current_ir_graph);
        }
        did_inline = inline_method(call_node, callee);
        if (!did_inline)
            continue;

        /* call was inlined, Phi/Projs for current graph must be recomputed */
        phiproj_computed = 0;

        /* remove it from the caller list */
        list_del(&curr_call->list);

        /* callee was inlined.  Append its call list. */
        env->got_inline = 1;
        --env->n_call_nodes;

        /* we just generated a bunch of new calls */
        loop_depth = curr_call->loop_depth;
        list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
            inline_irg_env *penv = get_irg_link(centry->callee);
            ir_node        *new_call;
            call_entry     *new_entry;

            /* after we have inlined callee, all called methods inside
             * callee are now called once more */
            ++penv->n_callers;

            /* Note that the src list points to Call nodes in the inlined graph,
             * but we need Call nodes in our graph.  Luckily the inliner leaves
             * this information in the link field. */
            new_call = get_irn_link(centry->call);
            assert(is_Call(new_call));

            new_entry = duplicate_call_entry(centry, new_call, loop_depth);
            list_add_tail(&new_entry->list, &env->calls);
            maybe_push_call(pqueue, new_entry, inline_threshold);
        }

        env->n_call_nodes += callee_env->n_call_nodes;
        env->n_nodes += callee_env->n_nodes;
        --callee_env->n_callers;
    }
    ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
    del_pqueue(pqueue);
}
/*
 * Heuristic inliner.  Calculates a benefice value for every call and inlines
 * those calls with a value higher than the threshold.
 */
void inline_functions(unsigned maxsize, int inline_threshold,
                      opt_ptr after_inline_opt)
{
    inline_irg_env *env;
    int             i, n_irgs;
    ir_graph       *rem;
    wenv_t          wenv;
    pmap           *copied_graphs;
    pmap_entry     *pm_entry;

    rem = current_ir_graph;
    obstack_init(&temp_obst);

    irgs = create_irg_list();

    /* a map for the copied graphs, used to inline recursive calls */
    copied_graphs = pmap_create();

    /* extend all irgs by a temporary data structure for inlining. */
    n_irgs = get_irp_n_irgs();
    for (i = 0; i < n_irgs; ++i)
        set_irg_link(irgs[i], alloc_inline_irg_env());

    /* Pre-compute information in temporary data structure. */
    wenv.ignore_runtime = 0;
    wenv.ignore_callers = 0;
    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = irgs[i];

        free_callee_info(irg);

        wenv.x = get_irg_link(irg);
        assure_cf_loop(irg);
        irg_walk_graph(irg, NULL, collect_calls2, &wenv);
    }

    /* -- and now inline. -- */
    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = irgs[i];

        inline_into(irg, maxsize, inline_threshold, copied_graphs);
    }

    for (i = 0; i < n_irgs; ++i) {
        ir_graph *irg = irgs[i];

        env = get_irg_link(irg);
        if (env->got_inline && after_inline_opt != NULL) {
            /* this irg got calls inlined: optimize it */
            after_inline_opt(irg);
        }
        if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
            DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
                env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
                env->n_callers_orig, env->n_callers,
                get_entity_name(get_irg_entity(irg))));
        }
    }

    /* kill the copied graphs: we don't need them anymore */
    foreach_pmap(copied_graphs, pm_entry) {
        ir_graph *copy = pm_entry->value;

        /* reset the entity, otherwise it will be deleted in the next step ... */
        set_irg_entity(copy, NULL);
        free_ir_graph(copy);
    }
    pmap_destroy(copied_graphs);

    xfree(irgs);
    obstack_free(&temp_obst, NULL);
    current_ir_graph = rem;
}
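
/* A minimal usage sketch (illustrative only, therefore disabled): run the
 * heuristic inliner with an example node cap of 750 and threshold 0, and
 * re-run optimize_graph_df() (also used above) on every changed graph. */
#if 0
static void example_run_heuristic_inliner(void)
{
    inline_functions(750, 0, optimize_graph_df);
}
#endif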
struct inline_functions_pass_t {
    ir_prog_pass_t pass;
    unsigned       maxsize;
    int            inline_threshold;
    opt_ptr        after_inline_opt;
};

/**
 * Wrapper to run inline_functions() as an ir_prog pass.
 */
static int inline_functions_wrapper(ir_prog *irp, void *context)
{
    struct inline_functions_pass_t *pass = context;
    (void) irp;

    inline_functions(pass->maxsize, pass->inline_threshold,
                     pass->after_inline_opt);
    return 0;
}

/* create an ir_prog pass for inline_functions */
ir_prog_pass_t *inline_functions_pass(
    const char *name, unsigned maxsize, int inline_threshold,
    opt_ptr after_inline_opt)
{
    struct inline_functions_pass_t *pass =
        XMALLOCZ(struct inline_functions_pass_t);

    pass->maxsize          = maxsize;
    pass->inline_threshold = inline_threshold;
    pass->after_inline_opt = after_inline_opt;

    return def_prog_pass_constructor(
        &pass->pass, name ? name : "inline_functions",
        inline_functions_wrapper);
}

void firm_init_inline(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
}