2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Optimization of function calls.
23 * @author Michael Beck
31 #include "irgraph_t.h"
34 #include "dbginfo_t.h"
38 #include "iredges_t.h"
40 #include "iroptimize.h"
41 #include "analyze_irg_args.h"
43 #include "raw_bitset.h"
46 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* NOTE(review): this chunk is a sampled/truncated view; the numbers embedded in
 * each line are the original file's line numbers and show gaps between the
 * lines visible here. Comments below only describe what the visible code shows. */
49 * The walker environment for updating function calls.
51 typedef struct env_t {
52 size_t n_calls_SymConst; /**< Number of optimizable calls whose address is a SymConst entity (counted by the collectors). */
54 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
55 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
56 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
57 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
58 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
/* All lists above are intrusive: they are threaded through the nodes' link fields. */
61 /** Ready IRG's are marked in the ready set. */
62 static unsigned *ready_set;
64 /** IRG's that are in progress are marked here. */
65 static unsigned *busy_set;
68 * We misuse the mtp_property_inherited flag as temporary here.
69 * This is ok, as we cannot set or get it anyway using the
70 * get_additional_properties API.
72 #define mtp_temporary mtp_property_inherited
75 * Walker: Collect all calls to const and pure functions
76 * to lists. Collect all Proj(Call) nodes into a Proj list.
 * Lists are threaded intrusively through the nodes' link fields.
78 static void collect_const_and_pure_calls(ir_node *node, void *env)
80 env_t *ctx = (env_t*)env;
/* NOTE(review): the declarations of call/ptr/ent/prop and the is_Call(node)
 * test are on lines not visible in this truncated view. */
89 /* set the link to NULL for all non-const/pure calls */
90 set_irn_link(call, NULL);
91 ptr = get_Call_ptr(call);
92 if (is_SymConst_addr_ent(ptr)) {
/* only direct calls through a SymConst entity address can be classified here */
93 ent = get_SymConst_entity(ptr);
95 prop = get_entity_additional_properties(ent);
96 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
/* neither const nor pure: skip (the early-exit statement is on a missing line) */
98 ++ctx->n_calls_SymConst;
103 /* ok, if we get here we found a call to a const or a pure function */
104 if (prop & mtp_property_pure) {
/* pure call: prepend to the pure list via the link field */
105 set_irn_link(call, ctx->pure_call_list);
106 ctx->pure_call_list = call;
108 if (prop & mtp_property_has_loop) {
/* const but possibly endless-looping: keep on the non-floating list */
109 set_irn_link(call, ctx->nonfloat_const_call_list);
110 ctx->nonfloat_const_call_list = call;
/* otherwise: const call that may be unpinned later */
112 set_irn_link(call, ctx->float_const_call_list);
113 ctx->float_const_call_list = call;
116 } else if (is_Proj(node)) {
118 * Collect all memory and exception Proj's from
121 call = get_Proj_pred(node);
125 /* collect the Proj's in the Proj list */
126 switch (get_Proj_proj(node)) {
128 case pn_Call_X_except:
129 case pn_Call_X_regular:
130 set_irn_link(node, ctx->proj_list);
131 ctx->proj_list = node;
137 } /* collect_const_and_pure_calls */
140 * Fix the list of collected Calls.
142 * @param irg the graph that contained calls to pure functions
 * @param ctx the walker environment carrying the collected call and Proj lists
145 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
147 ir_node *call, *next, *mem, *proj;
150 /* First step: fix all calls by removing their memory input and let
152 * The original memory input is preserved in their link fields. */
153 for (call = ctx->float_const_call_list; call != NULL; call = next) {
154 next = (ir_node*)get_irn_link(call);
155 mem = get_Call_mem(call);
/* remember the old memory input in the link field; the Proj fix below reads it */
157 set_irn_link(call, mem);
158 set_Call_mem(call, get_irg_no_mem(irg));
161 * Unfortunately we cannot simply set the node to 'float'.
162 * There is a reason for that:
164 * - The call might be inside a loop/if that is NOT entered
165 * and calls a endless function. Setting the call to float
166 * would allow to move it out from the loop/if causing this
167 * function be called even if the loop/if is not entered ...
169 * This could be fixed using post-dominators for calls and Pin nodes
170 * but need some more analyzes to ensure that a call that potential
171 * never returns is not executed before some code that generates
172 * observable states...
175 /* finally, this call can float */
176 set_irn_pinned(call, op_pin_state_floats);
177 hook_func_call(irg, call);
180 /* Last step: fix all Proj's */
181 for (proj = ctx->proj_list; proj != NULL; proj = next) {
182 next = (ir_node*)get_irn_link(proj);
183 call = get_Proj_pred(proj);
184 mem = (ir_node*)get_irn_link(call);
186 /* beware of calls in the pure call list */
187 if (!mem || is_Call(mem))
189 assert(get_irn_mode(mem) == mode_M);
191 switch (get_Proj_proj(proj)) {
193 /* in dead code there might be cycles where proj == mem */
198 case pn_Call_X_except:
/* exception flow from a const call can no longer happen */
200 exchange(proj, new_r_Bad(irg, mode_X));
202 case pn_Call_X_regular: {
203 ir_node *block = get_nodes_block(call);
/* regular control flow degenerates to a plain jump */
205 exchange(proj, new_r_Jmp(block));
214 /* ... including exception edges */
215 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
216 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
218 } /* fix_const_call_list */
221 * Walker: Collect all calls to nothrow functions
222 * to lists. Collect all Proj(Call) nodes into a Proj list.
224 static void collect_nothrow_calls(ir_node *node, void *env)
226 env_t *ctx = (env_t*)env;
/* NOTE(review): declarations of call/ptr/ent/prop and the is_Call(node) test
 * are on lines not visible in this truncated view. */
234 /* set the link to NULL for all non-const/pure calls */
235 set_irn_link(call, NULL);
236 ptr = get_Call_ptr(call);
237 if (is_SymConst_addr_ent(ptr)) {
238 ent = get_SymConst_entity(ptr);
240 prop = get_entity_additional_properties(ent);
241 if ((prop & mtp_property_nothrow) == 0)
/* not nothrow: skip (the early-exit statement is on a missing line) */
243 ++ctx->n_calls_SymConst;
248 /* ok, if we get here we found a call to a nothrow function */
249 set_irn_link(call, ctx->nothrow_call_list);
250 ctx->nothrow_call_list = call;
251 } else if (is_Proj(node)) {
253 * Collect all memory and exception Proj's from
256 call = get_Proj_pred(node);
260 /* collect the Proj's in the Proj list */
261 switch (get_Proj_proj(node)) {
263 case pn_Call_X_except:
264 case pn_Call_X_regular:
265 set_irn_link(node, ctx->proj_list);
266 ctx->proj_list = node;
272 } /* collect_nothrow_calls */
275 * Fix the list of collected nothrow Calls.
277 * @param irg the graph that contained calls to pure functions
278 * @param call_list the list of all call sites of const functions
279 * @param proj_list the list of all memory/exception Proj's of this call sites
281 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
283 ir_node *call, *next, *proj;
286 /* First step: go through the list of calls and mark them. */
287 for (call = call_list; call; call = next) {
288 next = (ir_node*)get_irn_link(call);
290 /* current_ir_graph is in memory anyway, so it's a good marker */
/* FIX(review): "&current_ir_graph" was garbled to the mojibake "¤t_ir_graph"
 * (HTML entity &curren; rendered as '¤'); restored the address-of expression. */
291 set_irn_link(call, &current_ir_graph);
292 hook_func_call(irg, call);
295 /* Second step: Remove all exception Proj's */
296 for (proj = proj_list; proj; proj = next) {
297 next = (ir_node*)get_irn_link(proj);
298 call = get_Proj_pred(proj);
300 /* handle only marked calls */
301 if (get_irn_link(call) != &current_ir_graph)
304 /* kill any exception flow */
305 switch (get_Proj_proj(proj)) {
306 case pn_Call_X_except:
/* nothrow: the exception edge can never be taken */
308 exchange(proj, new_r_Bad(irg, mode_X));
310 case pn_Call_X_regular: {
311 ir_node *block = get_nodes_block(call);
313 exchange(proj, new_r_Jmp(block));
321 /* changes were done ... */
323 /* ... including exception edges */
324 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
325 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
327 } /* fix_nothrow_call_list */
/** Per-graph analysis state, tracked in the file-scope ready_set/busy_set
 *  bitsets, indexed by the graph's index. */
330 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
331 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
332 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
333 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
334 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
/* forward declaration: needed because follow_mem_() and this function recurse mutually */
337 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
340 * Calculate the bigger property of two. Handle the temporary flag right.
342 static mtp_additional_properties max_property(mtp_additional_properties a,
343 mtp_additional_properties b)
345 mtp_additional_properties r;
/* collect the temporary marker from either operand so it survives the merge */
346 mtp_additional_properties t = (a | b) & mtp_temporary;
/* no_property dominates: if either side has no property, the merge has none */
350 if (a == mtp_no_property || b == mtp_no_property)
351 return mtp_no_property;
/* NOTE(review): the lines computing r (the merged result) and the final
 * return are not visible in this truncated view. */
357 * Follow the memory chain starting at node and determine
360 * @return mtp_property_const if only calls of const functions are detected
361 * mtp_property_pure if only Loads and const/pure calls detected
362 * mtp_no_property else
364 static mtp_additional_properties follow_mem_(ir_node *node)
366 mtp_additional_properties mode = mtp_property_const;
367 mtp_additional_properties m;
/* NOTE(review): the loop/iteration structure around the switch below is on
 * lines not visible in this truncated view. */
372 if (mode == mtp_no_property)
373 return mtp_no_property;
/* stop on already-visited nodes; initial mem was pre-marked by the caller */
375 if (irn_visited_else_mark(node))
378 switch (get_irn_opcode(node)) {
380 node = get_Proj_pred(node);
389 /* do a dfs search */
390 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
391 m = follow_mem_(get_irn_n(node, i));
392 mode = max_property(mode, m);
393 if (mode == mtp_no_property)
394 return mtp_no_property;
399 /* Beware volatile Loads are NOT allowed in pure functions. */
400 if (get_Load_volatility(node) == volatility_is_volatile)
401 return mtp_no_property;
/* a non-volatile Load downgrades const to (at best) pure */
402 mode = max_property(mode, mtp_property_pure);
403 node = get_Load_mem(node);
407 /* A call is only tolerable if its either constant or pure. */
408 ptr = get_Call_ptr(node);
409 if (is_SymConst_addr_ent(ptr)) {
410 ir_entity *ent = get_SymConst_entity(ptr);
411 ir_graph *irg = get_entity_irg(ent);
/* no graph available: rely on the entity's declared properties */
414 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
415 mode = max_property(mode, m);
417 /* we have a graph, analyze it. */
418 m = check_const_or_pure_function(irg, /*top=*/0);
419 mode = max_property(mode, m);
/* indirect call (non-SymConst address): give up */
422 return mtp_no_property;
423 node = get_Call_mem(node);
/* any other memory-touching op: cannot be const or pure */
427 return mtp_no_property;
433 * Follow the memory chain starting at node and determine
 * the best property of the already-accumulated mode and the chain.
436 * @return mtp_property_const if only calls of const functions are detected
437 * mtp_property_pure if only Loads and const/pure calls detected
438 * mtp_no_property else
440 static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
442 mtp_additional_properties m = follow_mem_(node);
443 return max_property(mode, m);
447 * Check if a graph represents a const or a pure function.
449 * @param irg the graph to check
450 * @param top if set, this is the top call
452 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
454 ir_node *end, *endbl;
456 ir_entity *entity = get_irg_entity(irg);
457 ir_type *type = get_entity_type(entity);
458 size_t n_params = get_method_n_params(type);
460 mtp_additional_properties may_be_const = mtp_property_const;
461 mtp_additional_properties prop = get_irg_additional_properties(irg);
463 /* libfirm handles aggregate parameters by passing around pointers to
464 * stuff in memory, so if we have compound parameters we are never const */
465 for (i = 0; i < n_params; ++i) {
466 ir_type *param = get_method_param_type(type, i);
467 if (is_compound_type(param)) {
468 prop &= ~mtp_property_const;
469 may_be_const = mtp_no_property;
473 if (prop & mtp_property_const) {
474 /* already marked as a const function */
475 return mtp_property_const;
477 if (prop & mtp_property_pure) {
478 /* already marked as a pure function */
479 return mtp_property_pure;
482 if (IS_IRG_READY(irg)) {
483 /* already checked */
484 return mtp_no_property;
486 if (IS_IRG_BUSY(irg)) {
487 /* We are still evaluating this method.
488 * The function (indirectly) calls itself and thus may not terminate.
490 return mtp_no_property;
/* NOTE(review): the SET_IRG_BUSY(irg) call expected here is on a line not
 * visible in this truncated view. */
494 end = get_irg_end(irg);
495 endbl = get_nodes_block(end);
498 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
499 inc_irg_visited(irg);
500 /* mark the initial mem: recursion of follow_mem() stops here */
501 mark_irn_visited(get_irg_initial_mem(irg));
503 /* visit every Return */
504 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
505 ir_node *node = get_Block_cfgpred(endbl, j);
506 unsigned code = get_irn_opcode(node);
509 /* Bad nodes usually do NOT produce anything, so it's ok */
513 if (code == iro_Return) {
514 mem = get_Return_mem(node);
516 /* Bad nodes usually do NOT produce anything, so it's ok */
520 if (mem != get_irg_initial_mem(irg))
521 prop = max_property(prop, follow_mem(mem, prop));
/* end-block predecessor that is not a Return: */
523 /* Exception found. Cannot be const or pure. */
524 prop = mtp_no_property;
527 if (prop == mtp_no_property)
531 if (prop != mtp_no_property) {
532 /* check, if a keep-alive exists */
533 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
534 ir_node *kept = get_End_keepalive(end, j);
536 if (is_Block(kept)) {
/* a kept block signals an endless loop: cannot be const/pure */
537 prop = mtp_no_property;
541 if (mode_M != get_irn_mode(kept))
544 prop = max_property(prop, follow_mem(kept, prop));
545 if (prop == mtp_no_property)
551 /* Set the property only if we are at top-level. */
552 if (prop != mtp_no_property) {
553 add_irg_additional_properties(irg, prop);
558 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
560 } /* check_const_or_pure_function */
563 * Handle calls to const functions.
 * Walks every graph, collects const/pure calls and their Projs, and
 * rewires them via fix_const_call_lists().
567 static void handle_const_Calls(env_t *ctx)
571 ctx->n_calls_SymConst = 0;
572 ctx->n_calls_Sel = 0;
574 /* all calls of const functions can be transformed */
575 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
576 ir_graph *irg = get_irp_irg(i);
/* reset the per-graph lists before the walk */
578 ctx->float_const_call_list = NULL;
579 ctx->nonfloat_const_call_list = NULL;
580 ctx->pure_call_list = NULL;
581 ctx->proj_list = NULL;
583 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
584 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
586 if (ctx->float_const_call_list != NULL)
587 fix_const_call_lists(irg, ctx);
588 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
590 confirm_irg_properties(irg,
591 IR_GRAPH_PROPERTIES_CONTROL_FLOW
592 | IR_GRAPH_PROPERTY_ONE_RETURN
593 | IR_GRAPH_PROPERTY_MANY_RETURNS);
598 * Handle calls to nothrow functions.
 * Walks every graph, collects nothrow calls and their Projs, and removes
 * the dead exception flow via fix_nothrow_call_list().
602 static void handle_nothrow_Calls(env_t *ctx)
606 ctx->n_calls_SymConst = 0;
607 ctx->n_calls_Sel = 0;
609 /* all calls of const functions can be transformed */
610 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
611 ir_graph *irg = get_irp_irg(i);
613 ctx->nothrow_call_list = NULL;
614 ctx->proj_list = NULL;
616 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
617 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
619 if (ctx->nothrow_call_list)
620 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
621 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
626 * Check, whether a given node represents a return value of
627 * a malloc like function (ie, new heap allocated memory).
629 * @param node the node to check
631 static int is_malloc_call_result(const ir_node *node)
633 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
634 /* Firm style high-level allocation */
/* NOTE(review): the return statements are on lines not visible in this
 * truncated view. */
637 /* TODO: check mtp_malloc */
642 * Update a property depending on a call property.
 * Intersects the two property sets while preserving the temporary marker
 * from either side.
644 static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
646 mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
647 mtp_additional_properties r = orig_prop & call_prop;
/* NOTE(review): the return combining r and t is on a line not visible in
 * this truncated view. */
652 * Check if a node is stored.
 * Inspects all users (out-edges) of n to see whether its value can escape
 * to memory.
654 static bool is_stored(const ir_node *n)
658 foreach_out_edge(n, edge) {
659 const ir_node *succ = get_edge_src_irn(edge);
661 switch (get_irn_opcode(succ)) {
668 if (get_Store_value(succ) == n)
670 /* ok if its only the address input */
679 ptr = get_Call_ptr(succ);
680 if (is_SymConst_addr_ent(ptr)) {
681 ir_entity *ent = get_SymConst_entity(ptr);
684 /* we know the called entity */
685 for (i = get_Call_n_params(succ); i > 0;) {
686 if (get_Call_param(succ, --i) == n) {
687 /* n is the i'th param of the call */
688 if (get_method_param_access(ent, i) & ptr_access_store) {
689 /* n is store in ent */
695 /* unknown call address */
700 /* bad, potential alias */
708 * Check that the return value of an irg is not stored anywhere.
710 * return ~mtp_property_malloc if return values are stored, ~0 else
712 static mtp_additional_properties check_stored_result(ir_graph *irg)
714 ir_node *end_blk = get_irg_end_block(irg);
716 mtp_additional_properties res = ~mtp_no_property;
/* is_stored() walks out-edges, so they must be consistent first */
718 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
720 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
721 ir_node *pred = get_Block_cfgpred(end_blk, i);
724 if (! is_Return(pred))
726 for (j = get_Return_n_ress(pred); j > 0;) {
727 const ir_node *irn = get_Return_res(pred, --j);
729 if (is_stored(irn)) {
730 /* bad, might create an alias */
731 res = ~mtp_property_malloc;
737 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
742 * Check if a graph represents a nothrow or a malloc function.
744 * @param irg the graph to check
745 * @param top if set, this is the top call
747 static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
/* start optimistic: assume both properties and knock them out on evidence */
749 mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
750 ir_node *end_blk = get_irg_end_block(irg);
755 if (IS_IRG_READY(irg)) {
756 /* already checked */
757 return get_irg_additional_properties(irg);
759 if (IS_IRG_BUSY(irg)) {
760 /* we are still evaluate this method. Be optimistic,
761 return the best possible so far but mark the result as temporary. */
762 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
766 ent = get_irg_entity(irg);
767 mtp = get_entity_type(ent);
/* a function without results cannot be malloc-like */
769 if (get_method_n_ress(mtp) <= 0)
770 curr_prop &= ~mtp_property_malloc;
772 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
773 ir_node *pred = get_Block_cfgpred(end_blk, i);
775 if (is_Return(pred)) {
776 if (curr_prop & mtp_property_malloc) {
779 /* check, if malloc is called here */
780 for (j = get_Return_n_ress(pred); j > 0;) {
781 ir_node *res = get_Return_res(pred, --j);
783 /* skip Confirms and Casts */
784 res = skip_HighLevel_ops(res);
787 res = get_Proj_pred(res);
788 if (is_malloc_call_result(res)) {
789 /* ok, this is a malloc */
790 } else if (is_Call(res)) {
791 ir_node *ptr = get_Call_ptr(res);
793 if (is_SymConst_addr_ent(ptr)) {
795 ir_entity *ent = get_SymConst_entity(ptr);
796 ir_graph *callee = get_entity_irg(ent);
799 /* A self-recursive call. The property did not depend on this call. */
800 } else if (callee != NULL) {
801 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
802 curr_prop = update_property(curr_prop, prop);
/* no graph for the callee: fall back to its declared properties */
804 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
808 curr_prop &= ~mtp_property_malloc;
811 /* unknown return value */
812 curr_prop &= ~mtp_property_malloc;
816 } else if (curr_prop & mtp_property_nothrow) {
817 /* exception flow detected */
818 pred = skip_Proj(pred);
821 ir_node *ptr = get_Call_ptr(pred);
823 if (is_SymConst_addr_ent(ptr)) {
825 ir_entity *ent = get_SymConst_entity(ptr);
826 ir_graph *callee = get_entity_irg(ent);
829 /* A self-recursive call. The property did not depend on this call. */
830 } else if (callee != NULL) {
831 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
832 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
833 curr_prop = update_property(curr_prop, prop);
835 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
836 curr_prop &= ~mtp_property_nothrow;
840 curr_prop &= ~mtp_property_nothrow;
843 /* real exception flow possible. */
844 curr_prop &= ~mtp_property_nothrow;
847 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
848 /* no need to search further */
853 if (curr_prop & mtp_property_malloc) {
855 * Note that the malloc property means not only return newly allocated
856 * memory, but also that this memory is ALIAS FREE.
857 * To ensure that, we do NOT allow that the returned memory is somewhere
860 curr_prop &= check_stored_result(irg);
863 if (curr_prop != mtp_no_property) {
864 if (top || (curr_prop & mtp_temporary) == 0) {
865 /* We use the temporary flag here to mark an optimistic result.
866 Set the property only if we are sure that it does NOT base on
867 temporary results OR if we are at top-level. */
868 add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
876 } /* check_nothrow_or_malloc */
879 * When a function was detected as "const", it might be moved out of loops.
880 * This might be dangerous if the graph can contain endless loops.
 * Marks such graphs with mtp_property_has_loop so their calls stay pinned.
882 static void check_for_possible_endless_loops(ir_graph *irg)
885 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
887 root_loop = get_irg_loop(irg);
888 if (root_loop->flags & loop_outer_loop)
889 add_irg_additional_properties(irg, mtp_property_has_loop);
891 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
895 * optimize function calls by handling const functions
 * Driver: detects nothrow/malloc properties first (and removes dead
 * exception edges), then detects const/pure properties and unpins the
 * corresponding calls.
897 void optimize_funccalls(void)
902 size_t num_const = 0;
904 size_t num_nothrow = 0;
905 size_t num_malloc = 0;
907 /* prepare: mark all graphs as not analyzed */
908 last_idx = get_irp_last_idx();
909 ready_set = rbitset_malloc(last_idx);
910 busy_set = rbitset_malloc(last_idx);
912 /* first step: detect, which functions are nothrow or malloc */
913 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
914 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
915 ir_graph *irg = get_irp_irg(i);
916 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
918 if (prop & mtp_property_nothrow) {
920 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
921 } else if (prop & mtp_property_malloc) {
923 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
927 /* second step: remove exception edges: this must be done before the
928 detection of const and pure functions take place. */
929 handle_nothrow_Calls(&ctx);
930 DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
931 DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
932 ctx.n_calls_SymConst, ctx.n_calls_Sel));
/* reuse the bitsets for the second analysis */
934 rbitset_clear_all(ready_set, last_idx);
935 rbitset_clear_all(busy_set, last_idx);
937 /* third step: detect, which functions are const or pure */
938 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
939 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
940 ir_graph *irg = get_irp_irg(i);
941 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
943 if (prop & mtp_property_const) {
945 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
946 check_for_possible_endless_loops(irg);
947 } else if (prop & mtp_property_pure) {
949 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
953 handle_const_Calls(&ctx);
954 DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n",
955 num_const, num_pure));
/* FIX(review): use %zu for the size_t counters; the previous %u specifiers
 * mismatched the argument types (undefined behavior) and were inconsistent
 * with the identical message for nothrow functions above. */
956 DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to const functions.\n",
957 ctx.n_calls_SymConst, ctx.n_calls_Sel));
963 /* initialize the funccall optimization */
964 void firm_init_funccalls(void)
966 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls")
969 /* Creates an ir_prog pass for optimize_funccalls. */
970 ir_prog_pass_t *optimize_funccalls_pass(const char *name)
/* fall back to the default pass name "funccall" when none is given */
972 return def_prog_pass(name ? name : "funccall", optimize_funccalls);