2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Optimization of function calls.
23 * @author Michael Beck
31 #include "irgraph_t.h"
34 #include "dbginfo_t.h"
38 #include "iredges_t.h"
40 #include "iroptimize.h"
41 #include "analyze_irg_args.h"
43 #include "raw_bitset.h"
46 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
49 * The walker environment for updating function calls.
/* Walker environment: per-graph worklists of call sites, threaded through
 * the nodes' link fields, plus counters for statistics output. */
51 typedef struct env_t {
/* number of optimized calls whose address is a SymConst */
52 size_t n_calls_SymConst;
54 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
55 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
56 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
57 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
58 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
/* NOTE(review): an n_calls_Sel counter is used later in this file but its
 * declaration is not visible in this excerpt — presumably on an omitted line. */
61 /** Ready IRG's are marked in the ready set. */
62 static unsigned *ready_set;
64 /** IRG's that are in progress are marked here. */
65 static unsigned *busy_set;
/* Both bitsets are indexed by get_irg_idx(); together they implement the
 * visited/on-stack states of the depth-first property analysis below. */
68 * We misuse the mtp_property_inherited flag as temporary here.
69 * This is ok, as we cannot set or get it anyway using the
70 * get_additional_properties API.
72 #define mtp_temporary mtp_property_inherited
75 * Walker: Collect all calls to const and pure functions
76 * to lists. Collect all Proj(Call) nodes into a Proj list.
/* Walker callback: sorts every Call to a const/pure function into one of the
 * environment's worklists (pure / floating const / non-floating const) and
 * records the memory and control-flow Projs hanging off Calls.  The lists are
 * singly linked through the nodes' link fields. */
78 static void collect_const_and_pure_calls(ir_node *node, void *env)
80 env_t *ctx = (env_t*)env;
84 unsigned and_prop, or_prop, prop;
89 /* set the link to NULL for all non-const/pure calls */
90 set_irn_link(call, NULL);
91 ptr = get_Call_ptr(call);
/* direct call through a global entity: read its declared properties */
93 ent = get_Global_entity(ptr);
95 prop = get_entity_additional_properties(ent);
96 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
98 ++ctx->n_calls_SymConst;
99 } else if (get_opt_closed_world() &&
101 get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
102 /* If all possible callees are const functions, we can remove the memory edge. */
103 size_t i, n_callees = get_Call_n_callees(call);
104 if (n_callees == 0) {
105 /* This is kind of strange: dying code or a Call that will raise an exception
106 when executed as there is no implementation to call. So better not
111 /* note that const functions are a subset of pure ones */
112 and_prop = mtp_property_const | mtp_property_pure;
/* intersect the properties over all possible callees */
114 for (i = 0; i < n_callees; ++i) {
115 ent = get_Call_callee(call, i);
116 if (ent == unknown_entity) {
117 /* we don't know which entity is called here */
120 prop = get_entity_additional_properties(ent);
123 if (and_prop == mtp_no_property)
/* has_loop is or-combined: one looping callee taints the call site */
126 prop = and_prop | (or_prop & mtp_property_has_loop);
131 /* ok, if we get here we found a call to a const or a pure function */
132 if (prop & mtp_property_pure) {
133 set_irn_link(call, ctx->pure_call_list)
134 ctx->pure_call_list = call;
/* const calls into (possibly) endless loops must stay pinned, so they
 * go onto a separate, non-floating list */
136 if (prop & mtp_property_has_loop) {
137 set_irn_link(call, ctx->nonfloat_const_call_list);
138 ctx->nonfloat_const_call_list = call;
140 set_irn_link(call, ctx->float_const_call_list);
141 ctx->float_const_call_list = call;
144 } else if (is_Proj(node)) {
146 * Collect all memory and exception Proj's from
149 call = get_Proj_pred(node);
153 /* collect the Proj's in the Proj list */
154 switch (get_Proj_proj(node)) {
156 case pn_Call_X_except:
157 case pn_Call_X_regular:
158 set_irn_link(node, ctx->proj_list);
159 ctx->proj_list = node;
165 } /* collect_const_and_pure_calls */
168 * Fix the list of collected Calls.
170 * @param irg the graph that contained calls to pure functions
/* Rewrites the collected const calls: detaches them from the memory chain
 * (stashing the old memory input in the link field), lets them float, and
 * then reroutes/kills the dependent memory and exception Projs. */
173 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
175 ir_node *call, *next, *mem, *proj;
178 /* First step: fix all calls by removing their memory input and let
180 * The original memory input is preserved in their link fields. */
181 for (call = ctx->float_const_call_list; call != NULL; call = next) {
182 next = (ir_node*)get_irn_link(call);
183 mem = get_Call_mem(call);
185 set_irn_link(call, mem);
186 set_Call_mem(call, get_irg_no_mem(irg));
189 * Unfortunately we cannot simply set the node to 'float'.
190 * There is a reason for that:
192 * - The call might be inside a loop/if that is NOT entered
193 * and calls an endless function. Setting the call to float
194 * would allow to move it out from the loop/if causing this
195 * function be called even if the loop/if is not entered ...
197 * This could be fixed using post-dominators for calls and Pin nodes
198 * but needs some more analysis to ensure that a call that potentially
199 * never returns is not executed before some code that generates
200 * observable states...
203 /* finally, this call can float */
204 set_irn_pinned(call, op_pin_state_floats);
205 hook_func_call(irg, call);
208 /* Last step: fix all Proj's */
209 for (proj = ctx->proj_list; proj != NULL; proj = next) {
210 next = (ir_node*)get_irn_link(proj);
211 call = get_Proj_pred(proj);
/* the call's link holds the memory input saved in step one */
212 mem = (ir_node*)get_irn_link(call);
214 /* beware of calls in the pure call list */
215 if (!mem || is_Call(mem))
217 assert(get_irn_mode(mem) == mode_M);
219 switch (get_Proj_proj(proj)) {
221 /* in dead code there might be cycles where proj == mem */
/* a const function cannot throw: the exception edge dies ... */
226 case pn_Call_X_except:
228 exchange(proj, get_irg_bad(irg));
/* ... and the regular control flow becomes an unconditional Jmp */
230 case pn_Call_X_regular: {
231 ir_node *block = get_nodes_block(call);
233 exchange(proj, new_r_Jmp(block));
241 /* changes were done ... */
242 set_irg_outs_inconsistent(irg);
243 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
246 /* ... including exception edges */
247 set_irg_doms_inconsistent(irg);
249 } /* fix_const_call_list */
252 * Walker: Collect all calls to nothrow functions
253 * to lists. Collect all Proj(Call) nodes into a Proj list.
/* Walker callback: collects all Calls whose (set of possible) callee(s) is
 * provably nothrow onto ctx->nothrow_call_list, and gathers the memory and
 * control-flow Projs of Calls into ctx->proj_list for later fixup. */
255 static void collect_nothrow_calls(ir_node *node, void *env)
257 env_t *ctx = (env_t*)env;
265 /* set the link to NULL for all non-const/pure calls */
266 set_irn_link(call, NULL);
267 ptr = get_Call_ptr(call);
268 if (is_Global(ptr)) {
/* direct call: check the entity's declared nothrow property */
269 ent = get_Global_entity(ptr);
271 prop = get_entity_additional_properties(ent);
272 if ((prop & mtp_property_nothrow) == 0)
274 ++ctx->n_calls_SymConst;
275 } else if (get_opt_closed_world() &&
277 get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
278 /* If all possible callees are nothrow functions, we can remove the exception edge. */
279 size_t i, n_callees = get_Call_n_callees(call);
280 if (n_callees == 0) {
281 /* This is kind of strange: dying code or a Call that will raise an exception
282 when executed as there is no implementation to call. So better not
/* intersect nothrow over all possible callees */
288 prop = mtp_property_nothrow;
289 for (i = 0; i < n_callees; ++i) {
290 ent = get_Call_callee(call, i);
291 if (ent == unknown_entity) {
292 /* we don't know which entity is called here */
295 prop &= get_entity_additional_properties(ent);
296 if (prop == mtp_no_property)
303 /* ok, if we get here we found a call to a nothrow function */
304 set_irn_link(call, ctx->nothrow_call_list);
305 ctx->nothrow_call_list = call;
306 } else if (is_Proj(node)) {
308 * Collect all memory and exception Proj's from
311 call = get_Proj_pred(node);
315 /* collect the Proj's in the Proj list */
316 switch (get_Proj_proj(node)) {
318 case pn_Call_X_except:
319 case pn_Call_X_regular:
320 set_irn_link(node, ctx->proj_list);
321 ctx->proj_list = node;
327 } /* collect_nothrow_calls */
330 * Fix the list of collected nothrow Calls.
332 * @param irg the graph that contained calls to pure functions
333 * @param call_list the list of all call sites of const functions
334 * @param proj_list the list of all memory/exception Proj's of this call sites
/* Removes the exception flow of the collected nothrow calls: marks each call
 * (via its link field) and then replaces its X_except Proj with Bad and its
 * X_regular Proj with a plain Jmp.
 *
 * @param irg       the graph that contained calls to pure functions
 * @param call_list the list of all call sites of const functions
 * @param proj_list the list of all memory/exception Proj's of this call sites
 */
336 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
338 ir_node *call, *next, *proj;
341 /* First step: go through the list of calls and mark them. */
342 for (call = call_list; call; call = next) {
343 next = (ir_node*)get_irn_link(call);
345 /* current_ir_graph is in memory anyway, so it's a good marker */
346 set_irn_link(call, &current_ir_graph);
347 hook_func_call(irg, call);
350 /* Second step: Remove all exception Proj's */
351 for (proj = proj_list; proj; proj = next) {
352 next = (ir_node*)get_irn_link(proj);
353 call = get_Proj_pred(proj);
355 /* handle only marked calls */
356 if (get_irn_link(call) != &current_ir_graph)
359 /* kill any exception flow */
360 switch (get_Proj_proj(proj)) {
361 case pn_Call_X_except:
363 exchange(proj, get_irg_bad(irg));
365 case pn_Call_X_regular: {
366 ir_node *block = get_nodes_block(call);
368 exchange(proj, new_r_Jmp(block));
376 /* changes were done ... */
377 set_irg_outs_inconsistent(irg);
378 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
381 /* ... including exception edges */
382 set_irg_doms_inconsistent(irg);
384 } /* fix_nothrow_call_list */
/* Convenience accessors for the ready/busy bitsets, keyed by the graph index.
 * "ready" = analysis finished; "busy" = analysis in progress (cycle guard). */
387 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
388 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
389 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
390 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
391 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
/* forward declaration: mutual recursion with follow_mem_() */
394 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
397 * Calculate the bigger property of two. Handle the temporary flag right.
/* Combines two property values, keeping the weaker ("bigger") one while
 * propagating the mtp_temporary marker.  mtp_no_property dominates. */
399 static mtp_additional_properties max_property(mtp_additional_properties a,
400 mtp_additional_properties b)
402 mtp_additional_properties r;
/* remember whether either operand was only an optimistic (temporary) result */
403 mtp_additional_properties t = (a | b) & mtp_temporary;
407 if (a == mtp_no_property || b == mtp_no_property)
408 return mtp_no_property;
/* NOTE(review): the remainder of this function (computing r and re-attaching
 * t) is not visible in this excerpt. */
414 * Follow the memory chain starting at node and determine
417 * @return mtp_property_const if only calls of const functions are detected
418 * mtp_property_pure if only Loads and const/pure calls detected
419 * mtp_no_property else
/* Walks the memory chain backwards from `node` and classifies what it sees:
 * starts optimistically at const, degrades to pure on (non-volatile) Loads
 * and to no_property on anything with observable effects.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else */
421 static mtp_additional_properties follow_mem_(ir_node *node)
/* `mode` accumulates the weakest property seen so far on this chain */
423 mtp_additional_properties mode = mtp_property_const;
424 mtp_additional_properties m;
429 if (mode == mtp_no_property)
430 return mtp_no_property;
/* stop at already-visited nodes: breaks cycles, and the initial memory
 * was pre-marked by the caller */
432 if (irn_visited_else_mark(node))
435 switch (get_irn_opcode(node)) {
437 node = get_Proj_pred(node);
446 /* do a dfs search */
447 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
448 m = follow_mem_(get_irn_n(node, i));
449 mode = max_property(mode, m);
450 if (mode == mtp_no_property)
451 return mtp_no_property;
456 /* Beware volatile Loads are NOT allowed in pure functions. */
457 if (get_Load_volatility(node) == volatility_is_volatile)
458 return mtp_no_property;
459 mode = max_property(mode, mtp_property_pure);
460 node = get_Load_mem(node);
464 /* A call is only tolerable if its either constant or pure. */
465 ptr = get_Call_ptr(node);
466 if (is_SymConst_addr_ent(ptr)) {
467 ir_entity *ent = get_SymConst_entity(ptr);
468 ir_graph *irg = get_entity_irg(ent);
470 if (irg == get_irn_irg(node)) {
471 /* A self-recursive call. The property did not depend on this call. */
472 } else if (irg == NULL) {
/* external function: trust its declared properties */
473 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
474 mode = max_property(mode, m);
475 } else if (irg != NULL) {
476 /* we have a graph, analyze it. */
477 m = check_const_or_pure_function(irg, /*top=*/0);
478 mode = max_property(mode, m);
/* indirect call through unknown address: give up */
481 return mtp_no_property;
482 node = get_Call_mem(node);
/* any other memory-producing node has observable effects */
486 return mtp_no_property;
492 * Follow the memory chain starting at node and determine
495 * @return mtp_property_const if only calls of const functions are detected
496 * mtp_property_pure if only Loads and const/pure calls detected
497 * mtp_no_property else
/* Wrapper around follow_mem_() that folds the chain's classification into an
 * already-accumulated property value.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else */
499 static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
501 mtp_additional_properties m = follow_mem_(node);
502 return max_property(mode, m);
506 * Check if a graph represents a const or a pure function.
508 * @param irg the graph to check
509 * @param top if set, this is the top call
/* Determines whether `irg` is a const or a pure function by following the
 * memory chains of all Returns (and mode_M keep-alives).  Uses the busy set
 * to return an optimistic, mtp_temporary-tagged answer on recursion cycles;
 * only commits the property to the graph when it is certain (or top-level).
 *
 * @param irg the graph to check
 * @param top if set, this is the top call */
511 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
513 ir_node *end, *endbl;
515 mtp_additional_properties prop = get_irg_additional_properties(irg);
517 if (prop & mtp_property_const) {
518 /* already marked as a const function */
519 return mtp_property_const;
521 if (prop & mtp_property_pure) {
522 /* already marked as a pure function */
523 return mtp_property_pure;
526 if (IS_IRG_READY(irg)) {
527 /* already checked */
528 return mtp_no_property;
530 if (IS_IRG_BUSY(irg)) {
531 /* we are still evaluate this method. Be optimistic,
532 return the best possible so far but mark the result as temporary. */
533 return mtp_temporary | mtp_property_const;
537 end = get_irg_end(irg);
538 endbl = get_nodes_block(end);
539 prop = mtp_property_const;
541 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
542 inc_irg_visited(irg);
543 /* mark the initial mem: recursion of follow_mem() stops here */
544 mark_irn_visited(get_irg_initial_mem(irg));
546 /* visit every Return */
547 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
548 ir_node *node = get_Block_cfgpred(endbl, j);
549 unsigned code = get_irn_opcode(node);
552 /* Bad nodes usually do NOT produce anything, so it's ok */
556 if (code == iro_Return) {
557 mem = get_Return_mem(node);
559 /* Bad nodes usually do NOT produce anything, so it's ok */
/* unchanged memory means no observable effects on this path */
563 if (mem != get_irg_initial_mem(irg))
564 prop = max_property(prop, follow_mem(mem, prop));
566 /* Exception found. Cannot be const or pure. */
567 prop = mtp_no_property;
570 if (prop == mtp_no_property)
574 if (prop != mtp_no_property) {
575 /* check, if a keep-alive exists */
576 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
577 ir_node *kept = get_End_keepalive(end, j);
/* a kept Block means an endless loop: not const/pure */
579 if (is_Block(kept)) {
580 prop = mtp_no_property;
584 if (mode_M != get_irn_mode(kept))
/* kept memory must also stay effect-free */
587 prop = max_property(prop, follow_mem(kept, prop));
588 if (prop == mtp_no_property)
593 if (prop != mtp_no_property) {
594 if (top || (prop & mtp_temporary) == 0) {
595 /* We use the temporary flag here to mark optimistic result.
596 Set the property only if we are sure that it does NOT base on
597 temporary results OR if we are at top-level. */
598 add_irg_additional_properties(irg, prop & ~mtp_temporary);
605 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
607 } /* check_const_or_pure_function */
610 * Handle calls to const functions.
/* Driver: for every graph, collects all const/pure call sites and their Projs
 * and rewires them via fix_const_call_lists().  Uses the nodes' link fields,
 * so the IRN_LINK resource is reserved per graph. */
614 static void handle_const_Calls(env_t *ctx)
618 ctx->n_calls_SymConst = 0;
619 ctx->n_calls_Sel = 0;
621 /* all calls of const functions can be transformed */
622 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
623 ir_graph *irg = get_irp_irg(i);
/* reset the per-graph worklists */
625 ctx->float_const_call_list = NULL;
626 ctx->nonfloat_const_call_list = NULL;
627 ctx->pure_call_list = NULL;
628 ctx->proj_list = NULL;
630 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
631 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
633 if (ctx->float_const_call_list != NULL)
634 fix_const_call_lists(irg, ctx);
635 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
637 } /* handle_const_Calls */
640 * Handle calls to nothrow functions.
/* Driver: for every graph, collects all nothrow call sites and their Projs
 * and removes their exception flow via fix_nothrow_call_list(). */
644 static void handle_nothrow_Calls(env_t *ctx)
648 ctx->n_calls_SymConst = 0;
649 ctx->n_calls_Sel = 0;
651 /* all calls of nothrow functions can be transformed */
652 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
653 ir_graph *irg = get_irp_irg(i);
655 ctx->nothrow_call_list = NULL;
656 ctx->proj_list = NULL;
658 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
659 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
661 if (ctx->nothrow_call_list)
662 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
663 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
668 * Check, whether a given node represents a return value of
669 * a malloc like function (ie, new heap allocated memory).
671 * @param node the node to check
/* Check, whether a given node represents a return value of a malloc-like
 * function (i.e. newly heap-allocated memory).
 *
 * @param node the node to check */
673 static int is_malloc_call_result(const ir_node *node)
675 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
676 /* Firm style high-level allocation */
/* NOTE(review): the return statements of this function are not visible in
 * this excerpt; presumably 1 here and 0 in the default case — confirm. */
679 /* TODO: check mtp_malloc */
684 * Update a property depending on a call property.
/* Intersects an accumulated property with a call's property while keeping
 * the mtp_temporary marker if either side carried it. */
686 static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
688 mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
689 mtp_additional_properties r = orig_prop & call_prop;
/* NOTE(review): the return (presumably r | t) is not visible in this excerpt. */
694 * Check if a node is stored.
/* Checks via out-edges whether the value `n` may be stored to memory (which
 * would create an alias for a malloc result).  Uses parameter access info
 * for direct calls; any unknown use counts as a potential store. */
696 static int is_stored(const ir_node *n)
698 const ir_edge_t *edge;
701 foreach_out_edge(n, edge) {
702 const ir_node *succ = get_edge_src_irn(edge);
704 switch (get_irn_opcode(succ)) {
/* stored only if n is the value input, not the address */
711 if (get_Store_value(succ) == n)
713 /* ok if its only the address input */
722 ptr = get_Call_ptr(succ);
723 if (is_Global(ptr)) {
724 ir_entity *ent = get_Global_entity(ptr);
727 /* we know the called entity */
728 for (i = get_Call_n_params(succ); i > 0;) {
729 if (get_Call_param(succ, --i) == n) {
730 /* n is the i'th param of the call */
731 if (get_method_param_access(ent, i) & ptr_access_store) {
732 /* n is store in ent */
738 /* unknown call address */
743 /* bad, potential alias */
751 * Check that the return value of an irg is not stored anywhere.
753 * return ~mtp_property_malloc if return values are stored, ~0 else
/* Check that the return values of an irg are not stored anywhere (required
 * for the alias-free guarantee of mtp_property_malloc).
 *
 * @return ~mtp_property_malloc if return values are stored, ~0 else */
755 static mtp_additional_properties check_stored_result(ir_graph *irg)
757 ir_node *end_blk = get_irg_end_block(irg);
759 mtp_additional_properties res = ~mtp_no_property;
/* is_stored() needs out-edges; remember whether they were already active */
760 int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
762 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
763 ir_node *pred = get_Block_cfgpred(end_blk, i);
766 if (! is_Return(pred))
768 for (j = get_Return_n_ress(pred); j > 0;) {
769 const ir_node *irn = get_Return_res(pred, --j);
771 if (is_stored(irn)) {
772 /* bad, might create an alias */
773 res = ~mtp_property_malloc;
780 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
785 * Check if a graph represents a nothrow or a malloc function.
787 * @param irg the graph to check
788 * @param top if set, this is the top call
/* Determines whether `irg` is a nothrow and/or a malloc function.  Starts
 * with both properties assumed and strips them as evidence appears: a Return
 * strips malloc unless every result is freshly allocated, exception flow
 * strips nothrow unless every possible callee is nothrow.  Like
 * check_const_or_pure_function() this is optimistic over recursion cycles
 * (busy set + mtp_temporary).
 *
 * @param irg the graph to check
 * @param top if set, this is the top call */
790 static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
792 mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
793 ir_node *end_blk = get_irg_end_block(irg);
798 if (IS_IRG_READY(irg)) {
799 /* already checked */
800 return get_irg_additional_properties(irg);
802 if (IS_IRG_BUSY(irg)) {
803 /* we are still evaluate this method. Be optimistic,
804 return the best possible so far but mark the result as temporary. */
805 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
809 ent = get_irg_entity(irg);
810 mtp = get_entity_type(ent);
/* a function without results cannot be a malloc-like function */
812 if (get_method_n_ress(mtp) <= 0)
813 curr_prop &= ~mtp_property_malloc;
/* inspect every predecessor of the end block: Returns and exception flow */
815 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
816 ir_node *pred = get_Block_cfgpred(end_blk, i);
818 if (is_Return(pred)) {
819 if (curr_prop & mtp_property_malloc) {
822 /* check, if malloc is called here */
823 for (j = get_Return_n_ress(pred); j > 0;) {
824 ir_node *res = get_Return_res(pred, --j);
826 /* skip Confirms and Casts */
827 res = skip_HighLevel_ops(res);
830 res = get_Proj_pred(res);
831 if (is_malloc_call_result(res)) {
832 /* ok, this is a malloc */
833 } else if (is_Call(res)) {
834 ir_node *ptr = get_Call_ptr(res);
836 if (is_Global(ptr)) {
838 ir_entity *ent = get_Global_entity(ptr);
839 ir_graph *callee = get_entity_irg(ent);
842 /* A self-recursive call. The property did not depend on this call. */
843 } else if (callee != NULL) {
/* the result came from another analyzable function: recurse */
844 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
845 curr_prop = update_property(curr_prop, prop);
847 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
849 } else if (get_opt_closed_world() &&
851 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
852 /* check if all possible callees are malloc functions. */
853 size_t i, n_callees = get_Call_n_callees(res);
854 if (n_callees == 0) {
855 /* This is kind of strange: dying code or a Call that will raise an exception
856 when executed as there is no implementation to call. So better not
858 curr_prop &= ~mtp_property_malloc;
862 for (i = 0; i < n_callees; ++i) {
863 ir_entity *ent = get_Call_callee(res, i);
864 if (ent == unknown_entity) {
865 /* we don't know which entity is called here */
866 curr_prop &= ~mtp_property_malloc;
869 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
870 curr_prop &= ~mtp_property_malloc;
874 /* if we pass the for cycle, malloc is still ok */
877 curr_prop &= ~mtp_property_malloc;
880 /* unknown return value */
881 curr_prop &= ~mtp_property_malloc;
885 } else if (curr_prop & mtp_property_nothrow) {
886 /* exception flow detected */
887 pred = skip_Proj(pred);
890 ir_node *ptr = get_Call_ptr(pred);
892 if (is_Global(ptr)) {
894 ir_entity *ent = get_Global_entity(ptr);
895 ir_graph *callee = get_entity_irg(ent);
898 /* A self-recursive call. The property did not depend on this call. */
899 } else if (callee != NULL) {
900 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
901 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
902 curr_prop = update_property(curr_prop, prop);
904 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
905 curr_prop &= ~mtp_property_nothrow;
907 } else if (get_opt_closed_world() &&
909 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
910 /* check if all possible callees are nothrow functions. */
911 size_t i, n_callees = get_Call_n_callees(pred);
912 if (n_callees == 0) {
913 /* This is kind of strange: dying code or a Call that will raise an exception
914 when executed as there is no implementation to call. So better not
916 curr_prop &= ~mtp_property_nothrow;
920 for (i = 0; i < n_callees; ++i) {
921 ir_entity *ent = get_Call_callee(pred, i);
922 if (ent == unknown_entity) {
923 /* we don't know which entity is called here */
924 curr_prop &= ~mtp_property_nothrow;
927 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
928 curr_prop &= ~mtp_property_nothrow;
932 /* if we pass the for cycle, nothrow is still ok */
935 curr_prop &= ~mtp_property_nothrow;
938 /* real exception flow possible. */
939 curr_prop &= ~mtp_property_nothrow;
942 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
943 /* no need to search further */
948 if (curr_prop & mtp_property_malloc) {
950 * Note that the malloc property means not only return newly allocated
951 * memory, but also that this memory is ALIAS FREE.
952 * To ensure that, we do NOT allow that the returned memory is somewhere
955 curr_prop &= check_stored_result(irg);
958 if (curr_prop != mtp_no_property) {
959 if (top || (curr_prop & mtp_temporary) == 0) {
960 /* We use the temporary flag here to mark an optimistic result.
961 Set the property only if we are sure that it does NOT base on
962 temporary results OR if we are at top-level. */
963 add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
971 } /* check_nothrow_or_malloc */
974 * When a function was detected as "const", it might be moved out of loops.
975 * This might be dangerous if the graph can contain endless loops.
/* When a function was detected as "const", it might be moved out of loops.
 * This might be dangerous if the graph can contain endless loops; mark such
 * graphs with mtp_property_has_loop so their call sites stay pinned.
 * NOTE(review): relies on loop info being available on irg — presumably
 * computed on a line not visible in this excerpt; confirm. */
977 static void check_for_possible_endless_loops(ir_graph *irg)
982 root_loop = get_irg_loop(irg);
983 if (root_loop->flags & loop_outer_loop)
984 add_irg_additional_properties(irg, mtp_property_has_loop);
988 * optimize function calls by handling const functions
/* Entry point of the pass: (1) detect nothrow/malloc functions and strip
 * exception edges, then (2) detect const/pure functions and detach their
 * calls from the memory chain.  Order matters: removing exception edges
 * first enables more const/pure detection. */
990 void optimize_funccalls(void)
995 size_t num_const = 0;
997 size_t num_nothrow = 0;
998 size_t num_malloc = 0;
1000 /* prepare: mark all graphs as not analyzed */
1001 last_idx = get_irp_last_idx();
1002 ready_set = rbitset_malloc(last_idx);
1003 busy_set = rbitset_malloc(last_idx);
1005 /* first step: detect, which functions are nothrow or malloc */
1006 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1007 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1008 ir_graph *irg = get_irp_irg(i);
1009 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1011 if (prop & mtp_property_nothrow) {
1013 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1014 } else if (prop & mtp_property_malloc) {
1016 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1020 /* second step: remove exception edges: this must be done before the
1021 detection of const and pure functions take place. */
1022 handle_nothrow_Calls(&ctx);
1023 DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
1024 DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
1025 ctx.n_calls_SymConst, ctx.n_calls_Sel));
/* reuse the bitsets for the second analysis */
1027 rbitset_clear_all(ready_set, last_idx);
1028 rbitset_clear_all(busy_set, last_idx);
1030 /* third step: detect, which functions are const or pure */
1031 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1032 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1033 ir_graph *irg = get_irp_irg(i);
1034 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1036 if (prop & mtp_property_const) {
1038 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1039 check_for_possible_endless_loops(irg);
1040 } else if (prop & mtp_property_pure) {
1042 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1046 handle_const_Calls(&ctx);
1047 DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n", num_const, num_pure));
1048 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1049 ctx.n_calls_SymConst, ctx.n_calls_Sel));
/* NOTE(review): freeing ready_set/busy_set is not visible in this excerpt —
 * presumably done on omitted lines; verify against the full file. */
1055 /* initialize the funccall optimization: registers the debug module only */
1056 void firm_init_funccalls(void)
1058 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1061 /* Creates an ir_prog pass for optimize_funccalls; falls back to the
 * default name "funccall" when none is given. */
1062 ir_prog_pass_t *optimize_funccalls_pass(const char *name)
1064 return def_prog_pass(name ? name : "funccall", optimize_funccalls);