/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Optimization of function calls.
 * @author  Michael Beck
 */
#include <adt/raw_bitset.h>

#include "funccall_t.h"

#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iredges_t.h"
#include "analyze_irg_args.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)

/**
 * The walker environment for updating function calls.
 */
typedef struct _env_t {
	unsigned n_calls_SymConst;
	unsigned n_calls_Sel;
	ir_node  *float_const_call_list;    /**< The list of all floating const function calls that will be changed. */
	ir_node  *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
	ir_node  *pure_call_list;           /**< The list of all pure function calls that will be changed. */
	ir_node  *nothrow_call_list;        /**< The list of all nothrow function calls that will be changed. */
	ir_node  *proj_list;                /**< The list of all potential Proj nodes that must be fixed. */
} env_t;
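
/*
 * Note: the walkers below thread the collected Call nodes into intrusive,
 * singly linked lists through the nodes' link fields (set_irn_link()), so
 * no extra container is needed; a NULL link marks the end of a list.
 */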

/** If non-null, evaluates entities for being a heap alloc. */
static check_alloc_entity_func is_alloc_entity = NULL;

/** Ready IRG's are marked in the ready set. */
static unsigned *ready_set;

/** IRG's that are in progress are marked here. */
static unsigned *busy_set;

/*
 * We misuse the mtp_property_inherited flag as temporary here.
 * This is ok, as we cannot set or get it anyway using the
 * get_additional_properties API.
 */
#define mtp_temporary  mtp_property_inherited

/**
 * Walker: Collect all calls to const and pure functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 */
static void collect_const_and_pure_calls(ir_node *node, void *env) {
	env_t     *ctx = env;
	ir_node   *call, *ptr;
	ir_entity *ent;
	unsigned  and_prop, or_prop, prop;

	if (is_Call(node)) {
		call = node;

		/* set the link to NULL for all non-const/pure calls */
		set_irn_link(call, NULL);
		ptr = get_Call_ptr(call);
		if (is_Global(ptr)) {
			ent = get_Global_entity(ptr);

			prop = get_entity_additional_properties(ent);
			if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
				return;
			++ctx->n_calls_SymConst;
		} else if (get_opt_closed_world() &&
				is_Sel(ptr) &&
				get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
			/* If all possible callees are const functions, we can remove the memory edge. */
			int i, n_callees = get_Call_n_callees(call);
			if (n_callees == 0) {
				/* This is kind of strange: dying code or a Call that will raise an exception
				   when executed as there is no implementation to call. So better not
				   optimize. */
				return;
			}

			/* note that const functions are a subset of pure ones */
			and_prop = mtp_property_const | mtp_property_pure;
			or_prop  = mtp_no_property;
			for (i = 0; i < n_callees; ++i) {
				ent = get_Call_callee(call, i);
				if (ent == unknown_entity) {
					/* we don't know which entity is called here */
					return;
				}
				prop      = get_entity_additional_properties(ent);
				and_prop &= prop;
				or_prop  |= prop;
				if (and_prop == mtp_no_property)
					return;
			}
			prop = and_prop | (or_prop & mtp_property_has_loop);
			++ctx->n_calls_Sel;
		} else {
			return;
		}

		/* ok, if we get here we found a call to a const or a pure function */
		if (prop & mtp_property_pure) {
			set_irn_link(call, ctx->pure_call_list);
			ctx->pure_call_list = call;
		} else {
			if (prop & mtp_property_has_loop) {
				set_irn_link(call, ctx->nonfloat_const_call_list);
				ctx->nonfloat_const_call_list = call;
			} else {
				set_irn_link(call, ctx->float_const_call_list);
				ctx->float_const_call_list = call;
			}
		}
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls.
		 */
		call = get_Proj_pred(node);
		if (! is_Call(call))
			return;

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
		case pn_Call_M_regular:
		case pn_Call_X_except:
		case pn_Call_X_regular:
		case pn_Call_M_except:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
			break;
		default:
			break;
		}
	}
}  /* collect_const_and_pure_calls */

/**
 * Fix the list of collected Calls.
 *
 * @param irg  the graph that contained calls to pure functions
 * @param ctx  the walker environment holding the collected call and Proj lists
 */
static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
	ir_node  *call, *next, *mem, *proj;
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;

	/* First step: fix all calls by removing their memory input,
	 * letting them float.
	 * The original memory input is preserved in their link fields. */
	for (call = ctx->float_const_call_list; call != NULL; call = next) {
		next = get_irn_link(call);
		mem  = get_Call_mem(call);

		set_irn_link(call, mem);
		set_Call_mem(call, get_irg_no_mem(irg));

		/*
		 * Unfortunately we cannot simply set the node to 'float'.
		 * There is a reason for that:
		 *
		 * - The call might be inside a loop/if that is NOT entered
		 *   and calls an endless function. Setting the call to float
		 *   would allow it to be moved out of the loop/if, causing this
		 *   function to be called even if the loop/if is not entered ...
		 *
		 * This could be fixed using post-dominators for calls and Pin nodes,
		 * but needs some more analysis to ensure that a call that potentially
		 * never returns is not executed before some code that generates
		 * observable states...
		 */
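
		/* A hypothetical sketch of the hazard described above:
		 *
		 *     if (rare_condition)
		 *         x = const_function_with_endless_loop();
		 *
		 * If that Call floated freely, it could be scheduled before the
		 * branch and thus executed (and never return) even when
		 * rare_condition is false. */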

		/* finally, this call can float */
		set_irn_pinned(call, op_pin_state_floats);
		hook_func_call(irg, call);
	}

	/* Last step: fix all Proj's */
	for (proj = ctx->proj_list; proj != NULL; proj = next) {
		next = get_irn_link(proj);
		call = get_Proj_pred(proj);
		mem  = get_irn_link(call);

		/* beware of calls in the pure call list */
		if (!mem || is_Call(mem))
			continue;
		assert(get_irn_mode(mem) == mode_M);

		switch (get_Proj_proj(proj)) {
		case pn_Call_M_regular: {
			/* in dead code there might be cycles where proj == mem */
			if (proj != mem)
				exchange(proj, mem);
			break;
		}
		case pn_Call_X_except:
		case pn_Call_M_except:
			exchange(proj, get_irg_bad(irg));
			break;
		case pn_Call_X_regular: {
			ir_node *block = get_nodes_block(call);
			exchange(proj, new_r_Jmp(irg, block));
			break;
		}
		}
	}

	/* changes were done ... */
	set_irg_outs_inconsistent(irg);
	set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);

	/* ... including exception edges */
	set_irg_doms_inconsistent(irg);

	current_ir_graph = rem;
}  /* fix_const_call_lists */

/**
 * Walker: Collect all calls to nothrow functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 */
static void collect_nothrow_calls(ir_node *node, void *env) {
	env_t     *ctx = env;
	ir_node   *call, *ptr;
	ir_entity *ent;
	unsigned  prop;

	if (is_Call(node)) {
		call = node;

		/* set the link to NULL for all non-nothrow calls */
		set_irn_link(call, NULL);
		ptr = get_Call_ptr(call);
		if (is_Global(ptr)) {
			ent = get_Global_entity(ptr);

			prop = get_entity_additional_properties(ent);
			if ((prop & mtp_property_nothrow) == 0)
				return;
			++ctx->n_calls_SymConst;
		} else if (get_opt_closed_world() &&
				is_Sel(ptr) &&
				get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
			/* If all possible callees are nothrow functions, we can remove the exception edge. */
			int i, n_callees = get_Call_n_callees(call);
			if (n_callees == 0) {
				/* This is kind of strange: dying code or a Call that will raise an exception
				   when executed as there is no implementation to call. So better not
				   optimize. */
				return;
			}

			/* start out optimistically: assume every callee is nothrow */
			prop = mtp_property_nothrow;
			for (i = 0; i < n_callees; ++i) {
				ent = get_Call_callee(call, i);
				if (ent == unknown_entity) {
					/* we don't know which entity is called here */
					return;
				}
				prop &= get_entity_additional_properties(ent);
				if (prop == mtp_no_property)
					return;
			}
			++ctx->n_calls_Sel;
		} else {
			return;
		}

		/* ok, if we get here we found a call to a nothrow function */
		set_irn_link(call, ctx->nothrow_call_list);
		ctx->nothrow_call_list = call;
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls.
		 */
		call = get_Proj_pred(node);
		if (! is_Call(call))
			return;

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
		case pn_Call_M_regular:
		case pn_Call_X_except:
		case pn_Call_X_regular:
		case pn_Call_M_except:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
			break;
		default:
			break;
		}
	}
}  /* collect_nothrow_calls */

/**
 * Fix the list of collected nothrow Calls.
 *
 * @param irg        the graph that contained calls to nothrow functions
 * @param call_list  the list of all call sites of nothrow functions
 * @param proj_list  the list of all memory/exception Proj's of these call sites
 */
static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
	ir_node  *call, *next, *proj;
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;

	/* First step: go through the list of calls and mark them. */
	for (call = call_list; call; call = next) {
		next = get_irn_link(call);

		/* current_ir_graph is in memory anyway, so it's a good marker */
		set_irn_link(call, &current_ir_graph);
		hook_func_call(irg, call);
	}

	/* Second step: remove all exception Proj's */
	for (proj = proj_list; proj; proj = next) {
		next = get_irn_link(proj);
		call = get_Proj_pred(proj);

		/* handle only marked calls */
		if (get_irn_link(call) != &current_ir_graph)
			continue;

		/* kill any exception flow */
		switch (get_Proj_proj(proj)) {
		case pn_Call_X_except:
		case pn_Call_M_except:
			exchange(proj, get_irg_bad(irg));
			break;
		case pn_Call_X_regular: {
			ir_node *block = get_nodes_block(call);
			exchange(proj, new_r_Jmp(irg, block));
			break;
		}
		}
	}

	/* changes were done ... */
	set_irg_outs_inconsistent(irg);
	set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);

	/* ... including exception edges */
	set_irg_doms_inconsistent(irg);

	current_ir_graph = rem;
}  /* fix_nothrow_call_list */

#define SET_IRG_READY(irg)   rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg)    rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg)    rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg)  rbitset_clear(busy_set, get_irg_idx(irg))
#define IS_IRG_BUSY(irg)     rbitset_is_set(busy_set, get_irg_idx(irg))
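
/*
 * The property analysis below is a depth-first walk over the (possibly
 * cyclic) call graph: an IRG marked "busy" is currently on the DFS stack,
 * so reaching it again means recursion and an optimistic, temporarily
 * flagged answer is returned; an IRG marked "ready" has been fully
 * analyzed and its result can simply be reused.
 */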

static unsigned check_const_or_pure_function(ir_graph *irg, int top);
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);

/**
 * Calculate the bigger property of two. Handle the temporary flag right.
 */
static unsigned max_property(unsigned a, unsigned b) {
	unsigned r, t = (a | b) & mtp_temporary;
	a &= ~mtp_temporary;
	b &= ~mtp_temporary;

	if (a == mtp_no_property || b == mtp_no_property)
		return mtp_no_property;
	r = a > b ? a : b;
	return r | t;
}  /* max_property */
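
/*
 * Example (with the usual numeric ordering of the property bits, where
 * mtp_property_const < mtp_property_pure):
 *   max_property(mtp_property_const, mtp_property_pure) == mtp_property_pure
 * i.e. the weaker guarantee wins, and an mtp_temporary flag on either
 * argument is kept on the result.
 */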

/**
 * Follow the memory chain starting at node and determine
 * the mtp_property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else
 */
static unsigned _follow_mem(ir_node *node) {
	unsigned m, mode = mtp_property_const;
	ir_node  *ptr;
	int i;

	for (;;) {
		if (mode == mtp_no_property)
			return mtp_no_property;

		if (irn_visited_else_mark(node))
			return mode;

		switch (get_irn_opcode(node)) {
		case iro_Proj:
			node = get_Proj_pred(node);
			break;

		case iro_Phi:
		case iro_Sync:
			/* do a dfs search */
			for (i = get_irn_arity(node) - 1; i >= 0; --i) {
				m    = _follow_mem(get_irn_n(node, i));
				mode = max_property(mode, m);
				if (mode == mtp_no_property)
					return mtp_no_property;
			}
			return mode;

		case iro_Load:
			/* Beware volatile Loads are NOT allowed in pure functions. */
			if (get_Load_volatility(node) == volatility_is_volatile)
				return mtp_no_property;
			mode = max_property(mode, mtp_property_pure);
			node = get_Load_mem(node);
			break;

		case iro_Call:
			/* A call is only tolerable if it is either constant or pure. */
			ptr = get_Call_ptr(node);
			if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
				ir_entity *ent = get_SymConst_entity(ptr);
				ir_graph  *irg = get_entity_irg(ent);

				if (irg == current_ir_graph) {
					/* A self-recursive call. The property did not depend on this call. */
				} else if (irg == NULL) {
					m    = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
					mode = max_property(mode, m);
				} else {
					/* we have a graph, analyze it. */
					m    = check_const_or_pure_function(irg, /*top=*/0);
					mode = max_property(mode, m);
				}
			} else {
				return mtp_no_property;
			}
			node = get_Call_mem(node);
			break;

		default:
			return mtp_no_property;
		}
	}
}  /* _follow_mem */
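
/*
 * Illustration: for a memory chain  Return.mem -> Proj -> Call(const f) ->
 * Load -> initial memory, _follow_mem() yields mtp_property_pure (the Load
 * downgrades const to pure); a Store or a call through an unknown pointer
 * anywhere on the chain yields mtp_no_property instead.
 */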

/**
 * Follow the memory chain starting at node and determine
 * the mtp_property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else
 */
static unsigned follow_mem(ir_node *node, unsigned mode) {
	unsigned m;

	m = _follow_mem(node);
	return max_property(mode, m);
}  /* follow_mem */

/**
 * Check if a graph represents a const or a pure function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 */
static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
	ir_node  *end, *endbl;
	int      j;
	unsigned prop = get_irg_additional_properties(irg);
	ir_graph *rem = current_ir_graph;

	if (prop & mtp_property_const) {
		/* already marked as a const function */
		return mtp_property_const;
	}
	if (prop & mtp_property_pure) {
		/* already marked as a pure function */
		return mtp_property_pure;
	}

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return mtp_no_property;
	}
	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluating this method. Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_const;
	}
	SET_IRG_BUSY(irg);

	end   = get_irg_end(irg);
	endbl = get_nodes_block(end);
	prop  = mtp_property_const;

	current_ir_graph = irg;

	inc_irg_visited(irg);
	/* mark the initial mem: recursion of follow_mem() stops here */
	mark_irn_visited(get_irg_initial_mem(irg));

	/* visit every Return */
	for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
		ir_node   *node = get_Block_cfgpred(endbl, j);
		ir_opcode code  = get_irn_opcode(node);
		ir_node   *mem;

		/* Bad nodes usually do NOT produce anything, so it's ok */
		if (code == iro_Bad)
			continue;

		if (code == iro_Return) {
			mem = get_Return_mem(node);

			/* Bad nodes usually do NOT produce anything, so it's ok */
			if (is_Bad(mem))
				continue;

			if (mem != get_irg_initial_mem(irg))
				prop = max_property(prop, follow_mem(mem, prop));
		} else {
			/* Exception found. Cannot be const or pure. */
			prop = mtp_no_property;
		}

		if (prop == mtp_no_property)
			break;
	}

	if (prop != mtp_no_property) {
		/* check, if a keep-alive exists */
		for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
			ir_node *kept = get_End_keepalive(end, j);

			if (is_Block(kept)) {
				prop = mtp_no_property;
				break;
			}
			if (mode_M != get_irn_mode(kept))
				continue;

			prop = max_property(prop, follow_mem(kept, prop));
			if (prop == mtp_no_property)
				break;
		}
	}

	if (prop != mtp_no_property) {
		if (top || (prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark optimistic result.
			   Set the property only if we are sure that it is NOT based on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, prop & ~mtp_temporary);
			SET_IRG_READY(irg);
		}
	}
	if (top)
		SET_IRG_READY(irg);
	CLEAR_IRG_BUSY(irg);

	current_ir_graph = rem;
	return prop;
}  /* check_const_or_pure_function */

/**
 * Handle calls to const functions.
 *
 * @param ctx  the walker environment
 */
static void handle_const_Calls(env_t *ctx) {
	int i;

	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel      = 0;

	/* all calls of const functions can be transformed */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);

		ctx->float_const_call_list    = NULL;
		ctx->nonfloat_const_call_list = NULL;
		ctx->pure_call_list           = NULL;
		ctx->proj_list                = NULL;
		irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);

		if (ctx->float_const_call_list != NULL) {
			fix_const_call_lists(irg, ctx);

			/* this graph was changed, invalidate analysis info */
			set_irg_outs_inconsistent(irg);
			set_irg_doms_inconsistent(irg);
		}
	}
}  /* handle_const_Calls */

/**
 * Handle calls to nothrow functions.
 *
 * @param ctx  the walker environment
 */
static void handle_nothrow_Calls(env_t *ctx) {
	int i;

	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel      = 0;

	/* all calls of nothrow functions can be transformed */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);

		ctx->nothrow_call_list = NULL;
		ctx->proj_list         = NULL;
		irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);

		if (ctx->nothrow_call_list) {
			fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);

			/* this graph was changed, invalidate analysis info */
			set_irg_outs_inconsistent(irg);
			set_irg_doms_inconsistent(irg);
		}
	}
}  /* handle_nothrow_Calls */

/**
 * Check, whether a given node represents a return value of
 * a malloc-like function (i.e., newly heap-allocated memory).
 *
 * @param node  the node to check
 */
static int is_malloc_call_result(const ir_node *node) {
	if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
		/* Firm style high-level allocation */
		return 1;
	}
	if (is_alloc_entity != NULL && is_Call(node)) {
		ir_node *ptr = get_Call_ptr(node);

		if (is_Global(ptr)) {
			ir_entity *ent = get_Global_entity(ptr);
			return is_alloc_entity(ent);
		}
	}
	return 0;
}  /* is_malloc_call_result */

/**
 * Update a property depending on a call property.
 */
static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
	unsigned t = (orig_prop | call_prop) & mtp_temporary;
	unsigned r = orig_prop & call_prop;
	return r | t;
}  /* update_property */
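
/*
 * In other words: the combined property set is the intersection (bitwise
 * AND) of what was known so far and what the call guarantees, while the
 * optimistic mtp_temporary marker survives if either side carried it, e.g.
 *   update_property(mtp_property_malloc | mtp_property_nothrow,
 *                   mtp_property_nothrow) == mtp_property_nothrow.
 */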

/**
 * Check if a node is stored.
 */
static int is_stored(const ir_node *n) {
	const ir_edge_t *edge;
	const ir_node   *ptr;

	foreach_out_edge(n, edge) {
		const ir_node *succ = get_edge_src_irn(edge);

		switch (get_irn_opcode(succ)) {
		case iro_Store:
			if (get_Store_value(succ) == n)
				return 1;
			/* ok if it's only the address input */
			break;
		case iro_Call:
			ptr = get_Call_ptr(succ);
			if (is_Global(ptr)) {
				ir_entity *ent = get_Global_entity(ptr);
				int       i;

				/* we know the called entity */
				for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
					if (get_Call_param(succ, i) == n) {
						/* n is the i'th param of the call */
						if (get_method_param_access(ent, i) & ptr_access_store) {
							/* n is stored in ent */
							return 1;
						}
					}
				}
			} else {
				/* unknown call address */
				return 1;
			}
			break;
		default:
			/* bad, potential alias */
			return 1;
		}
	}
	return 0;
}  /* is_stored */

/**
 * Check that the return value of an irg is not stored anywhere.
 *
 * return ~mtp_property_malloc if return values are stored, ~0 else
 */
static unsigned check_stored_result(ir_graph *irg) {
	ir_node  *end_blk = get_irg_end_block(irg);
	int      i, j;
	unsigned res = ~0;
	int      old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (! is_Return(pred))
			continue;
		for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
			const ir_node *irn = get_Return_res(pred, j);

			if (is_stored(irn)) {
				/* bad, might create an alias */
				res = ~mtp_property_malloc;
				goto finish;
			}
		}
	}
finish:
	if (! old_edges)
		edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
	return res;
}  /* check_stored_result */
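
/*
 * Returning an all-ones mask (with only mtp_property_malloc cleared on
 * failure) lets the caller simply AND the result into its current property
 * set, see "curr_prop &= check_stored_result(irg);" below.
 */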

/**
 * Check if a graph represents a nothrow or a malloc function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 */
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
	ir_node   *end_blk = get_irg_end_block(irg);
	ir_entity *ent;
	ir_type   *mtp;
	int       i, j;
	unsigned  curr_prop = mtp_property_malloc | mtp_property_nothrow;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return get_irg_additional_properties(irg);
	}
	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluating this method. Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
	}
	SET_IRG_BUSY(irg);

	ent = get_irg_entity(irg);
	mtp = get_entity_type(ent);

	if (get_method_n_ress(mtp) <= 0)
		curr_prop &= ~mtp_property_malloc;

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (is_Return(pred)) {
			if (curr_prop & mtp_property_malloc) {
				/* check, if malloc is called here */
				for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
					ir_node *res = get_Return_res(pred, j);

					/* skip Confirms and Casts */
					res = skip_HighLevel_ops(res);
					if (is_Proj(res))
						res = get_Proj_pred(res);
					if (is_malloc_call_result(res)) {
						/* ok, this is a malloc */
					} else if (is_Call(res)) {
						ir_node *ptr = get_Call_ptr(res);

						if (is_Global(ptr)) {
							ir_entity *ent    = get_Global_entity(ptr);
							ir_graph  *callee = get_entity_irg(ent);

							if (callee == current_ir_graph) {
								/* A self-recursive call. The property did not depend on this call. */
							} else if (callee != NULL) {
								unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
								curr_prop = update_property(curr_prop, prop);
							} else {
								curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
							}
						} else if (get_opt_closed_world() &&
								is_Sel(ptr) &&
								get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
							/* check if all possible callees are malloc functions. */
							int i, n_callees = get_Call_n_callees(res);
							if (n_callees == 0) {
								/* This is kind of strange: dying code or a Call that will raise an exception
								   when executed as there is no implementation to call. So better not
								   optimize. */
								curr_prop &= ~mtp_property_malloc;
								break;
							}

							for (i = 0; i < n_callees; ++i) {
								ir_entity *ent = get_Call_callee(res, i);
								if (ent == unknown_entity) {
									/* we don't know which entity is called here */
									curr_prop &= ~mtp_property_malloc;
									break;
								}
								if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
									curr_prop &= ~mtp_property_malloc;
									break;
								}
							}
							/* if we pass the for cycle, malloc is still ok */
						} else {
							curr_prop &= ~mtp_property_malloc;
						}
					} else {
						/* unknown return value */
						curr_prop &= ~mtp_property_malloc;
					}
				}
			}
		} else if (curr_prop & mtp_property_nothrow) {
			/* exception flow detected */
			pred = skip_Proj(pred);

			if (is_Call(pred)) {
				ir_node *ptr = get_Call_ptr(pred);

				if (is_Global(ptr)) {
					ir_entity *ent    = get_Global_entity(ptr);
					ir_graph  *callee = get_entity_irg(ent);

					if (callee == current_ir_graph) {
						/* A self-recursive call. The property did not depend on this call. */
					} else if (callee != NULL) {
						/* Note: we check here for nothrow only, so do NOT reset the malloc property */
						unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
						curr_prop = update_property(curr_prop, prop);
					} else {
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
							curr_prop &= ~mtp_property_nothrow;
					}
				} else if (get_opt_closed_world() &&
						is_Sel(ptr) &&
						get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
					/* check if all possible callees are nothrow functions. */
					int i, n_callees = get_Call_n_callees(pred);
					if (n_callees == 0) {
						/* This is kind of strange: dying code or a Call that will raise an exception
						   when executed as there is no implementation to call. So better not
						   optimize. */
						curr_prop &= ~mtp_property_nothrow;
						break;
					}

					for (i = 0; i < n_callees; ++i) {
						ir_entity *ent = get_Call_callee(pred, i);
						if (ent == unknown_entity) {
							/* we don't know which entity is called here */
							curr_prop &= ~mtp_property_nothrow;
							break;
						}
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
							curr_prop &= ~mtp_property_nothrow;
							break;
						}
					}
					/* if we pass the for cycle, nothrow is still ok */
				} else {
					curr_prop &= ~mtp_property_nothrow;
				}
			} else {
				/* real exception flow possible. */
				curr_prop &= ~mtp_property_nothrow;
			}
		}

		if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
			/* no need to search further */
			break;
		}
	}

	if (curr_prop & mtp_property_malloc) {
		/*
		 * Note that the malloc property means not only returning newly
		 * allocated memory, but also that this memory is ALIAS FREE.
		 * To ensure that, we do NOT allow that the returned memory is
		 * stored anywhere.
		 */
		curr_prop &= check_stored_result(irg);
	}

	if (curr_prop != mtp_no_property) {
		if (top || (curr_prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark an optimistic result.
			   Set the property only if we are sure that it is NOT based on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
			SET_IRG_READY(irg);
		}
	}
	if (top)
		SET_IRG_READY(irg);
	CLEAR_IRG_BUSY(irg);
	return curr_prop;
}  /* check_nothrow_or_malloc */

/**
 * When a function was detected as "const", it might be moved out of loops.
 * This might be dangerous if the graph can contain endless loops.
 */
static void check_for_possible_endless_loops(ir_graph *irg) {
	ir_loop *root_loop;
	assure_cf_loop(irg);

	root_loop = get_irg_loop(irg);
	if (root_loop->flags & loop_outer_loop)
		set_irg_additional_property(irg, mtp_property_has_loop);
}  /* check_for_possible_endless_loops */

/*
 * optimize function calls by handling const functions
 */
void optimize_funccalls(int force_run, check_alloc_entity_func callback)
{
	int i;
	unsigned last_idx;
	unsigned num_const   = 0;
	unsigned num_pure    = 0;
	unsigned num_nothrow = 0;
	unsigned num_malloc  = 0;

	is_alloc_entity = callback;

	/* prepare: mark all graphs as not analyzed */
	last_idx  = get_irp_last_idx();
	ready_set = rbitset_malloc(last_idx);
	busy_set  = rbitset_malloc(last_idx);

	/* first step: detect, which functions are nothrow or malloc */
	DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg  = get_irp_irg(i);
		unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);

		if (prop & mtp_property_nothrow) {
			++num_nothrow;
			DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
		} else if (prop & mtp_property_malloc) {
			++num_malloc;
			DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
		}
	}

	/* second step: remove exception edges: this must be done before the
	   detection of const and pure functions takes place. */
	if (force_run || num_nothrow > 0) {
		env_t ctx;

		handle_nothrow_Calls(&ctx);
		DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
		DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
		    ctx.n_calls_SymConst, ctx.n_calls_Sel));
	} else {
		DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
	}

	rbitset_clear_all(ready_set, last_idx);
	rbitset_clear_all(busy_set, last_idx);

	/* third step: detect, which functions are const or pure */
	DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg  = get_irp_irg(i);
		unsigned prop = check_const_or_pure_function(irg, /*top=*/1);

		if (prop & mtp_property_const) {
			++num_const;
			DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
			check_for_possible_endless_loops(irg);
		} else if (prop & mtp_property_pure) {
			++num_pure;
			DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
		}
	}

	if (force_run || num_const > 0) {
		env_t ctx;

		handle_const_Calls(&ctx);
		DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
		DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
		    ctx.n_calls_SymConst, ctx.n_calls_Sel));
	} else {
		DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
	}

	xfree(busy_set);
	xfree(ready_set);
}  /* optimize_funccalls */
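
/*
 * Typical use (a sketch, not taken from this file): run the pass over the
 * whole program, e.g.
 *
 *     optimize_funccalls(0, NULL);
 *
 * A non-zero force_run applies the call transformations even if no new
 * const/nothrow graphs were detected in this run; the callback, if
 * non-NULL, is used by is_malloc_call_result() to recognize additional
 * allocator entities.
 */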

/* initialize the funccall optimization */
void firm_init_funccalls(void) {
	FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
	// firm_dbg_set_mask(dbg, -1);
}  /* firm_init_funccalls */