2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Optimization of function calls.
23 * @author Michael Beck
30 #include <adt/raw_bitset.h>
32 #include "funccall_t.h"
35 #include "irgraph_t.h"
39 #include "dbginfo_t.h"
42 #include "iredges_t.h"
43 #include "analyze_irg_args.h"
47 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/**
 * The walker environment for updating function calls.
 *
 * NOTE(review): this listing is an excerpt; the closing brace of the
 * struct (and possibly further fields, e.g. a Sel-call counter used as
 * ctx->n_calls_Sel elsewhere in this file) is not visible here.
 */
typedef struct _env_t {
	unsigned n_calls_SymConst;   /**< Number of collected calls whose address is a SymConst. */
	ir_node *const_call_list;    /**< The list of all const function calls that will be changed. */
	ir_node *pure_call_list;     /**< The list of all pure function calls that will be changed. */
	ir_node *nothrow_call_list;  /**< The list of all nothrow function calls that will be changed. */
	ir_node *proj_list;          /**< The list of all potential Proj nodes that must be fixed. */
/** If non-null, evaluates entities for being a heap alloc. */
static check_alloc_entity_func is_alloc_entity = NULL;

/** Ready IRG's are marked in the ready set (fully analyzed graphs). */
static unsigned *ready_set;

/** IRG's that are in progress are marked here (used to detect recursion). */
static unsigned *busy_set;
71 * We misuse the mtp_property_inherited flag as temporary here.
72 * This is ok, as we cannot set or get it anyway using the
73 * get_additional_properties API.
75 #define mtp_temporary mtp_property_inherited
/**
 * Walker: Collect all calls to const and pure functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 *
 * NOTE(review): this listing is an excerpt -- local declarations
 * (ctx, call, ptr, ent, prop), the is_Call() dispatch and several
 * returns/braces of the original are not visible here. Compare with
 * the complete source before editing.
 */
static void collect_const_and_pure_calls(ir_node *node, void *env) {
	/* set the link to NULL for all non-const/pure calls */
	set_irn_link(call, NULL);
	ptr = get_Call_ptr(call);
	if (is_SymConst_addr_ent(ptr)) {
		/* direct call: look at the callee's declared properties */
		ent = get_SymConst_entity(ptr);

		prop = get_entity_additional_properties(ent);
		if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
		++ctx->n_calls_SymConst;
	} else if (get_opt_closed_world() &&
	           get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
		/* If all possible callees are const functions, we can remove the memory edge. */
		int i, n_callees = get_Call_n_callees(call);
		if (n_callees == 0) {
			/* This is kind of strange: dying code or a Call that will raise an exception
			   when executed as there is no implementation to call. So better not
			   optimize. */
		/* note that const function are a subset of pure ones */
		prop = mtp_property_const | mtp_property_pure;
		for (i = 0; i < n_callees; ++i) {
			/* intersect the properties of all possible callees */
			ent = get_Call_callee(call, i);
			if (ent == unknown_entity) {
				/* we don't know which entity is called here */
			prop &= get_entity_additional_properties(ent);
			if (prop == mtp_no_property)
	/* ok, if we get here we found a call to a const or a pure function */
	if (prop & mtp_property_pure) {
		set_irn_link(call, ctx->pure_call_list);
		ctx->pure_call_list = call;
		set_irn_link(call, ctx->const_call_list);
		ctx->const_call_list = call;
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls; they are fixed after the calls were transformed.
		 */
		call = get_Proj_pred(node);

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
		case pn_Call_M_regular:
		case pn_Call_X_except:
		case pn_Call_X_regular:
		case pn_Call_M_except:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
} /* collect_const_and_pure_calls */
/**
 * Fix the list of collected Calls.
 *
 * @param irg        the graph that contained calls to pure functions
 * @param call_list  the list of all call sites of const functions
 * @param proj_list  the list of all memory/exception Proj's of this call sites
 *
 * NOTE(review): excerpt -- some declarations, loop/switch closing braces,
 * break statements and the exception-change bookkeeping are not visible
 * in this listing.
 */
static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
	ir_node *call, *next, *mem, *proj;
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;

	/* First step: fix all calls by removing it's memory input.
	   It's original memory input is preserved in their link fields. */
	for (call = call_list; call; call = next) {
		next = get_irn_link(call);
		mem = get_Call_mem(call);

		/* remember the old memory in the link, then cut the memory edge */
		set_irn_link(call, mem);
		set_Call_mem(call, get_irg_no_mem(irg));

		/*
		 * Sorrily we cannot simply set the node to 'float'.
		 * There is a reason for that:
		 *
		 * - The call might be inside a loop/if that is NOT entered
		 *   and calls a endless function. Setting the call to float
		 *   would allow to move it out from the loop/if causing this
		 *   function be called even if the loop/if is not entered ...
		 *
		 * This could be fixed using post-dominators for calls and Pin nodes
		 * but need some more analyzes to ensure that a call that potential
		 * never returns is not executed before some code that generates
		 * observable states...
		 */

		/* finally, this call can float
		set_irn_pinned(call, op_pin_state_floats); */
		hook_func_call(irg, call);

	/* Second step: fix all Proj's */
	for (proj = proj_list; proj; proj = next) {
		next = get_irn_link(proj);
		call = get_Proj_pred(proj);
		mem = get_irn_link(call);

		/* beware of calls in the pure call list */
		if (! mem || get_irn_op(mem) == op_Call)
		assert(get_irn_mode(mem) == mode_M);

		switch (get_Proj_proj(proj)) {
		case pn_Call_M_regular: {
			/* in dead code there might be cycles where proj == mem */
		case pn_Call_X_except:
		case pn_Call_M_except:
			/* const calls cannot raise: route exception flow to Bad */
			exchange(proj, get_irg_bad(irg));
		case pn_Call_X_regular: {
			/* the regular control-flow Proj becomes a plain Jmp */
			ir_node *block = get_nodes_block(call);

			exchange(proj, new_r_Jmp(irg, block));

	/* changes were done ... */
	set_irg_outs_inconsistent(irg);
	set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);

	/* ... including exception edges */
	set_irg_doms_inconsistent(irg);

	current_ir_graph = rem;
} /* fix_const_call_list */
/**
 * Walker: Collect all calls to nothrow functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 *
 * NOTE(review): excerpt -- local declarations, the is_Call() dispatch
 * and several returns/braces of the original are not visible here.
 */
static void collect_nothrow_calls(ir_node *node, void *env) {
	/* set the link to NULL for all non-const/pure calls */
	set_irn_link(call, NULL);
	ptr = get_Call_ptr(call);
	if (is_SymConst_addr_ent(ptr)) {
		/* direct call: look at the callee's declared properties */
		ent = get_SymConst_entity(ptr);

		prop = get_entity_additional_properties(ent);
		if ((prop & mtp_property_nothrow) == 0)
		++ctx->n_calls_SymConst;
	} else if (get_opt_closed_world() &&
	           get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
		/* If all possible callees are nothrow functions, we can remove the exception edge. */
		int i, n_callees = get_Call_n_callees(call);
		if (n_callees == 0) {
			/* This is kind of strange: dying code or a Call that will raise an exception
			   when executed as there is no implementation to call. So better not
			   optimize. */
		/* start optimistic: assume nothrow, then intersect over all callees */
		prop = mtp_property_nothrow;
		for (i = 0; i < n_callees; ++i) {
			ent = get_Call_callee(call, i);
			if (ent == unknown_entity) {
				/* we don't know which entity is called here */
			prop &= get_entity_additional_properties(ent);
			if (prop == mtp_no_property)
	/* ok, if we get here we found a call to a nothrow function */
	set_irn_link(call, ctx->nothrow_call_list);
	ctx->nothrow_call_list = call;
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls; they are fixed after the calls were transformed.
		 */
		call = get_Proj_pred(node);

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
		case pn_Call_M_regular:
		case pn_Call_X_except:
		case pn_Call_X_regular:
		case pn_Call_M_except:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
} /* collect_nothrow_calls */
327 * Fix the list of collected nothrow Calls.
329 * @param irg the graph that contained calls to pure functions
330 * @param call_list the list of all call sites of const functions
331 * @param proj_list the list of all memory/exception Proj's of this call sites
333 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
334 ir_node *call, *next, *proj;
336 ir_graph *rem = current_ir_graph;
338 current_ir_graph = irg;
340 /* First step: go through the list of calls and mark them. */
341 for (call = call_list; call; call = next) {
342 next = get_irn_link(call);
344 /* current_ir_graph is in memory anyway, so it's a good marker */
345 set_irn_link(call, ¤t_ir_graph);
346 hook_func_call(irg, call);
349 /* Second step: Remove all exception Proj's */
350 for (proj = proj_list; proj; proj = next) {
351 next = get_irn_link(proj);
352 call = get_Proj_pred(proj);
354 /* handle only marked calls */
355 if (get_irn_link(call) != ¤t_ir_graph)
358 /* kill any exception flow */
359 switch (get_Proj_proj(proj)) {
360 case pn_Call_X_except:
361 case pn_Call_M_except:
363 exchange(proj, get_irg_bad(irg));
365 case pn_Call_X_regular: {
366 ir_node *block = get_nodes_block(call);
368 exchange(proj, new_r_Jmp(irg, block));
376 /* changes were done ... */
377 set_irg_outs_inconsistent(irg);
378 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
381 /* ... including exception edges */
382 set_irg_doms_inconsistent(irg);
384 current_ir_graph = rem;
385 } /* fix_nothrow_call_list */
/* Bitset helpers over the graph index: "ready" marks graphs whose analysis
   is finished, "busy" marks graphs currently being analyzed (so recursive
   evaluation can be detected and answered optimistically). */
#define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
#define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
395 static unsigned check_const_or_pure_function(ir_graph *irg, int top);
396 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
399 * Calculate the bigger property of two. Handle the temporary flag right.
401 static unsigned max_property(unsigned a, unsigned b) {
402 unsigned r, t = (a | b) & mtp_temporary;
406 if (a == mtp_no_property || b == mtp_no_property)
407 return mtp_no_property;
/**
 * Follow the memory chain starting at node and determine
 * the resulting method property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure
 *         mtp_no_property    else
 *
 * NOTE(review): excerpt -- the enclosing loop, several case labels,
 * local declarations (i, ptr) and braces of the original are not
 * visible here.
 */
static unsigned _follow_mem(ir_node *node) {
	unsigned m, mode = mtp_property_const;

	if (mode == mtp_no_property)
		return mtp_no_property;

	/* stop on already-visited nodes (initial mem is pre-marked by caller) */
	if (irn_visited(node))
	mark_irn_visited(node);

	switch (get_irn_opcode(node)) {
		node = get_Proj_pred(node);

	/* do a dfs search */
	for (i = get_irn_arity(node) - 1; i >= 0; --i) {
		m = _follow_mem(get_irn_n(node, i));
		mode = max_property(mode, m);
		if (mode == mtp_no_property)
			return mtp_no_property;

		/* Beware volatile Loads are NOT allowed in pure functions. */
		if (get_Load_volatility(node) == volatility_is_volatile)
			return mtp_no_property;
		mode = max_property(mode, mtp_property_pure);
		node = get_Load_mem(node);

		/* A call is only tolerable if its either constant or pure. */
		ptr = get_Call_ptr(node);
		if (get_irn_op(ptr) == op_SymConst &&
		    get_SymConst_kind(ptr) == symconst_addr_ent) {
			ir_entity *ent = get_SymConst_entity(ptr);
			ir_graph *irg = get_entity_irg(ent);

			if (irg == current_ir_graph) {
				/* A self-recursive call. The property did not depend on this call. */
			} else if (irg == NULL) {
				/* no graph available: trust the entity's declared properties */
				m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
				mode = max_property(mode, m);
			} else if (irg != NULL) {
				/* we have a graph, analyze it. */
				m = check_const_or_pure_function(irg, /*top=*/0);
				mode = max_property(mode, m);
			return mtp_no_property;
		node = get_Call_mem(node);

	return mtp_no_property;
493 * Follow the memory chain starting at node and determine
496 * @return mtp_property_const if only calls of const functions are detected
497 * mtp_property_pure if only Loads and const/pure calls detected
498 * mtp_no_property else
500 static unsigned follow_mem(ir_node *node, unsigned mode) {
503 m = _follow_mem(node);
504 return max_property(mode, m);
/**
 * Check if a graph represents a const or a pure function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 *
 * NOTE(review): excerpt -- some declarations (j, mem), closing braces,
 * the busy-set handling and control-flow statements of the original are
 * not visible here.
 */
static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
	ir_node *end, *endbl;
	unsigned prop = get_irg_additional_properties(irg);
	ir_graph *rem = current_ir_graph;

	if (prop & mtp_property_const) {
		/* already marked as a const function */
		return mtp_property_const;
	if (prop & mtp_property_pure) {
		/* already marked as a pure function */
		return mtp_property_pure;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return mtp_no_property;
	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluate this method. Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_const;

	end = get_irg_end(irg);
	endbl = get_nodes_block(end);
	prop = mtp_property_const;

	current_ir_graph = irg;

	inc_irg_visited(irg);
	/* mark the initial mem: recursion of follow_mem stops here */
	mark_irn_visited(get_irg_initial_mem(irg));

	/* visit every Return */
	for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
		ir_node *node = get_Block_cfgpred(endbl, j);
		ir_opcode code = get_irn_opcode(node);

		/* Bad nodes usually do NOT produce anything, so it's ok */

		if (code == iro_Return) {
			mem = get_Return_mem(node);

			/* Bad nodes usually do NOT produce anything, so it's ok */

			if (mem != get_irg_initial_mem(irg))
				prop = max_property(prop, follow_mem(mem, prop));
			/* Exception found. Cannot be const or pure. */
			prop = mtp_no_property;
		if (prop == mtp_no_property)

	if (prop != mtp_no_property) {
		/* check, if a keep-alive exists */
		for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
			ir_node *mem = get_End_keepalive(end, j);

			/* only memory keep-alives are relevant here */
			if (mode_M != get_irn_mode(mem))
			prop = max_property(prop, follow_mem(mem, prop));
			if (prop == mtp_no_property)

	if (prop != mtp_no_property) {
		if (top || (prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark optimistic result.
			   Set the property only if we are sure that it does NOT base on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, prop & ~mtp_temporary);

	current_ir_graph = rem;
} /* check_const_or_pure_function */
/**
 * Handle calls to const functions: walk all graphs, collect calls to
 * const/pure functions and fix the const-call sites.
 *
 * NOTE(review): excerpt -- the declaration of the loop index i and some
 * closing braces of the original are not visible here.
 */
static void handle_const_Calls(env_t *ctx) {
	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel = 0;

	/* all calls of const functions can be transformed */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);

		/* reset the per-graph collection lists before the walk */
		ctx->const_call_list = NULL;
		ctx->pure_call_list = NULL;
		ctx->proj_list = NULL;
		irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);

		if (ctx->const_call_list) {
			fix_const_call_list(irg, ctx->const_call_list, ctx->proj_list);

			/* this graph was changed, invalidate analysis info */
			set_irg_outs_inconsistent(irg);
			set_irg_doms_inconsistent(irg);
} /* handle_const_Calls */
/**
 * Handle calls to nothrow functions: walk all graphs, collect calls to
 * nothrow functions and remove their exception edges.
 *
 * NOTE(review): excerpt -- the declaration of the loop index i and the
 * closing braces of the original are not visible here.
 */
static void handle_nothrow_Calls(env_t *ctx) {
	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel = 0;

	/* all calls of nothrow functions can be transformed */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);

		/* reset the per-graph collection lists before the walk */
		ctx->nothrow_call_list = NULL;
		ctx->proj_list = NULL;
		irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);

		if (ctx->nothrow_call_list) {
			fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);

			/* this graph was changed, invalidate analysis info */
			set_irg_outs_inconsistent(irg);
			set_irg_doms_inconsistent(irg);
667 * Check, whether a given node represents a return value of
668 * a malloc like function (ie, new heap allocated memory).
670 * @param node the node to check
672 static int is_malloc_call_result(const ir_node *node) {
673 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
674 /* Firm style high-level allocation */
677 if (is_alloc_entity != NULL && is_Call(node)) {
678 ir_node *ptr = get_Call_ptr(node);
680 if (is_SymConst_addr_ent(ptr)) {
681 ir_entity *ent = get_SymConst_entity(ptr);
682 return is_alloc_entity(ent);
686 } /* is_malloc_call_result */
689 * Update a property depending on a call property.
691 static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
692 unsigned t = (orig_prop | call_prop) & mtp_temporary;
693 unsigned r = orig_prop & call_prop;
695 } /** update_property */
/**
 * Check if a node is stored (i.e. may escape through a Store or a call
 * parameter that is written).
 *
 * NOTE(review): excerpt -- the opcode case labels, local declarations
 * (ptr, i), return statements and closing braces of the original are
 * not visible here.
 */
static int is_stored(const ir_node *n) {
	const ir_edge_t *edge;

	/* inspect every user of n */
	foreach_out_edge(n, edge) {
		const ir_node *succ = get_edge_src_irn(edge);

		switch (get_irn_opcode(succ)) {
			if (get_Store_value(succ) == n)
			/* ok if its only the address input */
			ptr = get_Call_ptr(succ);
			if (is_SymConst_addr_ent(ptr)) {
				ir_entity *ent = get_SymConst_entity(ptr);

				/* we know the called entity */
				for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
					if (get_Call_param(succ, i) == n) {
						/* n is the i'th param of the call */
						if (get_method_param_access(ent, i) & ptr_access_store) {
							/* n is store in ent */
				/* unknown call address */
			/* bad, potential alias */
/**
 * Check that the return value of an irg is not stored anywhere.
 *
 * return ~mtp_property_malloc if return values are stored, ~0 else
 *
 * NOTE(review): excerpt -- the declarations of i, j and res, a continue,
 * the restoration of the old edge state and the final return are not
 * visible here.
 */
static unsigned check_stored_result(ir_graph *irg) {
	ir_node *end_blk = get_irg_end_block(irg);
	/* out-edges are needed to find the users of the return values */
	int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (! is_Return(pred))
		for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
			const ir_node *irn = get_Return_res(pred, j);

			if (is_stored(irn)) {
				/* bad, might create an alias */
				res = ~mtp_property_malloc;

	edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
} /* check_stored_result */
/**
 * Check if a graph represents a nothrow or a malloc function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 *
 * NOTE(review): excerpt -- several declarations (i, j, ent, mtp), the
 * busy-set handling, is_Call/is_Proj dispatches, break statements and
 * many closing braces of the original are not visible here.
 */
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
	ir_node *end_blk = get_irg_end_block(irg);
	/* start optimistic: assume both properties, then knock bits out */
	unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return get_irg_additional_properties(irg);
	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluate this method. Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;

	ent = get_irg_entity(irg);
	mtp = get_entity_type(ent);

	/* a function without results cannot be a malloc-like function */
	if (get_method_n_ress(mtp) <= 0)
		curr_prop &= ~mtp_property_malloc;

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (is_Return(pred)) {
			if (curr_prop & mtp_property_malloc) {
				/* check, if malloc is called here */
				for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
					ir_node *res = get_Return_res(pred, j);

					/* skip Confirms and Casts */
					res = skip_HighLevel_ops(res);
					res = get_Proj_pred(res);
					if (is_malloc_call_result(res)) {
						/* ok, this is a malloc */
					} else if (is_Call(res)) {
						ir_node *ptr = get_Call_ptr(res);

						if (is_SymConst_addr_ent(ptr)) {
							/* a direct call */
							ir_entity *ent = get_SymConst_entity(ptr);
							ir_graph *callee = get_entity_irg(ent);

							/* A self-recursive call. The property did not depend on this call. */
							} else if (callee != NULL) {
								unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
								curr_prop = update_property(curr_prop, prop);
								curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
						} else if (get_opt_closed_world() &&
						           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
							/* check if all possible callees are malloc functions. */
							int i, n_callees = get_Call_n_callees(res);
							if (n_callees == 0) {
								/* This is kind of strange: dying code or a Call that will raise an exception
								   when executed as there is no implementation to call. So better not
								   optimize. */
								curr_prop &= ~mtp_property_malloc;
							for (i = 0; i < n_callees; ++i) {
								ir_entity *ent = get_Call_callee(res, i);
								if (ent == unknown_entity) {
									/* we don't know which entity is called here */
									curr_prop &= ~mtp_property_malloc;
								if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
									curr_prop &= ~mtp_property_malloc;
							/* if we pass the for cycle, malloc is still ok */
							curr_prop &= ~mtp_property_malloc;
					/* unknown return value */
					curr_prop &= ~mtp_property_malloc;
		} else if (curr_prop & mtp_property_nothrow) {
			/* exception flow detected */
			pred = skip_Proj(pred);

				ir_node *ptr = get_Call_ptr(pred);

				if (is_SymConst_addr_ent(ptr)) {
					/* a direct call */
					ir_entity *ent = get_SymConst_entity(ptr);
					ir_graph *callee = get_entity_irg(ent);

					/* A self-recursive call. The property did not depend on this call. */
					} else if (callee != NULL) {
						/* Note: we check here for nothrow only, so do NOT reset the malloc property */
						unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
						curr_prop = update_property(curr_prop, prop);
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
							curr_prop &= ~mtp_property_nothrow;
				} else if (get_opt_closed_world() &&
				           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
					/* check if all possible callees are nothrow functions. */
					int i, n_callees = get_Call_n_callees(pred);
					if (n_callees == 0) {
						/* This is kind of strange: dying code or a Call that will raise an exception
						   when executed as there is no implementation to call. So better not
						   optimize. */
						curr_prop &= ~mtp_property_nothrow;
					for (i = 0; i < n_callees; ++i) {
						ir_entity *ent = get_Call_callee(pred, i);
						if (ent == unknown_entity) {
							/* we don't know which entity is called here */
							curr_prop &= ~mtp_property_nothrow;
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
							curr_prop &= ~mtp_property_nothrow;
					/* if we pass the for cycle, nothrow is still ok */
					curr_prop &= ~mtp_property_nothrow;
			/* real exception flow possible. */
			curr_prop &= ~mtp_property_nothrow;

		if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
			/* no need to search further */

	if (curr_prop & mtp_property_malloc) {
		/*
		 * Note that the malloc property means not only return newly allocated
		 * memory, but also that this memory is ALIAS FREE.
		 * To ensure that, we do NOT allow that the returned memory is somewhere
		 * stored.
		 */
		curr_prop &= check_stored_result(irg);

	if (curr_prop != mtp_no_property) {
		if (top || (curr_prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark an optimistic result.
			   Set the property only if we are sure that it does NOT base on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
} /* check_nothrow_or_malloc */
/*
 * optimize function calls by handling const functions
 *
 * Runs three phases: (1) detect nothrow/malloc graphs, (2) remove
 * exception edges of calls to nothrow functions, (3) detect const/pure
 * graphs and fix the corresponding call sites.
 *
 * NOTE(review): excerpt -- declarations (i, last_idx, env_t ctx), the
 * counter increments (num_nothrow++ etc.), the else branches of the DB
 * output, frees of the bitsets and several braces of the original are
 * not visible here.
 */
void optimize_funccalls(int force_run, check_alloc_entity_func callback)
	unsigned num_const = 0;
	unsigned num_pure = 0;
	unsigned num_nothrow = 0;
	unsigned num_malloc = 0;

	is_alloc_entity = callback;

	/* prepare: mark all graphs as not analyzed */
	last_idx = get_irp_last_idx();
	ready_set = rbitset_malloc(last_idx);
	busy_set = rbitset_malloc(last_idx);

	/* first step: detect, which functions are nothrow or malloc */
	DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);
		unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);

		if (prop & mtp_property_nothrow) {
			DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
		} else if (prop & mtp_property_malloc) {
			DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));

	/* second step: remove exception edges: this must be done before the
	   detection of const and pure functions take place. */
	if (force_run || num_nothrow > 0) {
		handle_nothrow_Calls(&ctx);
		DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
		DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
		   ctx.n_calls_SymConst, ctx.n_calls_Sel));
		DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));

	/* reset the analysis state for the const/pure pass */
	rbitset_clear_all(ready_set, last_idx);
	rbitset_clear_all(busy_set, last_idx);

	/* third step: detect, which functions are const or pure */
	DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);
		unsigned prop = check_const_or_pure_function(irg, /*top=*/1);

		if (prop & mtp_property_const) {
			DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
		} else if (prop & mtp_property_pure) {
			DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));

	if (force_run || num_const > 0) {
		handle_const_Calls(&ctx);
		DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
		DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
		   ctx.n_calls_SymConst, ctx.n_calls_Sel));
		DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));

} /* optimize_funccalls */
1049 /* initialize the funccall optimization */
1050 void firm_init_funccalls(void) {
1051 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1052 // firm_dbg_set_mask(dbg, -1);
1053 } /* firm_init_funccalls */