2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Optimization of function calls.
23 * @author Michael Beck
30 #include <adt/raw_bitset.h>
32 #include "funccall_t.h"
35 #include "irgraph_t.h"
39 #include "dbginfo_t.h"
43 #include "iredges_t.h"
44 #include "analyze_irg_args.h"
/** Debug module handle for this optimization pass. */
48 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
51 * The walker environment for updating function calls.
53 typedef struct _env_t {
54 unsigned n_calls_SymConst; /**< Number of calls through a SymConst (direct) address that were optimized. */
56 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
57 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
58 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
59 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
60 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
63 /** If non-null, evaluates entities for being a heap alloc. */
64 static check_alloc_entity_func is_alloc_entity = NULL;
66 /** Ready IRG's are marked in the ready set. */
67 static unsigned *ready_set;
69 /** IRG's that are in progress are marked here. */
70 static unsigned *busy_set;
73 * We misuse the mtp_property_inherited flag as temporary here.
74 * This is ok, as we cannot set or get it anyway using the
75 * get_additional_properties API.
77 #define mtp_temporary mtp_property_inherited
80 * Walker: Collect all calls to const and pure functions
81 * to lists. Collect all Proj(Call) nodes into a Proj list.
83 static void collect_const_and_pure_calls(ir_node *node, void *env) {
87 unsigned and_prop, or_prop, prop;
92 /* set the link to NULL for all non-const/pure calls */
93 set_irn_link(call, NULL);
94 ptr = get_Call_ptr(call);
/* direct call: the callee entity is known from the Global address */
96 ent = get_Global_entity(ptr);
98 prop = get_entity_additional_properties(ent);
99 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
101 ++ctx->n_calls_SymConst;
102 } else if (get_opt_closed_world() &&
104 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
105 /* If all possible callees are const functions, we can remove the memory edge. */
106 int i, n_callees = get_Call_n_callees(call);
107 if (n_callees == 0) {
108 /* This is kind of strange: dying code or a Call that will raise an exception
109 when executed as there is no implementation to call. So better not
114 /* note that const function are a subset of pure ones */
115 and_prop = mtp_property_const | mtp_property_pure;
117 for (i = 0; i < n_callees; ++i) {
118 ent = get_Call_callee(call, i);
119 if (ent == unknown_entity) {
120 /* we don't know which entity is called here */
123 prop = get_entity_additional_properties(ent);
126 if (and_prop == mtp_no_property)
/* conjunction of all callee const/pure flags, disjunction of the loop flag */
129 prop = and_prop | (or_prop & mtp_property_has_loop);
134 /* ok, if we get here we found a call to a const or a pure function */
135 if (prop & mtp_property_pure) {
136 set_irn_link(call, ctx->pure_call_list);
137 ctx->pure_call_list = call;
139 if (prop & mtp_property_has_loop) {
/* const but possibly non-terminating: must not float out of its block */
140 set_irn_link(call, ctx->nonfloat_const_call_list);
141 ctx->nonfloat_const_call_list = call;
143 set_irn_link(call, ctx->float_const_call_list);
144 ctx->float_const_call_list = call;
147 } else if (is_Proj(node)) {
149 * Collect all memory and exception Proj's from
152 call = get_Proj_pred(node);
156 /* collect the Proj's in the Proj list */
157 switch (get_Proj_proj(node)) {
158 case pn_Call_M_regular:
159 case pn_Call_X_except:
160 case pn_Call_X_regular:
161 case pn_Call_M_except:
/* all four Proj kinds are chained onto the same fixup list */
162 set_irn_link(node, ctx->proj_list);
163 ctx->proj_list = node;
169 } /* collect_const_and_pure_calls */
172 * Fix the list of collected Calls.
174 * @param irg the graph that contained calls to pure functions
* @param ctx the walker environment holding the collected call and Proj lists
177 static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
178 ir_node *call, *next, *mem, *proj;
180 ir_graph *rem = current_ir_graph;
182 current_ir_graph = irg;
184 /* First step: fix all calls by removing their memory input and let
186 * The original memory input is preserved in their link fields. */
187 for (call = ctx->float_const_call_list; call != NULL; call = next) {
188 next = get_irn_link(call);
189 mem = get_Call_mem(call);
/* stash the old memory input in the link; the Proj fixup below reads it back */
191 set_irn_link(call, mem);
192 set_Call_mem(call, get_irg_no_mem(irg));
195 * Unfortunately we cannot simply set the node to 'float'.
196 * There is a reason for that:
198 * - The call might be inside a loop/if that is NOT entered
199 * and calls a endless function. Setting the call to float
200 * would allow to move it out from the loop/if causing this
201 * function be called even if the loop/if is not entered ...
203 * This could be fixed using post-dominators for calls and Pin nodes
204 * but need some more analyzes to ensure that a call that potential
205 * never returns is not executed before some code that generates
206 * observable states...
209 /* finally, this call can float */
210 set_irn_pinned(call, op_pin_state_floats);
211 hook_func_call(irg, call);
214 /* Last step: fix all Proj's */
215 for (proj = ctx->proj_list; proj != NULL; proj = next) {
216 next = get_irn_link(proj);
217 call = get_Proj_pred(proj);
218 mem = get_irn_link(call);
220 /* beware of calls in the pure call list */
221 if (!mem || is_Call(mem))
223 assert(get_irn_mode(mem) == mode_M);
225 switch (get_Proj_proj(proj)) {
226 case pn_Call_M_regular: {
227 /* in dead code there might be cycles where proj == mem */
232 case pn_Call_X_except:
233 case pn_Call_M_except:
/* a const/pure call cannot throw: its exception Projs become Bad */
235 exchange(proj, get_irg_bad(irg));
237 case pn_Call_X_regular: {
238 ir_node *block = get_nodes_block(call);
/* the regular control-flow Proj degenerates to a plain jump */
240 exchange(proj, new_r_Jmp(irg, block));
248 /* changes were done ... */
249 set_irg_outs_inconsistent(irg);
250 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
253 /* ... including exception edges */
254 set_irg_doms_inconsistent(irg);
256 current_ir_graph = rem;
257 } /* fix_const_call_list */
260 * Walker: Collect all calls to nothrow functions
261 * to lists. Collect all Proj(Call) nodes into a Proj list.
263 static void collect_nothrow_calls(ir_node *node, void *env) {
272 /* set the link to NULL for all non-const/pure calls */
273 set_irn_link(call, NULL);
274 ptr = get_Call_ptr(call);
275 if (is_Global(ptr)) {
/* direct call: the callee entity is known */
276 ent = get_Global_entity(ptr);
278 prop = get_entity_additional_properties(ent);
279 if ((prop & mtp_property_nothrow) == 0)
281 ++ctx->n_calls_SymConst;
282 } else if (get_opt_closed_world() &&
284 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
285 /* If all possible callees are nothrow functions, we can remove the exception edge. */
286 int i, n_callees = get_Call_n_callees(call);
287 if (n_callees == 0) {
288 /* This is kind of strange: dying code or a Call that will raise an exception
289 when executed as there is no implementation to call. So better not
294 /* note that const function are a subset of pure ones */
295 prop = mtp_property_nothrow;
296 for (i = 0; i < n_callees; ++i) {
297 ent = get_Call_callee(call, i);
298 if (ent == unknown_entity) {
299 /* we don't know which entity is called here */
/* nothrow only holds if EVERY possible callee is nothrow */
302 prop &= get_entity_additional_properties(ent);
303 if (prop == mtp_no_property)
310 /* ok, if we get here we found a call to a nothrow function */
311 set_irn_link(call, ctx->nothrow_call_list);
312 ctx->nothrow_call_list = call;
313 } else if (is_Proj(node)) {
315 * Collect all memory and exception Proj's from
318 call = get_Proj_pred(node);
322 /* collect the Proj's in the Proj list */
323 switch (get_Proj_proj(node)) {
324 case pn_Call_M_regular:
325 case pn_Call_X_except:
326 case pn_Call_X_regular:
327 case pn_Call_M_except:
328 set_irn_link(node, ctx->proj_list);
329 ctx->proj_list = node;
335 } /* collect_nothrow_calls */
338 * Fix the list of collected nothrow Calls.
340 * @param irg the graph that contained calls to nothrow functions
341 * @param call_list the list of all call sites of nothrow functions
342 * @param proj_list the list of all memory/exception Proj's of this call sites
344 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
345 ir_node *call, *next, *proj;
347 ir_graph *rem = current_ir_graph;
349 current_ir_graph = irg;
351 /* First step: go through the list of calls and mark them. */
352 for (call = call_list; call; call = next) {
353 next = get_irn_link(call);
355 /* current_ir_graph is in memory anyway, so it's a good marker */
356 set_irn_link(call, &current_ir_graph);
357 hook_func_call(irg, call);
360 /* Second step: Remove all exception Proj's */
361 for (proj = proj_list; proj; proj = next) {
362 next = get_irn_link(proj);
363 call = get_Proj_pred(proj);
365 /* handle only marked calls */
366 if (get_irn_link(call) != &current_ir_graph)
369 /* kill any exception flow */
370 switch (get_Proj_proj(proj)) {
371 case pn_Call_X_except:
372 case pn_Call_M_except:
/* nothrow: the exception Projs can never trigger, replace by Bad */
374 exchange(proj, get_irg_bad(irg));
376 case pn_Call_X_regular: {
377 ir_node *block = get_nodes_block(call);
/* regular control flow degenerates to an unconditional jump */
379 exchange(proj, new_r_Jmp(irg, block));
387 /* changes were done ... */
388 set_irg_outs_inconsistent(irg);
389 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
392 /* ... including exception edges */
393 set_irg_doms_inconsistent(irg);
395 current_ir_graph = rem;
396 } /* fix_nothrow_call_list */
/* Per-graph analysis state: ready_set/busy_set bitsets indexed by graph index. */
399 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
400 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
401 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
402 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
403 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
406 static unsigned check_const_or_pure_function(ir_graph *irg, int top);
407 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
410 * Calculate the bigger property of two. Handle the temporary flag right.
412 static unsigned max_property(unsigned a, unsigned b) {
/* a temporary flag on either input taints the result */
413 unsigned r, t = (a | b) & mtp_temporary;
/* no_property dominates: anything combined with it is no_property */
417 if (a == mtp_no_property || b == mtp_no_property)
418 return mtp_no_property;
424 * Follow the memory chain starting at node and determine
427 * @return mtp_property_const if only calls of const functions are detected
428 * mtp_property_pure if only Loads and const/pure
430 * mtp_no_property else
432 static unsigned _follow_mem(ir_node *node) {
/* start optimistic: const until a Load or impure call is seen */
433 unsigned m, mode = mtp_property_const;
438 if (mode == mtp_no_property)
439 return mtp_no_property;
/* visited marks cut off cycles and the initial memory (pre-marked by caller) */
441 if (irn_visited(node))
444 mark_irn_visited(node);
446 switch (get_irn_opcode(node)) {
448 node = get_Proj_pred(node);
457 /* do a dfs search */
458 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
459 m = _follow_mem(get_irn_n(node, i));
460 mode = max_property(mode, m);
461 if (mode == mtp_no_property)
462 return mtp_no_property;
467 /* Beware volatile Loads are NOT allowed in pure functions. */
468 if (get_Load_volatility(node) == volatility_is_volatile)
469 return mtp_no_property;
/* a Load degrades const to pure */
470 mode = max_property(mode, mtp_property_pure);
471 node = get_Load_mem(node);
475 /* A call is only tolerable if its either constant or pure. */
476 ptr = get_Call_ptr(node);
477 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
478 ir_entity *ent = get_SymConst_entity(ptr);
479 ir_graph *irg = get_entity_irg(ent);
481 if (irg == current_ir_graph) {
482 /* A self-recursive call. The property did not depend on this call. */
483 } else if (irg == NULL) {
/* external function: trust its declared properties only */
484 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
485 mode = max_property(mode, m);
486 } else if (irg != NULL) {
487 /* we have a graph, analyze it. */
488 m = check_const_or_pure_function(irg, /*top=*/0);
489 mode = max_property(mode, m);
/* indirect call: cannot prove anything */
492 return mtp_no_property;
493 node = get_Call_mem(node);
497 return mtp_no_property;
503 * Follow the memory chain starting at node and determine
506 * @return mtp_property_const if only calls of const functions are detected
507 * mtp_property_pure if only Loads and const/pure calls detected
508 * mtp_no_property else
510 static unsigned follow_mem(ir_node *node, unsigned mode) {
/* thin wrapper: combine the chain's result with an already-known property */
513 m = _follow_mem(node);
514 return max_property(mode, m);
518 * Check if a graph represents a const or a pure function.
520 * @param irg the graph to check
521 * @param top if set, this is the top call
523 static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
524 ir_node *end, *endbl;
526 unsigned prop = get_irg_additional_properties(irg);
527 ir_graph *rem = current_ir_graph;
529 if (prop & mtp_property_const) {
530 /* already marked as a const function */
531 return mtp_property_const;
533 if (prop & mtp_property_pure) {
534 /* already marked as a pure function */
535 return mtp_property_pure;
538 if (IS_IRG_READY(irg)) {
539 /* already checked */
540 return mtp_no_property;
542 if (IS_IRG_BUSY(irg)) {
543 /* we are still evaluate this method. Be optimistic,
544 return the best possible so far but mark the result as temporary. */
545 return mtp_temporary | mtp_property_const;
549 end = get_irg_end(irg);
550 endbl = get_nodes_block(end);
551 prop = mtp_property_const;
553 current_ir_graph = irg;
555 inc_irg_visited(irg);
556 /* mark the initial mem: recursion of follow_mem stops here */
557 mark_irn_visited(get_irg_initial_mem(irg));
559 /* visit every Return */
560 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
561 ir_node *node = get_Block_cfgpred(endbl, j);
562 ir_opcode code = get_irn_opcode(node);
565 /* Bad nodes usually do NOT produce anything, so it's ok */
569 if (code == iro_Return) {
570 mem = get_Return_mem(node);
572 /* Bad nodes usually do NOT produce anything, so it's ok */
/* a Return whose memory is the untouched initial mem is trivially const */
576 if (mem != get_irg_initial_mem(irg))
577 prop = max_property(prop, follow_mem(mem, prop));
579 /* Exception found. Cannot be const or pure. */
580 prop = mtp_no_property;
583 if (prop == mtp_no_property)
587 if (prop != mtp_no_property) {
588 /* check, if a keep-alive exists */
589 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
590 ir_node *kept = get_End_keepalive(end, j);
/* a kept Block indicates an endless loop: not const/pure */
592 if (is_Block(kept)) {
593 prop = mtp_no_property;
597 if (mode_M != get_irn_mode(kept))
600 prop = max_property(prop, follow_mem(kept, prop));
601 if (prop == mtp_no_property)
606 if (prop != mtp_no_property) {
607 if (top || (prop & mtp_temporary) == 0) {
608 /* We use the temporary flag here to mark optimistic result.
609 Set the property only if we are sure that it does NOT base on
610 temporary results OR if we are at top-level. */
611 set_irg_additional_property(irg, prop & ~mtp_temporary);
618 current_ir_graph = rem;
620 } /* check_const_or_pure_function */
623 * Handle calls to const functions.
* Walks every graph, collects const/pure calls and rewires them.
627 static void handle_const_Calls(env_t *ctx) {
630 ctx->n_calls_SymConst = 0;
631 ctx->n_calls_Sel = 0;
633 /* all calls of const functions can be transformed */
634 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
635 ir_graph *irg = get_irp_irg(i);
/* reset the per-graph collection lists before each walk */
637 ctx->float_const_call_list = NULL;
638 ctx->nonfloat_const_call_list = NULL;
639 ctx->pure_call_list = NULL;
640 ctx->proj_list = NULL;
641 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
643 if (ctx->float_const_call_list != NULL) {
644 fix_const_call_lists(irg, ctx);
646 /* this graph was changed, invalidate analysis info */
647 set_irg_outs_inconsistent(irg);
648 set_irg_doms_inconsistent(irg);
651 } /* handle_const_Calls */
654 * Handle calls to nothrow functions.
* Walks every graph, collects nothrow calls and removes their exception edges.
658 static void handle_nothrow_Calls(env_t *ctx) {
661 ctx->n_calls_SymConst = 0;
662 ctx->n_calls_Sel = 0;
664 /* all calls of const functions can be transformed */
665 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
666 ir_graph *irg = get_irp_irg(i);
/* reset the per-graph collection lists before each walk */
668 ctx->nothrow_call_list = NULL;
669 ctx->proj_list = NULL;
670 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
672 if (ctx->nothrow_call_list) {
673 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
675 /* this graph was changed, invalidate analysis info */
676 set_irg_outs_inconsistent(irg);
677 set_irg_doms_inconsistent(irg);
683 * Check, whether a given node represents a return value of
684 * a malloc like function (ie, new heap allocated memory).
686 * @param node the node to check
688 static int is_malloc_call_result(const ir_node *node) {
689 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
690 /* Firm style high-level allocation */
/* otherwise: a direct call to an entity the user callback classifies as allocator */
693 if (is_alloc_entity != NULL && is_Call(node)) {
694 ir_node *ptr = get_Call_ptr(node);
696 if (is_Global(ptr)) {
697 ir_entity *ent = get_Global_entity(ptr);
698 return is_alloc_entity(ent);
702 } /* is_malloc_call_result */
705 * Update a property depending on a call property.
* Intersects the two property sets while preserving the temporary flag.
707 static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
708 unsigned t = (orig_prop | call_prop) & mtp_temporary;
709 unsigned r = orig_prop & call_prop;
711 } /* update_property */
714 * Check if a node is stored.
* Inspects all out-edges of n for Stores (as value) or escaping Call params.
716 static int is_stored(const ir_node *n) {
717 const ir_edge_t *edge;
720 foreach_out_edge(n, edge) {
721 const ir_node *succ = get_edge_src_irn(edge);
723 switch (get_irn_opcode(succ)) {
/* stored only if n is the VALUE input, not the address */
730 if (get_Store_value(succ) == n)
732 /* ok if its only the address input */
741 ptr = get_Call_ptr(succ);
742 if (is_Global(ptr)) {
743 ir_entity *ent = get_Global_entity(ptr);
746 /* we know the called entity */
747 for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
748 if (get_Call_param(succ, i) == n) {
749 /* n is the i'th param of the call */
750 if (get_method_param_access(ent, i) & ptr_access_store) {
751 /* n is store in ent */
757 /* unknown call address */
762 /* bad, potential alias */
770 * Check that the return value of an irg is not stored anywhere.
772 * return ~mtp_property_malloc if return values are stored, ~0 else
774 static unsigned check_stored_result(ir_graph *irg) {
775 ir_node *end_blk = get_irg_end_block(irg);
/* is_stored() needs out-edges; enable them temporarily if necessary */
778 int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
780 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
781 ir_node *pred = get_Block_cfgpred(end_blk, i);
783 if (! is_Return(pred))
785 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
786 const ir_node *irn = get_Return_res(pred, j);
788 if (is_stored(irn)) {
789 /* bad, might create an alias */
790 res = ~mtp_property_malloc;
/* restore the previous edge state */
797 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
799 } /* check_stored_result */
802 * Check if a graph represents a nothrow or a malloc function.
804 * @param irg the graph to check
805 * @param top if set, this is the top call
807 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
808 ir_node *end_blk = get_irg_end_block(irg);
/* start optimistic with both properties; each violation strips one */
812 unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
814 if (IS_IRG_READY(irg)) {
815 /* already checked */
816 return get_irg_additional_properties(irg);
818 if (IS_IRG_BUSY(irg)) {
819 /* we are still evaluate this method. Be optimistic,
820 return the best possible so far but mark the result as temporary. */
821 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
825 ent = get_irg_entity(irg);
826 mtp = get_entity_type(ent);
/* a function without results cannot be a malloc-like allocator */
828 if (get_method_n_ress(mtp) <= 0)
829 curr_prop &= ~mtp_property_malloc;
831 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
832 ir_node *pred = get_Block_cfgpred(end_blk, i);
834 if (is_Return(pred)) {
835 if (curr_prop & mtp_property_malloc) {
836 /* check, if malloc is called here */
837 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
838 ir_node *res = get_Return_res(pred, j);
840 /* skip Confirms and Casts */
841 res = skip_HighLevel_ops(res);
844 res = get_Proj_pred(res);
845 if (is_malloc_call_result(res)) {
846 /* ok, this is a malloc */
847 } else if (is_Call(res)) {
848 ir_node *ptr = get_Call_ptr(res);
850 if (is_Global(ptr)) {
852 ir_entity *ent = get_Global_entity(ptr);
853 ir_graph *callee = get_entity_irg(ent);
856 /* A self-recursive call. The property did not depend on this call. */
857 } else if (callee != NULL) {
858 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
859 curr_prop = update_property(curr_prop, prop);
/* external callee: fall back on its declared properties */
861 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
863 } else if (get_opt_closed_world() &&
865 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
866 /* check if all possible callees are malloc functions. */
867 int i, n_callees = get_Call_n_callees(res);
868 if (n_callees == 0) {
869 /* This is kind of strange: dying code or a Call that will raise an exception
870 when executed as there is no implementation to call. So better not
872 curr_prop &= ~mtp_property_malloc;
876 for (i = 0; i < n_callees; ++i) {
877 ir_entity *ent = get_Call_callee(res, i);
878 if (ent == unknown_entity) {
879 /* we don't know which entity is called here */
880 curr_prop &= ~mtp_property_malloc;
883 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
884 curr_prop &= ~mtp_property_malloc;
888 /* if we pass the for cycle, malloc is still ok */
891 curr_prop &= ~mtp_property_malloc;
894 /* unknown return value */
895 curr_prop &= ~mtp_property_malloc;
899 } else if (curr_prop & mtp_property_nothrow) {
900 /* exception flow detected */
901 pred = skip_Proj(pred);
904 ir_node *ptr = get_Call_ptr(pred);
906 if (is_Global(ptr)) {
908 ir_entity *ent = get_Global_entity(ptr);
909 ir_graph *callee = get_entity_irg(ent);
912 /* A self-recursive call. The property did not depend on this call. */
913 } else if (callee != NULL) {
914 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
915 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
916 curr_prop = update_property(curr_prop, prop);
918 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
919 curr_prop &= ~mtp_property_nothrow;
921 } else if (get_opt_closed_world() &&
923 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
924 /* check if all possible callees are nothrow functions. */
925 int i, n_callees = get_Call_n_callees(pred);
926 if (n_callees == 0) {
927 /* This is kind of strange: dying code or a Call that will raise an exception
928 when executed as there is no implementation to call. So better not
930 curr_prop &= ~mtp_property_nothrow;
934 for (i = 0; i < n_callees; ++i) {
935 ir_entity *ent = get_Call_callee(pred, i);
936 if (ent == unknown_entity) {
937 /* we don't know which entity is called here */
938 curr_prop &= ~mtp_property_nothrow;
941 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
942 curr_prop &= ~mtp_property_nothrow;
946 /* if we pass the for cycle, nothrow is still ok */
949 curr_prop &= ~mtp_property_nothrow;
952 /* real exception flow possible. */
953 curr_prop &= ~mtp_property_nothrow;
956 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
957 /* no need to search further */
962 if (curr_prop & mtp_property_malloc) {
964 * Note that the malloc property means not only return newly allocated
965 * memory, but also that this memory is ALIAS FREE.
966 * To ensure that, we do NOT allow that the returned memory is somewhere
969 curr_prop &= check_stored_result(irg);
972 if (curr_prop != mtp_no_property) {
973 if (top || (curr_prop & mtp_temporary) == 0) {
974 /* We use the temporary flag here to mark an optimistic result.
975 Set the property only if we are sure that it does NOT base on
976 temporary results OR if we are at top-level. */
977 set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
985 } /* check_nothrow_or_malloc */
988 * When a function was detected as "const", it might be moved out of loops.
989 * This might be dangerous if the graph might contain endless loops.
991 static void check_for_possible_endless_loops(ir_graph *irg) {
995 root_loop = get_irg_loop(irg);
/* any nested loop means the function may not terminate: tag it */
996 if (root_loop->flags & loop_outer_loop)
997 set_irg_additional_property(irg, mtp_property_has_loop);
1001 * optimize function calls by handling const functions
* Entry point: detects nothrow/malloc first, then const/pure, and rewrites calls.
* @param force_run run the transformation passes even if no property was found
* @param callback optional predicate classifying entities as heap allocators
1003 void optimize_funccalls(int force_run, check_alloc_entity_func callback)
1006 unsigned num_const = 0;
1007 unsigned num_pure = 0;
1008 unsigned num_nothrow = 0;
1009 unsigned num_malloc = 0;
1011 is_alloc_entity = callback;
1013 /* prepare: mark all graphs as not analyzed */
1014 last_idx = get_irp_last_idx();
1015 ready_set = rbitset_malloc(last_idx);
1016 busy_set = rbitset_malloc(last_idx);
1018 /* first step: detect, which functions are nothrow or malloc */
1019 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1020 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1021 ir_graph *irg = get_irp_irg(i);
1022 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1024 if (prop & mtp_property_nothrow) {
1026 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1027 } else if (prop & mtp_property_malloc) {
1029 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1033 /* second step: remove exception edges: this must be done before the
1034 detection of const and pure functions take place. */
1035 if (force_run || num_nothrow > 0) {
1038 handle_nothrow_Calls(&ctx);
1039 DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
1040 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
1041 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1043 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
/* reuse the bitsets for the second analysis round */
1046 rbitset_clear_all(ready_set, last_idx);
1047 rbitset_clear_all(busy_set, last_idx);
1049 /* third step: detect, which functions are const or pure */
1050 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1051 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1052 ir_graph *irg = get_irp_irg(i);
1053 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1055 if (prop & mtp_property_const) {
1057 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1058 check_for_possible_endless_loops(irg);
1059 } else if (prop & mtp_property_pure) {
1061 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1065 if (force_run || num_const > 0) {
1068 handle_const_Calls(&ctx);
1069 DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
1070 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1071 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1073 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
1077 } /* optimize_funccalls */
1079 /* initialize the funccall optimization */
1080 void firm_init_funccalls(void) {
1081 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
/* debug aid: uncomment to enable all debug output for this module */
1082 // firm_dbg_set_mask(dbg, -1);
1083 } /* firm_init_funccalls */