2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Optimization of function calls.
23 * @author Michael Beck
28 #include <adt/raw_bitset.h>
33 #include "irgraph_t.h"
37 #include "dbginfo_t.h"
41 #include "iredges_t.h"
43 #include "iroptimize.h"
44 #include "analyze_irg_args.h"
/* Debug module handle for this pass; only present in debug builds. */
48 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
51 * The walker environment for updating function calls.
53 typedef struct _env_t {
/* Number of optimized calls through a direct (SymConst) address.
   NOTE(review): callers also reset/print an n_calls_Sel counter, which is
   not visible here — the struct appears truncated in this chunk. */
54 unsigned n_calls_SymConst;
56 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
57 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
58 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
59 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
60 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
63 /** If non-null, evaluates entities for being a heap alloc. */
64 static check_alloc_entity_func is_alloc_entity = NULL;
66 /** Ready IRG's are marked in the ready set. */
67 static unsigned *ready_set;
69 /** IRG's that are in progress are marked here. */
70 static unsigned *busy_set;
73 * We misuse the mtp_property_inherited flag as temporary here.
74 * This is ok, as we cannot set or get it anyway using the
75 * get_additional_properties API.
77 #define mtp_temporary mtp_property_inherited
80 * Walker: Collect all calls to const and pure functions
81 * to lists. Collect all Proj(Call) nodes into a Proj list.
83 static void collect_const_and_pure_calls(ir_node *node, void *env) {
/* Walker callback: for a Call node, decide from the callee's additional
 * properties (or, under closed-world with consistent callee info, from the
 * AND over all possible callees) whether it is const/pure and queue it on
 * the matching list in the env; for Proj(Call) nodes, queue the memory and
 * control-flow Proj's on ctx->proj_list so they can be rewired later.
 * NOTE(review): interior lines of this function are elided in this chunk
 * (local declarations, the is_Call() dispatch, break statements and closing
 * braces); the comments below annotate only the visible logic. */
87 unsigned and_prop, or_prop, prop;
92 /* set the link to NULL for all non-const/pure calls */
93 set_irn_link(call, NULL);
94 ptr = get_Call_ptr(call);
96 ent = get_Global_entity(ptr);
98 prop = get_entity_additional_properties(ent);
99 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
101 ++ctx->n_calls_SymConst;
102 } else if (get_opt_closed_world() &&
104 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
105 /* If all possible callees are const functions, we can remove the memory edge. */
106 int i, n_callees = get_Call_n_callees(call);
107 if (n_callees == 0) {
108 /* This is kind of strange: dying code or a Call that will raise an exception
109 when executed as there is no implementation to call. So better not
114 /* note that const function are a subset of pure ones */
115 and_prop = mtp_property_const | mtp_property_pure;
117 for (i = 0; i < n_callees; ++i) {
118 ent = get_Call_callee(call, i);
119 if (ent == unknown_entity) {
120 /* we don't know which entity is called here */
123 prop = get_entity_additional_properties(ent);
126 if (and_prop == mtp_no_property)
/* keep the AND of callee properties, plus has_loop if any callee loops */
129 prop = and_prop | (or_prop & mtp_property_has_loop);
134 /* ok, if we get here we found a call to a const or a pure function */
135 if (prop & mtp_property_pure) {
136 set_irn_link(call, ctx->pure_call_list);
137 ctx->pure_call_list = call;
139 if (prop & mtp_property_has_loop) {
/* const but possibly endless: must not float out of its block */
140 set_irn_link(call, ctx->nonfloat_const_call_list);
141 ctx->nonfloat_const_call_list = call;
143 set_irn_link(call, ctx->float_const_call_list);
144 ctx->float_const_call_list = call;
147 } else if (is_Proj(node)) {
149 * Collect all memory and exception Proj's from
152 call = get_Proj_pred(node);
156 /* collect the Proj's in the Proj list */
157 switch (get_Proj_proj(node)) {
159 case pn_Call_X_except:
160 case pn_Call_X_regular:
161 set_irn_link(node, ctx->proj_list);
162 ctx->proj_list = node;
168 } /* collect_const_and_pure_calls */
171 * Fix the list of collected Calls.
173 * @param irg the graph that contained calls to pure functions
176 static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
/* Rewire all collected const-function calls in irg: detach each call from
 * the memory chain (stashing the old memory input in the link field), let
 * the call float, then replace every memory/X Proj of those calls so users
 * read the preserved memory directly.
 * NOTE(review): several lines (loop braces, Proj case bodies, an
 * exc_changed guard) are elided in this chunk. */
177 ir_node *call, *next, *mem, *proj;
179 ir_graph *rem = current_ir_graph;
181 current_ir_graph = irg;
183 /* First step: fix all calls by removing their memory input and let
185 * The original memory input is preserved in their link fields. */
186 for (call = ctx->float_const_call_list; call != NULL; call = next) {
187 next = get_irn_link(call);
188 mem = get_Call_mem(call);
190 set_irn_link(call, mem);
191 set_Call_mem(call, get_irg_no_mem(irg));
194 * Unfortunately we cannot simply set the node to 'float'.
195 * There is a reason for that:
197 * - The call might be inside a loop/if that is NOT entered
198 * and calls a endless function. Setting the call to float
199 * would allow to move it out from the loop/if causing this
200 * function be called even if the loop/if is not entered ...
202 * This could be fixed using post-dominators for calls and Pin nodes
203 * but need some more analyzes to ensure that a call that potential
204 * never returns is not executed before some code that generates
205 * observable states...
208 /* finally, this call can float */
209 set_irn_pinned(call, op_pin_state_floats);
210 hook_func_call(irg, call);
213 /* Last step: fix all Proj's */
214 for (proj = ctx->proj_list; proj != NULL; proj = next) {
215 next = get_irn_link(proj);
216 call = get_Proj_pred(proj);
217 mem = get_irn_link(call);
219 /* beware of calls in the pure call list */
220 if (!mem || is_Call(mem))
222 assert(get_irn_mode(mem) == mode_M);
224 switch (get_Proj_proj(proj)) {
226 /* in dead code there might be cycles where proj == mem */
231 case pn_Call_X_except:
/* const calls cannot throw: the exception edge becomes dead */
233 exchange(proj, get_irg_bad(irg));
235 case pn_Call_X_regular: {
236 ir_node *block = get_nodes_block(call);
238 exchange(proj, new_r_Jmp(block));
246 /* changes were done ... */
247 set_irg_outs_inconsistent(irg);
248 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
251 /* ... including exception edges */
252 set_irg_doms_inconsistent(irg);
254 current_ir_graph = rem;
255 } /* fix_const_call_list */
258 * Walker: Collect all calls to nothrow functions
259 * to lists. Collect all Proj(Call) nodes into a Proj list.
261 static void collect_nothrow_calls(ir_node *node, void *env) {
/* Walker callback: queue all calls to nothrow functions on
 * ctx->nothrow_call_list and all memory/X Proj's of calls on ctx->proj_list.
 * Mirrors collect_const_and_pure_calls() but tests mtp_property_nothrow.
 * NOTE(review): interior lines (declarations, is_Call() dispatch, breaks,
 * closing braces) are elided in this chunk. */
270 /* set the link to NULL for all non-const/pure calls */
271 set_irn_link(call, NULL);
272 ptr = get_Call_ptr(call);
273 if (is_Global(ptr)) {
274 ent = get_Global_entity(ptr);
276 prop = get_entity_additional_properties(ent);
277 if ((prop & mtp_property_nothrow) == 0)
279 ++ctx->n_calls_SymConst;
280 } else if (get_opt_closed_world() &&
282 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
283 /* If all possible callees are nothrow functions, we can remove the exception edge. */
284 int i, n_callees = get_Call_n_callees(call);
285 if (n_callees == 0) {
286 /* This is kind of strange: dying code or a Call that will raise an exception
287 when executed as there is no implementation to call. So better not
292 /* start optimistic with nothrow and AND in every callee's properties */
293 prop = mtp_property_nothrow;
294 for (i = 0; i < n_callees; ++i) {
295 ent = get_Call_callee(call, i);
296 if (ent == unknown_entity) {
297 /* we don't know which entity is called here */
300 prop &= get_entity_additional_properties(ent);
301 if (prop == mtp_no_property)
308 /* ok, if we get here we found a call to a nothrow function */
309 set_irn_link(call, ctx->nothrow_call_list);
310 ctx->nothrow_call_list = call;
311 } else if (is_Proj(node)) {
313 * Collect all memory and exception Proj's from
316 call = get_Proj_pred(node);
320 /* collect the Proj's in the Proj list */
321 switch (get_Proj_proj(node)) {
323 case pn_Call_X_except:
324 case pn_Call_X_regular:
325 set_irn_link(node, ctx->proj_list);
326 ctx->proj_list = node;
332 } /* collect_nothrow_calls */
335 * Fix the list of collected nothrow Calls.
337 * @param irg the graph that contained calls to pure functions
338 * @param call_list the list of all call sites of const functions
339 * @param proj_list the list of all memory/exception Proj's of this call sites
341 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
342 ir_node *call, *next, *proj;
344 ir_graph *rem = current_ir_graph;
346 current_ir_graph = irg;
348 /* First step: go through the list of calls and mark them. */
349 for (call = call_list; call; call = next) {
350 next = get_irn_link(call);
352 /* current_ir_graph is in memory anyway, so it's a good marker */
353 set_irn_link(call, ¤t_ir_graph);
354 hook_func_call(irg, call);
357 /* Second step: Remove all exception Proj's */
358 for (proj = proj_list; proj; proj = next) {
359 next = get_irn_link(proj);
360 call = get_Proj_pred(proj);
362 /* handle only marked calls */
363 if (get_irn_link(call) != ¤t_ir_graph)
366 /* kill any exception flow */
367 switch (get_Proj_proj(proj)) {
368 case pn_Call_X_except:
370 exchange(proj, get_irg_bad(irg));
372 case pn_Call_X_regular: {
373 ir_node *block = get_nodes_block(call);
375 exchange(proj, new_r_Jmp(block));
383 /* changes were done ... */
384 set_irg_outs_inconsistent(irg);
385 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
388 /* ... including exception edges */
389 set_irg_doms_inconsistent(irg);
391 current_ir_graph = rem;
392 } /* fix_nothrow_call_list */
/* Per-IRG status flags kept in the two module-level raw bitsets,
 * indexed by the graph's index. */
395 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
396 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
397 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
398 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
399 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
/* Forward declarations: the two analyses recurse into callees. */
402 static unsigned check_const_or_pure_function(ir_graph *irg, int top);
403 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
406 * Calculate the bigger property of two. Handle the temporary flag right.
408 static unsigned max_property(unsigned a, unsigned b) {
409 unsigned r, t = (a | b) & mtp_temporary;
413 if (a == mtp_no_property || b == mtp_no_property)
414 return mtp_no_property;
420 * Follow the memory chain starting at node and determine
423 * @return mtp_property_const if only calls of const functions are detected
424 * mtp_property_pure if only Loads and const/pure calls detected
425 * mtp_no_property else
427 static unsigned _follow_mem(ir_node *node) {
/* DFS along the memory chain; starts optimistic (const) and weakens the
 * result at Loads (-> pure) or gives up (-> no_property) at anything with
 * observable side effects.  Recursion terminates at already-visited nodes,
 * in particular at the pre-marked initial memory.
 * NOTE(review): loop header, several case labels/breaks and closing braces
 * are elided in this chunk. */
428 unsigned m, mode = mtp_property_const;
433 if (mode == mtp_no_property)
434 return mtp_no_property;
436 if (irn_visited_else_mark(node))
439 switch (get_irn_opcode(node)) {
441 node = get_Proj_pred(node);
450 /* do a dfs search */
451 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
452 m = _follow_mem(get_irn_n(node, i));
453 mode = max_property(mode, m);
454 if (mode == mtp_no_property)
455 return mtp_no_property;
460 /* Beware volatile Loads are NOT allowed in pure functions. */
461 if (get_Load_volatility(node) == volatility_is_volatile)
462 return mtp_no_property;
463 mode = max_property(mode, mtp_property_pure);
464 node = get_Load_mem(node);
468 /* A call is only tolerable if its either constant or pure. */
469 ptr = get_Call_ptr(node);
470 if (is_SymConst_addr_ent(ptr)) {
471 ir_entity *ent = get_SymConst_entity(ptr);
472 ir_graph *irg = get_entity_irg(ent);
474 if (irg == current_ir_graph) {
475 /* A self-recursive call. The property did not depend on this call. */
476 } else if (irg == NULL) {
477 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
478 mode = max_property(mode, m);
/* NOTE(review): "irg != NULL" is redundant here — a plain else would do */
479 } else if (irg != NULL) {
480 /* we have a graph, analyze it. */
481 m = check_const_or_pure_function(irg, /*top=*/0);
482 mode = max_property(mode, m);
485 return mtp_no_property;
486 node = get_Call_mem(node);
490 return mtp_no_property;
496 * Follow the memory chain starting at node and determine
499 * @return mtp_property_const if only calls of const functions are detected
500 * mtp_property_pure if only Loads and const/pure calls detected
501 * mtp_no_property else
503 static unsigned follow_mem(ir_node *node, unsigned mode) {
/* Thin wrapper: run the DFS and merge its result into the caller's
 * accumulated property.  NOTE(review): the declaration of m and the
 * closing brace are elided in this chunk. */
506 m = _follow_mem(node);
507 return max_property(mode, m);
511 * Check if a graph represents a const or a pure function.
513 * @param irg the graph to check
514 * @param top if set, this is the top call
516 static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
/* Decide whether irg is const or pure by following the memory chain of
 * every Return (and every mode_M keep-alive) back to the initial memory.
 * Uses ready_set/busy_set for memoization and optimistic recursion:
 * a busy graph answers const|temporary, and temporary results are only
 * committed at top level.
 * NOTE(review): local declarations, SET_IRG_BUSY/READY calls, several
 * continue/break lines and closing braces are elided in this chunk. */
517 ir_node *end, *endbl;
519 unsigned prop = get_irg_additional_properties(irg);
520 ir_graph *rem = current_ir_graph;
522 if (prop & mtp_property_const) {
523 /* already marked as a const function */
524 return mtp_property_const;
526 if (prop & mtp_property_pure) {
527 /* already marked as a pure function */
528 return mtp_property_pure;
531 if (IS_IRG_READY(irg)) {
532 /* already checked */
533 return mtp_no_property;
535 if (IS_IRG_BUSY(irg)) {
536 /* we are still evaluate this method. Be optimistic,
537 return the best possible so far but mark the result as temporary. */
538 return mtp_temporary | mtp_property_const;
542 end = get_irg_end(irg);
543 endbl = get_nodes_block(end);
544 prop = mtp_property_const;
546 current_ir_graph = irg;
548 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
549 inc_irg_visited(irg);
550 /* mark the initial mem: recursion of follow_mem() stops here */
551 mark_irn_visited(get_irg_initial_mem(irg));
553 /* visit every Return */
554 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
555 ir_node *node = get_Block_cfgpred(endbl, j);
556 ir_opcode code = get_irn_opcode(node);
559 /* Bad nodes usually do NOT produce anything, so it's ok */
563 if (code == iro_Return) {
564 mem = get_Return_mem(node);
566 /* Bad nodes usually do NOT produce anything, so it's ok */
570 if (mem != get_irg_initial_mem(irg))
571 prop = max_property(prop, follow_mem(mem, prop));
573 /* Exception found. Cannot be const or pure. */
574 prop = mtp_no_property;
577 if (prop == mtp_no_property)
581 if (prop != mtp_no_property) {
582 /* check, if a keep-alive exists */
583 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
584 ir_node *kept = get_End_keepalive(end, j);
586 if (is_Block(kept)) {
587 prop = mtp_no_property;
591 if (mode_M != get_irn_mode(kept))
594 prop = max_property(prop, follow_mem(kept, prop));
595 if (prop == mtp_no_property)
600 if (prop != mtp_no_property) {
601 if (top || (prop & mtp_temporary) == 0) {
602 /* We use the temporary flag here to mark optimistic result.
603 Set the property only if we are sure that it does NOT base on
604 temporary results OR if we are at top-level. */
605 set_irg_additional_property(irg, prop & ~mtp_temporary);
612 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
613 current_ir_graph = rem;
615 } /* check_const_or_pure_function */
618 * Handle calls to const functions.
622 static void handle_const_Calls(env_t *ctx) {
/* For every graph in the program: collect const/pure calls and their
 * Proj's, then rewire the floatable const calls. */
625 ctx->n_calls_SymConst = 0;
626 ctx->n_calls_Sel = 0;
628 /* all calls of const functions can be transformed */
629 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
630 ir_graph *irg = get_irp_irg(i);
632 ctx->float_const_call_list = NULL;
633 ctx->nonfloat_const_call_list = NULL;
634 ctx->pure_call_list = NULL;
635 ctx->proj_list = NULL;
/* the walker stores list links in the nodes' link fields */
637 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
638 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
640 if (ctx->float_const_call_list != NULL)
641 fix_const_call_lists(irg, ctx);
642 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
644 } /* handle_const_Calls */
647 * Handle calls to nothrow functions.
651 static void handle_nothrow_Calls(env_t *ctx) {
/* For every graph in the program: collect calls to nothrow functions and
 * their Proj's, then remove the dead exception flow. */
654 ctx->n_calls_SymConst = 0;
655 ctx->n_calls_Sel = 0;
657 /* all calls of nothrow functions can be transformed */
658 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
659 ir_graph *irg = get_irp_irg(i);
661 ctx->nothrow_call_list = NULL;
662 ctx->proj_list = NULL;
664 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
665 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
667 if (ctx->nothrow_call_list)
668 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
669 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
674 * Check, whether a given node represents a return value of
675 * a malloc like function (ie, new heap allocated memory).
677 * @param node the node to check
679 static int is_malloc_call_result(const ir_node *node) {
/* True if node is a heap Alloc, or a direct call to an entity the
 * user-supplied is_alloc_entity callback classifies as an allocator.
 * NOTE(review): the return statements of the Alloc branch and the final
 * fallthrough are elided in this chunk. */
680 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
681 /* Firm style high-level allocation */
684 if (is_alloc_entity != NULL && is_Call(node)) {
685 ir_node *ptr = get_Call_ptr(node);
687 if (is_Global(ptr)) {
688 ir_entity *ent = get_Global_entity(ptr);
689 return is_alloc_entity(ent);
693 } /* is_malloc_call_result */
696 * Update a property depending on a call property.
698 static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
699 unsigned t = (orig_prop | call_prop) & mtp_temporary;
700 unsigned r = orig_prop & call_prop;
702 } /** update_property */
705 * Check if a node is stored.
707 static int is_stored(const ir_node *n) {
/* Walk all users of n (requires out-edges) and report whether its value can
 * escape: stored as a Store value, or passed to a call whose parameter
 * access includes ptr_access_store, or reaching an unknown call address.
 * NOTE(review): case labels, return statements and closing braces are
 * elided in this chunk; only the visible checks are annotated. */
708 const ir_edge_t *edge;
711 foreach_out_edge(n, edge) {
712 const ir_node *succ = get_edge_src_irn(edge);
714 switch (get_irn_opcode(succ)) {
721 if (get_Store_value(succ) == n)
723 /* ok if its only the address input */
732 ptr = get_Call_ptr(succ);
733 if (is_Global(ptr)) {
734 ir_entity *ent = get_Global_entity(ptr);
737 /* we know the called entity */
738 for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
739 if (get_Call_param(succ, i) == n) {
740 /* n is the i'th param of the call */
741 if (get_method_param_access(ent, i) & ptr_access_store) {
742 /* n is store in ent */
748 /* unknown call address */
753 /* bad, potential alias */
761 * Check that the return value of an irg is not stored anywhere.
763 * return ~mtp_property_malloc if return values are stored, ~0 else
765 static unsigned check_stored_result(ir_graph *irg) {
/* Scan all Return values of irg; if any is stored somewhere (may alias),
 * the malloc property must be dropped.  Returns ~mtp_property_malloc in
 * that case, ~0 otherwise.
 * NOTE(review): the res declaration and the guard that only deactivates
 * edges when old_edges was false appear elided in this chunk. */
766 ir_node *end_blk = get_irg_end_block(irg);
/* is_stored() needs out-edges; remember whether they were already active */
769 int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
771 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
772 ir_node *pred = get_Block_cfgpred(end_blk, i);
774 if (! is_Return(pred))
776 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
777 const ir_node *irn = get_Return_res(pred, j);
779 if (is_stored(irn)) {
780 /* bad, might create an alias */
781 res = ~mtp_property_malloc;
788 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
790 } /* check_stored_result */
793 * Check if a graph represents a nothrow or a malloc function.
795 * @param irg the graph to check
796 * @param top if set, this is the top call
798 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
/* Decide whether irg has the nothrow and/or malloc property by inspecting
 * all predecessors of the end block: Return nodes are checked for
 * malloc-like result values, non-Return (exception) flow is checked against
 * nothrow callees.  Starts optimistic with both properties and strips them
 * as counterexamples appear; uses busy/ready sets and the temporary flag
 * for recursion exactly like check_const_or_pure_function().
 * NOTE(review): many interior lines (declarations, SET_IRG_BUSY/READY,
 * continue/break statements, closing braces) are elided in this chunk. */
799 ir_node *end_blk = get_irg_end_block(irg);
803 unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
805 if (IS_IRG_READY(irg)) {
806 /* already checked */
807 return get_irg_additional_properties(irg);
809 if (IS_IRG_BUSY(irg)) {
810 /* we are still evaluate this method. Be optimistic,
811 return the best possible so far but mark the result as temporary. */
812 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
816 ent = get_irg_entity(irg);
817 mtp = get_entity_type(ent);
/* a function without results cannot be a malloc-like allocator */
819 if (get_method_n_ress(mtp) <= 0)
820 curr_prop &= ~mtp_property_malloc;
822 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
823 ir_node *pred = get_Block_cfgpred(end_blk, i);
825 if (is_Return(pred)) {
826 if (curr_prop & mtp_property_malloc) {
827 /* check, if malloc is called here */
828 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
829 ir_node *res = get_Return_res(pred, j);
831 /* skip Confirms and Casts */
832 res = skip_HighLevel_ops(res);
835 res = get_Proj_pred(res);
836 if (is_malloc_call_result(res)) {
837 /* ok, this is a malloc */
838 } else if (is_Call(res)) {
839 ir_node *ptr = get_Call_ptr(res);
841 if (is_Global(ptr)) {
843 ir_entity *ent = get_Global_entity(ptr);
844 ir_graph *callee = get_entity_irg(ent);
847 /* A self-recursive call. The property did not depend on this call. */
848 } else if (callee != NULL) {
849 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
850 curr_prop = update_property(curr_prop, prop);
852 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
854 } else if (get_opt_closed_world() &&
856 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
857 /* check if all possible callees are malloc functions. */
858 int i, n_callees = get_Call_n_callees(res);
859 if (n_callees == 0) {
860 /* This is kind of strange: dying code or a Call that will raise an exception
861 when executed as there is no implementation to call. So better not
863 curr_prop &= ~mtp_property_malloc;
867 for (i = 0; i < n_callees; ++i) {
868 ir_entity *ent = get_Call_callee(res, i);
869 if (ent == unknown_entity) {
870 /* we don't know which entity is called here */
871 curr_prop &= ~mtp_property_malloc;
874 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
875 curr_prop &= ~mtp_property_malloc;
879 /* if we pass the for cycle, malloc is still ok */
882 curr_prop &= ~mtp_property_malloc;
885 /* unknown return value */
886 curr_prop &= ~mtp_property_malloc;
890 } else if (curr_prop & mtp_property_nothrow) {
891 /* exception flow detected */
892 pred = skip_Proj(pred);
895 ir_node *ptr = get_Call_ptr(pred);
897 if (is_Global(ptr)) {
899 ir_entity *ent = get_Global_entity(ptr);
900 ir_graph *callee = get_entity_irg(ent);
903 /* A self-recursive call. The property did not depend on this call. */
904 } else if (callee != NULL) {
905 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
906 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
907 curr_prop = update_property(curr_prop, prop);
909 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
910 curr_prop &= ~mtp_property_nothrow;
912 } else if (get_opt_closed_world() &&
914 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
915 /* check if all possible callees are nothrow functions. */
916 int i, n_callees = get_Call_n_callees(pred);
917 if (n_callees == 0) {
918 /* This is kind of strange: dying code or a Call that will raise an exception
919 when executed as there is no implementation to call. So better not
921 curr_prop &= ~mtp_property_nothrow;
925 for (i = 0; i < n_callees; ++i) {
926 ir_entity *ent = get_Call_callee(pred, i);
927 if (ent == unknown_entity) {
928 /* we don't know which entity is called here */
929 curr_prop &= ~mtp_property_nothrow;
932 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
933 curr_prop &= ~mtp_property_nothrow;
937 /* if we pass the for cycle, nothrow is still ok */
940 curr_prop &= ~mtp_property_nothrow;
943 /* real exception flow possible. */
944 curr_prop &= ~mtp_property_nothrow;
947 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
948 /* no need to search further */
953 if (curr_prop & mtp_property_malloc) {
955 * Note that the malloc property means not only return newly allocated
956 * memory, but also that this memory is ALIAS FREE.
957 * To ensure that, we do NOT allow that the returned memory is somewhere
960 curr_prop &= check_stored_result(irg);
963 if (curr_prop != mtp_no_property) {
964 if (top || (curr_prop & mtp_temporary) == 0) {
965 /* We use the temporary flag here to mark an optimistic result.
966 Set the property only if we are sure that it does NOT base on
967 temporary results OR if we are at top-level. */
968 set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
976 } /* check_nothrow_or_malloc */
979 * When a function was detected as "const", it might be moved out of loops.
980 * This might be dangerous if the graph can contain endless loops.
982 static void check_for_possible_endless_loops(ir_graph *irg) {
/* If the graph contains any loop, mark it has_loop: a const call into an
 * endless loop must not be hoisted out of a conditional.
 * NOTE(review): the root_loop declaration and the loop-info construction
 * call appear elided in this chunk. */
986 root_loop = get_irg_loop(irg);
987 if (root_loop->flags & loop_outer_loop)
988 set_irg_additional_property(irg, mtp_property_has_loop);
992 * optimize function calls by handling const functions
994 void optimize_funccalls(int force_run, check_alloc_entity_func callback)
/* Entry point. Three phases over all graphs:
 * 1) detect nothrow/malloc properties, 2) remove dead exception edges
 * (must precede const/pure detection), 3) detect const/pure properties and
 * rewire the corresponding calls.  force_run applies the transformations
 * even when no new property was found; callback classifies allocator
 * entities.  NOTE(review): local declarations, counter increments, the ctx
 * initializations and the free() of the bitsets are elided in this chunk. */
997 unsigned num_const = 0;
998 unsigned num_pure = 0;
999 unsigned num_nothrow = 0;
1000 unsigned num_malloc = 0;
1002 is_alloc_entity = callback;
1004 /* prepare: mark all graphs as not analyzed */
1005 last_idx = get_irp_last_idx();
1006 ready_set = rbitset_malloc(last_idx);
1007 busy_set = rbitset_malloc(last_idx);
1009 /* first step: detect, which functions are nothrow or malloc */
1010 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1011 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1012 ir_graph *irg = get_irp_irg(i);
1013 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1015 if (prop & mtp_property_nothrow) {
1017 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1018 } else if (prop & mtp_property_malloc) {
1020 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1024 /* second step: remove exception edges: this must be done before the
1025 detection of const and pure functions take place. */
1026 if (force_run || num_nothrow > 0) {
1029 handle_nothrow_Calls(&ctx);
1030 DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
1031 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
1032 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1034 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
/* phase 3 re-analyzes everything: reset the memoization bitsets */
1037 rbitset_clear_all(ready_set, last_idx);
1038 rbitset_clear_all(busy_set, last_idx);
1040 /* third step: detect, which functions are const or pure */
1041 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1042 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1043 ir_graph *irg = get_irp_irg(i);
1044 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1046 if (prop & mtp_property_const) {
1048 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1049 check_for_possible_endless_loops(irg);
1050 } else if (prop & mtp_property_pure) {
1052 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1056 if (force_run || num_const > 0) {
1059 handle_const_Calls(&ctx);
1060 DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
1061 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1062 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1064 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
1068 } /* optimize_funccalls */
1070 /* initialize the funccall optimization */
1071 void firm_init_funccalls(void) {
1072 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1073 } /* firm_init_funccalls */
/* NOTE(review): interior of `struct pass_t` — the opening line and the
 * force_run field (read by pass_wrapper below) are elided in this chunk. */
1076 ir_prog_pass_t pass; /* base pass data; must be the first member */
1078 check_alloc_entity_func callback; /* forwarded to optimize_funccalls() */
1082 * Wrapper for running optimize_funccalls() as an ir_prog pass.
1084 static int pass_wrapper(ir_prog *irp, void *context)
/* Adapter matching the ir_prog pass signature: unpack the pass_t stored
 * in context and run the optimization.  NOTE(review): the opening brace,
 * the (void)irp cast and the return statement are elided in this chunk. */
1086 struct pass_t *pass = context;
1089 optimize_funccalls(pass->force_run, pass->callback);
1091 } /* pass_wrapper */
1093 /* Creates an ir_prog pass for optimize_funccalls. */
/* Creates an ir_prog pass for optimize_funccalls.  Ownership of the
 * allocated pass_t passes to the pass infrastructure.
 * NOTE(review): the name parameter line and the opening brace are elided
 * in this chunk. */
1094 ir_prog_pass_t *optimize_funccalls_pass(
1096 int force_run, check_alloc_entity_func callback)
1098 struct pass_t *pass = XMALLOCZ(struct pass_t);
1100 pass->force_run = force_run;
1101 pass->callback = callback;
/* fall back to the default pass name when none was given */
1103 return def_prog_pass_constructor(
1104 &pass->pass, name ? name : "funccall", pass_wrapper);
1105 } /* optimize_funccalls_pass */