2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Optimization of function calls.
23 * @author Michael Beck
28 #include <adt/raw_bitset.h>
30 #include "funccall_t.h"
33 #include "irgraph_t.h"
37 #include "dbginfo_t.h"
41 #include "iredges_t.h"
43 #include "analyze_irg_args.h"
47 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
50 * The walker environment for updating function calls.
52 typedef struct _env_t {
53 unsigned n_calls_SymConst;
55 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
56 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
57 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
58 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
59 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
62 /** If non-null, evaluates entities for being a heap alloc. */
63 static check_alloc_entity_func is_alloc_entity = NULL;
65 /** Ready IRG's are marked in the ready set. */
66 static unsigned *ready_set;
68 /** IRG's that are in progress are marked here. */
69 static unsigned *busy_set;
72 * We misuse the mtp_property_inherited flag as temporary here.
73 * The is ok, as we cannot set or get it anyway using the
74 * get_addtional_properties API.
76 #define mtp_temporary mtp_property_inherited
79 * Walker: Collect all calls to const and pure functions
80 * to lists. Collect all Proj(Call) nodes into a Proj list.
82 static void collect_const_and_pure_calls(ir_node *node, void *env) {
86 unsigned and_prop, or_prop, prop;
91 /* set the link to NULL for all non-const/pure calls */
92 set_irn_link(call, NULL);
93 ptr = get_Call_ptr(call);
95 ent = get_Global_entity(ptr);
97 prop = get_entity_additional_properties(ent);
98 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
100 ++ctx->n_calls_SymConst;
101 } else if (get_opt_closed_world() &&
103 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
104 /* If all possible callees are const functions, we can remove the memory edge. */
105 int i, n_callees = get_Call_n_callees(call);
106 if (n_callees == 0) {
107 /* This is kind of strange: dying code or a Call that will raise an exception
108 when executed as there is no implementation to call. So better not
113 /* note that const function are a subset of pure ones */
114 and_prop = mtp_property_const | mtp_property_pure;
116 for (i = 0; i < n_callees; ++i) {
117 ent = get_Call_callee(call, i);
118 if (ent == unknown_entity) {
119 /* we don't know which entity is called here */
122 prop = get_entity_additional_properties(ent);
125 if (and_prop == mtp_no_property)
128 prop = and_prop | (or_prop & mtp_property_has_loop);
133 /* ok, if we get here we found a call to a const or a pure function */
134 if (prop & mtp_property_pure) {
135 set_irn_link(call, ctx->pure_call_list);
136 ctx->pure_call_list = call;
138 if (prop & mtp_property_has_loop) {
139 set_irn_link(call, ctx->nonfloat_const_call_list);
140 ctx->nonfloat_const_call_list = call;
142 set_irn_link(call, ctx->float_const_call_list);
143 ctx->float_const_call_list = call;
146 } else if (is_Proj(node)) {
148 * Collect all memory and exception Proj's from
151 call = get_Proj_pred(node);
155 /* collect the Proj's in the Proj list */
156 switch (get_Proj_proj(node)) {
158 case pn_Call_X_except:
159 case pn_Call_X_regular:
160 set_irn_link(node, ctx->proj_list);
161 ctx->proj_list = node;
167 } /* collect_const_and_pure_calls */
170 * Fix the list of collected Calls.
172 * @param irg the graph that contained calls to pure functions
175 static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
176 ir_node *call, *next, *mem, *proj;
178 ir_graph *rem = current_ir_graph;
180 current_ir_graph = irg;
182 /* First step: fix all calls by removing their memory input and let
184 * The original memory input is preserved in their link fields. */
185 for (call = ctx->float_const_call_list; call != NULL; call = next) {
186 next = get_irn_link(call);
187 mem = get_Call_mem(call);
189 set_irn_link(call, mem);
190 set_Call_mem(call, get_irg_no_mem(irg));
193 * Unfortunately we cannot simply set the node to 'float'.
194 * There is a reason for that:
196 * - The call might be inside a loop/if that is NOT entered
197 * and calls a endless function. Setting the call to float
198 * would allow to move it out from the loop/if causing this
199 * function be called even if the loop/if is not entered ...
201 * This could be fixed using post-dominators for calls and Pin nodes
202 * but need some more analyzes to ensure that a call that potential
203 * never returns is not executed before some code that generates
204 * observable states...
207 /* finally, this call can float */
208 set_irn_pinned(call, op_pin_state_floats);
209 hook_func_call(irg, call);
212 /* Last step: fix all Proj's */
213 for (proj = ctx->proj_list; proj != NULL; proj = next) {
214 next = get_irn_link(proj);
215 call = get_Proj_pred(proj);
216 mem = get_irn_link(call);
218 /* beware of calls in the pure call list */
219 if (!mem || is_Call(mem))
221 assert(get_irn_mode(mem) == mode_M);
223 switch (get_Proj_proj(proj)) {
225 /* in dead code there might be cycles where proj == mem */
230 case pn_Call_X_except:
232 exchange(proj, get_irg_bad(irg));
234 case pn_Call_X_regular: {
235 ir_node *block = get_nodes_block(call);
237 exchange(proj, new_r_Jmp(block));
245 /* changes were done ... */
246 set_irg_outs_inconsistent(irg);
247 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
250 /* ... including exception edges */
251 set_irg_doms_inconsistent(irg);
253 current_ir_graph = rem;
254 } /* fix_const_call_list */
257 * Walker: Collect all calls to nothrow functions
258 * to lists. Collect all Proj(Call) nodes into a Proj list.
260 static void collect_nothrow_calls(ir_node *node, void *env) {
269 /* set the link to NULL for all non-const/pure calls */
270 set_irn_link(call, NULL);
271 ptr = get_Call_ptr(call);
272 if (is_Global(ptr)) {
273 ent = get_Global_entity(ptr);
275 prop = get_entity_additional_properties(ent);
276 if ((prop & mtp_property_nothrow) == 0)
278 ++ctx->n_calls_SymConst;
279 } else if (get_opt_closed_world() &&
281 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
282 /* If all possible callees are nothrow functions, we can remove the exception edge. */
283 int i, n_callees = get_Call_n_callees(call);
284 if (n_callees == 0) {
285 /* This is kind of strange: dying code or a Call that will raise an exception
286 when executed as there is no implementation to call. So better not
291 /* note that const function are a subset of pure ones */
292 prop = mtp_property_nothrow;
293 for (i = 0; i < n_callees; ++i) {
294 ent = get_Call_callee(call, i);
295 if (ent == unknown_entity) {
296 /* we don't know which entity is called here */
299 prop &= get_entity_additional_properties(ent);
300 if (prop == mtp_no_property)
307 /* ok, if we get here we found a call to a nothrow function */
308 set_irn_link(call, ctx->nothrow_call_list);
309 ctx->nothrow_call_list = call;
310 } else if (is_Proj(node)) {
312 * Collect all memory and exception Proj's from
315 call = get_Proj_pred(node);
319 /* collect the Proj's in the Proj list */
320 switch (get_Proj_proj(node)) {
322 case pn_Call_X_except:
323 case pn_Call_X_regular:
324 set_irn_link(node, ctx->proj_list);
325 ctx->proj_list = node;
331 } /* collect_nothrow_calls */
334 * Fix the list of collected nothrow Calls.
336 * @param irg the graph that contained calls to pure functions
337 * @param call_list the list of all call sites of const functions
338 * @param proj_list the list of all memory/exception Proj's of this call sites
340 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
341 ir_node *call, *next, *proj;
343 ir_graph *rem = current_ir_graph;
345 current_ir_graph = irg;
347 /* First step: go through the list of calls and mark them. */
348 for (call = call_list; call; call = next) {
349 next = get_irn_link(call);
351 /* current_ir_graph is in memory anyway, so it's a good marker */
352 set_irn_link(call, ¤t_ir_graph);
353 hook_func_call(irg, call);
356 /* Second step: Remove all exception Proj's */
357 for (proj = proj_list; proj; proj = next) {
358 next = get_irn_link(proj);
359 call = get_Proj_pred(proj);
361 /* handle only marked calls */
362 if (get_irn_link(call) != ¤t_ir_graph)
365 /* kill any exception flow */
366 switch (get_Proj_proj(proj)) {
367 case pn_Call_X_except:
369 exchange(proj, get_irg_bad(irg));
371 case pn_Call_X_regular: {
372 ir_node *block = get_nodes_block(call);
374 exchange(proj, new_r_Jmp(block));
382 /* changes were done ... */
383 set_irg_outs_inconsistent(irg);
384 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
387 /* ... including exception edges */
388 set_irg_doms_inconsistent(irg);
390 current_ir_graph = rem;
391 } /* fix_nothrow_call_list */
394 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
395 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
396 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
397 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
398 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
401 static unsigned check_const_or_pure_function(ir_graph *irg, int top);
402 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
405 * Calculate the bigger property of two. Handle the temporary flag right.
407 static unsigned max_property(unsigned a, unsigned b) {
408 unsigned r, t = (a | b) & mtp_temporary;
412 if (a == mtp_no_property || b == mtp_no_property)
413 return mtp_no_property;
419 * Follow the memory chain starting at node and determine
422 * @return mtp_property_const if only calls of const functions are detected
423 * mtp_property_pure if only Loads and const/pure calls detected
424 * mtp_no_property else
426 static unsigned _follow_mem(ir_node *node) {
427 unsigned m, mode = mtp_property_const;
432 if (mode == mtp_no_property)
433 return mtp_no_property;
435 if (irn_visited_else_mark(node))
438 switch (get_irn_opcode(node)) {
440 node = get_Proj_pred(node);
449 /* do a dfs search */
450 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
451 m = _follow_mem(get_irn_n(node, i));
452 mode = max_property(mode, m);
453 if (mode == mtp_no_property)
454 return mtp_no_property;
459 /* Beware volatile Loads are NOT allowed in pure functions. */
460 if (get_Load_volatility(node) == volatility_is_volatile)
461 return mtp_no_property;
462 mode = max_property(mode, mtp_property_pure);
463 node = get_Load_mem(node);
467 /* A call is only tolerable if its either constant or pure. */
468 ptr = get_Call_ptr(node);
469 if (is_SymConst_addr_ent(ptr)) {
470 ir_entity *ent = get_SymConst_entity(ptr);
471 ir_graph *irg = get_entity_irg(ent);
473 if (irg == current_ir_graph) {
474 /* A self-recursive call. The property did not depend on this call. */
475 } else if (irg == NULL) {
476 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
477 mode = max_property(mode, m);
478 } else if (irg != NULL) {
479 /* we have a graph, analyze it. */
480 m = check_const_or_pure_function(irg, /*top=*/0);
481 mode = max_property(mode, m);
484 return mtp_no_property;
485 node = get_Call_mem(node);
489 return mtp_no_property;
495 * Follow the memory chain starting at node and determine
498 * @return mtp_property_const if only calls of const functions are detected
499 * mtp_property_pure if only Loads and const/pure calls detected
500 * mtp_no_property else
502 static unsigned follow_mem(ir_node *node, unsigned mode) {
505 m = _follow_mem(node);
506 return max_property(mode, m);
510 * Check if a graph represents a const or a pure function.
512 * @param irg the graph to check
513 * @param top if set, this is the top call
515 static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
516 ir_node *end, *endbl;
518 unsigned prop = get_irg_additional_properties(irg);
519 ir_graph *rem = current_ir_graph;
521 if (prop & mtp_property_const) {
522 /* already marked as a const function */
523 return mtp_property_const;
525 if (prop & mtp_property_pure) {
526 /* already marked as a pure function */
527 return mtp_property_pure;
530 if (IS_IRG_READY(irg)) {
531 /* already checked */
532 return mtp_no_property;
534 if (IS_IRG_BUSY(irg)) {
535 /* we are still evaluate this method. Be optimistic,
536 return the best possible so far but mark the result as temporary. */
537 return mtp_temporary | mtp_property_const;
541 end = get_irg_end(irg);
542 endbl = get_nodes_block(end);
543 prop = mtp_property_const;
545 current_ir_graph = irg;
547 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
548 inc_irg_visited(irg);
549 /* mark the initial mem: recursion of follow_mem() stops here */
550 mark_irn_visited(get_irg_initial_mem(irg));
552 /* visit every Return */
553 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
554 ir_node *node = get_Block_cfgpred(endbl, j);
555 ir_opcode code = get_irn_opcode(node);
558 /* Bad nodes usually do NOT produce anything, so it's ok */
562 if (code == iro_Return) {
563 mem = get_Return_mem(node);
565 /* Bad nodes usually do NOT produce anything, so it's ok */
569 if (mem != get_irg_initial_mem(irg))
570 prop = max_property(prop, follow_mem(mem, prop));
572 /* Exception found. Cannot be const or pure. */
573 prop = mtp_no_property;
576 if (prop == mtp_no_property)
580 if (prop != mtp_no_property) {
581 /* check, if a keep-alive exists */
582 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
583 ir_node *kept = get_End_keepalive(end, j);
585 if (is_Block(kept)) {
586 prop = mtp_no_property;
590 if (mode_M != get_irn_mode(kept))
593 prop = max_property(prop, follow_mem(kept, prop));
594 if (prop == mtp_no_property)
599 if (prop != mtp_no_property) {
600 if (top || (prop & mtp_temporary) == 0) {
601 /* We use the temporary flag here to mark optimistic result.
602 Set the property only if we are sure that it does NOT base on
603 temporary results OR if we are at top-level. */
604 set_irg_additional_property(irg, prop & ~mtp_temporary);
611 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
612 current_ir_graph = rem;
614 } /* check_const_or_pure_function */
617 * Handle calls to const functions.
621 static void handle_const_Calls(env_t *ctx) {
624 ctx->n_calls_SymConst = 0;
625 ctx->n_calls_Sel = 0;
627 /* all calls of const functions can be transformed */
628 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
629 ir_graph *irg = get_irp_irg(i);
631 ctx->float_const_call_list = NULL;
632 ctx->nonfloat_const_call_list = NULL;
633 ctx->pure_call_list = NULL;
634 ctx->proj_list = NULL;
636 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
637 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
639 if (ctx->float_const_call_list != NULL)
640 fix_const_call_lists(irg, ctx);
641 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
643 } /* handle_const_Calls */
646 * Handle calls to nothrow functions.
650 static void handle_nothrow_Calls(env_t *ctx) {
653 ctx->n_calls_SymConst = 0;
654 ctx->n_calls_Sel = 0;
656 /* all calls of const functions can be transformed */
657 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
658 ir_graph *irg = get_irp_irg(i);
660 ctx->nothrow_call_list = NULL;
661 ctx->proj_list = NULL;
663 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
664 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
666 if (ctx->nothrow_call_list)
667 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
668 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
673 * Check, whether a given node represents a return value of
674 * a malloc like function (ie, new heap allocated memory).
676 * @param node the node to check
678 static int is_malloc_call_result(const ir_node *node) {
679 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
680 /* Firm style high-level allocation */
683 if (is_alloc_entity != NULL && is_Call(node)) {
684 ir_node *ptr = get_Call_ptr(node);
686 if (is_Global(ptr)) {
687 ir_entity *ent = get_Global_entity(ptr);
688 return is_alloc_entity(ent);
692 } /* is_malloc_call_result */
695 * Update a property depending on a call property.
697 static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
698 unsigned t = (orig_prop | call_prop) & mtp_temporary;
699 unsigned r = orig_prop & call_prop;
701 } /** update_property */
704 * Check if a node is stored.
706 static int is_stored(const ir_node *n) {
707 const ir_edge_t *edge;
710 foreach_out_edge(n, edge) {
711 const ir_node *succ = get_edge_src_irn(edge);
713 switch (get_irn_opcode(succ)) {
720 if (get_Store_value(succ) == n)
722 /* ok if its only the address input */
731 ptr = get_Call_ptr(succ);
732 if (is_Global(ptr)) {
733 ir_entity *ent = get_Global_entity(ptr);
736 /* we know the called entity */
737 for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
738 if (get_Call_param(succ, i) == n) {
739 /* n is the i'th param of the call */
740 if (get_method_param_access(ent, i) & ptr_access_store) {
741 /* n is store in ent */
747 /* unknown call address */
752 /* bad, potential alias */
760 * Check that the return value of an irg is not stored anywhere.
762 * return ~mtp_property_malloc if return values are stored, ~0 else
764 static unsigned check_stored_result(ir_graph *irg) {
765 ir_node *end_blk = get_irg_end_block(irg);
768 int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
770 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
771 ir_node *pred = get_Block_cfgpred(end_blk, i);
773 if (! is_Return(pred))
775 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
776 const ir_node *irn = get_Return_res(pred, j);
778 if (is_stored(irn)) {
779 /* bad, might create an alias */
780 res = ~mtp_property_malloc;
787 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
789 } /* check_stored_result */
792 * Check if a graph represents a nothrow or a malloc function.
794 * @param irg the graph to check
795 * @param top if set, this is the top call
797 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
798 ir_node *end_blk = get_irg_end_block(irg);
802 unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
804 if (IS_IRG_READY(irg)) {
805 /* already checked */
806 return get_irg_additional_properties(irg);
808 if (IS_IRG_BUSY(irg)) {
809 /* we are still evaluate this method. Be optimistic,
810 return the best possible so far but mark the result as temporary. */
811 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
815 ent = get_irg_entity(irg);
816 mtp = get_entity_type(ent);
818 if (get_method_n_ress(mtp) <= 0)
819 curr_prop &= ~mtp_property_malloc;
821 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
822 ir_node *pred = get_Block_cfgpred(end_blk, i);
824 if (is_Return(pred)) {
825 if (curr_prop & mtp_property_malloc) {
826 /* check, if malloc is called here */
827 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
828 ir_node *res = get_Return_res(pred, j);
830 /* skip Confirms and Casts */
831 res = skip_HighLevel_ops(res);
834 res = get_Proj_pred(res);
835 if (is_malloc_call_result(res)) {
836 /* ok, this is a malloc */
837 } else if (is_Call(res)) {
838 ir_node *ptr = get_Call_ptr(res);
840 if (is_Global(ptr)) {
842 ir_entity *ent = get_Global_entity(ptr);
843 ir_graph *callee = get_entity_irg(ent);
846 /* A self-recursive call. The property did not depend on this call. */
847 } else if (callee != NULL) {
848 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
849 curr_prop = update_property(curr_prop, prop);
851 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
853 } else if (get_opt_closed_world() &&
855 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
856 /* check if all possible callees are malloc functions. */
857 int i, n_callees = get_Call_n_callees(res);
858 if (n_callees == 0) {
859 /* This is kind of strange: dying code or a Call that will raise an exception
860 when executed as there is no implementation to call. So better not
862 curr_prop &= ~mtp_property_malloc;
866 for (i = 0; i < n_callees; ++i) {
867 ir_entity *ent = get_Call_callee(res, i);
868 if (ent == unknown_entity) {
869 /* we don't know which entity is called here */
870 curr_prop &= ~mtp_property_malloc;
873 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
874 curr_prop &= ~mtp_property_malloc;
878 /* if we pass the for cycle, malloc is still ok */
881 curr_prop &= ~mtp_property_malloc;
884 /* unknown return value */
885 curr_prop &= ~mtp_property_malloc;
889 } else if (curr_prop & mtp_property_nothrow) {
890 /* exception flow detected */
891 pred = skip_Proj(pred);
894 ir_node *ptr = get_Call_ptr(pred);
896 if (is_Global(ptr)) {
898 ir_entity *ent = get_Global_entity(ptr);
899 ir_graph *callee = get_entity_irg(ent);
902 /* A self-recursive call. The property did not depend on this call. */
903 } else if (callee != NULL) {
904 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
905 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
906 curr_prop = update_property(curr_prop, prop);
908 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
909 curr_prop &= ~mtp_property_nothrow;
911 } else if (get_opt_closed_world() &&
913 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
914 /* check if all possible callees are nothrow functions. */
915 int i, n_callees = get_Call_n_callees(pred);
916 if (n_callees == 0) {
917 /* This is kind of strange: dying code or a Call that will raise an exception
918 when executed as there is no implementation to call. So better not
920 curr_prop &= ~mtp_property_nothrow;
924 for (i = 0; i < n_callees; ++i) {
925 ir_entity *ent = get_Call_callee(pred, i);
926 if (ent == unknown_entity) {
927 /* we don't know which entity is called here */
928 curr_prop &= ~mtp_property_nothrow;
931 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
932 curr_prop &= ~mtp_property_nothrow;
936 /* if we pass the for cycle, nothrow is still ok */
939 curr_prop &= ~mtp_property_nothrow;
942 /* real exception flow possible. */
943 curr_prop &= ~mtp_property_nothrow;
946 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
947 /* no need to search further */
952 if (curr_prop & mtp_property_malloc) {
954 * Note that the malloc property means not only return newly allocated
955 * memory, but also that this memory is ALIAS FREE.
956 * To ensure that, we do NOT allow that the returned memory is somewhere
959 curr_prop &= check_stored_result(irg);
962 if (curr_prop != mtp_no_property) {
963 if (top || (curr_prop & mtp_temporary) == 0) {
964 /* We use the temporary flag here to mark an optimistic result.
965 Set the property only if we are sure that it does NOT base on
966 temporary results OR if we are at top-level. */
967 set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
975 } /* check_nothrow_or_malloc */
978 * When a function was detected as "const", it might be moved out of loops.
979 * This might be dangerous if the graph can contain endless loops.
981 static void check_for_possible_endless_loops(ir_graph *irg) {
985 root_loop = get_irg_loop(irg);
986 if (root_loop->flags & loop_outer_loop)
987 set_irg_additional_property(irg, mtp_property_has_loop);
991 * optimize function calls by handling const functions
993 void optimize_funccalls(int force_run, check_alloc_entity_func callback)
996 unsigned num_const = 0;
997 unsigned num_pure = 0;
998 unsigned num_nothrow = 0;
999 unsigned num_malloc = 0;
1001 is_alloc_entity = callback;
1003 /* prepare: mark all graphs as not analyzed */
1004 last_idx = get_irp_last_idx();
1005 ready_set = rbitset_malloc(last_idx);
1006 busy_set = rbitset_malloc(last_idx);
1008 /* first step: detect, which functions are nothrow or malloc */
1009 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1010 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1011 ir_graph *irg = get_irp_irg(i);
1012 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1014 if (prop & mtp_property_nothrow) {
1016 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1017 } else if (prop & mtp_property_malloc) {
1019 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1023 /* second step: remove exception edges: this must be done before the
1024 detection of const and pure functions take place. */
1025 if (force_run || num_nothrow > 0) {
1028 handle_nothrow_Calls(&ctx);
1029 DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
1030 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
1031 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1033 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
1036 rbitset_clear_all(ready_set, last_idx);
1037 rbitset_clear_all(busy_set, last_idx);
1039 /* third step: detect, which functions are const or pure */
1040 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1041 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1042 ir_graph *irg = get_irp_irg(i);
1043 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1045 if (prop & mtp_property_const) {
1047 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1048 check_for_possible_endless_loops(irg);
1049 } else if (prop & mtp_property_pure) {
1051 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1055 if (force_run || num_const > 0) {
1058 handle_const_Calls(&ctx);
1059 DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
1060 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1061 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1063 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
1067 } /* optimize_funccalls */
1069 /* initialize the funccall optimization */
1070 void firm_init_funccalls(void) {
1071 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1072 } /* firm_init_funccalls */
1075 ir_prog_pass_t pass;
1077 check_alloc_entity_func callback;
1081 * Wrapper for running optimize_funccalls() as an ir_prog pass.
1083 static int pass_wrapper(ir_prog *irp, void *context)
1085 struct pass_t *pass = context;
1088 optimize_funccalls(pass->force_run, pass->callback);
1090 } /* pass_wrapper */
1092 /* Creates an ir_prog pass for optimize_funccalls. */
1093 ir_prog_pass_t *optimize_funccalls_pass(
1095 int force_run, check_alloc_entity_func callback)
1097 struct pass_t *pass = XMALLOCZ(struct pass_t);
1099 pass->force_run = force_run;
1100 pass->callback = callback;
1102 return def_prog_pass_constructor(
1103 &pass->pass, name ? name : "funccall", pass_wrapper);
1104 } /* optimize_funccalls_pass */