/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Optimization of function calls.
 * @author  Michael Beck
 */
#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iredges_t.h"
#include "iroptimize.h"
#include "analyze_irg_args.h"
#include "raw_bitset.h"
46 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
49 * The walker environment for updating function calls.
51 typedef struct env_t {
52 size_t n_calls_SymConst;
54 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
55 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
56 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
57 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
58 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
/** Ready IRG's are marked in the ready set. */
static unsigned *ready_set;

/** IRG's that are in progress are marked here. */
static unsigned *busy_set;

/**
 * We misuse the mtp_property_inherited flag as temporary here.
 * This is ok, as we cannot set or get it anyway using the
 * get_additional_properties API.
 */
#define mtp_temporary  mtp_property_inherited
75 * Walker: Collect all calls to const and pure functions
76 * to lists. Collect all Proj(Call) nodes into a Proj list.
78 static void collect_const_and_pure_calls(ir_node *node, void *env)
80 env_t *ctx = (env_t*)env;
84 unsigned and_prop, or_prop, prop;
89 /* set the link to NULL for all non-const/pure calls */
90 set_irn_link(call, NULL);
91 ptr = get_Call_ptr(call);
93 ent = get_Global_entity(ptr);
95 prop = get_entity_additional_properties(ent);
96 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
98 ++ctx->n_calls_SymConst;
99 } else if (get_opt_closed_world() &&
101 get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
102 /* If all possible callees are const functions, we can remove the memory edge. */
103 size_t i, n_callees = get_Call_n_callees(call);
104 if (n_callees == 0) {
105 /* This is kind of strange: dying code or a Call that will raise an exception
106 when executed as there is no implementation to call. So better not
111 /* note that const function are a subset of pure ones */
112 and_prop = mtp_property_const | mtp_property_pure;
114 for (i = 0; i < n_callees; ++i) {
115 ent = get_Call_callee(call, i);
116 if (ent == unknown_entity) {
117 /* we don't know which entity is called here */
120 prop = get_entity_additional_properties(ent);
123 if (and_prop == mtp_no_property)
126 prop = and_prop | (or_prop & mtp_property_has_loop);
131 /* ok, if we get here we found a call to a const or a pure function */
132 if (prop & mtp_property_pure) {
133 set_irn_link(call, ctx->pure_call_list);
134 ctx->pure_call_list = call;
136 if (prop & mtp_property_has_loop) {
137 set_irn_link(call, ctx->nonfloat_const_call_list);
138 ctx->nonfloat_const_call_list = call;
140 set_irn_link(call, ctx->float_const_call_list);
141 ctx->float_const_call_list = call;
144 } else if (is_Proj(node)) {
146 * Collect all memory and exception Proj's from
149 call = get_Proj_pred(node);
153 /* collect the Proj's in the Proj list */
154 switch (get_Proj_proj(node)) {
156 case pn_Call_X_except:
157 case pn_Call_X_regular:
158 set_irn_link(node, ctx->proj_list);
159 ctx->proj_list = node;
165 } /* collect_const_and_pure_calls */
168 * Fix the list of collected Calls.
170 * @param irg the graph that contained calls to pure functions
173 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
175 ir_node *call, *next, *mem, *proj;
178 /* First step: fix all calls by removing their memory input and let
180 * The original memory input is preserved in their link fields. */
181 for (call = ctx->float_const_call_list; call != NULL; call = next) {
182 next = (ir_node*)get_irn_link(call);
183 mem = get_Call_mem(call);
185 set_irn_link(call, mem);
186 set_Call_mem(call, get_irg_no_mem(irg));
189 * Unfortunately we cannot simply set the node to 'float'.
190 * There is a reason for that:
192 * - The call might be inside a loop/if that is NOT entered
193 * and calls a endless function. Setting the call to float
194 * would allow to move it out from the loop/if causing this
195 * function be called even if the loop/if is not entered ...
197 * This could be fixed using post-dominators for calls and Pin nodes
198 * but need some more analyzes to ensure that a call that potential
199 * never returns is not executed before some code that generates
200 * observable states...
203 /* finally, this call can float */
204 set_irn_pinned(call, op_pin_state_floats);
205 hook_func_call(irg, call);
208 /* Last step: fix all Proj's */
209 for (proj = ctx->proj_list; proj != NULL; proj = next) {
210 next = (ir_node*)get_irn_link(proj);
211 call = get_Proj_pred(proj);
212 mem = (ir_node*)get_irn_link(call);
214 /* beware of calls in the pure call list */
215 if (!mem || is_Call(mem))
217 assert(get_irn_mode(mem) == mode_M);
219 switch (get_Proj_proj(proj)) {
221 /* in dead code there might be cycles where proj == mem */
226 case pn_Call_X_except:
228 exchange(proj, new_r_Bad(irg, mode_X));
230 case pn_Call_X_regular: {
231 ir_node *block = get_nodes_block(call);
233 exchange(proj, new_r_Jmp(block));
241 /* changes were done ... */
242 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
245 /* ... including exception edges */
246 set_irg_doms_inconsistent(irg);
248 } /* fix_const_call_list */
251 * Walker: Collect all calls to nothrow functions
252 * to lists. Collect all Proj(Call) nodes into a Proj list.
254 static void collect_nothrow_calls(ir_node *node, void *env)
256 env_t *ctx = (env_t*)env;
264 /* set the link to NULL for all non-const/pure calls */
265 set_irn_link(call, NULL);
266 ptr = get_Call_ptr(call);
267 if (is_Global(ptr)) {
268 ent = get_Global_entity(ptr);
270 prop = get_entity_additional_properties(ent);
271 if ((prop & mtp_property_nothrow) == 0)
273 ++ctx->n_calls_SymConst;
274 } else if (get_opt_closed_world() &&
276 get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
277 /* If all possible callees are nothrow functions, we can remove the exception edge. */
278 size_t i, n_callees = get_Call_n_callees(call);
279 if (n_callees == 0) {
280 /* This is kind of strange: dying code or a Call that will raise an exception
281 when executed as there is no implementation to call. So better not
286 /* note that const function are a subset of pure ones */
287 prop = mtp_property_nothrow;
288 for (i = 0; i < n_callees; ++i) {
289 ent = get_Call_callee(call, i);
290 if (ent == unknown_entity) {
291 /* we don't know which entity is called here */
294 prop &= get_entity_additional_properties(ent);
295 if (prop == mtp_no_property)
302 /* ok, if we get here we found a call to a nothrow function */
303 set_irn_link(call, ctx->nothrow_call_list);
304 ctx->nothrow_call_list = call;
305 } else if (is_Proj(node)) {
307 * Collect all memory and exception Proj's from
310 call = get_Proj_pred(node);
314 /* collect the Proj's in the Proj list */
315 switch (get_Proj_proj(node)) {
317 case pn_Call_X_except:
318 case pn_Call_X_regular:
319 set_irn_link(node, ctx->proj_list);
320 ctx->proj_list = node;
326 } /* collect_nothrow_calls */
329 * Fix the list of collected nothrow Calls.
331 * @param irg the graph that contained calls to pure functions
332 * @param call_list the list of all call sites of const functions
333 * @param proj_list the list of all memory/exception Proj's of this call sites
335 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
337 ir_node *call, *next, *proj;
340 /* First step: go through the list of calls and mark them. */
341 for (call = call_list; call; call = next) {
342 next = (ir_node*)get_irn_link(call);
344 /* current_ir_graph is in memory anyway, so it's a good marker */
345 set_irn_link(call, ¤t_ir_graph);
346 hook_func_call(irg, call);
349 /* Second step: Remove all exception Proj's */
350 for (proj = proj_list; proj; proj = next) {
351 next = (ir_node*)get_irn_link(proj);
352 call = get_Proj_pred(proj);
354 /* handle only marked calls */
355 if (get_irn_link(call) != ¤t_ir_graph)
358 /* kill any exception flow */
359 switch (get_Proj_proj(proj)) {
360 case pn_Call_X_except:
362 exchange(proj, new_r_Bad(irg, mode_X));
364 case pn_Call_X_regular: {
365 ir_node *block = get_nodes_block(call);
367 exchange(proj, new_r_Jmp(block));
375 /* changes were done ... */
376 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
379 /* ... including exception edges */
380 set_irg_doms_inconsistent(irg);
382 } /* fix_nothrow_call_list */
385 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
386 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
387 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
388 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
389 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
392 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
395 * Calculate the bigger property of two. Handle the temporary flag right.
397 static mtp_additional_properties max_property(mtp_additional_properties a,
398 mtp_additional_properties b)
400 mtp_additional_properties r;
401 mtp_additional_properties t = (a | b) & mtp_temporary;
405 if (a == mtp_no_property || b == mtp_no_property)
406 return mtp_no_property;
412 * Follow the memory chain starting at node and determine
415 * @return mtp_property_const if only calls of const functions are detected
416 * mtp_property_pure if only Loads and const/pure calls detected
417 * mtp_no_property else
419 static mtp_additional_properties follow_mem_(ir_node *node)
421 mtp_additional_properties mode = mtp_property_const;
422 mtp_additional_properties m;
427 if (mode == mtp_no_property)
428 return mtp_no_property;
430 if (irn_visited_else_mark(node))
433 switch (get_irn_opcode(node)) {
435 node = get_Proj_pred(node);
444 /* do a dfs search */
445 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
446 m = follow_mem_(get_irn_n(node, i));
447 mode = max_property(mode, m);
448 if (mode == mtp_no_property)
449 return mtp_no_property;
454 /* Beware volatile Loads are NOT allowed in pure functions. */
455 if (get_Load_volatility(node) == volatility_is_volatile)
456 return mtp_no_property;
457 mode = max_property(mode, mtp_property_pure);
458 node = get_Load_mem(node);
462 /* A call is only tolerable if its either constant or pure. */
463 ptr = get_Call_ptr(node);
464 if (is_SymConst_addr_ent(ptr)) {
465 ir_entity *ent = get_SymConst_entity(ptr);
466 ir_graph *irg = get_entity_irg(ent);
469 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
470 mode = max_property(mode, m);
472 /* we have a graph, analyze it. */
473 m = check_const_or_pure_function(irg, /*top=*/0);
474 mode = max_property(mode, m);
477 return mtp_no_property;
478 node = get_Call_mem(node);
482 return mtp_no_property;
488 * Follow the memory chain starting at node and determine
491 * @return mtp_property_const if only calls of const functions are detected
492 * mtp_property_pure if only Loads and const/pure calls detected
493 * mtp_no_property else
495 static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
497 mtp_additional_properties m = follow_mem_(node);
498 return max_property(mode, m);
502 * Check if a graph represents a const or a pure function.
504 * @param irg the graph to check
505 * @param top if set, this is the top call
507 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
509 ir_node *end, *endbl;
511 ir_entity *entity = get_irg_entity(irg);
512 ir_type *type = get_entity_type(entity);
513 size_t n_params = get_method_n_params(type);
515 mtp_additional_properties may_be_const = mtp_property_const;
516 mtp_additional_properties prop = get_irg_additional_properties(irg);
518 /* libfirm handles aggregate parameters by passing around pointers to
519 * stuff in memory, so if we have compound parameters we are never const */
520 for (i = 0; i < n_params; ++i) {
521 ir_type *param = get_method_param_type(type, i);
522 if (is_compound_type(param)) {
523 prop &= ~mtp_property_const;
524 may_be_const = mtp_no_property;
528 if (prop & mtp_property_const) {
529 /* already marked as a const function */
530 return mtp_property_const;
532 if (prop & mtp_property_pure) {
533 /* already marked as a pure function */
534 return mtp_property_pure;
537 if (IS_IRG_READY(irg)) {
538 /* already checked */
539 return mtp_no_property;
541 if (IS_IRG_BUSY(irg)) {
542 /* We are still evaluate this method.
543 * The function (indirectly) calls itself and thus may not terminate.
545 return mtp_no_property;
549 end = get_irg_end(irg);
550 endbl = get_nodes_block(end);
553 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
554 inc_irg_visited(irg);
555 /* mark the initial mem: recursion of follow_mem() stops here */
556 mark_irn_visited(get_irg_initial_mem(irg));
558 /* visit every Return */
559 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
560 ir_node *node = get_Block_cfgpred(endbl, j);
561 unsigned code = get_irn_opcode(node);
564 /* Bad nodes usually do NOT produce anything, so it's ok */
568 if (code == iro_Return) {
569 mem = get_Return_mem(node);
571 /* Bad nodes usually do NOT produce anything, so it's ok */
575 if (mem != get_irg_initial_mem(irg))
576 prop = max_property(prop, follow_mem(mem, prop));
578 /* Exception found. Cannot be const or pure. */
579 prop = mtp_no_property;
582 if (prop == mtp_no_property)
586 if (prop != mtp_no_property) {
587 /* check, if a keep-alive exists */
588 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
589 ir_node *kept = get_End_keepalive(end, j);
591 if (is_Block(kept)) {
592 prop = mtp_no_property;
596 if (mode_M != get_irn_mode(kept))
599 prop = max_property(prop, follow_mem(kept, prop));
600 if (prop == mtp_no_property)
606 /* Set the property only if we are at top-level. */
607 if (prop != mtp_no_property) {
608 add_irg_additional_properties(irg, prop);
613 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
615 } /* check_const_or_pure_function */
618 * Handle calls to const functions.
622 static void handle_const_Calls(env_t *ctx)
626 ctx->n_calls_SymConst = 0;
627 ctx->n_calls_Sel = 0;
629 /* all calls of const functions can be transformed */
630 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
631 ir_graph *irg = get_irp_irg(i);
633 ctx->float_const_call_list = NULL;
634 ctx->nonfloat_const_call_list = NULL;
635 ctx->pure_call_list = NULL;
636 ctx->proj_list = NULL;
638 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
639 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
641 if (ctx->float_const_call_list != NULL)
642 fix_const_call_lists(irg, ctx);
643 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
645 } /* handle_const_Calls */
648 * Handle calls to nothrow functions.
652 static void handle_nothrow_Calls(env_t *ctx)
656 ctx->n_calls_SymConst = 0;
657 ctx->n_calls_Sel = 0;
659 /* all calls of const functions can be transformed */
660 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
661 ir_graph *irg = get_irp_irg(i);
663 ctx->nothrow_call_list = NULL;
664 ctx->proj_list = NULL;
666 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
667 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
669 if (ctx->nothrow_call_list)
670 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
671 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
676 * Check, whether a given node represents a return value of
677 * a malloc like function (ie, new heap allocated memory).
679 * @param node the node to check
681 static int is_malloc_call_result(const ir_node *node)
683 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
684 /* Firm style high-level allocation */
687 /* TODO: check mtp_malloc */
692 * Update a property depending on a call property.
694 static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
696 mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
697 mtp_additional_properties r = orig_prop & call_prop;
702 * Check if a node is stored.
704 static int is_stored(const ir_node *n)
706 const ir_edge_t *edge;
709 foreach_out_edge(n, edge) {
710 const ir_node *succ = get_edge_src_irn(edge);
712 switch (get_irn_opcode(succ)) {
719 if (get_Store_value(succ) == n)
721 /* ok if its only the address input */
730 ptr = get_Call_ptr(succ);
731 if (is_Global(ptr)) {
732 ir_entity *ent = get_Global_entity(ptr);
735 /* we know the called entity */
736 for (i = get_Call_n_params(succ); i > 0;) {
737 if (get_Call_param(succ, --i) == n) {
738 /* n is the i'th param of the call */
739 if (get_method_param_access(ent, i) & ptr_access_store) {
740 /* n is store in ent */
746 /* unknown call address */
751 /* bad, potential alias */
759 * Check that the return value of an irg is not stored anywhere.
761 * return ~mtp_property_malloc if return values are stored, ~0 else
763 static mtp_additional_properties check_stored_result(ir_graph *irg)
765 ir_node *end_blk = get_irg_end_block(irg);
767 mtp_additional_properties res = ~mtp_no_property;
768 int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
770 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
771 ir_node *pred = get_Block_cfgpred(end_blk, i);
774 if (! is_Return(pred))
776 for (j = get_Return_n_ress(pred); j > 0;) {
777 const ir_node *irn = get_Return_res(pred, --j);
779 if (is_stored(irn)) {
780 /* bad, might create an alias */
781 res = ~mtp_property_malloc;
788 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
793 * Check if a graph represents a nothrow or a malloc function.
795 * @param irg the graph to check
796 * @param top if set, this is the top call
798 static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
800 mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
801 ir_node *end_blk = get_irg_end_block(irg);
806 if (IS_IRG_READY(irg)) {
807 /* already checked */
808 return get_irg_additional_properties(irg);
810 if (IS_IRG_BUSY(irg)) {
811 /* we are still evaluate this method. Be optimistic,
812 return the best possible so far but mark the result as temporary. */
813 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
817 ent = get_irg_entity(irg);
818 mtp = get_entity_type(ent);
820 if (get_method_n_ress(mtp) <= 0)
821 curr_prop &= ~mtp_property_malloc;
823 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
824 ir_node *pred = get_Block_cfgpred(end_blk, i);
826 if (is_Return(pred)) {
827 if (curr_prop & mtp_property_malloc) {
830 /* check, if malloc is called here */
831 for (j = get_Return_n_ress(pred); j > 0;) {
832 ir_node *res = get_Return_res(pred, --j);
834 /* skip Confirms and Casts */
835 res = skip_HighLevel_ops(res);
838 res = get_Proj_pred(res);
839 if (is_malloc_call_result(res)) {
840 /* ok, this is a malloc */
841 } else if (is_Call(res)) {
842 ir_node *ptr = get_Call_ptr(res);
844 if (is_Global(ptr)) {
846 ir_entity *ent = get_Global_entity(ptr);
847 ir_graph *callee = get_entity_irg(ent);
850 /* A self-recursive call. The property did not depend on this call. */
851 } else if (callee != NULL) {
852 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
853 curr_prop = update_property(curr_prop, prop);
855 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
857 } else if (get_opt_closed_world() &&
859 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
860 /* check if all possible callees are malloc functions. */
861 size_t i, n_callees = get_Call_n_callees(res);
862 if (n_callees == 0) {
863 /* This is kind of strange: dying code or a Call that will raise an exception
864 when executed as there is no implementation to call. So better not
866 curr_prop &= ~mtp_property_malloc;
870 for (i = 0; i < n_callees; ++i) {
871 ir_entity *ent = get_Call_callee(res, i);
872 if (ent == unknown_entity) {
873 /* we don't know which entity is called here */
874 curr_prop &= ~mtp_property_malloc;
877 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
878 curr_prop &= ~mtp_property_malloc;
882 /* if we pass the for cycle, malloc is still ok */
885 curr_prop &= ~mtp_property_malloc;
888 /* unknown return value */
889 curr_prop &= ~mtp_property_malloc;
893 } else if (curr_prop & mtp_property_nothrow) {
894 /* exception flow detected */
895 pred = skip_Proj(pred);
898 ir_node *ptr = get_Call_ptr(pred);
900 if (is_Global(ptr)) {
902 ir_entity *ent = get_Global_entity(ptr);
903 ir_graph *callee = get_entity_irg(ent);
906 /* A self-recursive call. The property did not depend on this call. */
907 } else if (callee != NULL) {
908 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
909 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
910 curr_prop = update_property(curr_prop, prop);
912 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
913 curr_prop &= ~mtp_property_nothrow;
915 } else if (get_opt_closed_world() &&
917 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
918 /* check if all possible callees are nothrow functions. */
919 size_t i, n_callees = get_Call_n_callees(pred);
920 if (n_callees == 0) {
921 /* This is kind of strange: dying code or a Call that will raise an exception
922 when executed as there is no implementation to call. So better not
924 curr_prop &= ~mtp_property_nothrow;
928 for (i = 0; i < n_callees; ++i) {
929 ir_entity *ent = get_Call_callee(pred, i);
930 if (ent == unknown_entity) {
931 /* we don't know which entity is called here */
932 curr_prop &= ~mtp_property_nothrow;
935 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
936 curr_prop &= ~mtp_property_nothrow;
940 /* if we pass the for cycle, nothrow is still ok */
943 curr_prop &= ~mtp_property_nothrow;
946 /* real exception flow possible. */
947 curr_prop &= ~mtp_property_nothrow;
950 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
951 /* no need to search further */
956 if (curr_prop & mtp_property_malloc) {
958 * Note that the malloc property means not only return newly allocated
959 * memory, but also that this memory is ALIAS FREE.
960 * To ensure that, we do NOT allow that the returned memory is somewhere
963 curr_prop &= check_stored_result(irg);
966 if (curr_prop != mtp_no_property) {
967 if (top || (curr_prop & mtp_temporary) == 0) {
968 /* We use the temporary flag here to mark an optimistic result.
969 Set the property only if we are sure that it does NOT base on
970 temporary results OR if we are at top-level. */
971 add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
979 } /* check_nothrow_or_malloc */
982 * When a function was detected as "const", it might be moved out of loops.
983 * This might be dangerous if the graph can contain endless loops.
985 static void check_for_possible_endless_loops(ir_graph *irg)
990 root_loop = get_irg_loop(irg);
991 if (root_loop->flags & loop_outer_loop)
992 add_irg_additional_properties(irg, mtp_property_has_loop);
996 * optimize function calls by handling const functions
998 void optimize_funccalls(void)
1003 size_t num_const = 0;
1004 size_t num_pure = 0;
1005 size_t num_nothrow = 0;
1006 size_t num_malloc = 0;
1008 /* prepare: mark all graphs as not analyzed */
1009 last_idx = get_irp_last_idx();
1010 ready_set = rbitset_malloc(last_idx);
1011 busy_set = rbitset_malloc(last_idx);
1013 /* first step: detect, which functions are nothrow or malloc */
1014 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1015 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1016 ir_graph *irg = get_irp_irg(i);
1017 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1019 if (prop & mtp_property_nothrow) {
1021 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1022 } else if (prop & mtp_property_malloc) {
1024 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1028 /* second step: remove exception edges: this must be done before the
1029 detection of const and pure functions take place. */
1030 handle_nothrow_Calls(&ctx);
1031 DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
1032 DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
1033 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1035 rbitset_clear_all(ready_set, last_idx);
1036 rbitset_clear_all(busy_set, last_idx);
1038 /* third step: detect, which functions are const or pure */
1039 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1040 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1041 ir_graph *irg = get_irp_irg(i);
1042 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1044 if (prop & mtp_property_const) {
1046 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1047 check_for_possible_endless_loops(irg);
1048 } else if (prop & mtp_property_pure) {
1050 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1054 handle_const_Calls(&ctx);
1055 DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n", num_const, num_pure));
1056 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1057 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1063 /* initialize the funccall optimization */
1064 void firm_init_funccalls(void)
1066 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1069 /* Creates an ir_prog pass for optimize_funccalls. */
1070 ir_prog_pass_t *optimize_funccalls_pass(const char *name)
1072 return def_prog_pass(name ? name : "funccall", optimize_funccalls);