2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Optimization of function calls.
23 * @author Michael Beck
31 #include "irgraph_t.h"
34 #include "dbginfo_t.h"
38 #include "iredges_t.h"
40 #include "iroptimize.h"
41 #include "analyze_irg_args.h"
43 #include "raw_bitset.h"
46 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
49 * The walker environment for updating function calls.
51 typedef struct env_t {
52 size_t n_calls_SymConst;
54 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
55 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
56 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
57 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
58 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
61 /** Ready IRG's are marked in the ready set. */
62 static unsigned *ready_set;
64 /** IRG's that are in progress are marked here. */
65 static unsigned *busy_set;
68 * We misuse the mtp_property_inherited flag as temporary here.
69 * The is ok, as we cannot set or get it anyway using the
70 * get_addtional_properties API.
72 #define mtp_temporary mtp_property_inherited
75 * Walker: Collect all calls to const and pure functions
76 * to lists. Collect all Proj(Call) nodes into a Proj list.
78 static void collect_const_and_pure_calls(ir_node *node, void *env)
80 env_t *ctx = (env_t*)env;
84 unsigned and_prop, or_prop, prop;
89 /* set the link to NULL for all non-const/pure calls */
90 set_irn_link(call, NULL);
91 ptr = get_Call_ptr(call);
92 if (is_SymConst_addr_ent(ptr)) {
93 ent = get_SymConst_entity(ptr);
95 prop = get_entity_additional_properties(ent);
96 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
98 ++ctx->n_calls_SymConst;
99 } else if (get_opt_closed_world() &&
101 get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
102 /* If all possible callees are const functions, we can remove the memory edge. */
103 size_t i, n_callees = get_Call_n_callees(call);
104 if (n_callees == 0) {
105 /* This is kind of strange: dying code or a Call that will raise an exception
106 when executed as there is no implementation to call. So better not
111 /* note that const function are a subset of pure ones */
112 and_prop = mtp_property_const | mtp_property_pure;
114 for (i = 0; i < n_callees; ++i) {
115 ent = get_Call_callee(call, i);
116 if (is_unknown_entity(ent)) {
117 /* we don't know which entity is called here */
120 prop = get_entity_additional_properties(ent);
123 if (and_prop == mtp_no_property)
126 prop = and_prop | (or_prop & mtp_property_has_loop);
131 /* ok, if we get here we found a call to a const or a pure function */
132 if (prop & mtp_property_pure) {
133 set_irn_link(call, ctx->pure_call_list);
134 ctx->pure_call_list = call;
136 if (prop & mtp_property_has_loop) {
137 set_irn_link(call, ctx->nonfloat_const_call_list);
138 ctx->nonfloat_const_call_list = call;
140 set_irn_link(call, ctx->float_const_call_list);
141 ctx->float_const_call_list = call;
144 } else if (is_Proj(node)) {
146 * Collect all memory and exception Proj's from
149 call = get_Proj_pred(node);
153 /* collect the Proj's in the Proj list */
154 switch (get_Proj_proj(node)) {
156 case pn_Call_X_except:
157 case pn_Call_X_regular:
158 set_irn_link(node, ctx->proj_list);
159 ctx->proj_list = node;
165 } /* collect_const_and_pure_calls */
168 * Fix the list of collected Calls.
170 * @param irg the graph that contained calls to pure functions
173 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
175 ir_node *call, *next, *mem, *proj;
178 /* First step: fix all calls by removing their memory input and let
180 * The original memory input is preserved in their link fields. */
181 for (call = ctx->float_const_call_list; call != NULL; call = next) {
182 next = (ir_node*)get_irn_link(call);
183 mem = get_Call_mem(call);
185 set_irn_link(call, mem);
186 set_Call_mem(call, get_irg_no_mem(irg));
189 * Unfortunately we cannot simply set the node to 'float'.
190 * There is a reason for that:
192 * - The call might be inside a loop/if that is NOT entered
193 * and calls a endless function. Setting the call to float
194 * would allow to move it out from the loop/if causing this
195 * function be called even if the loop/if is not entered ...
197 * This could be fixed using post-dominators for calls and Pin nodes
198 * but need some more analyzes to ensure that a call that potential
199 * never returns is not executed before some code that generates
200 * observable states...
203 /* finally, this call can float */
204 set_irn_pinned(call, op_pin_state_floats);
205 hook_func_call(irg, call);
208 /* Last step: fix all Proj's */
209 for (proj = ctx->proj_list; proj != NULL; proj = next) {
210 next = (ir_node*)get_irn_link(proj);
211 call = get_Proj_pred(proj);
212 mem = (ir_node*)get_irn_link(call);
214 /* beware of calls in the pure call list */
215 if (!mem || is_Call(mem))
217 assert(get_irn_mode(mem) == mode_M);
219 switch (get_Proj_proj(proj)) {
221 /* in dead code there might be cycles where proj == mem */
226 case pn_Call_X_except:
228 exchange(proj, new_r_Bad(irg, mode_X));
230 case pn_Call_X_regular: {
231 ir_node *block = get_nodes_block(call);
233 exchange(proj, new_r_Jmp(block));
242 /* ... including exception edges */
243 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
244 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
246 } /* fix_const_call_list */
249 * Walker: Collect all calls to nothrow functions
250 * to lists. Collect all Proj(Call) nodes into a Proj list.
252 static void collect_nothrow_calls(ir_node *node, void *env)
254 env_t *ctx = (env_t*)env;
262 /* set the link to NULL for all non-const/pure calls */
263 set_irn_link(call, NULL);
264 ptr = get_Call_ptr(call);
265 if (is_SymConst_addr_ent(ptr)) {
266 ent = get_SymConst_entity(ptr);
268 prop = get_entity_additional_properties(ent);
269 if ((prop & mtp_property_nothrow) == 0)
271 ++ctx->n_calls_SymConst;
272 } else if (get_opt_closed_world() &&
274 get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
275 /* If all possible callees are nothrow functions, we can remove the exception edge. */
276 size_t i, n_callees = get_Call_n_callees(call);
277 if (n_callees == 0) {
278 /* This is kind of strange: dying code or a Call that will raise an exception
279 when executed as there is no implementation to call. So better not
284 /* note that const function are a subset of pure ones */
285 prop = mtp_property_nothrow;
286 for (i = 0; i < n_callees; ++i) {
287 ent = get_Call_callee(call, i);
288 if (is_unknown_entity(ent)) {
289 /* we don't know which entity is called here */
292 prop &= get_entity_additional_properties(ent);
293 if (prop == mtp_no_property)
300 /* ok, if we get here we found a call to a nothrow function */
301 set_irn_link(call, ctx->nothrow_call_list);
302 ctx->nothrow_call_list = call;
303 } else if (is_Proj(node)) {
305 * Collect all memory and exception Proj's from
308 call = get_Proj_pred(node);
312 /* collect the Proj's in the Proj list */
313 switch (get_Proj_proj(node)) {
315 case pn_Call_X_except:
316 case pn_Call_X_regular:
317 set_irn_link(node, ctx->proj_list);
318 ctx->proj_list = node;
324 } /* collect_nothrow_calls */
327 * Fix the list of collected nothrow Calls.
329 * @param irg the graph that contained calls to pure functions
330 * @param call_list the list of all call sites of const functions
331 * @param proj_list the list of all memory/exception Proj's of this call sites
333 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
335 ir_node *call, *next, *proj;
338 /* First step: go through the list of calls and mark them. */
339 for (call = call_list; call; call = next) {
340 next = (ir_node*)get_irn_link(call);
342 /* current_ir_graph is in memory anyway, so it's a good marker */
343 set_irn_link(call, ¤t_ir_graph);
344 hook_func_call(irg, call);
347 /* Second step: Remove all exception Proj's */
348 for (proj = proj_list; proj; proj = next) {
349 next = (ir_node*)get_irn_link(proj);
350 call = get_Proj_pred(proj);
352 /* handle only marked calls */
353 if (get_irn_link(call) != ¤t_ir_graph)
356 /* kill any exception flow */
357 switch (get_Proj_proj(proj)) {
358 case pn_Call_X_except:
360 exchange(proj, new_r_Bad(irg, mode_X));
362 case pn_Call_X_regular: {
363 ir_node *block = get_nodes_block(call);
365 exchange(proj, new_r_Jmp(block));
373 /* changes were done ... */
375 /* ... including exception edges */
376 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
377 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
379 } /* fix_nothrow_call_list */
382 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
383 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
384 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
385 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
386 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
389 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
392 * Calculate the bigger property of two. Handle the temporary flag right.
394 static mtp_additional_properties max_property(mtp_additional_properties a,
395 mtp_additional_properties b)
397 mtp_additional_properties r;
398 mtp_additional_properties t = (a | b) & mtp_temporary;
402 if (a == mtp_no_property || b == mtp_no_property)
403 return mtp_no_property;
409 * Follow the memory chain starting at node and determine
412 * @return mtp_property_const if only calls of const functions are detected
413 * mtp_property_pure if only Loads and const/pure calls detected
414 * mtp_no_property else
416 static mtp_additional_properties follow_mem_(ir_node *node)
418 mtp_additional_properties mode = mtp_property_const;
419 mtp_additional_properties m;
424 if (mode == mtp_no_property)
425 return mtp_no_property;
427 if (irn_visited_else_mark(node))
430 switch (get_irn_opcode(node)) {
432 node = get_Proj_pred(node);
441 /* do a dfs search */
442 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
443 m = follow_mem_(get_irn_n(node, i));
444 mode = max_property(mode, m);
445 if (mode == mtp_no_property)
446 return mtp_no_property;
451 /* Beware volatile Loads are NOT allowed in pure functions. */
452 if (get_Load_volatility(node) == volatility_is_volatile)
453 return mtp_no_property;
454 mode = max_property(mode, mtp_property_pure);
455 node = get_Load_mem(node);
459 /* A call is only tolerable if its either constant or pure. */
460 ptr = get_Call_ptr(node);
461 if (is_SymConst_addr_ent(ptr)) {
462 ir_entity *ent = get_SymConst_entity(ptr);
463 ir_graph *irg = get_entity_irg(ent);
466 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
467 mode = max_property(mode, m);
469 /* we have a graph, analyze it. */
470 m = check_const_or_pure_function(irg, /*top=*/0);
471 mode = max_property(mode, m);
474 return mtp_no_property;
475 node = get_Call_mem(node);
479 return mtp_no_property;
485 * Follow the memory chain starting at node and determine
488 * @return mtp_property_const if only calls of const functions are detected
489 * mtp_property_pure if only Loads and const/pure calls detected
490 * mtp_no_property else
492 static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
494 mtp_additional_properties m = follow_mem_(node);
495 return max_property(mode, m);
499 * Check if a graph represents a const or a pure function.
501 * @param irg the graph to check
502 * @param top if set, this is the top call
504 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
506 ir_node *end, *endbl;
508 ir_entity *entity = get_irg_entity(irg);
509 ir_type *type = get_entity_type(entity);
510 size_t n_params = get_method_n_params(type);
512 mtp_additional_properties may_be_const = mtp_property_const;
513 mtp_additional_properties prop = get_irg_additional_properties(irg);
515 /* libfirm handles aggregate parameters by passing around pointers to
516 * stuff in memory, so if we have compound parameters we are never const */
517 for (i = 0; i < n_params; ++i) {
518 ir_type *param = get_method_param_type(type, i);
519 if (is_compound_type(param)) {
520 prop &= ~mtp_property_const;
521 may_be_const = mtp_no_property;
525 if (prop & mtp_property_const) {
526 /* already marked as a const function */
527 return mtp_property_const;
529 if (prop & mtp_property_pure) {
530 /* already marked as a pure function */
531 return mtp_property_pure;
534 if (IS_IRG_READY(irg)) {
535 /* already checked */
536 return mtp_no_property;
538 if (IS_IRG_BUSY(irg)) {
539 /* We are still evaluate this method.
540 * The function (indirectly) calls itself and thus may not terminate.
542 return mtp_no_property;
546 end = get_irg_end(irg);
547 endbl = get_nodes_block(end);
550 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
551 inc_irg_visited(irg);
552 /* mark the initial mem: recursion of follow_mem() stops here */
553 mark_irn_visited(get_irg_initial_mem(irg));
555 /* visit every Return */
556 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
557 ir_node *node = get_Block_cfgpred(endbl, j);
558 unsigned code = get_irn_opcode(node);
561 /* Bad nodes usually do NOT produce anything, so it's ok */
565 if (code == iro_Return) {
566 mem = get_Return_mem(node);
568 /* Bad nodes usually do NOT produce anything, so it's ok */
572 if (mem != get_irg_initial_mem(irg))
573 prop = max_property(prop, follow_mem(mem, prop));
575 /* Exception found. Cannot be const or pure. */
576 prop = mtp_no_property;
579 if (prop == mtp_no_property)
583 if (prop != mtp_no_property) {
584 /* check, if a keep-alive exists */
585 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
586 ir_node *kept = get_End_keepalive(end, j);
588 if (is_Block(kept)) {
589 prop = mtp_no_property;
593 if (mode_M != get_irn_mode(kept))
596 prop = max_property(prop, follow_mem(kept, prop));
597 if (prop == mtp_no_property)
603 /* Set the property only if we are at top-level. */
604 if (prop != mtp_no_property) {
605 add_irg_additional_properties(irg, prop);
610 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
612 } /* check_const_or_pure_function */
615 * Handle calls to const functions.
619 static void handle_const_Calls(env_t *ctx)
623 ctx->n_calls_SymConst = 0;
624 ctx->n_calls_Sel = 0;
626 /* all calls of const functions can be transformed */
627 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
628 ir_graph *irg = get_irp_irg(i);
630 ctx->float_const_call_list = NULL;
631 ctx->nonfloat_const_call_list = NULL;
632 ctx->pure_call_list = NULL;
633 ctx->proj_list = NULL;
635 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
636 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
638 if (ctx->float_const_call_list != NULL)
639 fix_const_call_lists(irg, ctx);
640 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
642 confirm_irg_properties(irg,
643 IR_GRAPH_PROPERTIES_CONTROL_FLOW
644 | IR_GRAPH_PROPERTY_ONE_RETURN
645 | IR_GRAPH_PROPERTY_MANY_RETURNS);
650 * Handle calls to nothrow functions.
654 static void handle_nothrow_Calls(env_t *ctx)
658 ctx->n_calls_SymConst = 0;
659 ctx->n_calls_Sel = 0;
661 /* all calls of const functions can be transformed */
662 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
663 ir_graph *irg = get_irp_irg(i);
665 ctx->nothrow_call_list = NULL;
666 ctx->proj_list = NULL;
668 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
669 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
671 if (ctx->nothrow_call_list)
672 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
673 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
678 * Check, whether a given node represents a return value of
679 * a malloc like function (ie, new heap allocated memory).
681 * @param node the node to check
683 static int is_malloc_call_result(const ir_node *node)
685 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
686 /* Firm style high-level allocation */
689 /* TODO: check mtp_malloc */
694 * Update a property depending on a call property.
696 static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
698 mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
699 mtp_additional_properties r = orig_prop & call_prop;
704 * Check if a node is stored.
706 static bool is_stored(const ir_node *n)
708 const ir_edge_t *edge;
711 foreach_out_edge(n, edge) {
712 const ir_node *succ = get_edge_src_irn(edge);
714 switch (get_irn_opcode(succ)) {
721 if (get_Store_value(succ) == n)
723 /* ok if its only the address input */
732 ptr = get_Call_ptr(succ);
733 if (is_SymConst_addr_ent(ptr)) {
734 ir_entity *ent = get_SymConst_entity(ptr);
737 /* we know the called entity */
738 for (i = get_Call_n_params(succ); i > 0;) {
739 if (get_Call_param(succ, --i) == n) {
740 /* n is the i'th param of the call */
741 if (get_method_param_access(ent, i) & ptr_access_store) {
742 /* n is store in ent */
748 /* unknown call address */
753 /* bad, potential alias */
761 * Check that the return value of an irg is not stored anywhere.
763 * return ~mtp_property_malloc if return values are stored, ~0 else
765 static mtp_additional_properties check_stored_result(ir_graph *irg)
767 ir_node *end_blk = get_irg_end_block(irg);
769 mtp_additional_properties res = ~mtp_no_property;
771 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
773 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
774 ir_node *pred = get_Block_cfgpred(end_blk, i);
777 if (! is_Return(pred))
779 for (j = get_Return_n_ress(pred); j > 0;) {
780 const ir_node *irn = get_Return_res(pred, --j);
782 if (is_stored(irn)) {
783 /* bad, might create an alias */
784 res = ~mtp_property_malloc;
790 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
795 * Check if a graph represents a nothrow or a malloc function.
797 * @param irg the graph to check
798 * @param top if set, this is the top call
800 static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
802 mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
803 ir_node *end_blk = get_irg_end_block(irg);
808 if (IS_IRG_READY(irg)) {
809 /* already checked */
810 return get_irg_additional_properties(irg);
812 if (IS_IRG_BUSY(irg)) {
813 /* we are still evaluate this method. Be optimistic,
814 return the best possible so far but mark the result as temporary. */
815 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
819 ent = get_irg_entity(irg);
820 mtp = get_entity_type(ent);
822 if (get_method_n_ress(mtp) <= 0)
823 curr_prop &= ~mtp_property_malloc;
825 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
826 ir_node *pred = get_Block_cfgpred(end_blk, i);
828 if (is_Return(pred)) {
829 if (curr_prop & mtp_property_malloc) {
832 /* check, if malloc is called here */
833 for (j = get_Return_n_ress(pred); j > 0;) {
834 ir_node *res = get_Return_res(pred, --j);
836 /* skip Confirms and Casts */
837 res = skip_HighLevel_ops(res);
840 res = get_Proj_pred(res);
841 if (is_malloc_call_result(res)) {
842 /* ok, this is a malloc */
843 } else if (is_Call(res)) {
844 ir_node *ptr = get_Call_ptr(res);
846 if (is_SymConst_addr_ent(ptr)) {
848 ir_entity *ent = get_SymConst_entity(ptr);
849 ir_graph *callee = get_entity_irg(ent);
852 /* A self-recursive call. The property did not depend on this call. */
853 } else if (callee != NULL) {
854 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
855 curr_prop = update_property(curr_prop, prop);
857 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
859 } else if (get_opt_closed_world() &&
861 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
862 /* check if all possible callees are malloc functions. */
863 size_t i, n_callees = get_Call_n_callees(res);
864 if (n_callees == 0) {
865 /* This is kind of strange: dying code or a Call that will raise an exception
866 when executed as there is no implementation to call. So better not
868 curr_prop &= ~mtp_property_malloc;
872 for (i = 0; i < n_callees; ++i) {
873 ir_entity *ent = get_Call_callee(res, i);
874 if (is_unknown_entity(ent)) {
875 /* we don't know which entity is called here */
876 curr_prop &= ~mtp_property_malloc;
879 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
880 curr_prop &= ~mtp_property_malloc;
884 /* if we pass the for cycle, malloc is still ok */
887 curr_prop &= ~mtp_property_malloc;
890 /* unknown return value */
891 curr_prop &= ~mtp_property_malloc;
895 } else if (curr_prop & mtp_property_nothrow) {
896 /* exception flow detected */
897 pred = skip_Proj(pred);
900 ir_node *ptr = get_Call_ptr(pred);
902 if (is_SymConst_addr_ent(ptr)) {
904 ir_entity *ent = get_SymConst_entity(ptr);
905 ir_graph *callee = get_entity_irg(ent);
908 /* A self-recursive call. The property did not depend on this call. */
909 } else if (callee != NULL) {
910 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
911 mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
912 curr_prop = update_property(curr_prop, prop);
914 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
915 curr_prop &= ~mtp_property_nothrow;
917 } else if (get_opt_closed_world() &&
919 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
920 /* check if all possible callees are nothrow functions. */
921 size_t i, n_callees = get_Call_n_callees(pred);
922 if (n_callees == 0) {
923 /* This is kind of strange: dying code or a Call that will raise an exception
924 when executed as there is no implementation to call. So better not
926 curr_prop &= ~mtp_property_nothrow;
930 for (i = 0; i < n_callees; ++i) {
931 ir_entity *ent = get_Call_callee(pred, i);
932 if (is_unknown_entity(ent)) {
933 /* we don't know which entity is called here */
934 curr_prop &= ~mtp_property_nothrow;
937 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
938 curr_prop &= ~mtp_property_nothrow;
942 /* if we pass the for cycle, nothrow is still ok */
945 curr_prop &= ~mtp_property_nothrow;
948 /* real exception flow possible. */
949 curr_prop &= ~mtp_property_nothrow;
952 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
953 /* no need to search further */
958 if (curr_prop & mtp_property_malloc) {
960 * Note that the malloc property means not only return newly allocated
961 * memory, but also that this memory is ALIAS FREE.
962 * To ensure that, we do NOT allow that the returned memory is somewhere
965 curr_prop &= check_stored_result(irg);
968 if (curr_prop != mtp_no_property) {
969 if (top || (curr_prop & mtp_temporary) == 0) {
970 /* We use the temporary flag here to mark an optimistic result.
971 Set the property only if we are sure that it does NOT base on
972 temporary results OR if we are at top-level. */
973 add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
981 } /* check_nothrow_or_malloc */
984 * When a function was detected as "const", it might be moved out of loops.
985 * This might be dangerous if the graph can contain endless loops.
987 static void check_for_possible_endless_loops(ir_graph *irg)
990 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
992 root_loop = get_irg_loop(irg);
993 if (root_loop->flags & loop_outer_loop)
994 add_irg_additional_properties(irg, mtp_property_has_loop);
996 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
1000 * optimize function calls by handling const functions
1002 void optimize_funccalls(void)
1007 size_t num_const = 0;
1008 size_t num_pure = 0;
1009 size_t num_nothrow = 0;
1010 size_t num_malloc = 0;
1012 /* prepare: mark all graphs as not analyzed */
1013 last_idx = get_irp_last_idx();
1014 ready_set = rbitset_malloc(last_idx);
1015 busy_set = rbitset_malloc(last_idx);
1017 /* first step: detect, which functions are nothrow or malloc */
1018 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1019 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1020 ir_graph *irg = get_irp_irg(i);
1021 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1023 if (prop & mtp_property_nothrow) {
1025 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1026 } else if (prop & mtp_property_malloc) {
1028 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1032 /* second step: remove exception edges: this must be done before the
1033 detection of const and pure functions take place. */
1034 handle_nothrow_Calls(&ctx);
1035 DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
1036 DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
1037 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1039 rbitset_clear_all(ready_set, last_idx);
1040 rbitset_clear_all(busy_set, last_idx);
1042 /* third step: detect, which functions are const or pure */
1043 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1044 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
1045 ir_graph *irg = get_irp_irg(i);
1046 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1048 if (prop & mtp_property_const) {
1050 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1051 check_for_possible_endless_loops(irg);
1052 } else if (prop & mtp_property_pure) {
1054 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1058 handle_const_Calls(&ctx);
1059 DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n",
1060 num_const, num_pure));
1061 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1062 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1068 /* initialize the funccall optimization */
1069 void firm_init_funccalls(void)
1071 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1074 /* Creates an ir_prog pass for optimize_funccalls. */
1075 ir_prog_pass_t *optimize_funccalls_pass(const char *name)
1077 return def_prog_pass(name ? name : "funccall", optimize_funccalls);