2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Optimization of function calls.
23 * @author Michael Beck
31 #include "irgraph_t.h"
35 #include "dbginfo_t.h"
39 #include "iredges_t.h"
41 #include "iroptimize.h"
42 #include "analyze_irg_args.h"
44 #include "raw_bitset.h"
47 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
50 * The walker environment for updating function calls.
52 typedef struct _env_t {
/* counter of direct (SymConst/Global-addressed) calls optimized — incremented by the walkers below */
53 unsigned n_calls_SymConst;
55 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
56 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
57 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
58 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
59 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
/* NOTE(review): the closing "} env_t;" and an "n_calls_Sel" counter (used by
   handle_const_Calls/handle_nothrow_Calls below) are missing from this excerpt. */
62 /** If non-null, evaluates entities for being a heap alloc. */
63 static check_alloc_entity_func is_alloc_entity = NULL;
65 /** Ready IRG's are marked in the ready set. */
66 static unsigned *ready_set;
68 /** IRG's that are in progress are marked here. */
69 static unsigned *busy_set;
/*
72 * We misuse the mtp_property_inherited flag as temporary here.
73 * This is ok, as we cannot set or get it anyway using the
74 * get_additional_properties API.
 */
76 #define mtp_temporary mtp_property_inherited
/*
 * Walker callback: for each Call through a Global/SymConst address whose
 * entity is const or pure, link it into the matching ctx list (pure,
 * float-const, or nonfloat-const when mtp_property_has_loop is set); in
 * closed-world mode with consistent callee info, derive the property as the
 * intersection over all possible callees.  Call-related Proj nodes are
 * collected into ctx->proj_list for later fixing.
 * NOTE(review): the embedded original line numbers jump (82->87->92, ...),
 * so declarations, braces and continue/break statements are missing from
 * this excerpt; comments describe only what is visible.
 */
79 * Walker: Collect all calls to const and pure functions
80 * to lists. Collect all Proj(Call) nodes into a Proj list.
82 static void collect_const_and_pure_calls(ir_node *node, void *env)
87 unsigned and_prop, or_prop, prop;
92 /* set the link to NULL for all non-const/pure calls */
93 set_irn_link(call, NULL);
94 ptr = get_Call_ptr(call);
96 ent = get_Global_entity(ptr);
98 prop = get_entity_additional_properties(ent);
99 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
101 ++ctx->n_calls_SymConst;
102 } else if (get_opt_closed_world() &&
104 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
105 /* If all possible callees are const functions, we can remove the memory edge. */
106 int i, n_callees = get_Call_n_callees(call);
107 if (n_callees == 0) {
108 /* This is kind of strange: dying code or a Call that will raise an exception
109 when executed as there is no implementation to call. So better not
114 /* note that const functions are a subset of pure ones */
115 and_prop = mtp_property_const | mtp_property_pure;
117 for (i = 0; i < n_callees; ++i) {
118 ent = get_Call_callee(call, i);
119 if (ent == unknown_entity) {
120 /* we don't know which entity is called here */
123 prop = get_entity_additional_properties(ent);
126 if (and_prop == mtp_no_property)
129 prop = and_prop | (or_prop & mtp_property_has_loop);
134 /* ok, if we get here we found a call to a const or a pure function */
135 if (prop & mtp_property_pure) {
136 set_irn_link(call, ctx->pure_call_list);
137 ctx->pure_call_list = call;
/* const call: keep it pinned if any callee may loop endlessly (cannot float) */
139 if (prop & mtp_property_has_loop) {
140 set_irn_link(call, ctx->nonfloat_const_call_list);
141 ctx->nonfloat_const_call_list = call;
143 set_irn_link(call, ctx->float_const_call_list);
144 ctx->float_const_call_list = call;
147 } else if (is_Proj(node)) {
149 * Collect all memory and exception Proj's from
152 call = get_Proj_pred(node);
156 /* collect the Proj's in the Proj list */
157 switch (get_Proj_proj(node)) {
159 case pn_Call_X_except:
160 case pn_Call_X_regular:
161 set_irn_link(node, ctx->proj_list);
162 ctx->proj_list = node;
168 } /* collect_const_and_pure_calls */
/*
 * Rewrites the collected const calls: their memory input is replaced by
 * NoMem (the original mem is stashed in the link field), the call is allowed
 * to float, and afterwards every collected Proj is rerouted — Proj(M) back
 * to the saved memory chain, Proj(X_except) to Bad, Proj(X_regular) to a Jmp.
 * NOTE(review): this excerpt is missing lines (embedded numbering has gaps);
 * the Proj-fixing switch below lacks its pn_Call_M case and break statements.
 */
171 * Fix the list of collected Calls.
173 * @param irg the graph that contained calls to pure functions
176 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
178 ir_node *call, *next, *mem, *proj;
180 ir_graph *rem = current_ir_graph;
182 current_ir_graph = irg;
184 /* First step: fix all calls by removing their memory input and let
186 * The original memory input is preserved in their link fields. */
187 for (call = ctx->float_const_call_list; call != NULL; call = next) {
188 next = get_irn_link(call);
189 mem = get_Call_mem(call);
191 set_irn_link(call, mem);
192 set_Call_mem(call, get_irg_no_mem(irg));
/*
195 * Unfortunately we cannot simply set the node to 'float'.
196 * There is a reason for that:
198 * - The call might be inside a loop/if that is NOT entered
199 * and calls an endless function. Setting the call to float
200 * would allow to move it out from the loop/if causing this
201 * function be called even if the loop/if is not entered ...
203 * This could be fixed using post-dominators for calls and Pin nodes
204 * but needs some more analyses to ensure that a call that potentially
205 * never returns is not executed before some code that generates
206 * observable states...
 */
209 /* finally, this call can float */
210 set_irn_pinned(call, op_pin_state_floats);
211 hook_func_call(irg, call);
214 /* Last step: fix all Proj's */
215 for (proj = ctx->proj_list; proj != NULL; proj = next) {
216 next = get_irn_link(proj);
217 call = get_Proj_pred(proj);
218 mem = get_irn_link(call);
220 /* beware of calls in the pure call list */
221 if (!mem || is_Call(mem))
223 assert(get_irn_mode(mem) == mode_M);
225 switch (get_Proj_proj(proj)) {
227 /* in dead code there might be cycles where proj == mem */
232 case pn_Call_X_except:
234 exchange(proj, get_irg_bad(irg));
236 case pn_Call_X_regular: {
237 ir_node *block = get_nodes_block(call);
239 exchange(proj, new_r_Jmp(block));
247 /* changes were done ... */
248 set_irg_outs_inconsistent(irg);
249 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
252 /* ... including exception edges */
253 set_irg_doms_inconsistent(irg);
255 current_ir_graph = rem;
256 } /* fix_const_call_list */
/*
 * Walker callback: mirrors collect_const_and_pure_calls but for the
 * mtp_property_nothrow property — collects nothrow Calls into
 * ctx->nothrow_call_list and their X_except/X_regular Projs into
 * ctx->proj_list.
 * NOTE(review): lines are missing from this excerpt (gaps in the embedded
 * numbering); braces and continue/break statements are absent.
 */
259 * Walker: Collect all calls to nothrow functions
260 * to lists. Collect all Proj(Call) nodes into a Proj list.
262 static void collect_nothrow_calls(ir_node *node, void *env)
272 /* set the link to NULL for all non-const/pure calls */
273 set_irn_link(call, NULL);
274 ptr = get_Call_ptr(call);
275 if (is_Global(ptr)) {
276 ent = get_Global_entity(ptr);
278 prop = get_entity_additional_properties(ent);
279 if ((prop & mtp_property_nothrow) == 0)
281 ++ctx->n_calls_SymConst;
282 } else if (get_opt_closed_world() &&
284 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
285 /* If all possible callees are nothrow functions, we can remove the exception edge. */
286 int i, n_callees = get_Call_n_callees(call);
287 if (n_callees == 0) {
288 /* This is kind of strange: dying code or a Call that will raise an exception
289 when executed as there is no implementation to call. So better not
294 /* nothrow must hold for every possible callee (intersection) */
295 prop = mtp_property_nothrow;
296 for (i = 0; i < n_callees; ++i) {
297 ent = get_Call_callee(call, i);
298 if (ent == unknown_entity) {
299 /* we don't know which entity is called here */
302 prop &= get_entity_additional_properties(ent);
303 if (prop == mtp_no_property)
310 /* ok, if we get here we found a call to a nothrow function */
311 set_irn_link(call, ctx->nothrow_call_list);
312 ctx->nothrow_call_list = call;
313 } else if (is_Proj(node)) {
315 * Collect all memory and exception Proj's from
318 call = get_Proj_pred(node);
322 /* collect the Proj's in the Proj list */
323 switch (get_Proj_proj(node)) {
325 case pn_Call_X_except:
326 case pn_Call_X_regular:
327 set_irn_link(node, ctx->proj_list);
328 ctx->proj_list = node;
334 } /* collect_nothrow_calls */
337 * Fix the list of collected nothrow Calls.
339 * @param irg the graph that contained calls to pure functions
340 * @param call_list the list of all call sites of const functions
341 * @param proj_list the list of all memory/exception Proj's of this call sites
343 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
345 ir_node *call, *next, *proj;
347 ir_graph *rem = current_ir_graph;
349 current_ir_graph = irg;
351 /* First step: go through the list of calls and mark them. */
352 for (call = call_list; call; call = next) {
353 next = get_irn_link(call);
355 /* current_ir_graph is in memory anyway, so it's a good marker */
356 set_irn_link(call, ¤t_ir_graph);
357 hook_func_call(irg, call);
360 /* Second step: Remove all exception Proj's */
361 for (proj = proj_list; proj; proj = next) {
362 next = get_irn_link(proj);
363 call = get_Proj_pred(proj);
365 /* handle only marked calls */
366 if (get_irn_link(call) != ¤t_ir_graph)
369 /* kill any exception flow */
370 switch (get_Proj_proj(proj)) {
371 case pn_Call_X_except:
373 exchange(proj, get_irg_bad(irg));
375 case pn_Call_X_regular: {
376 ir_node *block = get_nodes_block(call);
378 exchange(proj, new_r_Jmp(block));
386 /* changes were done ... */
387 set_irg_outs_inconsistent(irg);
388 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
391 /* ... including exception edges */
392 set_irg_doms_inconsistent(irg);
394 current_ir_graph = rem;
395 } /* fix_nothrow_call_list */
/* Bitset helpers: 'ready' marks graphs whose analysis is finished,
   'busy' marks graphs currently being analyzed (used to cut recursion
   cycles over the call graph). */
398 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
399 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
400 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
401 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
402 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
/* forward declarations: the property checks recurse over callee graphs */
405 static unsigned check_const_or_pure_function(ir_graph *irg, int top);
406 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
/*
 * Combine two property words, keeping the mtp_temporary bit of either input.
 * NOTE(review): the tail of this function (selecting the larger of a and b
 * and or-ing the temporary bit back in) is missing from this excerpt.
 */
409 * Calculate the bigger property of two. Handle the temporary flag right.
411 static unsigned max_property(unsigned a, unsigned b)
413 unsigned r, t = (a | b) & mtp_temporary;
417 if (a == mtp_no_property || b == mtp_no_property)
418 return mtp_no_property;
/*
 * DFS over the memory chain starting at node, classifying what the chain
 * contains: only const calls -> mtp_property_const; additionally
 * (non-volatile) Loads or pure calls -> mtp_property_pure; anything else
 * (Stores, volatile Loads, unknown calls) -> mtp_no_property.
 * Uses the irg visited flag to terminate; recursion stops at the initial
 * memory, which the caller pre-marks.
 * NOTE(review): case labels (Proj/Phi/Load/Call/...) and loop-back braces
 * are missing from this excerpt.
 */
424 * Follow the memory chain starting at node and determine
427 * @return mtp_property_const if only calls of const functions are detected
428 * mtp_property_pure if only Loads and const/pure calls detected
429 * mtp_no_property else
431 static unsigned _follow_mem(ir_node *node)
433 unsigned m, mode = mtp_property_const;
438 if (mode == mtp_no_property)
439 return mtp_no_property;
441 if (irn_visited_else_mark(node))
444 switch (get_irn_opcode(node)) {
446 node = get_Proj_pred(node);
455 /* do a dfs search */
456 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
457 m = _follow_mem(get_irn_n(node, i));
458 mode = max_property(mode, m);
459 if (mode == mtp_no_property)
460 return mtp_no_property;
465 /* Beware volatile Loads are NOT allowed in pure functions. */
466 if (get_Load_volatility(node) == volatility_is_volatile)
467 return mtp_no_property;
468 mode = max_property(mode, mtp_property_pure);
469 node = get_Load_mem(node);
473 /* A call is only tolerable if it's either constant or pure. */
474 ptr = get_Call_ptr(node);
475 if (is_SymConst_addr_ent(ptr)) {
476 ir_entity *ent = get_SymConst_entity(ptr);
477 ir_graph *irg = get_entity_irg(ent);
479 if (irg == current_ir_graph) {
480 /* A self-recursive call. The property did not depend on this call. */
481 } else if (irg == NULL) {
/* no graph available (external function): trust the declared properties */
482 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
483 mode = max_property(mode, m);
484 } else if (irg != NULL) {
485 /* we have a graph, analyze it. */
486 m = check_const_or_pure_function(irg, /*top=*/0);
487 mode = max_property(mode, m);
490 return mtp_no_property;
491 node = get_Call_mem(node);
495 return mtp_no_property;
/*
 * Wrapper around _follow_mem(): folds its result into an already-known
 * property word via max_property().
 */
501 * Follow the memory chain starting at node and determine
504 * @return mtp_property_const if only calls of const functions are detected
505 * mtp_property_pure if only Loads and const/pure calls detected
506 * mtp_no_property else
508 static unsigned follow_mem(ir_node *node, unsigned mode)
512 m = _follow_mem(node);
513 return max_property(mode, m);
/*
 * Analyze one graph for the const/pure property: follow the memory chain of
 * every Return (and every mode_M keep-alive) from the end block; if only
 * const/pure operations are found, record the property on the graph.
 * Recursion cycles over the call graph are handled optimistically via the
 * busy set plus the mtp_temporary flag; optimistic results are only
 * committed at top level (top != 0).
 * NOTE(review): SET_IRG_BUSY/CLEAR_IRG_BUSY/SET_IRG_READY calls, some
 * declarations (j, mem) and closing braces are missing from this excerpt.
 */
517 * Check if a graph represents a const or a pure function.
519 * @param irg the graph to check
520 * @param top if set, this is the top call
522 static unsigned check_const_or_pure_function(ir_graph *irg, int top)
524 ir_node *end, *endbl;
526 unsigned prop = get_irg_additional_properties(irg);
527 ir_graph *rem = current_ir_graph;
529 if (prop & mtp_property_const) {
530 /* already marked as a const function */
531 return mtp_property_const;
533 if (prop & mtp_property_pure) {
534 /* already marked as a pure function */
535 return mtp_property_pure;
538 if (IS_IRG_READY(irg)) {
539 /* already checked */
540 return mtp_no_property;
542 if (IS_IRG_BUSY(irg)) {
543 /* we are still evaluating this method. Be optimistic,
544 return the best possible so far but mark the result as temporary. */
545 return mtp_temporary | mtp_property_const;
549 end = get_irg_end(irg);
550 endbl = get_nodes_block(end);
551 prop = mtp_property_const;
553 current_ir_graph = irg;
555 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
556 inc_irg_visited(irg);
557 /* mark the initial mem: recursion of follow_mem() stops here */
558 mark_irn_visited(get_irg_initial_mem(irg));
560 /* visit every Return */
561 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
562 ir_node *node = get_Block_cfgpred(endbl, j);
563 ir_opcode code = get_irn_opcode(node);
566 /* Bad nodes usually do NOT produce anything, so it's ok */
570 if (code == iro_Return) {
571 mem = get_Return_mem(node);
573 /* Bad nodes usually do NOT produce anything, so it's ok */
577 if (mem != get_irg_initial_mem(irg))
578 prop = max_property(prop, follow_mem(mem, prop));
580 /* Exception found. Cannot be const or pure. */
581 prop = mtp_no_property;
584 if (prop == mtp_no_property)
588 if (prop != mtp_no_property) {
589 /* check, if a keep-alive exists */
590 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
591 ir_node *kept = get_End_keepalive(end, j);
593 if (is_Block(kept)) {
594 prop = mtp_no_property;
598 if (mode_M != get_irn_mode(kept))
601 prop = max_property(prop, follow_mem(kept, prop));
602 if (prop == mtp_no_property)
607 if (prop != mtp_no_property) {
608 if (top || (prop & mtp_temporary) == 0) {
609 /* We use the temporary flag here to mark optimistic result.
610 Set the property only if we are sure that it does NOT base on
611 temporary results OR if we are at top-level. */
612 set_irg_additional_property(irg, prop & ~mtp_temporary);
619 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
620 current_ir_graph = rem;
622 } /* check_const_or_pure_function */
/*
 * For every graph in the program: collect its const/pure calls and Projs
 * with collect_const_and_pure_calls(), then rewrite them with
 * fix_const_call_lists().  Resets the env_t counters and per-graph lists.
 */
625 * Handle calls to const functions.
629 static void handle_const_Calls(env_t *ctx)
633 ctx->n_calls_SymConst = 0;
634 ctx->n_calls_Sel = 0;
636 /* all calls of const functions can be transformed */
637 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
638 ir_graph *irg = get_irp_irg(i);
640 ctx->float_const_call_list = NULL;
641 ctx->nonfloat_const_call_list = NULL;
642 ctx->pure_call_list = NULL;
643 ctx->proj_list = NULL;
645 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
646 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
648 if (ctx->float_const_call_list != NULL)
649 fix_const_call_lists(irg, ctx);
650 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
652 } /* handle_const_Calls */
/*
 * For every graph in the program: collect its nothrow calls and Projs with
 * collect_nothrow_calls(), then strip their exception flow with
 * fix_nothrow_call_list().
 */
655 * Handle calls to nothrow functions.
659 static void handle_nothrow_Calls(env_t *ctx)
663 ctx->n_calls_SymConst = 0;
664 ctx->n_calls_Sel = 0;
666 /* all calls of nothrow functions can be transformed */
667 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
668 ir_graph *irg = get_irp_irg(i);
670 ctx->nothrow_call_list = NULL;
671 ctx->proj_list = NULL;
673 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
674 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
676 if (ctx->nothrow_call_list)
677 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
678 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
/*
 * Returns non-zero when node yields freshly heap-allocated memory: either a
 * Firm heap Alloc node, or a Call to an entity the user-supplied
 * is_alloc_entity() callback classifies as an allocator.
 */
683 * Check, whether a given node represents a return value of
684 * a malloc like function (ie, new heap allocated memory).
686 * @param node the node to check
688 static int is_malloc_call_result(const ir_node *node)
690 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
691 /* Firm style high-level allocation */
694 if (is_alloc_entity != NULL && is_Call(node)) {
695 ir_node *ptr = get_Call_ptr(node);
697 if (is_Global(ptr)) {
698 ir_entity *ent = get_Global_entity(ptr);
699 return is_alloc_entity(ent);
703 } /* is_malloc_call_result */
/*
 * Intersect a property word with a callee's properties while preserving the
 * mtp_temporary bit of either operand.
 * NOTE(review): the final "return r | t;" appears to be missing from this
 * excerpt (gap in the embedded numbering) — confirm against full source.
 */
706 * Update a property depending on a call property.
708 static unsigned update_property(unsigned orig_prop, unsigned call_prop)
710 unsigned t = (orig_prop | call_prop) & mtp_temporary;
711 unsigned r = orig_prop & call_prop;
713 } /* update_property */
/*
 * Returns non-zero when node n may be stored to memory: it is the value
 * operand of a Store, or is passed to a Call parameter with
 * ptr_access_store access (or to a call through an unknown address).
 * Requires out-edges (foreach_out_edge).
 * NOTE(review): the switch's case labels and several return statements are
 * missing from this excerpt.
 */
716 * Check if a node is stored.
718 static int is_stored(const ir_node *n)
720 const ir_edge_t *edge;
723 foreach_out_edge(n, edge) {
724 const ir_node *succ = get_edge_src_irn(edge);
726 switch (get_irn_opcode(succ)) {
733 if (get_Store_value(succ) == n)
735 /* ok if it's only the address input */
744 ptr = get_Call_ptr(succ);
745 if (is_Global(ptr)) {
746 ir_entity *ent = get_Global_entity(ptr);
749 /* we know the called entity */
750 for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
751 if (get_Call_param(succ, i) == n) {
752 /* n is the i'th param of the call */
753 if (get_method_param_access(ent, i) & ptr_access_store) {
754 /* n is stored in ent */
760 /* unknown call address */
765 /* bad, potential alias */
/*
 * Scans every Return of irg and checks with is_stored() whether any
 * returned value may escape to memory; such an escape would create an alias
 * and forbids the malloc property.  Temporarily activates normal out-edges.
 */
773 * Check that the return value of an irg is not stored anywhere.
775 * return ~mtp_property_malloc if return values are stored, ~0 else
777 static unsigned check_stored_result(ir_graph *irg)
779 ir_node *end_blk = get_irg_end_block(irg);
782 int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
784 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
785 ir_node *pred = get_Block_cfgpred(end_blk, i);
787 if (! is_Return(pred))
789 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
790 const ir_node *irn = get_Return_res(pred, j);
792 if (is_stored(irn)) {
793 /* bad, might create an alias */
794 res = ~mtp_property_malloc;
801 edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
803 } /* check_stored_result */
/*
 * Analyze one graph for the nothrow and malloc properties by walking the
 * end-block predecessors: Returns are checked for malloc-like result values
 * (recursing into callees), while non-Return predecessors indicate possible
 * exception flow and are checked for nothrow callees.  Like
 * check_const_or_pure_function(), cycles are handled optimistically with
 * the busy set + mtp_temporary, and optimistic results are only committed
 * at top level.
 * NOTE(review): SET_IRG_BUSY/READY bookkeeping, several declarations,
 * continue/break statements and closing braces are missing from this
 * excerpt (gaps in the embedded numbering).
 */
806 * Check if a graph represents a nothrow or a malloc function.
808 * @param irg the graph to check
809 * @param top if set, this is the top call
811 static unsigned check_nothrow_or_malloc(ir_graph *irg, int top)
813 ir_node *end_blk = get_irg_end_block(irg);
/* start optimistic: both properties assumed until disproved */
817 unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
819 if (IS_IRG_READY(irg)) {
820 /* already checked */
821 return get_irg_additional_properties(irg);
823 if (IS_IRG_BUSY(irg)) {
824 /* we are still evaluating this method. Be optimistic,
825 return the best possible so far but mark the result as temporary. */
826 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
830 ent = get_irg_entity(irg);
831 mtp = get_entity_type(ent);
/* a function without results can never be a malloc-like allocator */
833 if (get_method_n_ress(mtp) <= 0)
834 curr_prop &= ~mtp_property_malloc;
836 for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
837 ir_node *pred = get_Block_cfgpred(end_blk, i);
839 if (is_Return(pred)) {
840 if (curr_prop & mtp_property_malloc) {
841 /* check, if malloc is called here */
842 for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
843 ir_node *res = get_Return_res(pred, j);
845 /* skip Confirms and Casts */
846 res = skip_HighLevel_ops(res);
849 res = get_Proj_pred(res);
850 if (is_malloc_call_result(res)) {
851 /* ok, this is a malloc */
852 } else if (is_Call(res)) {
853 ir_node *ptr = get_Call_ptr(res);
855 if (is_Global(ptr)) {
857 ir_entity *ent = get_Global_entity(ptr);
858 ir_graph *callee = get_entity_irg(ent);
861 /* A self-recursive call. The property did not depend on this call. */
862 } else if (callee != NULL) {
863 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
864 curr_prop = update_property(curr_prop, prop);
866 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
868 } else if (get_opt_closed_world() &&
870 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
871 /* check if all possible callees are malloc functions. */
872 int i, n_callees = get_Call_n_callees(res);
873 if (n_callees == 0) {
874 /* This is kind of strange: dying code or a Call that will raise an exception
875 when executed as there is no implementation to call. So better not
877 curr_prop &= ~mtp_property_malloc;
881 for (i = 0; i < n_callees; ++i) {
882 ir_entity *ent = get_Call_callee(res, i);
883 if (ent == unknown_entity) {
884 /* we don't know which entity is called here */
885 curr_prop &= ~mtp_property_malloc;
888 if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
889 curr_prop &= ~mtp_property_malloc;
893 /* if we pass the for cycle, malloc is still ok */
896 curr_prop &= ~mtp_property_malloc;
899 /* unknown return value */
900 curr_prop &= ~mtp_property_malloc;
904 } else if (curr_prop & mtp_property_nothrow) {
905 /* exception flow detected */
906 pred = skip_Proj(pred);
909 ir_node *ptr = get_Call_ptr(pred);
911 if (is_Global(ptr)) {
913 ir_entity *ent = get_Global_entity(ptr);
914 ir_graph *callee = get_entity_irg(ent);
917 /* A self-recursive call. The property did not depend on this call. */
918 } else if (callee != NULL) {
919 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
920 unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
921 curr_prop = update_property(curr_prop, prop);
923 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
924 curr_prop &= ~mtp_property_nothrow;
926 } else if (get_opt_closed_world() &&
928 get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
929 /* check if all possible callees are nothrow functions. */
930 int i, n_callees = get_Call_n_callees(pred);
931 if (n_callees == 0) {
932 /* This is kind of strange: dying code or a Call that will raise an exception
933 when executed as there is no implementation to call. So better not
935 curr_prop &= ~mtp_property_nothrow;
939 for (i = 0; i < n_callees; ++i) {
940 ir_entity *ent = get_Call_callee(pred, i);
941 if (ent == unknown_entity) {
942 /* we don't know which entity is called here */
943 curr_prop &= ~mtp_property_nothrow;
946 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
947 curr_prop &= ~mtp_property_nothrow;
951 /* if we pass the for cycle, nothrow is still ok */
954 curr_prop &= ~mtp_property_nothrow;
957 /* real exception flow possible. */
958 curr_prop &= ~mtp_property_nothrow;
961 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
962 /* no need to search further */
967 if (curr_prop & mtp_property_malloc) {
/*
969 * Note that the malloc property means not only return newly allocated
970 * memory, but also that this memory is ALIAS FREE.
971 * To ensure that, we do NOT allow that the returned memory is somewhere
 */
974 curr_prop &= check_stored_result(irg);
977 if (curr_prop != mtp_no_property) {
978 if (top || (curr_prop & mtp_temporary) == 0) {
979 /* We use the temporary flag here to mark an optimistic result.
980 Set the property only if we are sure that it does NOT base on
981 temporary results OR if we are at top-level. */
982 set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
990 } /* check_nothrow_or_malloc */
/*
 * If the graph's root loop has inner loops (loop_outer_loop flag), mark the
 * graph with mtp_property_has_loop so its const calls stay pinned (see
 * collect_const_and_pure_calls).
 * NOTE(review): the loop-info computation call (e.g. assure_cf_loop) before
 * get_irg_loop() is missing from this excerpt.
 */
993 * When a function was detected as "const", it might be moved out of loops.
994 * This might be dangerous if the graph can contain endless loops.
996 static void check_for_possible_endless_loops(ir_graph *irg)
1001 root_loop = get_irg_loop(irg);
1002 if (root_loop->flags & loop_outer_loop)
1003 set_irg_additional_property(irg, mtp_property_has_loop);
/*
 * Driver for the whole optimization:
 *  1. detect nothrow/malloc graphs and strip exception edges of nothrow
 *     calls (must happen before const/pure detection),
 *  2. reset the ready/busy bitsets,
 *  3. detect const/pure graphs and rewrite their call sites.
 * force_run forces the rewriting passes even when no new property was found;
 * callback (may be NULL) classifies external allocator entities.
 * NOTE(review): counter increments (++num_nothrow etc.), the env_t ctx
 * declarations and the final xfree/cleanup of ready_set/busy_set are missing
 * from this excerpt.
 */
1007 * optimize function calls by handling const functions
1009 void optimize_funccalls(int force_run, check_alloc_entity_func callback)
1012 unsigned num_const = 0;
1013 unsigned num_pure = 0;
1014 unsigned num_nothrow = 0;
1015 unsigned num_malloc = 0;
1017 is_alloc_entity = callback;
1019 /* prepare: mark all graphs as not analyzed */
1020 last_idx = get_irp_last_idx();
1021 ready_set = rbitset_malloc(last_idx);
1022 busy_set = rbitset_malloc(last_idx);
1024 /* first step: detect, which functions are nothrow or malloc */
1025 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
1026 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1027 ir_graph *irg = get_irp_irg(i);
1028 unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
1030 if (prop & mtp_property_nothrow) {
1032 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
1033 } else if (prop & mtp_property_malloc) {
1035 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
1039 /* second step: remove exception edges: this must be done before the
1040 detection of const and pure functions take place. */
1041 if (force_run || num_nothrow > 0) {
1044 handle_nothrow_Calls(&ctx);
1045 DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
1046 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
1047 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1049 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
/* property analysis restarts from scratch for const/pure */
1052 rbitset_clear_all(ready_set, last_idx);
1053 rbitset_clear_all(busy_set, last_idx);
1055 /* third step: detect, which functions are const or pure */
1056 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
1057 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
1058 ir_graph *irg = get_irp_irg(i);
1059 unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
1061 if (prop & mtp_property_const) {
1063 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
1064 check_for_possible_endless_loops(irg);
1065 } else if (prop & mtp_property_pure) {
1067 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
1071 if (force_run || num_const > 0) {
1074 handle_const_Calls(&ctx);
1075 DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
1076 DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
1077 ctx.n_calls_SymConst, ctx.n_calls_Sel));
1079 DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
1083 } /* optimize_funccalls */
/* initialize the funccall optimization: registers the debug module used by
   the DB() trace statements above */
1085 /* initialize the funccall optimization */
1086 void firm_init_funccalls(void)
1088 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
1089 } /* firm_init_funccalls */
/* Argument bundle for running optimize_funccalls() as an ir_prog pass.
   NOTE(review): the "struct pass_t {" opener, the "int force_run;" field and
   the closing brace are missing from this excerpt. */
1092 ir_prog_pass_t pass;
1094 check_alloc_entity_func callback;
/*
 * Pass entry point: unpacks the pass_t context and forwards to
 * optimize_funccalls().
 */
1098 * Wrapper for running optimize_funccalls() as an ir_prog pass.
1100 static int pass_wrapper(ir_prog *irp, void *context)
1102 struct pass_t *pass = context;
1105 optimize_funccalls(pass->force_run, pass->callback);
1107 } /* pass_wrapper */
/* Allocates a pass_t, stores the arguments and builds the ir_prog pass via
   def_prog_pass_constructor(), defaulting the pass name to "funccall".
   NOTE(review): the "const char *name" parameter line is missing from this
   excerpt although "name" is used below. */
1109 /* Creates an ir_prog pass for optimize_funccalls. */
1110 ir_prog_pass_t *optimize_funccalls_pass(
1112 int force_run, check_alloc_entity_func callback)
1114 struct pass_t *pass = XMALLOCZ(struct pass_t);
1116 pass->force_run = force_run;
1117 pass->callback = callback;
1119 return def_prog_pass_constructor(
1120 &pass->pass, name ? name : "funccall", pass_wrapper);
1121 } /* optimize_funccalls_pass */