/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Optimization of function calls.
 * @author  Michael Beck
 */
#include <adt/raw_bitset.h>

#include "funccall_t.h"

#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iredges_t.h"
#include "analyze_irg_args.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/**
 * The walker environment for updating function calls.
 */
typedef struct _env_t {
    unsigned n_calls_SymConst;
    unsigned n_calls_Sel;
    ir_node  *float_const_call_list;    /**< The list of all floating const function calls that will be changed. */
    ir_node  *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
    ir_node  *pure_call_list;           /**< The list of all pure function calls that will be changed. */
    ir_node  *nothrow_call_list;        /**< The list of all nothrow function calls that will be changed. */
    ir_node  *proj_list;                /**< The list of all potential Proj nodes that must be fixed. */
} env_t;
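/*
 * A minimal sketch of how the lists above are used: each list is an
 * intrusive, singly linked list threaded through the nodes' link fields,
 * so no extra memory is allocated per collected call.  Pushing and walking
 * a list looks like this (assuming a collected ir_node *call and an
 * env_t *ctx):
 *
 *     set_irn_link(call, ctx->pure_call_list);  // old head becomes the tail
 *     ctx->pure_call_list = call;               // call is the new head
 *
 *     for (c = ctx->pure_call_list; c != NULL; c = get_irn_link(c)) { ... }
 */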
/** If non-null, evaluates entities for being a heap alloc. */
static check_alloc_entity_func is_alloc_entity = NULL;

/** Ready IRGs are marked in the ready set. */
static unsigned *ready_set;

/** IRGs that are in progress are marked here. */
static unsigned *busy_set;
/*
 * We misuse the mtp_property_inherited flag as a temporary here.
 * This is ok, as we cannot set or get it anyway using the
 * get_additional_properties API.
 */
#define mtp_temporary  mtp_property_inherited
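/*
 * Sketch of the intended protocol around mtp_temporary: while a graph is
 * being analyzed (marked BUSY below), recursive queries optimistically
 * return a result tagged with mtp_temporary.  The flag is stripped before
 * a property is committed:
 *
 *     prop = check_const_or_pure_function(irg, 0); // may carry mtp_temporary
 *     if (top || (prop & mtp_temporary) == 0)
 *         set_irg_additional_property(irg, prop & ~mtp_temporary);
 */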
/**
 * Walker: Collect all calls to const and pure functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 */
static void collect_const_and_pure_calls(ir_node *node, void *env) {
    env_t     *ctx = env;
    ir_node   *call, *ptr;
    ir_entity *ent;
    unsigned  and_prop, or_prop, prop;

    if (is_Call(node)) {
        call = node;

        /* set the link to NULL for all non-const/pure calls */
        set_irn_link(call, NULL);
        ptr = get_Call_ptr(call);
        if (is_Global(ptr)) {
            ent = get_Global_entity(ptr);

            prop = get_entity_additional_properties(ent);
            if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
                return;
            ++ctx->n_calls_SymConst;
        } else if (get_opt_closed_world() &&
                   is_Sel(ptr) &&
                   get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
            /* If all possible callees are const functions, we can remove the memory edge. */
            int i, n_callees = get_Call_n_callees(call);
            if (n_callees == 0) {
                /* This is kind of strange: dying code or a Call that will raise an exception
                   when executed, as there is no implementation to call. So better not
                   optimize. */
                return;
            }
            /* note that const functions are a subset of pure ones */
            and_prop = mtp_property_const | mtp_property_pure;
            or_prop  = mtp_no_property;
            for (i = 0; i < n_callees; ++i) {
                ent = get_Call_callee(call, i);
                if (ent == unknown_entity) {
                    /* we don't know which entity is called here */
                    return;
                }
                prop      = get_entity_additional_properties(ent);
                and_prop &= prop;
                or_prop  |= prop;
                if (and_prop == mtp_no_property)
                    return;
            }
            prop = and_prop | (or_prop & mtp_property_has_loop);
            ++ctx->n_calls_Sel;
        } else {
            return;
        }

        /* ok, if we get here we found a call to a const or a pure function */
        if (prop & mtp_property_pure) {
            set_irn_link(call, ctx->pure_call_list);
            ctx->pure_call_list = call;
        } else {
            if (prop & mtp_property_has_loop) {
                set_irn_link(call, ctx->nonfloat_const_call_list);
                ctx->nonfloat_const_call_list = call;
            } else {
                set_irn_link(call, ctx->float_const_call_list);
                ctx->float_const_call_list = call;
            }
        }
    } else if (is_Proj(node)) {
        /*
         * Collect all memory and exception Projs from
         * calls.
         */
        call = get_Proj_pred(node);
        if (! is_Call(call))
            return;

        /* collect the Projs in the Proj list */
        switch (get_Proj_proj(node)) {
        case pn_Call_M_regular:
        case pn_Call_X_except:
        case pn_Call_X_regular:
        case pn_Call_M_except:
            set_irn_link(node, ctx->proj_list);
            ctx->proj_list = node;
            break;
        default:
            break;
        }
    }
}  /* collect_const_and_pure_calls */
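/*
 * Usage sketch for the walker above: it is started once per graph with the
 * environment as context; afterwards the collected lists are processed, see
 * handle_const_Calls() below:
 *
 *     ctx->float_const_call_list = NULL;  // reset all lists first ...
 *     irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
 *     if (ctx->float_const_call_list != NULL)
 *         fix_const_call_lists(irg, ctx);
 */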
/**
 * Fix the list of collected Calls.
 *
 * @param irg  the graph that contained calls to pure functions
 * @param ctx  the walker context
 */
static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
    ir_node  *call, *next, *mem, *proj;
    ir_graph *rem = current_ir_graph;

    current_ir_graph = irg;

    /* First step: fix all calls by removing their memory input and letting
     * them float.
     * The original memory input is preserved in their link fields. */
    for (call = ctx->float_const_call_list; call != NULL; call = next) {
        next = get_irn_link(call);
        mem  = get_Call_mem(call);

        set_irn_link(call, mem);
        set_Call_mem(call, get_irg_no_mem(irg));
        /*
         * Unfortunately we cannot simply set the node to 'float'.
         * There is a reason for that:
         *
         * - The call might be inside a loop/if that is NOT entered
         *   and calls an endless function. Setting the call to float
         *   would allow it to be moved out of the loop/if, causing this
         *   function to be called even if the loop/if is not entered ...
         *
         * This could be fixed using post-dominators for calls and Pin nodes,
         * but that needs some more analyses to ensure that a call that
         * potentially never returns is not executed before some code that
         * generates observable state...
         */
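        /*
         * A source-level illustration of the hazard (hypothetical code):
         *
         *     if (rare_condition)
         *         x = looping_const_func();  // const, but may never terminate
         *
         * If the Call floated out of the if-block, looping_const_func() would
         * be executed even when rare_condition is false, turning a terminating
         * program into a non-terminating one.  This is why calls to functions
         * with the has_loop property were collected into the nonfloat list
         * instead and are not touched here.
         */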
        /* finally, this call can float */
        set_irn_pinned(call, op_pin_state_floats);
        hook_func_call(irg, call);
    }
    /* Last step: fix all Projs */
    for (proj = ctx->proj_list; proj != NULL; proj = next) {
        next = get_irn_link(proj);
        call = get_Proj_pred(proj);
        mem  = get_irn_link(call);

        /* beware of calls in the pure call list */
        if (! mem || is_Call(mem))
            continue;
        assert(get_irn_mode(mem) == mode_M);

        switch (get_Proj_proj(proj)) {
        case pn_Call_M_regular: {
            /* in dead code there might be cycles where proj == mem */
            if (proj != mem)
                exchange(proj, mem);
            break;
        }
        case pn_Call_X_except:
        case pn_Call_M_except:
            exchange(proj, get_irg_bad(irg));
            break;
        case pn_Call_X_regular: {
            ir_node *block = get_nodes_block(call);
            exchange(proj, new_r_Jmp(irg, block));
            break;
        }
        default:
            break;
        }
    }
    /* changes were done ... */
    set_irg_outs_inconsistent(irg);
    set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);

    /* ... including exception edges */
    set_irg_doms_inconsistent(irg);

    current_ir_graph = rem;
}  /* fix_const_call_lists */
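/*
 * Rough picture of the two phases above (M = memory dependency, sketch only):
 *
 *     before:  mem --M--> Call --> Proj pn_Call_M_regular --M--> users
 *     after:   NoMem --M--> Call,  users --M--> mem (the original input)
 *
 * Phase one detaches the Call from the memory chain, keeping the old memory
 * input in the link field; phase two routes the memory users around the Call
 * and turns exception Projs into Bad or Jmp nodes.
 */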
/**
 * Walker: Collect all calls to nothrow functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 */
static void collect_nothrow_calls(ir_node *node, void *env) {
    env_t     *ctx = env;
    ir_node   *call, *ptr;
    ir_entity *ent;
    unsigned  prop;

    if (is_Call(node)) {
        call = node;

        /* set the link to NULL for all non-nothrow calls */
        set_irn_link(call, NULL);
        ptr = get_Call_ptr(call);
        if (is_Global(ptr)) {
            ent = get_Global_entity(ptr);

            prop = get_entity_additional_properties(ent);
            if ((prop & mtp_property_nothrow) == 0)
                return;
            ++ctx->n_calls_SymConst;
        } else if (get_opt_closed_world() &&
                   is_Sel(ptr) &&
                   get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
            /* If all possible callees are nothrow functions, we can remove the exception edge. */
            int i, n_callees = get_Call_n_callees(call);
            if (n_callees == 0) {
                /* This is kind of strange: dying code or a Call that will raise an exception
                   when executed, as there is no implementation to call. So better not
                   optimize. */
                return;
            }

            /* intersect the nothrow property over all possible callees */
            prop = mtp_property_nothrow;
            for (i = 0; i < n_callees; ++i) {
                ent = get_Call_callee(call, i);
                if (ent == unknown_entity) {
                    /* we don't know which entity is called here */
                    return;
                }
                prop &= get_entity_additional_properties(ent);
                if (prop == mtp_no_property)
                    return;
            }
            ++ctx->n_calls_Sel;
        } else {
            return;
        }

        /* ok, if we get here we found a call to a nothrow function */
        set_irn_link(call, ctx->nothrow_call_list);
        ctx->nothrow_call_list = call;
    } else if (is_Proj(node)) {
        /*
         * Collect all memory and exception Projs from
         * calls.
         */
        call = get_Proj_pred(node);
        if (! is_Call(call))
            return;

        /* collect the Projs in the Proj list */
        switch (get_Proj_proj(node)) {
        case pn_Call_M_regular:
        case pn_Call_X_except:
        case pn_Call_X_regular:
        case pn_Call_M_except:
            set_irn_link(node, ctx->proj_list);
            ctx->proj_list = node;
            break;
        default:
            break;
        }
    }
}  /* collect_nothrow_calls */
/**
 * Fix the list of collected nothrow Calls.
 *
 * @param irg        the graph that contained calls to nothrow functions
 * @param call_list  the list of all call sites of nothrow functions
 * @param proj_list  the list of all memory/exception Projs of these call sites
 */
static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
    ir_node  *call, *next, *proj;
    ir_graph *rem = current_ir_graph;

    current_ir_graph = irg;

    /* First step: go through the list of calls and mark them. */
    for (call = call_list; call; call = next) {
        next = get_irn_link(call);

        /* current_ir_graph is in memory anyway, so its address is a good marker */
        set_irn_link(call, &current_ir_graph);
        hook_func_call(irg, call);
    }
    /* Second step: Remove all exception Projs */
    for (proj = proj_list; proj; proj = next) {
        next = get_irn_link(proj);
        call = get_Proj_pred(proj);

        /* handle only marked calls */
        if (get_irn_link(call) != &current_ir_graph)
            continue;

        /* kill any exception flow */
        switch (get_Proj_proj(proj)) {
        case pn_Call_X_except:
        case pn_Call_M_except:
            exchange(proj, get_irg_bad(irg));
            break;
        case pn_Call_X_regular: {
            ir_node *block = get_nodes_block(call);
            exchange(proj, new_r_Jmp(irg, block));
            break;
        }
        default:
            break;
        }
    }
    /* changes were done ... */
    set_irg_outs_inconsistent(irg);
    set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);

    /* ... including exception edges */
    set_irg_doms_inconsistent(irg);

    current_ir_graph = rem;
}  /* fix_nothrow_call_list */
#define SET_IRG_READY(irg)  rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg)   rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg)   rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
#define IS_IRG_BUSY(irg)    rbitset_is_set(busy_set, get_irg_idx(irg))
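/*
 * Sketch of the analysis protocol built on these bitsets (see the check
 * functions below):
 *
 *     if (IS_IRG_READY(irg)) return <cached result>;              // done before
 *     if (IS_IRG_BUSY(irg))  return <best guess> | mtp_temporary; // recursion
 *     SET_IRG_BUSY(irg);
 *     ... analyze the graph, possibly recursing into callees ...
 *     CLEAR_IRG_BUSY(irg);
 *     SET_IRG_READY(irg);
 */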
static unsigned check_const_or_pure_function(ir_graph *irg, int top);
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
/**
 * Calculate the bigger (i.e. weaker) property of two.
 * Handles the temporary flag correctly.
 */
static unsigned max_property(unsigned a, unsigned b) {
    unsigned r, t = (a | b) & mtp_temporary;
    a &= ~mtp_temporary;
    b &= ~mtp_temporary;

    if (a == mtp_no_property || b == mtp_no_property)
        return mtp_no_property;
    r = a > b ? a : b;
    return r | t;
}  /* max_property */
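/*
 * Worked example, assuming the usual encoding where mtp_property_const is
 * numerically smaller than mtp_property_pure (the weaker property wins):
 *
 *     max_property(mtp_property_const | mtp_temporary, mtp_property_pure)
 *         == mtp_property_pure | mtp_temporary
 *
 * while any mtp_no_property operand forces mtp_no_property.
 */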
/**
 * Follow the memory chain starting at node and determine
 * the mtp_property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else
 */
static unsigned _follow_mem(ir_node *node) {
    unsigned m, mode = mtp_property_const;
    ir_node  *ptr;
    int      i;

    for (;;) {
        if (mode == mtp_no_property)
            return mtp_no_property;

        if (irn_visited_else_mark(node))
            return mode;

        switch (get_irn_opcode(node)) {
        case iro_Proj:
            node = get_Proj_pred(node);
            break;

        case iro_NoMem:
        case iro_Start:
            /* reached the top of the memory chain */
            return mode;

        case iro_Phi:
        case iro_Sync:
            /* do a dfs search */
            for (i = get_irn_arity(node) - 1; i >= 0; --i) {
                m    = _follow_mem(get_irn_n(node, i));
                mode = max_property(mode, m);
                if (mode == mtp_no_property)
                    return mtp_no_property;
            }
            return mode;

        case iro_Load:
            /* Beware: volatile Loads are NOT allowed in pure functions. */
            if (get_Load_volatility(node) == volatility_is_volatile)
                return mtp_no_property;
            mode = max_property(mode, mtp_property_pure);
            node = get_Load_mem(node);
            break;

        case iro_Call:
            /* A call is only tolerable if it is either const or pure. */
            ptr = get_Call_ptr(node);
            if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
                ir_entity *ent = get_SymConst_entity(ptr);
                ir_graph  *irg = get_entity_irg(ent);

                if (irg == current_ir_graph) {
                    /* A self-recursive call. The property did not depend on this call. */
                } else if (irg == NULL) {
                    m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
                    mode = max_property(mode, m);
                } else {
                    /* we have a graph, analyze it. */
                    m = check_const_or_pure_function(irg, /*top=*/0);
                    mode = max_property(mode, m);
                }
            } else {
                return mtp_no_property;
            }
            node = get_Call_mem(node);
            break;

        default:
            return mtp_no_property;
        }
    }
}  /* _follow_mem */
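/*
 * Example of a memory chain that _follow_mem() classifies as pure (sketch):
 *
 *     Return.mem <- Proj(M) <- Call(const fn) <- Load <- initial mem
 *
 * The Load limits the result to mtp_property_pure; a volatile Load, an
 * indirect call, or any other memory-writing node on the chain degrades
 * the result to mtp_no_property.
 */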
/**
 * Follow the memory chain starting at node and determine
 * the mtp_property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else
 */
static unsigned follow_mem(ir_node *node, unsigned mode) {
    unsigned m;

    m = _follow_mem(node);
    return max_property(mode, m);
}  /* follow_mem */
/**
 * Check if a graph represents a const or a pure function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 */
static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
    ir_node  *end, *endbl;
    int      j;
    unsigned prop = get_irg_additional_properties(irg);
    ir_graph *rem = current_ir_graph;

    if (prop & mtp_property_const) {
        /* already marked as a const function */
        return mtp_property_const;
    }
    if (prop & mtp_property_pure) {
        /* already marked as a pure function */
        return mtp_property_pure;
    }

    if (IS_IRG_READY(irg)) {
        /* already checked */
        return mtp_no_property;
    }
    if (IS_IRG_BUSY(irg)) {
        /* we are still evaluating this method. Be optimistic,
           return the best possible so far but mark the result as temporary. */
        return mtp_temporary | mtp_property_const;
    }
    SET_IRG_BUSY(irg);

    end   = get_irg_end(irg);
    endbl = get_nodes_block(end);
    prop  = mtp_property_const;

    current_ir_graph = irg;

    inc_irg_visited(irg);
    /* mark the initial mem: recursion of follow_mem() stops here */
    mark_irn_visited(get_irg_initial_mem(irg));
    /* visit every Return */
    for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
        ir_node   *node = get_Block_cfgpred(endbl, j);
        ir_opcode code  = get_irn_opcode(node);
        ir_node   *mem;

        if (code == iro_Bad) {
            /* Bad nodes usually do NOT produce anything, so it's ok */
            continue;
        }

        if (code == iro_Return) {
            mem = get_Return_mem(node);

            /* Bad nodes usually do NOT produce anything, so it's ok */
            if (is_Bad(mem))
                continue;

            if (mem != get_irg_initial_mem(irg))
                prop = max_property(prop, follow_mem(mem, prop));
        } else {
            /* Exception found. Cannot be const or pure. */
            prop = mtp_no_property;
            break;
        }
        if (prop == mtp_no_property)
            break;
    }
    if (prop != mtp_no_property) {
        /* check, if a keep-alive exists */
        for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
            ir_node *kept = get_End_keepalive(end, j);

            if (is_Block(kept)) {
                prop = mtp_no_property;
                break;
            }

            if (mode_M != get_irn_mode(kept))
                continue;

            prop = max_property(prop, follow_mem(kept, prop));
            if (prop == mtp_no_property)
                break;
        }
    }
    if (prop != mtp_no_property) {
        if (top || (prop & mtp_temporary) == 0) {
            /* We use the temporary flag here to mark an optimistic result.
               Set the property only if we are sure that it does NOT depend on
               temporary results OR if we are at top-level. */
            set_irg_additional_property(irg, prop & ~mtp_temporary);
            SET_IRG_READY(irg);
        }
    }
    if (top)
        SET_IRG_READY(irg);
    CLEAR_IRG_BUSY(irg);

    current_ir_graph = rem;
    return prop;
}  /* check_const_or_pure_function */
/**
 * Handle calls to const functions.
 *
 * @param ctx  the walker context
 */
static void handle_const_Calls(env_t *ctx) {
    int i;

    ctx->n_calls_SymConst = 0;
    ctx->n_calls_Sel      = 0;

    /* all calls of const functions can be transformed */
    for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
        ir_graph *irg = get_irp_irg(i);

        ctx->float_const_call_list    = NULL;
        ctx->nonfloat_const_call_list = NULL;
        ctx->pure_call_list           = NULL;
        ctx->proj_list                = NULL;
        irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);

        if (ctx->float_const_call_list != NULL) {
            fix_const_call_lists(irg, ctx);

            /* this graph was changed, invalidate analysis info */
            set_irg_outs_inconsistent(irg);
            set_irg_doms_inconsistent(irg);
        }
    }
}  /* handle_const_Calls */
/**
 * Handle calls to nothrow functions.
 *
 * @param ctx  the walker context
 */
static void handle_nothrow_Calls(env_t *ctx) {
    int i;

    ctx->n_calls_SymConst = 0;
    ctx->n_calls_Sel      = 0;

    /* all calls of nothrow functions can be transformed */
    for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
        ir_graph *irg = get_irp_irg(i);

        ctx->nothrow_call_list = NULL;
        ctx->proj_list         = NULL;
        irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);

        if (ctx->nothrow_call_list) {
            fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);

            /* this graph was changed, invalidate analysis info */
            set_irg_outs_inconsistent(irg);
            set_irg_doms_inconsistent(irg);
        }
    }
}  /* handle_nothrow_Calls */
/**
 * Check, whether a given node represents a return value of
 * a malloc like function (i.e., newly allocated heap memory).
 *
 * @param node  the node to check
 */
static int is_malloc_call_result(const ir_node *node) {
    if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
        /* Firm style high-level allocation */
        return 1;
    }
    if (is_alloc_entity != NULL && is_Call(node)) {
        ir_node *ptr = get_Call_ptr(node);

        if (is_Global(ptr)) {
            ir_entity *ent = get_Global_entity(ptr);
            return is_alloc_entity(ent);
        }
    }
    return 0;
}  /* is_malloc_call_result */
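/*
 * A hypothetical is_alloc_entity callback (names are illustrative): it could
 * recognize the C allocator functions by entity name, e.g.
 *
 *     static int my_is_alloc_entity(ir_entity *ent) {
 *         const char *name = get_entity_name(ent);
 *         return strcmp(name, "malloc") == 0 || strcmp(name, "calloc") == 0;
 *     }
 *
 * and is registered via the callback parameter of optimize_funccalls().
 */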
/**
 * Update a property depending on a call property.
 */
static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
    unsigned t = (orig_prop | call_prop) & mtp_temporary;
    unsigned r = orig_prop & call_prop;
    return r | t;
}  /* update_property */
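/*
 * Example: intersecting the caller's current assumption with a callee
 * result; the temporary flag survives if either side is only optimistic:
 *
 *     update_property(mtp_property_malloc | mtp_property_nothrow,
 *                     mtp_property_nothrow | mtp_temporary)
 *         == mtp_property_nothrow | mtp_temporary
 */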
/**
 * Check if a node is stored.
 */
static int is_stored(const ir_node *n) {
    const ir_edge_t *edge;
    const ir_node   *ptr;

    foreach_out_edge(n, edge) {
        const ir_node *succ = get_edge_src_irn(edge);

        switch (get_irn_opcode(succ)) {
        case iro_Store:
            if (get_Store_value(succ) == n)
                return 1;
            break; /* ok if it's only the address input */
        case iro_Call:
            ptr = get_Call_ptr(succ);
            if (is_Global(ptr)) {
                ir_entity *ent = get_Global_entity(ptr);
                int i;
                /* we know the called entity */
                for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
                    if (get_Call_param(succ, i) == n) {
                        /* n is the i'th param of the call: bad if it may be stored */
                        if (get_method_param_access(ent, i) & ptr_access_store)
                            return 1;
                    }
                }
            } else {
                return 1; /* unknown call address */
            }
            break;
        default:
            return 1; /* bad, potential alias */
        }
    }
    return 0;
}  /* is_stored */
/**
 * Check that the return value of an irg is not stored anywhere.
 *
 * @return ~mtp_property_malloc if return values are stored, ~0 otherwise
 */
static unsigned check_stored_result(ir_graph *irg) {
    ir_node  *end_blk = get_irg_end_block(irg);
    int      i, j;
    unsigned res = ~0;
    int      old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);

    for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
        ir_node *pred = get_Block_cfgpred(end_blk, i);

        if (! is_Return(pred))
            continue;
        for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
            const ir_node *irn = get_Return_res(pred, j);

            if (is_stored(irn)) {
                /* bad, might create an alias */
                res = ~mtp_property_malloc;
                goto finish;
            }
        }
    }
finish:
    if (! old_edges)
        edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
    return res;
}  /* check_stored_result */
/**
 * Check if a graph represents a nothrow or a malloc function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 */
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
    ir_node   *end_blk = get_irg_end_block(irg);
    ir_entity *ent;
    ir_type   *mtp;
    int       i, j;
    unsigned  curr_prop = mtp_property_malloc | mtp_property_nothrow;

    if (IS_IRG_READY(irg)) {
        /* already checked */
        return get_irg_additional_properties(irg);
    }
    if (IS_IRG_BUSY(irg)) {
        /* we are still evaluating this method. Be optimistic,
           return the best possible so far but mark the result as temporary. */
        return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
    }
    SET_IRG_BUSY(irg);

    ent = get_irg_entity(irg);
    mtp = get_entity_type(ent);

    if (get_method_n_ress(mtp) <= 0)
        curr_prop &= ~mtp_property_malloc;
    for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
        ir_node *pred = get_Block_cfgpred(end_blk, i);

        if (is_Return(pred)) {
            if (curr_prop & mtp_property_malloc) {
                /* check, if malloc is called here */
                for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
                    ir_node *res = get_Return_res(pred, j);

                    /* skip Confirms and Casts */
                    res = skip_HighLevel_ops(res);
                    /* skip Projs */
                    while (is_Proj(res))
                        res = get_Proj_pred(res);
                    if (is_malloc_call_result(res)) {
                        /* ok, this is a malloc */
                    } else if (is_Call(res)) {
                        ir_node *ptr = get_Call_ptr(res);

                        if (is_Global(ptr)) {
                            /* a direct call */
                            ir_entity *ent    = get_Global_entity(ptr);
                            ir_graph  *callee = get_entity_irg(ent);

                            if (callee == irg) {
                                /* A self-recursive call. The property did not depend on this call. */
                            } else if (callee != NULL) {
                                unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
                                curr_prop = update_property(curr_prop, prop);
                            } else {
                                curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
                            }
                        } else if (get_opt_closed_world() &&
                                   is_Sel(ptr) &&
                                   get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
                            /* check if all possible callees are malloc functions. */
                            int i, n_callees = get_Call_n_callees(res);
                            if (n_callees == 0) {
                                /* This is kind of strange: dying code or a Call that will raise an exception
                                   when executed, as there is no implementation to call. So better not
                                   optimize. */
                                curr_prop &= ~mtp_property_malloc;
                                continue;
                            }

                            for (i = 0; i < n_callees; ++i) {
                                ir_entity *ent = get_Call_callee(res, i);
                                if (ent == unknown_entity) {
                                    /* we don't know which entity is called here */
                                    curr_prop &= ~mtp_property_malloc;
                                    break;
                                }
                                if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
                                    curr_prop &= ~mtp_property_malloc;
                                    break;
                                }
                            }
                            /* if we reach the end of the loop, malloc is still ok */
                        } else {
                            /* unknown call */
                            curr_prop &= ~mtp_property_malloc;
                        }
                    } else {
                        /* unknown return value */
                        curr_prop &= ~mtp_property_malloc;
                    }
                }
            }
        } else if (curr_prop & mtp_property_nothrow) {
            /* exception flow detected */
            pred = skip_Proj(pred);

            if (is_Call(pred)) {
                ir_node *ptr = get_Call_ptr(pred);

                if (is_Global(ptr)) {
                    /* a direct call */
                    ir_entity *ent    = get_Global_entity(ptr);
                    ir_graph  *callee = get_entity_irg(ent);

                    if (callee == irg) {
                        /* A self-recursive call. The property did not depend on this call. */
                    } else if (callee != NULL) {
                        /* Note: we check here for nothrow only, so do NOT reset the malloc property */
                        unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
                        curr_prop = update_property(curr_prop, prop);
                    } else {
                        if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
                            curr_prop &= ~mtp_property_nothrow;
                    }
                } else if (get_opt_closed_world() &&
                           is_Sel(ptr) &&
                           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
                    /* check if all possible callees are nothrow functions. */
                    int i, n_callees = get_Call_n_callees(pred);
                    if (n_callees == 0) {
                        /* This is kind of strange: dying code or a Call that will raise an exception
                           when executed, as there is no implementation to call. So better not
                           optimize. */
                        curr_prop &= ~mtp_property_nothrow;
                        continue;
                    }

                    for (i = 0; i < n_callees; ++i) {
                        ir_entity *ent = get_Call_callee(pred, i);
                        if (ent == unknown_entity) {
                            /* we don't know which entity is called here */
                            curr_prop &= ~mtp_property_nothrow;
                            break;
                        }
                        if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
                            curr_prop &= ~mtp_property_nothrow;
                            break;
                        }
                    }
                    /* if we reach the end of the loop, nothrow is still ok */
                } else {
                    /* unknown call */
                    curr_prop &= ~mtp_property_nothrow;
                }
            } else {
                /* real exception flow possible. */
                curr_prop &= ~mtp_property_nothrow;
            }
        }
        if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
            /* no need to search further */
            break;
        }
    }
    if (curr_prop & mtp_property_malloc) {
        /*
         * Note that the malloc property means not only return newly allocated
         * memory, but also that this memory is ALIAS FREE.
         * To ensure that, we do NOT allow that the returned memory is somewhere
         * stored.
         */
        curr_prop &= check_stored_result(irg);
    }

    if (curr_prop != mtp_no_property) {
        if (top || (curr_prop & mtp_temporary) == 0) {
            /* We use the temporary flag here to mark an optimistic result.
               Set the property only if we are sure that it does NOT depend on
               temporary results OR if we are at top-level. */
            set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
            SET_IRG_READY(irg);
        }
    }
    if (top)
        SET_IRG_READY(irg);
    CLEAR_IRG_BUSY(irg);
    return curr_prop;
}  /* check_nothrow_or_malloc */
/**
 * When a function was detected as "const", it might be moved out of loops.
 * This might be dangerous if the graph can contain endless loops.
 */
static void check_for_possible_endless_loops(ir_graph *irg) {
    ir_loop *root_loop;
    assure_cf_loop(irg);

    root_loop = get_irg_loop(irg);
    if (root_loop->flags & loop_outer_loop)
        set_irg_additional_property(irg, mtp_property_has_loop);
}  /* check_for_possible_endless_loops */
/*
 * Optimize function calls by handling const, pure, nothrow and malloc functions.
 */
void optimize_funccalls(int force_run, check_alloc_entity_func callback)
{
    int i, last_idx;
    unsigned num_const   = 0;
    unsigned num_pure    = 0;
    unsigned num_nothrow = 0;
    unsigned num_malloc  = 0;

    is_alloc_entity = callback;

    /* prepare: mark all graphs as not analyzed */
    last_idx  = get_irp_last_idx();
    ready_set = rbitset_malloc(last_idx);
    busy_set  = rbitset_malloc(last_idx);
    /* first step: detect which functions are nothrow or malloc */
    DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
    for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
        ir_graph *irg = get_irp_irg(i);
        unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);

        if (prop & mtp_property_nothrow) {
            ++num_nothrow;
            DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
        } else if (prop & mtp_property_malloc) {
            ++num_malloc;
            DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
        }
    }

    /* second step: remove exception edges: this must be done before the
       detection of const and pure functions takes place. */
    if (force_run || num_nothrow > 0) {
        env_t ctx;

        handle_nothrow_Calls(&ctx);
        DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
        DB((dbg, LEVEL_1, "Optimized %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
            ctx.n_calls_SymConst, ctx.n_calls_Sel));
    } else {
        DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
    }

    rbitset_clear_all(ready_set, last_idx);
    rbitset_clear_all(busy_set, last_idx);
    /* third step: detect which functions are const or pure */
    DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
    for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
        ir_graph *irg = get_irp_irg(i);
        unsigned prop = check_const_or_pure_function(irg, /*top=*/1);

        if (prop & mtp_property_const) {
            ++num_const;
            DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
            check_for_possible_endless_loops(irg);
        } else if (prop & mtp_property_pure) {
            ++num_pure;
            DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
        }
    }

    if (force_run || num_const > 0) {
        env_t ctx;

        handle_const_Calls(&ctx);
        DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
        DB((dbg, LEVEL_1, "Optimized %u(SymConst) + %u(Sel) calls to const functions.\n",
            ctx.n_calls_SymConst, ctx.n_calls_Sel));
    } else {
        DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
    }

    xfree(busy_set);
    xfree(ready_set);
}  /* optimize_funccalls */
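/*
 * Typical driver usage (a sketch; the exact pass setup depends on the
 * surrounding compiler):
 *
 *     firm_init_funccalls();             // register the debug module once
 *     optimize_funccalls(0, NULL);       // rewrite only if properties found
 *     optimize_funccalls(1, my_is_alloc_entity); // force run, custom check
 */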
/* initialize the funccall optimization */
void firm_init_funccalls(void) {
    FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
//	firm_dbg_set_mask(dbg, -1);
}  /* firm_init_funccalls */