2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Optimization of function calls.
23 * @author Michael Beck
30 #include <adt/raw_bitset.h>
32 #include "funccall_t.h"
35 #include "irgraph_t.h"
39 #include "dbginfo_t.h"
45 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
48 * The walker environment for updating function calls.
50 typedef struct _env_t {
51 unsigned n_calls_SymConst;
53 ir_node *const_call_list; /**< The list of all const function calls that will be changed. */
54 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
55 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
56 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
/** If non-null, evaluates entities for being a heap alloc. */
static check_alloc_entity_func is_alloc_entity = NULL;

/** Ready IRG's are marked in the ready set. */
static unsigned *ready_set;

/** IRG's that are in progress are marked here. */
static unsigned *busy_set;

/*
 * We misuse the mtp_property_inherited flag as temporary here.
 * This is ok, as we cannot set or get it anyway using the
 * get_additional_properties API.
 */
#define mtp_temporary mtp_property_inherited
/**
 * Walker: Collect all calls to const and pure functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 *
 * NOTE(review): several lines of this function (local declarations,
 * early returns/continues and closing braces) are not visible in this
 * chunk; only the visible statements are documented below.
 */
static void collect_const_and_pure_calls(ir_node *node, void *env) {
	/* set the link to NULL for all non-const/pure calls */
	set_irn_link(call, NULL);
	ptr = get_Call_ptr(call);
	if (is_SymConst_addr_ent(ptr)) {
		/* direct call: the callee entity is statically known */
		ent = get_SymConst_entity(ptr);

		prop = get_entity_additional_properties(ent);
		if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
		/* callee is const or pure: count it as a SymConst call */
		++ctx->n_calls_SymConst;
	} else if (get_opt_closed_world() &&
		get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
		/* If all possible callees are const functions, we can remove the memory edge. */
		int i, n_callees = get_Call_n_callees(call);
		if (n_callees == 0) {
			/* This is kind of strange: dying code or a Call that will raise an exception
			   when executed as there is no implementation to call. So better not
			   optimize. */
		/* note that const functions are a subset of pure ones */
		prop = mtp_property_const | mtp_property_pure;
		for (i = 0; i < n_callees; ++i) {
			ent = get_Call_callee(call, i);
			if (ent == unknown_entity) {
				/* we don't know which entity is called here */
			/* intersect: the property must hold for every possible callee */
			prop &= get_entity_additional_properties(ent);
			if (prop == mtp_no_property)
		/* ok, if we get here we found a call to a const or a pure function */
		if (prop & mtp_property_pure) {
			set_irn_link(call, ctx->pure_call_list);
			ctx->pure_call_list = call;
			/* const calls go on their own list (else-branch; its opening is not visible here) */
			set_irn_link(call, ctx->const_call_list);
			ctx->const_call_list = call;
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls.
		 */
		call = get_Proj_pred(node);

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
		case pn_Call_M_regular:
		case pn_Call_X_except:
		case pn_Call_X_regular:
		case pn_Call_M_except:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
} /* collect_const_and_pure_calls */
/**
 * Fix the list of collected Calls.
 *
 * @param irg        the graph that contained calls to pure functions
 * @param call_list  the list of all call sites of const functions
 * @param proj_list  the list of all memory/exception Proj's of these call sites
 *
 * NOTE(review): some lines (break/continue statements, closing braces)
 * are not visible in this chunk.
 */
static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
	ir_node *call, *next, *mem, *proj;
	ir_graph *rem = current_ir_graph;

	current_ir_graph = irg;

	/* First step: fix all calls by removing their memory input.
	   The original memory input is preserved in their link fields. */
	for (call = call_list; call; call = next) {
		next = get_irn_link(call);
		mem = get_Call_mem(call);

		/* stash the old memory in the link so the Proj fixup below can find it */
		set_irn_link(call, mem);
		set_Call_mem(call, get_irg_no_mem(irg));

		/*
		 * Unfortunately we cannot simply set the node to 'float'.
		 * There is a reason for that:
		 *
		 * - The call might be inside a loop/if that is NOT entered
		 *   and calls an endless function. Setting the call to float
		 *   would allow to move it out from the loop/if, causing this
		 *   function to be called even if the loop/if is not entered ...
		 *
		 * This could be fixed using post-dominators for calls and Pin nodes
		 * but needs some more analyses to ensure that a call that potentially
		 * never returns is not executed before some code that generates
		 * observable states...
		 */

		/* finally, this call can float
		set_irn_pinned(call, op_pin_state_floats); */
		hook_func_call(irg, call);

	/* Second step: fix all Proj's */
	for (proj = proj_list; proj; proj = next) {
		next = get_irn_link(proj);
		call = get_Proj_pred(proj);
		mem = get_irn_link(call);

		/* beware of calls in the pure call list */
		if (! mem || get_irn_op(mem) == op_Call)
		assert(get_irn_mode(mem) == mode_M);

		switch (get_Proj_proj(proj)) {
		case pn_Call_M_regular: {
			/* in dead code there might be cycles where proj == mem */
		case pn_Call_X_except:
		case pn_Call_M_except:
			/* const calls cannot throw: exception flow becomes dead */
			exchange(proj, get_irg_bad(irg));
		case pn_Call_X_regular: {
			ir_node *block = get_nodes_block(call);

			/* the regular control-flow exit becomes an unconditional Jmp */
			exchange(proj, new_r_Jmp(irg, block));

	/* changes were done ... */
	set_irg_outs_inconsistent(irg);
	set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);

	/* ... including exception edges */
	set_irg_doms_inconsistent(irg);

	current_ir_graph = rem;
} /* fix_const_call_list */
/**
 * Walker: Collect all calls to nothrow functions
 * to lists. Collect all Proj(Call) nodes into a Proj list.
 *
 * NOTE(review): several lines of this function (local declarations,
 * early exits and closing braces) are not visible in this chunk.
 */
static void collect_nothrow_calls(ir_node *node, void *env) {
	/* set the link to NULL for all non-nothrow calls */
	set_irn_link(call, NULL);
	ptr = get_Call_ptr(call);
	if (is_SymConst_addr_ent(ptr)) {
		/* direct call: the callee entity is statically known */
		ent = get_SymConst_entity(ptr);

		prop = get_entity_additional_properties(ent);
		if ((prop & mtp_property_nothrow) == 0)
		/* callee is nothrow: count it as a SymConst call */
		++ctx->n_calls_SymConst;
	} else if (get_opt_closed_world() &&
		get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
		/* If all possible callees are nothrow functions, we can remove the exception edge. */
		int i, n_callees = get_Call_n_callees(call);
		if (n_callees == 0) {
			/* This is kind of strange: dying code or a Call that will raise an exception
			   when executed as there is no implementation to call. So better not
			   optimize. */
		/* start optimistic: intersect nothrow over all possible callees */
		prop = mtp_property_nothrow;
		for (i = 0; i < n_callees; ++i) {
			ent = get_Call_callee(call, i);
			if (ent == unknown_entity) {
				/* we don't know which entity is called here */
			prop &= get_entity_additional_properties(ent);
			if (prop == mtp_no_property)
		/* ok, if we get here we found a call to a nothrow function */
		set_irn_link(call, ctx->nothrow_call_list);
		ctx->nothrow_call_list = call;
	} else if (is_Proj(node)) {
		/*
		 * Collect all memory and exception Proj's from
		 * calls.
		 */
		call = get_Proj_pred(node);

		/* collect the Proj's in the Proj list */
		switch (get_Proj_proj(node)) {
		case pn_Call_M_regular:
		case pn_Call_X_except:
		case pn_Call_X_regular:
		case pn_Call_M_except:
			set_irn_link(node, ctx->proj_list);
			ctx->proj_list = node;
} /* collect_nothrow_calls */
325 * Fix the list of collected nothrow Calls.
327 * @param irg the graph that contained calls to pure functions
328 * @param call_list the list of all call sites of const functions
329 * @param proj_list the list of all memory/exception Proj's of this call sites
331 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
332 ir_node *call, *next, *proj;
334 ir_graph *rem = current_ir_graph;
336 current_ir_graph = irg;
338 /* First step: go through the list of calls and mark them. */
339 for (call = call_list; call; call = next) {
340 next = get_irn_link(call);
342 /* current_ir_graph is in memory anyway, so it's a good marker */
343 set_irn_link(call, ¤t_ir_graph);
344 hook_func_call(irg, call);
347 /* Second step: Remove all exception Proj's */
348 for (proj = proj_list; proj; proj = next) {
349 next = get_irn_link(proj);
350 call = get_Proj_pred(proj);
352 /* handle only marked calls */
353 if (get_irn_link(call) != ¤t_ir_graph)
356 /* kill any exception flow */
357 switch (get_Proj_proj(proj)) {
358 case pn_Call_X_except:
359 case pn_Call_M_except:
361 exchange(proj, get_irg_bad(irg));
363 case pn_Call_X_regular: {
364 ir_node *block = get_nodes_block(call);
366 exchange(proj, new_r_Jmp(irg, block));
374 /* changes were done ... */
375 set_irg_outs_inconsistent(irg);
376 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
379 /* ... including exception edges */
380 set_irg_doms_inconsistent(irg);
382 current_ir_graph = rem;
383 } /* fix_nothrow_call_list */
/* Bitset helpers: an IRG is "ready" once its analysis is finished,
 * and "busy" while its analysis is in progress (recursion detection). */
#define SET_IRG_READY(irg)  rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg)   rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg)   rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
#define IS_IRG_BUSY(irg)    rbitset_is_set(busy_set, get_irg_idx(irg))

/* forward declarations: the two analyses recurse into callee graphs */
static unsigned check_const_or_pure_function(ir_graph *irg, int top);
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
397 * Calculate the bigger property of two. Handle the temporary flag right.
399 static unsigned max_property(unsigned a, unsigned b) {
400 unsigned r, t = (a | b) & mtp_temporary;
404 if (a == mtp_no_property || b == mtp_no_property)
405 return mtp_no_property;
/**
 * Follow the memory chain starting at node and determine
 * the mtp property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure
 *         calls are detected
 *         mtp_no_property    else
 *
 * NOTE(review): the enclosing loop header, the switch case labels and
 * several early returns/braces are not visible in this chunk.
 */
static unsigned _follow_mem(ir_node *node) {
	unsigned m, mode = mtp_property_const;

	if (mode == mtp_no_property)
		return mtp_no_property;

	/* already seen on this walk: its contribution is accounted for */
	if (irn_visited(node))

	mark_irn_visited(node);

	switch (get_irn_opcode(node)) {
		/* Proj on the memory chain: step to its predecessor */
		node = get_Proj_pred(node);

		/* do a dfs search */
		for (i = get_irn_arity(node) - 1; i >= 0; --i) {
			m = _follow_mem(get_irn_n(node, i));
			mode = max_property(mode, m);
			if (mode == mtp_no_property)
				return mtp_no_property;

		/* Beware: volatile Loads are NOT allowed in pure functions. */
		if (get_Load_volatility(node) == volatility_is_volatile)
			return mtp_no_property;
		/* a (non-volatile) Load downgrades const to pure */
		mode = max_property(mode, mtp_property_pure);
		node = get_Load_mem(node);

		/* A call is only tolerable if it's either constant or pure. */
		ptr = get_Call_ptr(node);
		if (get_irn_op(ptr) == op_SymConst &&
			get_SymConst_kind(ptr) == symconst_addr_ent) {
			ir_entity *ent = get_SymConst_entity(ptr);
			ir_graph *irg = get_entity_irg(ent);

			if (irg == current_ir_graph) {
				/* A self-recursive call. The property did not depend on this call. */
			} else if (irg == NULL) {
				/* no graph available: trust the entity's declared properties */
				m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
				mode = max_property(mode, m);
			} else if (irg != NULL) {
				/* we have a graph, analyze it. */
				m = check_const_or_pure_function(irg, /*top=*/0);
				mode = max_property(mode, m);
			/* indirect or unknown call target: we must assume the worst */
			return mtp_no_property;
			node = get_Call_mem(node);

	/* any other memory-producing node kills the property */
	return mtp_no_property;
/**
 * Follow the memory chain starting at node and determine
 * the mtp property.
 *
 * @return mtp_property_const if only calls of const functions are detected
 *         mtp_property_pure  if only Loads and const/pure calls detected
 *         mtp_no_property    else
 */
static unsigned follow_mem(ir_node *node, unsigned mode) {
	/* combine the caller's already-known mode with the chain's result */
	m = _follow_mem(node);
	return max_property(mode, m);
/**
 * Check if a graph represents a const or a pure function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 *
 * NOTE(review): local declarations (j, mem), the busy-bit set/clear
 * calls and several braces/continues are not visible in this chunk;
 * comments below describe only the visible code.
 */
static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
	ir_node *end, *endbl;
	unsigned prop = get_irg_additional_properties(irg);
	ir_graph *rem = current_ir_graph;

	if (prop & mtp_property_const) {
		/* already marked as a const function */
		return mtp_property_const;

	if (prop & mtp_property_pure) {
		/* already marked as a pure function */
		return mtp_property_pure;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return mtp_no_property;

	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluating this method. Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_const;

	end = get_irg_end(irg);
	endbl = get_nodes_block(end);
	/* start optimistic with the strongest property */
	prop = mtp_property_const;

	current_ir_graph = irg;

	inc_irg_visited(irg);
	/* mark the initial mem: recursion of follow_mem stops here */
	mark_irn_visited(get_irg_initial_mem(irg));

	/* visit every Return */
	for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
		ir_node *node = get_Block_cfgpred(endbl, j);
		ir_opcode code = get_irn_opcode(node);

		/* Bad nodes usually do NOT produce anything, so it's ok */

		if (code == iro_Return) {
			mem = get_Return_mem(node);

			/* Bad nodes usually do NOT produce anything, so it's ok */

			/* the unmodified initial mem carries no side effects */
			if (mem != get_irg_initial_mem(irg))
				prop = max_property(prop, follow_mem(mem, prop));
			/* Exception found. Cannot be const or pure. */
			prop = mtp_no_property;

		if (prop == mtp_no_property)

	if (prop != mtp_no_property) {
		/* check, if a keep-alive exists */
		for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
			ir_node *mem = get_End_keepalive(end, j);

			/* only memory keep-alives are relevant here */
			if (mode_M != get_irn_mode(mem))

			prop = max_property(prop, follow_mem(mem, prop));
			if (prop == mtp_no_property)

	if (prop != mtp_no_property) {
		if (top || (prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark optimistic result.
			   Set the property only if we are sure that it does NOT base on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, prop & ~mtp_temporary);

	current_ir_graph = rem;
} /* check_const_or_pure_function */
/**
 * Handle calls to const functions.
 *
 * Walks every graph in the program, collects its const/pure calls and
 * rewrites them via fix_const_call_list().
 */
static void handle_const_Calls(env_t *ctx) {
	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel = 0;

	/* all calls of const functions can be transformed */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);

		ctx->const_call_list = NULL;
		ctx->pure_call_list = NULL;
		ctx->proj_list = NULL;
		irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);

		if (ctx->const_call_list) {
			fix_const_call_list(irg, ctx->const_call_list, ctx->proj_list);

			/* this graph was changed, invalidate analysis info */
			set_irg_outs_inconsistent(irg);
			set_irg_doms_inconsistent(irg);
} /* handle_const_Calls */
/**
 * Handle calls to nothrow functions.
 *
 * Walks every graph in the program, collects its nothrow calls and
 * removes their exception edges via fix_nothrow_call_list().
 */
static void handle_nothrow_Calls(env_t *ctx) {
	ctx->n_calls_SymConst = 0;
	ctx->n_calls_Sel = 0;

	/* all calls of nothrow functions can be transformed */
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);

		ctx->nothrow_call_list = NULL;
		ctx->proj_list = NULL;
		irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);

		if (ctx->nothrow_call_list) {
			fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);

			/* this graph was changed, invalidate analysis info */
			set_irg_outs_inconsistent(irg);
			set_irg_doms_inconsistent(irg);
/**
 * Check, whether a given node represents a return value of
 * a malloc like function (ie, new heap allocated memory).
 *
 * @param node  the node to check
 *
 * NOTE(review): the return statements of the two positive paths and the
 * final return are not visible in this chunk.
 */
static int is_malloc_call_result(const ir_node *node) {
	if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
		/* Firm style high-level allocation */
	/* otherwise: a direct call to a user-registered allocator entity */
	if (is_alloc_entity != NULL && is_Call(node)) {
		ir_node *ptr = get_Call_ptr(node);

		if (is_SymConst_addr_ent(ptr)) {
			ir_entity *ent = get_SymConst_entity(ptr);
			return is_alloc_entity(ent);
} /* is_malloc_call_result */
687 * Update a property depending on a call property.
689 static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
690 unsigned t = (orig_prop | call_prop) & mtp_temporary;
691 unsigned r = orig_prop & call_prop;
693 } /** update_property */
/**
 * Check if a graph represents a nothrow or a malloc function.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top call
 *
 * NOTE(review): local declarations (i, j), the busy-bit set/clear calls
 * and numerous braces/breaks are not visible in this chunk; comments
 * below describe only the visible code.
 */
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
	ir_node *end_blk = get_irg_end_block(irg);

	/* start optimistic: assume both properties, strip them on evidence */
	unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return get_irg_additional_properties(irg);

	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluating this method. Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;

	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (is_Return(pred)) {
			if (curr_prop & mtp_property_malloc) {
				/* check, if malloc is called here */
				for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
					const ir_node *res = get_Return_res(pred, j);
					/* NOTE(review): `irn` (the Proj-skipped result) appears
					   unused in the visible code; the checks below probably
					   should operate on `irn` rather than `res` — verify
					   against the upstream revision. */
					const ir_node *irn = skip_Proj_const(res);
					if (is_malloc_call_result(res)) {
						/* ok, this is a malloc */
					} else if (is_Call(res)) {
						ir_node *ptr = get_Call_ptr(res);

						if (is_SymConst_addr_ent(ptr)) {
							/* direct call: inspect the callee */
							ir_entity *ent = get_SymConst_entity(ptr);
							ir_graph *callee = get_entity_irg(ent);

							/* A self-recursive call. The property did not depend on this call. */
							} else if (callee != NULL) {
								/* the callee's graph is available: analyze it */
								unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
								curr_prop = update_property(curr_prop, prop);
								/* no graph: trust the declared entity properties */
								curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
						} else if (get_opt_closed_world() &&
							get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
							/* check if all possible callees are malloc functions. */
							int i, n_callees = get_Call_n_callees(res);
							if (n_callees == 0) {
								/* This is kind of strange: dying code or a Call that will raise an exception
								   when executed as there is no implementation to call. So better not
								   optimize. */
								curr_prop &= ~mtp_property_malloc;

							for (i = 0; i < n_callees; ++i) {
								ir_entity *ent = get_Call_callee(res, i);
								if (ent == unknown_entity) {
									/* we don't know which entity is called here */
									curr_prop &= ~mtp_property_malloc;

								if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
									curr_prop &= ~mtp_property_malloc;

							/* if we pass the for cycle, malloc is still ok */
							/* unknown call target: malloc cannot be guaranteed */
							curr_prop &= ~mtp_property_malloc;

		} else if (curr_prop & mtp_property_nothrow) {
			/* exception flow detected */
			pred = skip_Proj(pred);

				ir_node *ptr = get_Call_ptr(pred);

				if (is_SymConst_addr_ent(ptr)) {
					/* direct call: inspect the callee */
					ir_entity *ent = get_SymConst_entity(ptr);
					ir_graph *callee = get_entity_irg(ent);

					/* A self-recursive call. The property did not depend on this call. */
					} else if (callee != NULL) {
						/* the callee's graph is available: analyze it */
						unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
						curr_prop = update_property(curr_prop, prop);
						/* no graph: trust the declared entity properties */
						curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
				} else if (get_opt_closed_world() &&
					get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
					/* check if all possible callees are nothrow functions. */
					int i, n_callees = get_Call_n_callees(pred);
					if (n_callees == 0) {
						/* This is kind of strange: dying code or a Call that will raise an exception
						   when executed as there is no implementation to call. So better not
						   optimize. */
						curr_prop &= ~mtp_property_nothrow;

					for (i = 0; i < n_callees; ++i) {
						ir_entity *ent = get_Call_callee(pred, i);
						if (ent == unknown_entity) {
							/* we don't know which entity is called here */
							curr_prop &= ~mtp_property_nothrow;

						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
							curr_prop &= ~mtp_property_nothrow;

					/* if we pass the for cycle, nothrow is still ok */
					/* unknown call target: nothrow cannot be guaranteed */
					curr_prop &= ~mtp_property_nothrow;

				/* real exception flow possible. */
				curr_prop &= ~mtp_property_nothrow;

		if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
			/* no need to search further */

	if (curr_prop != mtp_no_property) {
		if (top || (curr_prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark an optimistic result.
			   Set the property only if we are sure that it does NOT base on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, curr_prop & ~mtp_temporary);

} /* check_nothrow_or_malloc */
/*
 * optimize function calls by handling const functions
 *
 * NOTE(review): declarations of i, last_idx and ctx, the counter
 * increments inside the detection loops, some braces and the final
 * release of the bitsets are not visible in this chunk.
 */
void optimize_funccalls(int force_run)
	unsigned num_const = 0;
	unsigned num_pure = 0;
	unsigned num_nothrow = 0;
	unsigned num_malloc = 0;

	/* prepare: mark all graphs as not analyzed */
	last_idx = get_irp_last_idx();
	ready_set = rbitset_malloc(last_idx);
	busy_set = rbitset_malloc(last_idx);

	/* first step: detect, which functions are nothrow or malloc */
	DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);
		unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);

		if (prop & mtp_property_nothrow) {
			DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
		} else if (prop & mtp_property_malloc) {
			DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));

	/* second step: remove exception edges: this must be done before the
	   detection of const and pure functions take place. */
	if (force_run || num_nothrow > 0) {
		handle_nothrow_Calls(&ctx);
		DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
		DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
			ctx.n_calls_SymConst, ctx.n_calls_Sel));
		/* else-branch (opening not visible here): nothing to do */
		DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));

	/* reset the analysis state before the second pass */
	rbitset_clear_all(ready_set, last_idx);
	rbitset_clear_all(busy_set, last_idx);

	/* third step: detect, which functions are const or pure */
	DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
		ir_graph *irg = get_irp_irg(i);
		unsigned prop = check_const_or_pure_function(irg, /*top=*/1);

		if (prop & mtp_property_const) {
			DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
		} else if (prop & mtp_property_pure) {
			DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));

	if (force_run || num_const > 0) {
		handle_const_Calls(&ctx);
		DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
		DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
			ctx.n_calls_SymConst, ctx.n_calls_Sel));
		/* else-branch (opening not visible here): nothing to do */
		DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));

} /* optimize_funccalls */
/* initialize the funccall optimization: register this pass's debug module */
void firm_init_funccalls(void) {
	FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
	/* debugging aid: uncomment to enable all debug output for this module */
	// firm_dbg_set_mask(dbg, -1);
} /* firm_init_funccalls */