2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Optimization of function calls.
23 * @author Michael Beck
31 #include "irgraph_t.h"
34 #include "dbginfo_t.h"
38 #include "iredges_t.h"
40 #include "iroptimize.h"
41 #include "analyze_irg_args.h"
43 #include "raw_bitset.h"
46 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
49 * The walker environment for updating function calls.
/* NOTE(review): the closing "} env_t;" falls in lines not present in this
 * chunk (the baked-in line numbers jump from 56 to 59). The link-list fields
 * below are threaded through node link fields via set_irn_link(). */
51 typedef struct env_t {
52 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
53 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
54 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
55 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
56 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
59 /** Ready IRG's are marked in the ready set. */
60 static unsigned *ready_set;
62 /** IRG's that are in progress are marked here. */
63 static unsigned *busy_set;
66 * We misuse the mtp_property_inherited flag as temporary here.
67 * This is ok, as we cannot set or get it anyway using the
68 * get_additional_properties API.
70 #define mtp_temporary mtp_property_inherited
73 * Walker: Collect all calls to const and pure functions
74 * to lists. Collect all Proj(Call) nodes into a Proj list.
/* NOTE(review): this chunk has gaps (baked-in line numbers jump, e.g. 78->83,
 * 105->108); the is_Call() guard, several closing braces and `break`s are not
 * visible here. Comments below describe only the visible statements. */
76 static void collect_const_and_pure_calls(ir_node *node, void *env)
78 env_t *ctx = (env_t*)env;
83 /* set the link to NULL for all non-const/pure calls */
84 set_irn_link(call, NULL);
85 ir_node *ptr = get_Call_ptr(call);
/* only direct calls through a SymConst entity address can be classified */
86 if (!is_SymConst_addr_ent(ptr))
89 ir_entity *ent = get_SymConst_entity(ptr);
91 unsigned prop = get_entity_additional_properties(ent);
92 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
95 /* ok, if we get here we found a call to a const or a pure function */
96 if (prop & mtp_property_pure) {
/* pure calls are chained into their own list via the link field */
97 set_irn_link(call, ctx->pure_call_list);
98 ctx->pure_call_list = call;
/* const calls to functions that may loop must stay pinned (nonfloat list) */
100 if (prop & mtp_property_has_loop) {
101 set_irn_link(call, ctx->nonfloat_const_call_list);
102 ctx->nonfloat_const_call_list = call;
104 set_irn_link(call, ctx->float_const_call_list);
105 ctx->float_const_call_list = call;
108 } else if (is_Proj(node)) {
110 * Collect all memory and exception Proj's from
113 ir_node *call = get_Proj_pred(node);
117 /* collect the Proj's in the Proj list */
118 switch (get_Proj_proj(node)) {
120 case pn_Call_X_except:
121 case pn_Call_X_regular:
122 set_irn_link(node, ctx->proj_list);
123 ctx->proj_list = node;
132 * Fix the list of collected Calls.
134 * @param irg the graph that contained calls to pure functions
/* NOTE(review): interior lines are missing from this chunk (line numbers jump
 * 139->141, 150->153, 197->206 etc.); loops over the pure/nonfloat lists and
 * the exc_changed updates are presumably among them — confirm against the
 * full file. */
137 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
139 bool exc_changed = false;
141 /* First step: fix all calls by removing their memory input and let
143 * The original memory input is preserved in their link fields. */
145 for (ir_node *call = ctx->float_const_call_list; call != NULL; call = next) {
146 next = (ir_node*)get_irn_link(call);
147 ir_node *mem = get_Call_mem(call);
/* remember the old memory input in the link; detach the call from memory */
149 set_irn_link(call, mem);
150 set_Call_mem(call, get_irg_no_mem(irg));
153 * Unfortunately we cannot simply set the node to 'float'.
154 * There is a reason for that:
156 * - The call might be inside a loop/if that is NOT entered
157 * and calls an endless function. Setting the call to float
158 * would allow to move it out from the loop/if causing this
159 * function be called even if the loop/if is not entered ...
161 * This could be fixed using post-dominators for calls and Pin nodes
162 * but needs some more analysis to ensure that a call that potentially
163 * never returns is not executed before some code that generates
164 * observable states...
167 /* finally, this call can float */
168 set_irn_pinned(call, op_pin_state_floats);
169 hook_func_call(irg, call);
172 /* Last step: fix all Proj's */
173 for (ir_node *proj = ctx->proj_list; proj != NULL; proj = next) {
174 next = (ir_node*)get_irn_link(proj);
175 ir_node *call = get_Proj_pred(proj);
176 ir_node *mem = (ir_node*)get_irn_link(call);
178 /* beware of calls in the pure call list */
179 if (!mem || is_Call(mem))
181 assert(get_irn_mode(mem) == mode_M);
183 switch (get_Proj_proj(proj)) {
185 /* in dead code there might be cycles where proj == mem */
/* exception edge of a const call can never fire: replace with Bad */
190 case pn_Call_X_except:
192 exchange(proj, new_r_Bad(irg, mode_X));
/* regular control-flow successor becomes an unconditional Jmp */
194 case pn_Call_X_regular: {
195 ir_node *block = get_nodes_block(call);
197 exchange(proj, new_r_Jmp(block));
206 /* ... including exception edges */
207 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
208 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
213 * Walker: Collect all calls to nothrow functions
214 * to lists. Collect all Proj(Call) nodes into a Proj list.
/* NOTE(review): the is_Call() guard and closing braces fall in lines missing
 * from this chunk (number gaps 218->221, 253->262). */
216 static void collect_nothrow_calls(ir_node *node, void *env)
218 env_t *ctx = (env_t*)env;
221 ir_node *call = node;
223 /* set the link to NULL for all non-nothrow calls */
224 set_irn_link(call, NULL);
225 ir_node *ptr = get_Call_ptr(call);
/* only direct calls through a SymConst entity address can be classified */
226 if (!is_SymConst_addr_ent(ptr))
229 ir_entity *ent = get_SymConst_entity(ptr);
231 unsigned prop = get_entity_additional_properties(ent);
232 if ((prop & mtp_property_nothrow) == 0)
235 /* ok, if we get here we found a call to a nothrow function */
236 set_irn_link(call, ctx->nothrow_call_list);
237 ctx->nothrow_call_list = call;
238 } else if (is_Proj(node)) {
240 * Collect all memory and exception Proj's from
243 ir_node *call = get_Proj_pred(node);
247 /* collect the Proj's in the Proj list */
248 switch (get_Proj_proj(node)) {
250 case pn_Call_X_except:
251 case pn_Call_X_regular:
252 set_irn_link(node, ctx->proj_list);
253 ctx->proj_list = node;
262 * Fix the list of collected nothrow Calls.
264 * @param irg the graph that contained calls to pure functions
265 * @param call_list the list of all call sites of const functions
266 * @param proj_list the list of all memory/exception Proj's of this call sites
/* FIX(review): the marker value had been corrupted to the mojibake
 * "¤t_ir_graph" — "&curr" was once turned into the HTML entity
 * "&curren;" (the '¤' sign). Restored to &current_ir_graph below (twice).
 * NOTE(review): interior lines are missing from this chunk (line-number
 * gaps 268->271, 301->309); the second function parameter and the
 * exc_changed handling are among the invisible lines. */
268 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list,
271 bool exc_changed = false;
273 /* First step: go through the list of calls and mark them. */
275 for (ir_node *call = call_list; call; call = next) {
276 next = (ir_node*)get_irn_link(call);
278 /* current_ir_graph is in memory anyway, so it's a good marker */
279 set_irn_link(call, &current_ir_graph);
280 hook_func_call(irg, call);
283 /* Second step: Remove all exception Proj's */
284 for (ir_node *proj = proj_list; proj; proj = next) {
285 next = (ir_node*)get_irn_link(proj);
286 ir_node *call = get_Proj_pred(proj);
288 /* handle only marked calls */
289 if (get_irn_link(call) != &current_ir_graph)
292 /* kill any exception flow */
293 switch (get_Proj_proj(proj)) {
/* nothrow call can never raise: its exception edge becomes Bad */
294 case pn_Call_X_except:
296 exchange(proj, new_r_Bad(irg, mode_X));
/* regular control-flow successor becomes an unconditional Jmp */
298 case pn_Call_X_regular: {
299 ir_node *block = get_nodes_block(call);
301 exchange(proj, new_r_Jmp(block));
309 /* changes were done ... */
311 /* ... including exception edges */
312 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
313 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
/* Bitset helpers: mark/test per-irg analysis state in the file-scope
 * ready_set ("fully analyzed") and busy_set ("analysis in progress",
 * used to break recursion cycles). Indexed by get_irg_idx(). */
318 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
319 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
320 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
321 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
322 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
325 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, bool top);
328 * Calculate the bigger property of two. Handle the temporary flag right.
/* NOTE(review): lines 332, 334-336 and the trailing return are missing from
 * this chunk; presumably the temporary bits are stripped before comparison
 * and `r | t` is returned — confirm against the full file. */
330 static mtp_additional_properties max_property(mtp_additional_properties a,
331 mtp_additional_properties b)
/* remember whether either input carried the temporary (optimistic) flag */
333 mtp_additional_properties t = (a | b) & mtp_temporary;
/* mtp_no_property is absorbing: if either side has nothing, result is nothing */
337 if (a == mtp_no_property || b == mtp_no_property)
338 return mtp_no_property;
339 mtp_additional_properties r = a > b ? a : b;
344 * Follow the memory chain starting at node and determine
347 * @return mtp_property_const if only calls of const functions are detected
348 * mtp_property_pure if only Loads and const/pure calls detected
349 * mtp_no_property else
/* NOTE(review): this chunk is missing interior lines (the enclosing
 * for(;;)/switch scaffolding, several case labels and breaks); the visible
 * cases are Proj, Phi/Sync-like fan-in, Load and Call. */
351 static mtp_additional_properties follow_mem_(ir_node *node)
/* start optimistic: const until a Load or impure call degrades it */
353 mtp_additional_properties mode = mtp_property_const;
356 if (mode == mtp_no_property)
357 return mtp_no_property;
/* each node is inspected once per traversal; stops cycles */
359 if (irn_visited_else_mark(node))
362 switch (get_irn_opcode(node)) {
364 node = get_Proj_pred(node);
373 /* do a dfs search */
374 for (int i = get_irn_arity(node) - 1; i >= 0; --i) {
375 mtp_additional_properties m = follow_mem_(get_irn_n(node, i));
376 mode = max_property(mode, m);
377 if (mode == mtp_no_property)
378 return mtp_no_property;
383 /* Beware volatile Loads are NOT allowed in pure functions. */
384 if (get_Load_volatility(node) == volatility_is_volatile)
385 return mtp_no_property;
/* a (non-volatile) Load degrades const to at best pure */
386 mode = max_property(mode, mtp_property_pure);
387 node = get_Load_mem(node);
391 /* A call is only tolerable if it is either constant or pure. */
392 ir_node *ptr = get_Call_ptr(node);
393 if (!is_SymConst_addr_ent(ptr))
394 return mtp_no_property;
396 ir_entity *ent = get_SymConst_entity(ptr);
397 ir_graph *irg = get_entity_irg(ent);
399 mtp_additional_properties m;
/* no graph available: rely on the entity's declared properties */
401 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
402 mode = max_property(mode, m);
404 /* we have a graph, analyze it. */
405 m = check_const_or_pure_function(irg, false);
406 mode = max_property(mode, m);
408 node = get_Call_mem(node);
413 return mtp_no_property;
419 * Follow the memory chain starting at node and determine
422 * @return mtp_property_const if only calls of const functions are detected
423 * mtp_property_pure if only Loads and const/pure calls detected
424 * mtp_no_property else
/* Thin wrapper: combine the caller's current property estimate with the
 * result of walking the memory chain. (Braces on lines not shown here.) */
426 static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
428 mtp_additional_properties m = follow_mem_(node);
429 return max_property(mode, m);
433 * Check if a graph represents a const or a pure function.
435 * @param irg the graph to check
436 * @param top if set, this is the top call
/* NOTE(review): interior lines are missing from this chunk (e.g. 453-455,
 * 473-475, 527-531, 535-538); the SET_IRG_BUSY / CLEAR_IRG_BUSY /
 * SET_IRG_READY bookkeeping and the final return are presumably among
 * them — confirm against the full file. */
438 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, bool top)
440 ir_entity *entity = get_irg_entity(irg);
441 ir_type *type = get_entity_type(entity);
442 size_t n_params = get_method_n_params(type);
443 mtp_additional_properties may_be_const = mtp_property_const;
444 mtp_additional_properties prop = get_irg_additional_properties(irg);
446 /* libfirm handles aggregate parameters by passing around pointers to
447 * stuff in memory, so if we have compound parameters we are never const */
448 for (size_t i = 0; i < n_params; ++i) {
449 ir_type *param = get_method_param_type(type, i);
450 if (is_compound_type(param)) {
451 prop &= ~mtp_property_const;
452 may_be_const = mtp_no_property;
456 if (prop & mtp_property_const) {
457 /* already marked as a const function */
458 return mtp_property_const;
460 if (prop & mtp_property_pure) {
461 /* already marked as a pure function */
462 return mtp_property_pure;
465 if (IS_IRG_READY(irg)) {
466 /* already checked */
467 return mtp_no_property;
469 if (IS_IRG_BUSY(irg)) {
470 /* We are still evaluating this method.
471 * The function (indirectly) calls itself and thus may not terminate. */
472 return mtp_no_property;
476 ir_node *end = get_irg_end(irg);
477 ir_node *endbl = get_nodes_block(end);
480 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
481 inc_irg_visited(irg);
482 /* mark the initial mem: recursion of follow_mem() stops here */
483 mark_irn_visited(get_irg_initial_mem(irg));
485 /* visit every Return */
486 for (int j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
487 ir_node *node = get_Block_cfgpred(endbl, j);
488 unsigned code = get_irn_opcode(node);
490 /* Bad nodes usually do NOT produce anything, so it's ok */
494 if (code == iro_Return) {
495 ir_node *mem = get_Return_mem(node);
497 /* Bad nodes usually do NOT produce anything, so it's ok */
/* returning the untouched initial memory cannot degrade the property */
501 if (mem != get_irg_initial_mem(irg))
502 prop = max_property(prop, follow_mem(mem, prop));
504 /* Exception found. Cannot be const or pure. */
505 prop = mtp_no_property;
508 if (prop == mtp_no_property)
512 if (prop != mtp_no_property) {
513 /* check, if a keep-alive exists */
514 for (int j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
515 ir_node *kept = get_End_keepalive(end, j);
/* a kept block indicates an endless loop: neither const nor pure */
517 if (is_Block(kept)) {
518 prop = mtp_no_property;
/* only memory keep-alives need their chains followed */
522 if (mode_M != get_irn_mode(kept))
525 prop = max_property(prop, follow_mem(kept, prop));
526 if (prop == mtp_no_property)
532 /* Set the property only if we are at top-level. */
533 if (prop != mtp_no_property) {
534 add_irg_additional_properties(irg, prop);
539 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
544 * Handle calls to const functions.
/* Per-graph driver: reset the per-call lists in ctx, collect const/pure
 * calls plus their control-flow Projs with a walker, then rewrite them.
 * Uses IRN_LINK as the list-threading resource for the walk. */
548 static void handle_const_Calls(env_t *ctx)
550 /* all calls of const functions can be transformed */
551 size_t n = get_irp_n_irgs();
552 for (size_t i = 0; i < n; ++i) {
553 ir_graph *irg = get_irp_irg(i);
/* fresh lists for every graph */
555 ctx->float_const_call_list = NULL;
556 ctx->nonfloat_const_call_list = NULL;
557 ctx->pure_call_list = NULL;
558 ctx->proj_list = NULL;
560 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
561 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
562 fix_const_call_lists(irg, ctx);
563 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
565 confirm_irg_properties(irg,
566 IR_GRAPH_PROPERTIES_CONTROL_FLOW
567 | IR_GRAPH_PROPERTY_ONE_RETURN
568 | IR_GRAPH_PROPERTY_MANY_RETURNS);
573 * Handle calls to nothrow functions.
/* Per-graph driver: collect calls to nothrow functions plus their
 * control-flow Projs, then strip the impossible exception edges. */
577 static void handle_nothrow_Calls(env_t *ctx)
579 /* all calls of nothrow functions can be transformed */
580 size_t n = get_irp_n_irgs();
581 for (size_t i = 0; i < n; ++i) {
582 ir_graph *irg = get_irp_irg(i);
/* fresh lists for every graph */
584 ctx->nothrow_call_list = NULL;
585 ctx->proj_list = NULL;
587 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
588 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
/* only rewrite when the walker actually found nothrow call sites */
590 if (ctx->nothrow_call_list)
591 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
592 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
597 * Check, whether a given node represents a return value of
598 * a malloc like function (ie, new heap allocated memory).
600 * @param node the node to check
/* NOTE(review): the return statements fall in lines missing from this
 * chunk (number gaps 605->608 and after 608). */
602 static bool is_malloc_call_result(const ir_node *node)
604 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
605 /* Firm style high-level allocation */
608 /* TODO: check mtp_malloc */
613 * Update a property depending on a call property.
/* Intersects the caller's property set with the callee's while keeping the
 * temporary (optimistic) flag sticky. NOTE(review): the trailing return
 * (presumably `r | t`) is on a line missing from this chunk. */
615 static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
617 mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
618 mtp_additional_properties r = orig_prop & call_prop;
623 * Check if a node is stored.
/* Walks the out-edges of n and reports whether its value can escape to
 * memory (stored directly, or passed to a call parameter that the callee
 * is known to store). NOTE(review): several case labels, returns and
 * declarations fall in lines missing from this chunk. */
625 static bool is_stored(const ir_node *n)
629 foreach_out_edge(n, edge) {
630 const ir_node *succ = get_edge_src_irn(edge);
632 switch (get_irn_opcode(succ)) {
/* stored as the Store's value operand -> escapes */
639 if (get_Store_value(succ) == n)
641 /* ok if it is only the address input */
650 ptr = get_Call_ptr(succ);
651 if (is_SymConst_addr_ent(ptr)) {
652 ir_entity *ent = get_SymConst_entity(ptr);
655 /* we know the called entity */
656 for (i = get_Call_n_params(succ); i > 0;) {
657 if (get_Call_param(succ, --i) == n) {
658 /* n is the i'th param of the call */
659 if (get_method_param_access(ent, i) & ptr_access_store) {
660 /* n is stored in ent */
666 /* unknown call address */
671 /* bad, potential alias */
679 * Check that the return value of an irg is not stored anywhere.
681 * return ~mtp_property_malloc if return values are stored, ~0 else
/* Used as a mask: callers AND the result into their property set, which
 * clears only mtp_property_malloc when a returned value may alias. */
683 static mtp_additional_properties check_stored_result(ir_graph *irg)
685 ir_node *end_blk = get_irg_end_block(irg);
/* ~mtp_no_property == all bits set: the neutral mask */
686 mtp_additional_properties res = ~mtp_no_property;
/* is_stored() walks out-edges, so consistent out edges are required */
688 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
690 for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
691 ir_node *pred = get_Block_cfgpred(end_blk, i);
693 if (! is_Return(pred))
695 for (size_t j = get_Return_n_ress(pred); j > 0;) {
696 const ir_node *irn = get_Return_res(pred, --j);
698 if (is_stored(irn)) {
699 /* bad, might create an alias */
700 res = ~mtp_property_malloc;
706 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
711 * Check if a graph represents a nothrow or a malloc function.
713 * @param irg the graph to check
714 * @param top if set, this is the top call
/* NOTE(review): interior lines are missing from this chunk (e.g. 729-731,
 * 762-763, 792-793, 813-817, 832 onwards); the SET_IRG_BUSY/READY
 * bookkeeping, several guards and the final return are presumably among
 * them — confirm against the full file. */
716 static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, bool top)
/* start optimistic with both properties; the walk below removes them */
718 mtp_additional_properties curr_prop
719 = mtp_property_malloc | mtp_property_nothrow;
721 if (IS_IRG_READY(irg)) {
722 /* already checked */
723 return get_irg_additional_properties(irg);
725 if (IS_IRG_BUSY(irg)) {
726 /* we are still evaluating this method. Be optimistic,
727 return the best possible so far but mark the result as temporary. */
728 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
732 ir_entity *ent = get_irg_entity(irg);
733 ir_type *mtp = get_entity_type(ent);
/* a function without results can never be malloc-like */
735 if (get_method_n_ress(mtp) <= 0)
736 curr_prop &= ~mtp_property_malloc;
738 ir_node *end_blk = get_irg_end_block(irg);
739 for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
740 ir_node *pred = get_Block_cfgpred(end_blk, i);
742 if (is_Return(pred)) {
743 if (curr_prop & mtp_property_malloc) {
744 /* check, if malloc is called here */
745 for (size_t j = get_Return_n_ress(pred); j > 0;) {
746 ir_node *res = get_Return_res(pred, --j);
748 /* skip Confirms and Casts */
749 res = skip_HighLevel_ops(res);
752 res = get_Proj_pred(res);
753 if (is_malloc_call_result(res)) {
754 /* ok, this is a malloc */
755 } else if (is_Call(res)) {
756 ir_node *ptr = get_Call_ptr(res);
758 if (is_SymConst_addr_ent(ptr)) {
760 ir_entity *ent = get_SymConst_entity(ptr);
761 ir_graph *callee = get_entity_irg(ent);
764 /* A self-recursive call. The property did not depend on this call. */
765 } else if (callee != NULL) {
766 mtp_additional_properties prop = check_nothrow_or_malloc(callee, false);
767 curr_prop = update_property(curr_prop, prop);
769 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
/* indirect call target: cannot prove malloc */
773 curr_prop &= ~mtp_property_malloc;
776 /* unknown return value */
777 curr_prop &= ~mtp_property_malloc;
781 } else if (curr_prop & mtp_property_nothrow) {
782 /* exception flow detected */
783 pred = skip_Proj(pred);
786 ir_node *ptr = get_Call_ptr(pred);
788 if (is_SymConst_addr_ent(ptr)) {
790 ir_entity *ent = get_SymConst_entity(ptr);
791 ir_graph *callee = get_entity_irg(ent);
794 /* A self-recursive call. The property did not depend on this call. */
795 } else if (callee != NULL) {
796 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
797 mtp_additional_properties prop = check_nothrow_or_malloc(callee, false) | mtp_property_malloc;
798 curr_prop = update_property(curr_prop, prop);
800 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
801 curr_prop &= ~mtp_property_nothrow;
/* indirect call target: cannot prove nothrow */
805 curr_prop &= ~mtp_property_nothrow;
808 /* real exception flow possible. */
809 curr_prop &= ~mtp_property_nothrow;
812 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
813 /* no need to search further */
818 if (curr_prop & mtp_property_malloc) {
819 /* Note that the malloc property means not only return newly allocated
820 * memory, but also that this memory is ALIAS FREE.
821 * To ensure that, we do NOT allow that the returned memory is somewhere
823 curr_prop &= check_stored_result(irg);
826 if (curr_prop != mtp_no_property
827 && (top || (curr_prop & mtp_temporary) == 0)) {
828 /* We use the temporary flag here to mark an optimistic result.
829 * Set the property only if we are sure that it does NOT base on
830 * temporary results OR if we are at top-level. */
831 add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
841 * When a function was detected as "const", it might be moved out of loops.
842 * This might be dangerous if the graph can contain endless loops.
/* Marks the graph with mtp_property_has_loop when its loop tree shows a
 * real loop; collect_const_and_pure_calls() later keeps such calls pinned. */
844 static void check_for_possible_endless_loops(ir_graph *irg)
846 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
848 ir_loop *root_loop = get_irg_loop(irg);
849 if (root_loop->flags & loop_outer_loop)
850 add_irg_additional_properties(irg, mtp_property_has_loop);
852 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
/* Entry point of the pass: detect nothrow/malloc properties, rewrite the
 * corresponding call sites, then detect const/pure properties and rewrite
 * those call sites too. NOTE(review): lines declaring `ctx` and freeing
 * ready_set/busy_set are missing from this chunk (gaps 876->878, 897->903). */
855 void optimize_funccalls(void)
857 /* prepare: mark all graphs as not analyzed */
858 size_t last_idx = get_irp_last_idx();
859 ready_set = rbitset_malloc(last_idx);
860 busy_set = rbitset_malloc(last_idx);
862 /* first step: detect, which functions are nothrow or malloc */
863 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
864 for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
865 ir_graph *irg = get_irp_irg(i);
866 unsigned prop = check_nothrow_or_malloc(irg, true);
868 if (prop & mtp_property_nothrow) {
869 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
870 } else if (prop & mtp_property_malloc) {
871 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
875 /* second step: remove exception edges: this must be done before the
876 detection of const and pure functions take place. */
878 handle_nothrow_Calls(&ctx);
/* reuse the bitsets for the second analysis round */
880 rbitset_clear_all(ready_set, last_idx);
881 rbitset_clear_all(busy_set, last_idx);
883 /* third step: detect, which functions are const or pure */
884 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
885 for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
886 ir_graph *irg = get_irp_irg(i);
887 unsigned prop = check_const_or_pure_function(irg, true);
889 if (prop & mtp_property_const) {
890 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
/* const calls may be hoisted later: flag graphs with loops as unsafe */
891 check_for_possible_endless_loops(irg);
892 } else if (prop & mtp_property_pure) {
893 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
897 handle_const_Calls(&ctx);
/* One-time module init: register this pass's debug channel. */
903 void firm_init_funccalls(void)
905 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
/* Wrap optimize_funccalls() as an ir_prog pass; "funccall" is the default name. */
908 ir_prog_pass_t *optimize_funccalls_pass(const char *name)
910 return def_prog_pass(name ? name : "funccall", optimize_funccalls);