/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Optimization of function calls.
 * @author  Michael Beck
 */
#include <stdlib.h>

#include "analyze_irg_args.h"
#include "dbginfo_t.h"
#include "iredges_t.h"
#include "irgraph_t.h"
#include "iroptimize.h"
#include "raw_bitset.h"
46 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
49 * The walker environment for updating function calls.
51 typedef struct env_t {
52 ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
53 ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
54 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
55 ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
56 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
59 /** Ready IRG's are marked in the ready set. */
60 static unsigned *ready_set;
62 /** IRG's that are in progress are marked here. */
63 static unsigned *busy_set;
66 * Walker: Collect all calls to const and pure functions
67 * to lists. Collect all Proj(Call) nodes into a Proj list.
69 static void collect_const_and_pure_calls(ir_node *node, void *env)
71 env_t *ctx = (env_t*)env;
76 /* set the link to NULL for all non-const/pure calls */
77 set_irn_link(call, NULL);
78 ir_node *ptr = get_Call_ptr(call);
79 if (!is_SymConst_addr_ent(ptr))
82 ir_entity *ent = get_SymConst_entity(ptr);
84 unsigned prop = get_entity_additional_properties(ent);
85 if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
88 /* ok, if we get here we found a call to a const or a pure function */
89 if (prop & mtp_property_pure) {
90 set_irn_link(call, ctx->pure_call_list);
91 ctx->pure_call_list = call;
93 if (prop & mtp_property_has_loop) {
94 set_irn_link(call, ctx->nonfloat_const_call_list);
95 ctx->nonfloat_const_call_list = call;
97 set_irn_link(call, ctx->float_const_call_list);
98 ctx->float_const_call_list = call;
101 } else if (is_Proj(node)) {
103 * Collect all memory and exception Proj's from
106 ir_node *call = get_Proj_pred(node);
110 /* collect the Proj's in the Proj list */
111 switch (get_Proj_proj(node)) {
113 case pn_Call_X_except:
114 case pn_Call_X_regular:
115 set_irn_link(node, ctx->proj_list);
116 ctx->proj_list = node;
125 * Fix the list of collected Calls.
127 * @param irg the graph that contained calls to pure functions
130 static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
132 bool exc_changed = false;
134 /* First step: fix all calls by removing their memory input and let
136 * The original memory input is preserved in their link fields. */
138 for (ir_node *call = ctx->float_const_call_list; call != NULL; call = next) {
139 next = (ir_node*)get_irn_link(call);
140 ir_node *mem = get_Call_mem(call);
142 set_irn_link(call, mem);
143 set_Call_mem(call, get_irg_no_mem(irg));
146 * Unfortunately we cannot simply set the node to 'float'.
147 * There is a reason for that:
149 * - The call might be inside a loop/if that is NOT entered
150 * and calls a endless function. Setting the call to float
151 * would allow to move it out from the loop/if causing this
152 * function be called even if the loop/if is not entered ...
154 * This could be fixed using post-dominators for calls and Pin nodes
155 * but need some more analyzes to ensure that a call that potential
156 * never returns is not executed before some code that generates
157 * observable states...
160 /* finally, this call can float */
161 set_irn_pinned(call, op_pin_state_floats);
162 hook_func_call(irg, call);
165 /* Last step: fix all Proj's */
166 for (ir_node *proj = ctx->proj_list; proj != NULL; proj = next) {
167 next = (ir_node*)get_irn_link(proj);
168 ir_node *call = get_Proj_pred(proj);
169 ir_node *mem = (ir_node*)get_irn_link(call);
171 /* beware of calls in the pure call list */
172 if (!mem || is_Call(mem))
174 assert(get_irn_mode(mem) == mode_M);
176 switch (get_Proj_proj(proj)) {
178 /* in dead code there might be cycles where proj == mem */
183 case pn_Call_X_except:
185 exchange(proj, new_r_Bad(irg, mode_X));
187 case pn_Call_X_regular: {
188 ir_node *block = get_nodes_block(call);
190 exchange(proj, new_r_Jmp(block));
199 /* ... including exception edges */
200 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
201 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
206 * Walker: Collect all calls to nothrow functions
207 * to lists. Collect all Proj(Call) nodes into a Proj list.
209 static void collect_nothrow_calls(ir_node *node, void *env)
211 env_t *ctx = (env_t*)env;
214 ir_node *call = node;
216 /* set the link to NULL for all non-const/pure calls */
217 set_irn_link(call, NULL);
218 ir_node *ptr = get_Call_ptr(call);
219 if (!is_SymConst_addr_ent(ptr))
222 ir_entity *ent = get_SymConst_entity(ptr);
224 unsigned prop = get_entity_additional_properties(ent);
225 if ((prop & mtp_property_nothrow) == 0)
228 /* ok, if we get here we found a call to a nothrow function */
229 set_irn_link(call, ctx->nothrow_call_list);
230 ctx->nothrow_call_list = call;
231 } else if (is_Proj(node)) {
233 * Collect all memory and exception Proj's from
236 ir_node *call = get_Proj_pred(node);
240 /* collect the Proj's in the Proj list */
241 switch (get_Proj_proj(node)) {
243 case pn_Call_X_except:
244 case pn_Call_X_regular:
245 set_irn_link(node, ctx->proj_list);
246 ctx->proj_list = node;
255 * Fix the list of collected nothrow Calls.
257 * @param irg the graph that contained calls to pure functions
258 * @param call_list the list of all call sites of const functions
259 * @param proj_list the list of all memory/exception Proj's of this call sites
261 static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list,
264 bool exc_changed = false;
266 /* First step: go through the list of calls and mark them. */
268 for (ir_node *call = call_list; call; call = next) {
269 next = (ir_node*)get_irn_link(call);
271 /* current_ir_graph is in memory anyway, so it's a good marker */
272 set_irn_link(call, ¤t_ir_graph);
273 hook_func_call(irg, call);
276 /* Second step: Remove all exception Proj's */
277 for (ir_node *proj = proj_list; proj; proj = next) {
278 next = (ir_node*)get_irn_link(proj);
279 ir_node *call = get_Proj_pred(proj);
281 /* handle only marked calls */
282 if (get_irn_link(call) != ¤t_ir_graph)
285 /* kill any exception flow */
286 switch (get_Proj_proj(proj)) {
287 case pn_Call_X_except:
289 exchange(proj, new_r_Bad(irg, mode_X));
291 case pn_Call_X_regular: {
292 ir_node *block = get_nodes_block(call);
294 exchange(proj, new_r_Jmp(block));
302 /* changes were done ... */
304 /* ... including exception edges */
305 clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
306 | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
311 #define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
312 #define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
313 #define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
314 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
315 #define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
318 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, bool top);
321 * Calculate the bigger property of two. Handle the temporary flag right.
323 static mtp_additional_properties max_property(mtp_additional_properties a,
324 mtp_additional_properties b)
326 mtp_additional_properties t = (a | b) & mtp_temporary;
330 if (a == mtp_no_property || b == mtp_no_property)
331 return mtp_no_property;
332 mtp_additional_properties r = a > b ? a : b;
337 * Follow the memory chain starting at node and determine
340 * @return mtp_property_const if only calls of const functions are detected
341 * mtp_property_pure if only Loads and const/pure calls detected
342 * mtp_no_property else
344 static mtp_additional_properties follow_mem_(ir_node *node)
346 mtp_additional_properties mode = mtp_property_const;
349 if (mode == mtp_no_property)
350 return mtp_no_property;
352 if (irn_visited_else_mark(node))
355 switch (get_irn_opcode(node)) {
357 node = get_Proj_pred(node);
366 /* do a dfs search */
367 for (int i = get_irn_arity(node) - 1; i >= 0; --i) {
368 mtp_additional_properties m = follow_mem_(get_irn_n(node, i));
369 mode = max_property(mode, m);
370 if (mode == mtp_no_property)
371 return mtp_no_property;
376 /* Beware volatile Loads are NOT allowed in pure functions. */
377 if (get_Load_volatility(node) == volatility_is_volatile)
378 return mtp_no_property;
379 mode = max_property(mode, mtp_property_pure);
380 node = get_Load_mem(node);
384 /* A call is only tolerable if its either constant or pure. */
385 ir_node *ptr = get_Call_ptr(node);
386 if (!is_SymConst_addr_ent(ptr))
387 return mtp_no_property;
389 ir_entity *ent = get_SymConst_entity(ptr);
390 ir_graph *irg = get_entity_irg(ent);
392 mtp_additional_properties m;
394 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
395 mode = max_property(mode, m);
397 /* we have a graph, analyze it. */
398 m = check_const_or_pure_function(irg, false);
399 mode = max_property(mode, m);
401 node = get_Call_mem(node);
406 return mtp_no_property;
412 * Follow the memory chain starting at node and determine
415 * @return mtp_property_const if only calls of const functions are detected
416 * mtp_property_pure if only Loads and const/pure calls detected
417 * mtp_no_property else
419 static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
421 mtp_additional_properties m = follow_mem_(node);
422 return max_property(mode, m);
426 * Check if a graph represents a const or a pure function.
428 * @param irg the graph to check
429 * @param top if set, this is the top call
431 static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, bool top)
433 ir_entity *entity = get_irg_entity(irg);
434 ir_type *type = get_entity_type(entity);
435 size_t n_params = get_method_n_params(type);
436 mtp_additional_properties may_be_const = mtp_property_const;
437 mtp_additional_properties prop = get_entity_additional_properties(entity);
439 /* libfirm handles aggregate parameters by passing around pointers to
440 * stuff in memory, so if we have compound parameters we are never const */
441 for (size_t i = 0; i < n_params; ++i) {
442 ir_type *param = get_method_param_type(type, i);
443 if (is_compound_type(param)) {
444 prop &= ~mtp_property_const;
445 may_be_const = mtp_no_property;
449 if (prop & mtp_property_const) {
450 /* already marked as a const function */
451 return mtp_property_const;
453 if (prop & mtp_property_pure) {
454 /* already marked as a pure function */
455 return mtp_property_pure;
458 if (IS_IRG_READY(irg)) {
459 /* already checked */
460 return mtp_no_property;
462 if (IS_IRG_BUSY(irg)) {
463 /* We are still evaluate this method.
464 * The function (indirectly) calls itself and thus may not terminate. */
465 return mtp_no_property;
469 ir_node *end = get_irg_end(irg);
470 ir_node *endbl = get_nodes_block(end);
473 ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
474 inc_irg_visited(irg);
475 /* mark the initial mem: recursion of follow_mem() stops here */
476 mark_irn_visited(get_irg_initial_mem(irg));
478 /* visit every Return */
479 for (int j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
480 ir_node *node = get_Block_cfgpred(endbl, j);
481 unsigned code = get_irn_opcode(node);
483 /* Bad nodes usually do NOT produce anything, so it's ok */
487 if (code == iro_Return) {
488 ir_node *mem = get_Return_mem(node);
490 /* Bad nodes usually do NOT produce anything, so it's ok */
494 if (mem != get_irg_initial_mem(irg))
495 prop = max_property(prop, follow_mem(mem, prop));
497 /* Exception found. Cannot be const or pure. */
498 prop = mtp_no_property;
501 if (prop == mtp_no_property)
505 if (prop != mtp_no_property) {
506 /* check, if a keep-alive exists */
507 for (int j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
508 ir_node *kept = get_End_keepalive(end, j);
510 if (is_Block(kept)) {
511 prop = mtp_no_property;
515 if (mode_M != get_irn_mode(kept))
518 prop = max_property(prop, follow_mem(kept, prop));
519 if (prop == mtp_no_property)
525 /* Set the property only if we are at top-level. */
526 if (prop != mtp_no_property) {
527 add_entity_additional_properties(entity, prop);
532 ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
537 * Handle calls to const functions.
541 static void handle_const_Calls(env_t *ctx)
543 /* all calls of const functions can be transformed */
544 size_t n = get_irp_n_irgs();
545 for (size_t i = 0; i < n; ++i) {
546 ir_graph *irg = get_irp_irg(i);
548 ctx->float_const_call_list = NULL;
549 ctx->nonfloat_const_call_list = NULL;
550 ctx->pure_call_list = NULL;
551 ctx->proj_list = NULL;
553 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
554 irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
555 fix_const_call_lists(irg, ctx);
556 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
558 confirm_irg_properties(irg,
559 IR_GRAPH_PROPERTIES_CONTROL_FLOW
560 | IR_GRAPH_PROPERTY_ONE_RETURN
561 | IR_GRAPH_PROPERTY_MANY_RETURNS);
566 * Handle calls to nothrow functions.
570 static void handle_nothrow_Calls(env_t *ctx)
572 /* all calls of const functions can be transformed */
573 size_t n = get_irp_n_irgs();
574 for (size_t i = 0; i < n; ++i) {
575 ir_graph *irg = get_irp_irg(i);
577 ctx->nothrow_call_list = NULL;
578 ctx->proj_list = NULL;
580 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
581 irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
583 if (ctx->nothrow_call_list)
584 fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
585 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
590 * Check, whether a given node represents a return value of
591 * a malloc like function (ie, new heap allocated memory).
593 * @param node the node to check
595 static bool is_malloc_call_result(const ir_node *node)
597 if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
598 /* Firm style high-level allocation */
601 /* TODO: check mtp_malloc */
606 * Update a property depending on a call property.
608 static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
610 mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
611 mtp_additional_properties r = orig_prop & call_prop;
616 * Check if a node is stored.
618 static bool is_stored(const ir_node *n)
622 foreach_out_edge(n, edge) {
623 const ir_node *succ = get_edge_src_irn(edge);
625 switch (get_irn_opcode(succ)) {
632 if (get_Store_value(succ) == n)
634 /* ok if its only the address input */
643 ptr = get_Call_ptr(succ);
644 if (is_SymConst_addr_ent(ptr)) {
645 ir_entity *ent = get_SymConst_entity(ptr);
648 /* we know the called entity */
649 for (i = get_Call_n_params(succ); i > 0;) {
650 if (get_Call_param(succ, --i) == n) {
651 /* n is the i'th param of the call */
652 if (get_method_param_access(ent, i) & ptr_access_store) {
653 /* n is store in ent */
659 /* unknown call address */
664 /* bad, potential alias */
672 * Check that the return value of an irg is not stored anywhere.
674 * return ~mtp_property_malloc if return values are stored, ~0 else
676 static mtp_additional_properties check_stored_result(ir_graph *irg)
678 ir_node *end_blk = get_irg_end_block(irg);
679 mtp_additional_properties res = ~mtp_no_property;
681 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
683 for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
684 ir_node *pred = get_Block_cfgpred(end_blk, i);
686 if (! is_Return(pred))
688 for (size_t j = get_Return_n_ress(pred); j > 0;) {
689 const ir_node *irn = get_Return_res(pred, --j);
691 if (is_stored(irn)) {
692 /* bad, might create an alias */
693 res = ~mtp_property_malloc;
699 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
704 * Check if a graph represents a nothrow or a malloc function.
706 * @param irg the graph to check
707 * @param top if set, this is the top call
709 static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, bool top)
711 mtp_additional_properties curr_prop
712 = mtp_property_malloc | mtp_property_nothrow;
714 ir_entity *ent = get_irg_entity(irg);
715 if (IS_IRG_READY(irg)) {
716 /* already checked */
717 return get_entity_additional_properties(ent);
719 if (IS_IRG_BUSY(irg)) {
720 /* we are still evaluate this method. Be optimistic,
721 return the best possible so far but mark the result as temporary. */
722 return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
726 ir_type *mtp = get_entity_type(ent);
727 if (get_method_n_ress(mtp) <= 0)
728 curr_prop &= ~mtp_property_malloc;
730 ir_node *end_blk = get_irg_end_block(irg);
731 for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
732 ir_node *pred = get_Block_cfgpred(end_blk, i);
734 if (is_Return(pred)) {
735 if (curr_prop & mtp_property_malloc) {
736 /* check, if malloc is called here */
737 for (size_t j = get_Return_n_ress(pred); j > 0;) {
738 ir_node *res = get_Return_res(pred, --j);
740 /* skip Confirms and Casts */
741 res = skip_HighLevel_ops(res);
744 res = get_Proj_pred(res);
745 if (is_malloc_call_result(res)) {
746 /* ok, this is a malloc */
747 } else if (is_Call(res)) {
748 ir_node *ptr = get_Call_ptr(res);
750 if (is_SymConst_addr_ent(ptr)) {
752 ir_entity *ent = get_SymConst_entity(ptr);
753 ir_graph *callee = get_entity_irg(ent);
756 /* A self-recursive call. The property did not depend on this call. */
757 } else if (callee != NULL) {
758 mtp_additional_properties prop = check_nothrow_or_malloc(callee, false);
759 curr_prop = update_property(curr_prop, prop);
761 curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
765 curr_prop &= ~mtp_property_malloc;
768 /* unknown return value */
769 curr_prop &= ~mtp_property_malloc;
773 } else if (curr_prop & mtp_property_nothrow) {
774 /* exception flow detected */
775 pred = skip_Proj(pred);
778 ir_node *ptr = get_Call_ptr(pred);
780 if (is_SymConst_addr_ent(ptr)) {
782 ir_entity *ent = get_SymConst_entity(ptr);
783 ir_graph *callee = get_entity_irg(ent);
786 /* A self-recursive call. The property did not depend on this call. */
787 } else if (callee != NULL) {
788 /* Note: we check here for nothrow only, so do NOT reset the malloc property */
789 mtp_additional_properties prop = check_nothrow_or_malloc(callee, false) | mtp_property_malloc;
790 curr_prop = update_property(curr_prop, prop);
792 if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
793 curr_prop &= ~mtp_property_nothrow;
797 curr_prop &= ~mtp_property_nothrow;
800 /* real exception flow possible. */
801 curr_prop &= ~mtp_property_nothrow;
804 if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
805 /* no need to search further */
810 if (curr_prop & mtp_property_malloc) {
811 /* Note that the malloc property means not only return newly allocated
812 * memory, but also that this memory is ALIAS FREE.
813 * To ensure that, we do NOT allow that the returned memory is somewhere
815 curr_prop &= check_stored_result(irg);
818 if (curr_prop != mtp_no_property
819 && (top || (curr_prop & mtp_temporary) == 0)) {
820 /* We use the temporary flag here to mark an optimistic result.
821 * Set the property only if we are sure that it does NOT base on
822 * temporary results OR if we are at top-level. */
823 add_entity_additional_properties(ent, curr_prop & ~mtp_temporary);
833 * When a function was detected as "const", it might be moved out of loops.
834 * This might be dangerous if the graph can contain endless loops.
836 static void check_for_possible_endless_loops(ir_graph *irg)
838 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
840 ir_loop *root_loop = get_irg_loop(irg);
841 if (root_loop->flags & loop_outer_loop) {
842 ir_entity *ent = get_irg_entity(irg);
843 add_entity_additional_properties(ent, mtp_property_has_loop);
846 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
849 void optimize_funccalls(void)
851 /* prepare: mark all graphs as not analyzed */
852 size_t last_idx = get_irp_last_idx();
853 ready_set = rbitset_malloc(last_idx);
854 busy_set = rbitset_malloc(last_idx);
856 /* first step: detect, which functions are nothrow or malloc */
857 DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
858 for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
859 ir_graph *irg = get_irp_irg(i);
860 unsigned prop = check_nothrow_or_malloc(irg, true);
862 if (prop & mtp_property_nothrow) {
863 DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
864 } else if (prop & mtp_property_malloc) {
865 DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
869 /* second step: remove exception edges: this must be done before the
870 detection of const and pure functions take place. */
872 handle_nothrow_Calls(&ctx);
874 rbitset_clear_all(ready_set, last_idx);
875 rbitset_clear_all(busy_set, last_idx);
877 /* third step: detect, which functions are const or pure */
878 DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
879 for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
880 ir_graph *irg = get_irp_irg(i);
881 unsigned prop = check_const_or_pure_function(irg, true);
883 if (prop & mtp_property_const) {
884 DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
885 check_for_possible_endless_loops(irg);
886 } else if (prop & mtp_property_pure) {
887 DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
891 handle_const_Calls(&ctx);
897 void firm_init_funccalls(void)
899 FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
902 ir_prog_pass_t *optimize_funccalls_pass(const char *name)
904 return def_prog_pass(name ? name : "funccall", optimize_funccalls);