3 * File name: ir/opt/funccall.c
4 * Purpose: optimization of function calls
8 * Copyright: (c) 1998-2006 Universität Karlsruhe
9 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
12 #include "irgraph_t.h"
16 #include "dbginfo_t.h"
23 * The walker environment for rem_mem_from_const_fkt_calls
 *
 * NOTE(review): the opening and closing lines of this declaration are elided
 * in this extract; the typedef presumably closes with "} env_t;" — confirm
 * against the full file.
25 typedef struct _env_t {
26 int n_calls_removed_SymConst; /**< Number of const/pure calls found via a direct SymConst address. */
27 int n_calls_removed_Sel; /**< Number of const/pure calls found via callee-info (indirect/Sel) analysis. */
28 ir_node *const_call_list; /**< The list of all const function calls that will be changed. */
29 ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
30 ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
34 * Collect all calls to const and pure functions
35 * to lists. Collect all Proj(Call) nodes into a Proj list.
 *
 * Walker callback (env is the env_t context).  Calls whose callee is known
 * to be const or pure are threaded onto ctx->const_call_list or
 * ctx->pure_call_list using the nodes' link fields as an intrusive list;
 * memory/exception Projs of Calls are threaded onto ctx->proj_list the same
 * way.
 *
 * NOTE(review): a number of original source lines are elided in this
 * extract (declarations, braces, continue/return statements), so the
 * control flow shown below is incomplete.
37 static void collect_calls(ir_node *node, void *env)
47 /* set the link to NULL for all non-const/pure calls */
48 set_irn_link(call, NULL);
49 ptr = get_Call_ptr(call);
 /* Case 1: the callee address is a direct SymConst entity reference. */
50 if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
51 ent = get_SymConst_entity(ptr);
53 mode = get_entity_additional_properties(ent);
54 if ((mode & (mtp_property_const|mtp_property_pure)) == 0)
56 ++ctx->n_calls_removed_SymConst;
 /* Case 2: indirect call; usable only under the closed-world assumption
    with consistent callee information. */
57 } else if (get_opt_closed_world() &&
59 get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
60 /* If all possible callees are const functions, we can remove the memory edge. */
61 int i, n_callees = get_Call_n_callees(call);
63 /* This is kind of strange: dying code or a Call that will raise an exception
64 when executed as there is no implementation to call. So better not
68 /* note that const function are a subset of pure ones */
 /* Intersect the properties of all possible callees; only properties shared
    by every callee survive. */
69 mode = mtp_property_const | mtp_property_pure;
70 for (i = 0; i < n_callees; ++i) {
71 ent = get_Call_callee(call, i);
72 if (ent == unknown_entity) {
73 /* we don't know which entity is called here */
76 mode &= get_entity_additional_properties(ent);
80 ++ctx->n_calls_removed_Sel;
84 /* ok, if we get here we found a call to a const or a pure function */
85 if (mode & mtp_property_pure) {
86 set_irn_link(call, ctx->pure_call_list);
87 ctx->pure_call_list = call;
 /* else: const call (const implies pure, but the const list gets the
    stronger treatment) */
89 set_irn_link(call, ctx->const_call_list);
90 ctx->const_call_list = call;
92 } else if (is_Proj(node)) {
94 * Collect all memory and exception Proj's from
97 call = get_Proj_pred(node);
101 /* collect the Proj's in the Proj list */
102 switch (get_Proj_proj(node)) {
103 case pn_Call_M_regular:
104 case pn_Call_X_except:
105 case pn_Call_M_except:
106 set_irn_link(node, ctx->proj_list);
107 ctx->proj_list = node;
113 } /* collect_calls */
116 * Fix the list of collected Calls.
 *
 * Removes the memory input of every collected const Call (replacing it with
 * NoMem) and reroutes/removes the corresponding memory and exception Projs.
 * The Call's original memory input is stashed in its link field so the Proj
 * fixup below can find it.
 *
118 * @param irg the graph that contained calls to pure functions
119 * @param call_list the list of all call sites of const functions
120 * @param proj_list the list of all memory/exception Proj's of this call sites
 *
 * NOTE(review): several original source lines are elided in this extract.
122 static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
123 ir_node *call, *next, *mem, *proj;
125 ir_graph *rem = current_ir_graph;
127 current_ir_graph = irg;
129 /* First step: fix all calls by removing their memory input.
130 Their original memory input is preserved in their link fields. */
131 for (call = call_list; call; call = next) {
132 next = get_irn_link(call);
133 mem = get_Call_mem(call);
135 set_irn_link(call, mem);
136 set_Call_mem(call, get_irg_no_mem(irg));
139 * Unfortunately we cannot simply set the node to 'float'.
140 * There is a reason for that:
142 * - The call might be inside a loop/if that is NOT entered
143 * and calls an endless function. Setting the call to float
144 * would allow to move it out from the loop/if causing this
145 * function be called even if the loop/if is not entered ...
147 * This could be fixed using post-dominators for calls and Pin nodes
148 * but needs some more analysis to ensure that a call that potentially
149 * never returns is not executed before some code that generates
150 * observable states...
153 /* finally, this call can float
154 set_irn_pinned(call, op_pin_state_floats); */
155 hook_func_call(irg, call);
158 /* Second step: fix all Proj's */
159 for (proj = proj_list; proj; proj = next) {
160 next = get_irn_link(proj);
161 call = get_Proj_pred(proj);
 /* the const Call's original memory input, stashed above */
162 mem = get_irn_link(call);
164 /* beware of calls in the pure call list */
 /* (pure calls still carry a NULL or Call link, not a memory node) */
165 if (! mem || get_irn_op(mem) == op_Call)
167 assert(get_irn_mode(mem) == mode_M);
169 switch (get_Proj_proj(proj)) {
170 case pn_Call_M_regular: {
171 /* in dead code there might be cycles where proj == mem */
175 case pn_Call_X_except:
176 case pn_Call_M_except:
 /* const functions cannot raise exceptions: kill these Projs */
178 exchange(proj, get_irg_bad(irg));
185 /* changes were done ... */
186 set_irg_outs_inconsistent(irg);
187 set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
190 /* ... including exception edges */
191 set_irg_doms_inconsistent(irg);
193 current_ir_graph = rem;
194 } /* fix_const_call_list */
198 * Check if a graph represents a const function.
 *
 * Inspects every Return's memory input and every keep-alive of mode_M:
 * if none differs from the graph's initial memory, the function does not
 * touch memory and is marked mtp_property_const.
 *
200 * @param irg the graph
 *
 * @return non-zero if irg is (or was already marked) const — presumably;
 *         the actual return statements are on elided lines, confirm in the
 *         full file.
202 static int is_const_function(ir_graph *irg)
204 ir_node *end, *endbl;
207 if (get_irg_additional_properties(irg) & mtp_property_const) {
208 /* already marked as a const function */
212 end = get_irg_end(irg);
213 endbl = get_nodes_block(end);
216 /* visit every Return */
217 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
218 ir_node *node = get_Block_cfgpred(endbl, j);
219 ir_op *op = get_irn_op(node);
222 /* Bad nodes usually do NOT produce anything, so it's ok */
226 if (op == op_Return) {
227 mem = get_Return_mem(node);
229 /* Bad nodes usually do NOT produce anything, so it's ok */
 /* memory differing from the initial memory means a state change */
233 change = mem != get_irg_initial_mem(irg);
238 /* exception found */
245 /* check, if a keep-alive exists */
246 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
247 ir_node *mem = get_End_keepalive(end, j);
 /* only memory keep-alives are of interest here */
249 if (mode_M != get_irn_mode(mem))
252 change = mem != get_irg_initial_mem(irg);
259 /* no memory changes found, it's a const function */
260 set_irg_additional_property(irg, mtp_property_const);
264 } /* is_const_function */
 /* Marking scheme: a graph's link field holds MARK while the graph still
    awaits analysis by is_pure_function(); NULL once analyzed.
    NOTE(review): the definition of MARK itself is on an elided line. */
271 #define UNMARK_IRG(irg) set_irg_link((irg), NULL)
272 #define MARK_IRG(irg) set_irg_link((irg), MARK)
273 #define IS_IRG_MARKED(irg) (get_irg_link(irg) == MARK)
 /* forward declaration: _follow_mem() and is_pure_function() are mutually
    recursive */
276 static int is_pure_function(ir_graph *irg);
/**
 * Maximum of two values.
 *
 * NOTE(review): the original expansion "(a) > (b) ? (a) : (b)" lacked outer
 * parentheses, so an embedding such as "1 + UMAX(a, b)" or "UMAX(a, b) == c"
 * would parse incorrectly due to the low precedence of ?:.  The current call
 * sites ("mode = UMAX(mode, m);" and "return UMAX(mode, m);") happen to be
 * safe, so this fix is behavior-compatible.  Arguments are still evaluated
 * twice — avoid side-effecting arguments.
 */
#define UMAX(a,b) ((a) > (b) ? (a) : (b))
281 * Follow the memory chain starting at node and determine
 * the strongest property the chain still permits.
 *
284 * @return mtp_property_const if only calls of const functions are detected
285 * mtp_property_pure if only Loads and const/pure
 *
 * Walks backwards over the memory chain (iteratively via the trailing
 * "node = ..." assignments, recursively at memory Phis/Syncs), using the
 * visited flag to cut cycles.  The starting assumption is const and is
 * weakened as Loads/Calls are encountered.
 *
 * NOTE(review): several original lines (case labels, braces, returns) are
 * elided in this extract, so the switch structure is incomplete as shown.
289 static unsigned _follow_mem(ir_node *node) {
290 unsigned m, mode = mtp_property_const;
295 if (irn_visited(node))
298 mark_irn_visited(node);
300 switch (get_irn_opcode(node)) {
 /* Proj: step over to its predecessor */
302 node = get_Proj_pred(node);
 /* Phi/Sync (presumably): intersect the properties of all predecessors */
311 for (i = get_irn_arity(node) - 1; i >= 0; --i) {
312 mode &= _follow_mem(get_irn_n(node, i));
317 /* Beware volatile Loads are NOT allowed in pure functions */
318 if (get_Load_volatility(node) == volatility_is_volatile)
 /* a non-volatile Load demotes the result from const to pure */
320 mode = mtp_property_pure;
321 node = get_Load_mem(node);
325 /* a call is only tolerable if it is either const or pure */
326 ptr = get_Call_ptr(node);
327 if (get_irn_op(ptr) == op_SymConst &&
328 get_SymConst_kind(ptr) == symconst_addr_ent) {
329 entity *ent = get_SymConst_entity(ptr);
330 ir_graph *irg = get_entity_irg(ent);
332 if (irg == current_ir_graph) {
333 /* A recursive call. The mode does not depend on this call. */
335 else if (irg == NULL) {
 /* external entity: trust its declared properties */
336 m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
339 mode = UMAX(mode, m);
341 else if (irg != NULL) {
342 /* we have a graph. Check if it is already analyzed */
343 if (IS_IRG_MARKED(irg))
344 (void)is_pure_function(irg);
346 m = get_irg_additional_properties(irg) & (mtp_property_const|mtp_property_pure);
349 mode = UMAX(mode, m);
 /* continue along the call's memory input */
354 node = get_Call_mem(node);
364 * Follow the memory chain starting at node and determine
 * the resulting property, combined with an already-accumulated mode.
 *
367 * @return mtp_property_const if only calls of const functions are detected
368 * mtp_property_pure if only Loads and const/pure
 *
 * Wrapper around _follow_mem(): resets the visited flags and marks the
 * initial memory so that the recursion terminates there.
372 static unsigned follow_mem(ir_graph *irg, ir_node *node, unsigned mode) {
373 inc_irg_visited(irg) is on the next line; declarations are elided. */
375 inc_irg_visited(irg);
376 /* mark the initial mem: recursion stops here */
377 mark_irn_visited(get_irg_initial_mem(irg));
378 m = _follow_mem(node);
 /* combine with the caller's accumulated mode */
381 return UMAX(mode, m);
385 * Check if a graph represents a pure function.
 *
 * If the graph is not yet analyzed (still marked), its Returns and memory
 * keep-alives are traced with follow_mem() and the resulting property
 * (const/pure/none) is stored on the graph.
 *
387 * @param irg the graph
 *
 * @return the detected mtp_property_* mask (mtp_property_const,
 *         mtp_property_pure, or neither).
389 static int is_pure_function(ir_graph *irg) {
390 ir_node *end, *endbl;
392 unsigned mode = get_irg_additional_properties(irg);
393 ir_graph *rem = current_ir_graph;
395 if (mode & mtp_property_const) {
396 /* already marked as a const function */
397 return mtp_property_const;
399 if (mode & mtp_property_pure) {
400 /* already marked as a pure function */
 /* FIX(review): originally returned mtp_property_const here, which
    mis-reported already-marked pure functions as const to callers. */
401 return mtp_property_pure;
404 if (! IS_IRG_MARKED(irg))
408 end = get_irg_end(irg);
409 endbl = get_nodes_block(end);
 /* start optimistic: assume const, weaken while following memory */
410 mode = mtp_property_const;
412 current_ir_graph = irg;
414 /* visit every Return */
415 for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
416 ir_node *node = get_Block_cfgpred(endbl, j);
417 ir_op *op = get_irn_op(node);
420 /* Bad nodes usually do NOT produce anything, so it's ok */
424 if (op == op_Return) {
425 mem = get_Return_mem(node);
427 /* Bad nodes usually do NOT produce anything, so it's ok */
431 if (mem != get_irg_initial_mem(irg))
432 mode = follow_mem(irg, mem, mode);
435 /* exception found. */
436 mode = follow_mem(irg, node, mode);
444 /* check, if a keep-alive exists */
445 for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
446 ir_node *mem = get_End_keepalive(end, j);
 /* only memory keep-alives are of interest here */
448 if (mode_M != get_irn_mode(mem))
451 mode = follow_mem(irg, mem, mode);
 /* record the result on the graph for later queries */
458 set_irg_additional_property(irg, mode);
459 current_ir_graph = rem;
461 } /* is_pure_function */
464 * Handle calls to const functions.
 *
 * For every graph in the program: collect const/pure call sites and their
 * Projs with collect_calls(), then rewrite the const calls with
 * fix_const_call_list().  The removal counters in ctx are reset first.
 *
 * NOTE(review): in the visible lines ctx->pure_call_list is collected but
 * never consumed here — possibly handled on an elided line, confirm in the
 * full file.
466 static void handle_const_Calls(env_t *ctx)
470 ctx->n_calls_removed_SymConst = 0;
471 ctx->n_calls_removed_Sel = 0;
473 /* all calls of const functions can be transformed */
474 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
475 ir_graph *irg = get_irp_irg(i);
477 ctx->const_call_list = NULL;
478 ctx->pure_call_list = NULL;
479 ctx->proj_list = NULL;
480 irg_walk_graph(irg, NULL, collect_calls, ctx);
482 if (ctx->const_call_list)
483 fix_const_call_list(irg, ctx->const_call_list, ctx->proj_list);
485 } /* handle_const_Calls */
488 * optimize function calls by handling const functions
 *
 * Entry point: marks all graphs as unanalyzed, classifies each one with
 * is_pure_function(), then (if any const graph was found, or force_run is
 * set) rewrites the call sites via handle_const_Calls().
 *
 * @param force_run  if non-zero, run the call-site rewrite even when no
 *                   const graph was detected
490 void optimize_funccalls(int force_run)
493 unsigned num_const = 0;
494 unsigned num_pure = 0;
 /* optimization disabled: nothing to do */
496 if (! get_opt_function_call())
499 /* prepare: mark all graphs as not analyzed */
500 n = get_irp_n_irgs();
501 for (i = n - 1; i >= 0; --i)
502 MARK_IRG(get_irp_irg(i));
504 /* first step: detect, which functions are const, i.e. do NOT touch any memory */
505 for (i = n - 1; i >= 0; --i) {
506 ir_graph *irg = get_irp_irg(i);
507 unsigned mode = is_pure_function(irg);
 /* the counter increments are on elided lines (presumably ++num_const /
    ++num_pure) — confirm in the full file */
509 if (mode & mtp_property_const)
511 else if (mode & mtp_property_pure)
515 if (force_run || num_const > 0) {
518 handle_const_Calls(&ctx);
519 if (get_firm_verbosity()) {
 /* NOTE(review): "%d" is used for unsigned counters; "%u" would be the
    matching conversion specifier */
520 printf("Detected %d graphs without side effects.\n", num_const);
521 printf("Optimizes %d(SymConst) + %d(Sel) calls to const/pure functions.\n",
522 ctx.n_calls_removed_SymConst, ctx.n_calls_removed_Sel);
526 if (get_firm_verbosity()) {
527 printf("No graphs without side effects detected\n");
530 } /* optimize_funccalls */