X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;ds=sidebyside;f=ir%2Fopt%2Ffunccall.c;h=e379ddf8336ce911e57faf7e24daa5b7cadc6314;hb=aa834ed8cb6e39e4f25f1f6f6a3ba5e2b559cb64;hp=cb8e8c5305d2ef116f87c832be094f7b02ef9c96;hpb=2bc6f5874f3917d2f1ed8b268c8082d0b44ec03a;p=libfirm diff --git a/ir/opt/funccall.c b/ir/opt/funccall.c index cb8e8c530..e379ddf83 100644 --- a/ir/opt/funccall.c +++ b/ir/opt/funccall.c @@ -1,233 +1,905 @@ /* - * Project: libFIRM - * File name: ir/opt/funccall.c - * Purpose: optimization of function calls - * Author: Michael Beck - * Created: - * CVS-ID: $Id$ - * Copyright: (c) 1998-2004 Universit�t Karlsruhe - * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE. + * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved. + * + * This file is part of libFirm. + * + * This file may be distributed and/or modified under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation and appearing in the file LICENSE.GPL included in the + * packaging of this file. + * + * Licensees holding valid libFirm Professional Edition licenses may use + * this file in accordance with the libFirm Commercial License. + * Agreement provided with the Software. + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. + */ + +/** + * @file + * @brief Optimization of function calls. 
+ * @author Michael Beck */ +#include "config.h" + +#include "opt_init.h" +#include + #include "irnode_t.h" #include "irgraph_t.h" #include "irgmod.h" #include "irgwalk.h" -#include "irvrfy.h" #include "dbginfo_t.h" #include "irflag_t.h" +#include "irloop_t.h" #include "ircons.h" -#include "funccall.h" +#include "iredges_t.h" +#include "irpass_t.h" +#include "iroptimize.h" +#include "analyze_irg_args.h" #include "irhooks.h" +#include "raw_bitset.h" +#include "debug.h" + +DEBUG_ONLY(static firm_dbg_module_t *dbg;) /** - * The walker environment for rem_mem_from_const_fkt_calls + * The walker environment for updating function calls. */ -typedef struct _env_t { - int changed; /**< flag, is set if a graph was changed */ - int n_calls_removed_SymConst; - int n_calls_removed_Sel; +typedef struct env_t { + ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */ + ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */ + ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */ + ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */ + ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */ } env_t; +/** Ready IRG's are marked in the ready set. */ +static unsigned *ready_set; + +/** IRG's that are in progress are marked here. */ +static unsigned *busy_set; + /** - * remove memory from const function calls by rerouting - * it's ProjM and connection the call with a NoMem node. + * Walker: Collect all calls to const and pure functions + * to lists. Collect all Proj(Call) nodes into a Proj list. 
+ */ +static void collect_const_and_pure_calls(ir_node *node, void *env) +{ + env_t *ctx = (env_t*)env; + + if (is_Call(node)) { + ir_node *call = node; + + /* set the link to NULL for all non-const/pure calls */ + set_irn_link(call, NULL); + ir_node *ptr = get_Call_ptr(call); + if (!is_SymConst_addr_ent(ptr)) + return; + + ir_entity *ent = get_SymConst_entity(ptr); + + unsigned prop = get_entity_additional_properties(ent); + if ((prop & (mtp_property_const|mtp_property_pure)) == 0) + return; + + /* ok, if we get here we found a call to a const or a pure function */ + if (prop & mtp_property_pure) { + set_irn_link(call, ctx->pure_call_list); + ctx->pure_call_list = call; + } else { + if (prop & mtp_property_has_loop) { + set_irn_link(call, ctx->nonfloat_const_call_list); + ctx->nonfloat_const_call_list = call; + } else { + set_irn_link(call, ctx->float_const_call_list); + ctx->float_const_call_list = call; + } + } + } else if (is_Proj(node)) { + /* + * Collect all memory and exception Proj's from + * calls. + */ + ir_node *call = get_Proj_pred(node); + if (!is_Call(call)) + return; + + /* collect the Proj's in the Proj list */ + switch (get_Proj_proj(node)) { + case pn_Call_M: + case pn_Call_X_except: + case pn_Call_X_regular: + set_irn_link(node, ctx->proj_list); + ctx->proj_list = node; + break; + default: + break; + } + } +} + +/** + * Fix the list of collected Calls. * - * Note: By "const function" we understand a function that did neither - * read nor write memory. Hence its result depends solely on its - * arguments. 
- */ -static void rem_mem_from_const_fkt_calls(ir_node *node, void *env) -{ - env_t *ctx = env; - ir_node *call, *ptr, *mem; - entity *ent; - - if (get_irn_op(node) == op_Call) { - call = node; - - set_irn_link(call, NULL); - - ptr = get_Call_ptr(call); - if (get_irn_op(ptr) == op_SymConst && get_SymConst_kind(ptr) == symconst_addr_ent) { - ent = get_SymConst_entity(ptr); - - if ((get_entity_additional_properties(ent) & mtp_property_const) == 0) - return; - ++ctx->n_calls_removed_SymConst; - } - else if (is_Sel(ptr) && - get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) { - /* If all possible callees are real functions, we can remove the memory edge. */ - int i, n_callees = get_Call_n_callees(call); - if (n_callees == 0) - /* This is kind of strange: dying code or a Call that will raise an exception - when executed as there is no implementation to call. So better not - optimize. */ - return; - for (i = 0; i < n_callees; ++i) { - ent = get_Call_callee(call, i); - if (ent == unknown_entity) { - /* we don't know which entity is called here */ - return; - } - if ((get_entity_additional_properties(ent) & mtp_property_const) == 0) - return; - } - ++ctx->n_calls_removed_Sel; - } - else - return; - - /* ok, if we get here we found a call to a const function, - * route the NoMem node to the call */ - mem = get_Call_mem(call); - - set_irn_link(call, mem); - set_Call_mem(call, new_r_NoMem(current_ir_graph)); - - /* finally, this call can float */ - set_irn_pinned(call, op_pin_state_floats); - - hook_func_call(current_ir_graph, call); - - ctx->changed = 1; - } - else if (get_irn_op(node) == op_Proj) { - /* - * Remove memory and exception Proj's from - * const function calls. 
- */ - call = get_Proj_pred(node); - if ((get_irn_op(call) != op_Call) || - (get_irn_op(get_Call_mem(call)) != op_NoMem)) - return; - - switch (get_Proj_proj(node)) { - case pn_Call_M_regular: { - ir_node *old_mem = get_irn_link(call); - if (old_mem) { - exchange(node, old_mem); - ctx->changed = 1; - } - } break; - case pn_Call_X_except: - case pn_Call_M_except: - exchange(node, new_Bad()); - ctx->changed = 1; - break; - default: ; - } - } + * @param irg the graph that contained calls to pure functions + * @param ctx context + */ +static void fix_const_call_lists(ir_graph *irg, env_t *ctx) +{ + bool exc_changed = false; + + /* First step: fix all calls by removing their memory input and let + * them floating. + * The original memory input is preserved in their link fields. */ + ir_node *next; + for (ir_node *call = ctx->float_const_call_list; call != NULL; call = next) { + next = (ir_node*)get_irn_link(call); + ir_node *mem = get_Call_mem(call); + + set_irn_link(call, mem); + set_Call_mem(call, get_irg_no_mem(irg)); + + /* + * Unfortunately we cannot simply set the node to 'float'. + * There is a reason for that: + * + * - The call might be inside a loop/if that is NOT entered + * and calls a endless function. Setting the call to float + * would allow to move it out from the loop/if causing this + * function be called even if the loop/if is not entered ... + * + * This could be fixed using post-dominators for calls and Pin nodes + * but need some more analyzes to ensure that a call that potential + * never returns is not executed before some code that generates + * observable states... 
+ */ + + /* finally, this call can float */ + set_irn_pinned(call, op_pin_state_floats); + hook_func_call(irg, call); + } + + /* Last step: fix all Proj's */ + for (ir_node *proj = ctx->proj_list; proj != NULL; proj = next) { + next = (ir_node*)get_irn_link(proj); + ir_node *call = get_Proj_pred(proj); + ir_node *mem = (ir_node*)get_irn_link(call); + + /* beware of calls in the pure call list */ + if (!mem || is_Call(mem)) + continue; + assert(get_irn_mode(mem) == mode_M); + + switch (get_Proj_proj(proj)) { + case pn_Call_M: { + /* in dead code there might be cycles where proj == mem */ + if (proj != mem) + exchange(proj, mem); + break; + } + case pn_Call_X_except: + exc_changed = true; + exchange(proj, new_r_Bad(irg, mode_X)); + break; + case pn_Call_X_regular: { + ir_node *block = get_nodes_block(call); + exc_changed = true; + exchange(proj, new_r_Jmp(block)); + break; + } + default: + break; + } + } + + if (exc_changed) { + /* ... including exception edges */ + clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE + | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO); + } } -/* - * optimize function calls by handling const functions - */ -void optimize_funccalls(int force_run) -{ - int i, j; - int change; - unsigned num_pure = 0; - - if (! get_opt_real_function_call()) - return; - - /* first step: detect, which functions are const, i.e. 
do NOT touch any memory */ - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { - ir_graph *irg = get_irp_irg(i); - ir_node *end = get_irg_end(irg); - ir_node *endbl = get_nodes_block(end); - - change = 0; - - if (get_irg_additional_properties(irg) & mtp_property_const) { - /* already marked as a const function */ - ++num_pure; - } - else { - /* visit every Return */ - for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) { - ir_node *node = get_Block_cfgpred(endbl, j); - ir_op *op = get_irn_op(node); - ir_node *mem; - - /* Bad nodes usually do NOT produce anything, so it's ok */ - if (op == op_Bad) - continue; - - if (op == op_Return) { - mem = get_Return_mem(node); - - /* Bad nodes usually do NOT produce anything, so it's ok */ - if (is_Bad(mem)) - continue; - - change = mem != get_irg_initial_mem(irg); - if (change) - break; - } - else { - /* exception found */ - change = 1; - break; - } - } - - if (! change) { - /* check, if a keep-alive exists */ - for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) { - ir_node *mem = get_End_keepalive(end, j); - - if (mode_M != get_irn_mode(mem)) - continue; - - change = mem != get_irg_initial_mem(irg); - if (change) - break; - } - } - - if (! 
change) { - /* no memory changes found, it's a const function */ - set_irg_additional_property(irg, mtp_property_const); - ++num_pure; - } - } - } - - if (force_run || num_pure > 0) { - env_t ctx; - - ctx.n_calls_removed_SymConst = 0; - ctx.n_calls_removed_Sel = 0; - - /* all calls of pure functions can be transformed into FuncCalls */ - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { - ir_graph *irg = get_irp_irg(i); - - /* no need to do this on const functions */ - if ((get_irg_additional_properties(irg) & mtp_property_const) == 0) { - ctx.changed = 0; - irg_walk_graph(irg, NULL, rem_mem_from_const_fkt_calls, &ctx); - - if (ctx.changed) { - /* changes were done including exception edges */ - set_irg_outs_inconsistent(irg); - set_irg_doms_inconsistent(irg); - set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent); - } - } - } - - if (get_firm_verbosity()) { - printf("Detected %d graphs without side effects.\n", num_pure); - printf("Optimizes %d(SymConst) + %d(Sel) calls to const functions.\n", - ctx.n_calls_removed_SymConst, ctx.n_calls_removed_Sel); - } - } - else { - if (get_firm_verbosity()) { - printf("No graphs without side effects detected\n"); - } - } +/** + * Walker: Collect all calls to nothrow functions + * to lists. Collect all Proj(Call) nodes into a Proj list. 
+ */ +static void collect_nothrow_calls(ir_node *node, void *env) +{ + env_t *ctx = (env_t*)env; + + if (is_Call(node)) { + ir_node *call = node; + + /* set the link to NULL for all non-const/pure calls */ + set_irn_link(call, NULL); + ir_node *ptr = get_Call_ptr(call); + if (!is_SymConst_addr_ent(ptr)) + return; + + ir_entity *ent = get_SymConst_entity(ptr); + + unsigned prop = get_entity_additional_properties(ent); + if ((prop & mtp_property_nothrow) == 0) + return; + + /* ok, if we get here we found a call to a nothrow function */ + set_irn_link(call, ctx->nothrow_call_list); + ctx->nothrow_call_list = call; + } else if (is_Proj(node)) { + /* + * Collect all memory and exception Proj's from + * calls. + */ + ir_node *call = get_Proj_pred(node); + if (! is_Call(call)) + return; + + /* collect the Proj's in the Proj list */ + switch (get_Proj_proj(node)) { + case pn_Call_M: + case pn_Call_X_except: + case pn_Call_X_regular: + set_irn_link(node, ctx->proj_list); + ctx->proj_list = node; + break; + default: + break; + } + } +} + +/** + * Fix the list of collected nothrow Calls. + * + * @param irg the graph that contained calls to pure functions + * @param call_list the list of all call sites of const functions + * @param proj_list the list of all memory/exception Proj's of this call sites + */ +static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, + ir_node *proj_list) +{ + bool exc_changed = false; + + /* First step: go through the list of calls and mark them. 
 */
+	ir_node *next;
+	for (ir_node *call = call_list; call; call = next) {
+		next = (ir_node*)get_irn_link(call);
+
+		/* current_ir_graph is in memory anyway, so it's a good marker */
+		set_irn_link(call, &current_ir_graph);
+		hook_func_call(irg, call);
+	}
+
+	/* Second step: Remove all exception Proj's */
+	for (ir_node *proj = proj_list; proj; proj = next) {
+		next = (ir_node*)get_irn_link(proj);
+		ir_node *call = get_Proj_pred(proj);
+
+		/* handle only marked calls */
+		if (get_irn_link(call) != &current_ir_graph)
+			continue;
+
+		/* kill any exception flow */
+		switch (get_Proj_proj(proj)) {
+		case pn_Call_X_except:
+			exc_changed = true;
+			exchange(proj, new_r_Bad(irg, mode_X));
+			break;
+		case pn_Call_X_regular: {
+			ir_node *block = get_nodes_block(call);
+			exc_changed = true;
+			exchange(proj, new_r_Jmp(block));
+			break;
+		}
+		default:
+			break;
+		}
+	}
+
+	/* changes were done ... */
+	if (exc_changed) {
+		/* ... including exception edges */
+		clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+		                   | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+	}
+}
+
+/* marking */
+#define SET_IRG_READY(irg)	rbitset_set(ready_set, get_irg_idx(irg))
+#define IS_IRG_READY(irg)	rbitset_is_set(ready_set, get_irg_idx(irg))
+#define SET_IRG_BUSY(irg)	rbitset_set(busy_set, get_irg_idx(irg))
+#define CLEAR_IRG_BUSY(irg)	rbitset_clear(busy_set, get_irg_idx(irg))
+#define IS_IRG_BUSY(irg)	rbitset_is_set(busy_set, get_irg_idx(irg))
+
+/* forward */
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, bool top);
+
+/**
+ * Calculate the bigger property of two. Handle the temporary flag right.
+ */
+static mtp_additional_properties max_property(mtp_additional_properties a,
+                                              mtp_additional_properties b)
+{
+	mtp_additional_properties t = (a | b) & mtp_temporary;
+	a &= ~mtp_temporary;
+	b &= ~mtp_temporary;
+
+	if (a == mtp_no_property || b == mtp_no_property)
+		return mtp_no_property;
+	mtp_additional_properties r = a > b ? 
a : b; + return r | t; +} + +/** + * Follow the memory chain starting at node and determine + * the mtp_property. + * + * @return mtp_property_const if only calls of const functions are detected + * mtp_property_pure if only Loads and const/pure calls detected + * mtp_no_property else + */ +static mtp_additional_properties follow_mem_(ir_node *node) +{ + mtp_additional_properties mode = mtp_property_const; + + for (;;) { + if (mode == mtp_no_property) + return mtp_no_property; + + if (irn_visited_else_mark(node)) + return mode; + + switch (get_irn_opcode(node)) { + case iro_Proj: + node = get_Proj_pred(node); + break; + + case iro_NoMem: + /* finish here */ + return mode; + + case iro_Phi: + case iro_Sync: + /* do a dfs search */ + for (int i = get_irn_arity(node) - 1; i >= 0; --i) { + mtp_additional_properties m = follow_mem_(get_irn_n(node, i)); + mode = max_property(mode, m); + if (mode == mtp_no_property) + return mtp_no_property; + } + return mode; + + case iro_Load: + /* Beware volatile Loads are NOT allowed in pure functions. */ + if (get_Load_volatility(node) == volatility_is_volatile) + return mtp_no_property; + mode = max_property(mode, mtp_property_pure); + node = get_Load_mem(node); + break; + + case iro_Call: { + /* A call is only tolerable if its either constant or pure. */ + ir_node *ptr = get_Call_ptr(node); + if (!is_SymConst_addr_ent(ptr)) + return mtp_no_property; + + ir_entity *ent = get_SymConst_entity(ptr); + ir_graph *irg = get_entity_irg(ent); + + mtp_additional_properties m; + if (irg == NULL) { + m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure); + mode = max_property(mode, m); + } else { + /* we have a graph, analyze it. */ + m = check_const_or_pure_function(irg, false); + mode = max_property(mode, m); + } + node = get_Call_mem(node); + break; + } + + default: + return mtp_no_property; + } + } +} + +/** + * Follow the memory chain starting at node and determine + * the mtp_property. 
+ * + * @return mtp_property_const if only calls of const functions are detected + * mtp_property_pure if only Loads and const/pure calls detected + * mtp_no_property else + */ +static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode) +{ + mtp_additional_properties m = follow_mem_(node); + return max_property(mode, m); +} + +/** + * Check if a graph represents a const or a pure function. + * + * @param irg the graph to check + * @param top if set, this is the top call + */ +static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, bool top) +{ + ir_entity *entity = get_irg_entity(irg); + ir_type *type = get_entity_type(entity); + size_t n_params = get_method_n_params(type); + mtp_additional_properties may_be_const = mtp_property_const; + mtp_additional_properties prop = get_entity_additional_properties(entity); + + /* libfirm handles aggregate parameters by passing around pointers to + * stuff in memory, so if we have compound parameters we are never const */ + for (size_t i = 0; i < n_params; ++i) { + ir_type *param = get_method_param_type(type, i); + if (is_compound_type(param)) { + prop &= ~mtp_property_const; + may_be_const = mtp_no_property; + } + } + + if (prop & mtp_property_const) { + /* already marked as a const function */ + return mtp_property_const; + } + if (prop & mtp_property_pure) { + /* already marked as a pure function */ + return mtp_property_pure; + } + + if (IS_IRG_READY(irg)) { + /* already checked */ + return mtp_no_property; + } + if (IS_IRG_BUSY(irg)) { + /* We are still evaluate this method. + * The function (indirectly) calls itself and thus may not terminate. 
*/ + return mtp_no_property; + } + SET_IRG_BUSY(irg); + + ir_node *end = get_irg_end(irg); + ir_node *endbl = get_nodes_block(end); + prop = may_be_const; + + ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED); + inc_irg_visited(irg); + /* mark the initial mem: recursion of follow_mem() stops here */ + mark_irn_visited(get_irg_initial_mem(irg)); + + /* visit every Return */ + for (int j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) { + ir_node *node = get_Block_cfgpred(endbl, j); + unsigned code = get_irn_opcode(node); + + /* Bad nodes usually do NOT produce anything, so it's ok */ + if (code == iro_Bad) + continue; + + if (code == iro_Return) { + ir_node *mem = get_Return_mem(node); + + /* Bad nodes usually do NOT produce anything, so it's ok */ + if (is_Bad(mem)) + continue; + + if (mem != get_irg_initial_mem(irg)) + prop = max_property(prop, follow_mem(mem, prop)); + } else { + /* Exception found. Cannot be const or pure. */ + prop = mtp_no_property; + break; + } + if (prop == mtp_no_property) + break; + } + + if (prop != mtp_no_property) { + /* check, if a keep-alive exists */ + for (int j = get_End_n_keepalives(end) - 1; j >= 0; --j) { + ir_node *kept = get_End_keepalive(end, j); + + if (is_Block(kept)) { + prop = mtp_no_property; + break; + } + + if (mode_M != get_irn_mode(kept)) + continue; + + prop = max_property(prop, follow_mem(kept, prop)); + if (prop == mtp_no_property) + break; + } + } + + if (top) { + /* Set the property only if we are at top-level. */ + if (prop != mtp_no_property) { + add_entity_additional_properties(entity, prop); + } + SET_IRG_READY(irg); + } + CLEAR_IRG_BUSY(irg); + ir_free_resources(irg, IR_RESOURCE_IRN_VISITED); + return prop; +} + +/** + * Handle calls to const functions. 
+ * + * @param ctx context + */ +static void handle_const_Calls(env_t *ctx) +{ + /* all calls of const functions can be transformed */ + size_t n = get_irp_n_irgs(); + for (size_t i = 0; i < n; ++i) { + ir_graph *irg = get_irp_irg(i); + + ctx->float_const_call_list = NULL; + ctx->nonfloat_const_call_list = NULL; + ctx->pure_call_list = NULL; + ctx->proj_list = NULL; + + ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); + irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx); + fix_const_call_lists(irg, ctx); + ir_free_resources(irg, IR_RESOURCE_IRN_LINK); + + confirm_irg_properties(irg, + IR_GRAPH_PROPERTIES_CONTROL_FLOW + | IR_GRAPH_PROPERTY_ONE_RETURN + | IR_GRAPH_PROPERTY_MANY_RETURNS); + } +} + +/** + * Handle calls to nothrow functions. + * + * @param ctx context + */ +static void handle_nothrow_Calls(env_t *ctx) +{ + /* all calls of const functions can be transformed */ + size_t n = get_irp_n_irgs(); + for (size_t i = 0; i < n; ++i) { + ir_graph *irg = get_irp_irg(i); + + ctx->nothrow_call_list = NULL; + ctx->proj_list = NULL; + + ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); + irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx); + + if (ctx->nothrow_call_list) + fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list); + ir_free_resources(irg, IR_RESOURCE_IRN_LINK); + } +} + +/** + * Check, whether a given node represents a return value of + * a malloc like function (ie, new heap allocated memory). + * + * @param node the node to check + */ +static bool is_malloc_call_result(const ir_node *node) +{ + if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) { + /* Firm style high-level allocation */ + return true; + } + /* TODO: check mtp_malloc */ + return false; +} + +/** + * Update a property depending on a call property. 
+ */ +static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop) +{ + mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary; + mtp_additional_properties r = orig_prop & call_prop; + return r | t; +} + +/** + * Check if a node is stored. + */ +static bool is_stored(const ir_node *n) +{ + const ir_node *ptr; + + foreach_out_edge(n, edge) { + const ir_node *succ = get_edge_src_irn(edge); + + switch (get_irn_opcode(succ)) { + case iro_Return: + case iro_Load: + case iro_Cmp: + /* ok */ + break; + case iro_Store: + if (get_Store_value(succ) == n) + return true; + /* ok if its only the address input */ + break; + case iro_Sel: + case iro_Cast: + case iro_Confirm: + if (is_stored(succ)) + return true; + break; + case iro_Call: + ptr = get_Call_ptr(succ); + if (is_SymConst_addr_ent(ptr)) { + ir_entity *ent = get_SymConst_entity(ptr); + size_t i; + + /* we know the called entity */ + for (i = get_Call_n_params(succ); i > 0;) { + if (get_Call_param(succ, --i) == n) { + /* n is the i'th param of the call */ + if (get_method_param_access(ent, i) & ptr_access_store) { + /* n is store in ent */ + return true; + } + } + } + } else { + /* unknown call address */ + return true; + } + break; + default: + /* bad, potential alias */ + return true; + } + } + return false; +} + +/** + * Check that the return value of an irg is not stored anywhere. + * + * return ~mtp_property_malloc if return values are stored, ~0 else + */ +static mtp_additional_properties check_stored_result(ir_graph *irg) +{ + ir_node *end_blk = get_irg_end_block(irg); + mtp_additional_properties res = ~mtp_no_property; + + assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES); + + for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) { + ir_node *pred = get_Block_cfgpred(end_blk, i); + + if (! 
is_Return(pred)) + continue; + for (size_t j = get_Return_n_ress(pred); j > 0;) { + const ir_node *irn = get_Return_res(pred, --j); + + if (is_stored(irn)) { + /* bad, might create an alias */ + res = ~mtp_property_malloc; + goto finish; + } + } + } +finish: + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL); + return res; +} + +/** + * Check if a graph represents a nothrow or a malloc function. + * + * @param irg the graph to check + * @param top if set, this is the top call + */ +static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, bool top) +{ + mtp_additional_properties curr_prop + = mtp_property_malloc | mtp_property_nothrow; + + ir_entity *ent = get_irg_entity(irg); + if (IS_IRG_READY(irg)) { + /* already checked */ + return get_entity_additional_properties(ent); + } + if (IS_IRG_BUSY(irg)) { + /* we are still evaluate this method. Be optimistic, + return the best possible so far but mark the result as temporary. */ + return mtp_temporary | mtp_property_malloc | mtp_property_nothrow; + } + SET_IRG_BUSY(irg); + + ir_type *mtp = get_entity_type(ent); + if (get_method_n_ress(mtp) <= 0) + curr_prop &= ~mtp_property_malloc; + + ir_node *end_blk = get_irg_end_block(irg); + for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) { + ir_node *pred = get_Block_cfgpred(end_blk, i); + + if (is_Return(pred)) { + if (curr_prop & mtp_property_malloc) { + /* check, if malloc is called here */ + for (size_t j = get_Return_n_ress(pred); j > 0;) { + ir_node *res = get_Return_res(pred, --j); + + /* skip Confirms and Casts */ + res = skip_HighLevel_ops(res); + /* skip Proj's */ + while (is_Proj(res)) + res = get_Proj_pred(res); + if (is_malloc_call_result(res)) { + /* ok, this is a malloc */ + } else if (is_Call(res)) { + ir_node *ptr = get_Call_ptr(res); + + if (is_SymConst_addr_ent(ptr)) { + /* a direct call */ + ir_entity *ent = get_SymConst_entity(ptr); + ir_graph *callee = get_entity_irg(ent); + + if (callee == irg) { + /* A self-recursive call. 
The property did not depend on this call. */ + } else if (callee != NULL) { + mtp_additional_properties prop = check_nothrow_or_malloc(callee, false); + curr_prop = update_property(curr_prop, prop); + } else { + curr_prop = update_property(curr_prop, get_entity_additional_properties(ent)); + } + } else { + /* unknown call */ + curr_prop &= ~mtp_property_malloc; + } + } else { + /* unknown return value */ + curr_prop &= ~mtp_property_malloc; + } + } + } + } else if (curr_prop & mtp_property_nothrow) { + /* exception flow detected */ + pred = skip_Proj(pred); + + if (is_Call(pred)) { + ir_node *ptr = get_Call_ptr(pred); + + if (is_SymConst_addr_ent(ptr)) { + /* a direct call */ + ir_entity *ent = get_SymConst_entity(ptr); + ir_graph *callee = get_entity_irg(ent); + + if (callee == irg) { + /* A self-recursive call. The property did not depend on this call. */ + } else if (callee != NULL) { + /* Note: we check here for nothrow only, so do NOT reset the malloc property */ + mtp_additional_properties prop = check_nothrow_or_malloc(callee, false) | mtp_property_malloc; + curr_prop = update_property(curr_prop, prop); + } else { + if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) + curr_prop &= ~mtp_property_nothrow; + } + } else { + /* unknown call */ + curr_prop &= ~mtp_property_nothrow; + } + } else { + /* real exception flow possible. */ + curr_prop &= ~mtp_property_nothrow; + } + } + if ((curr_prop & ~mtp_temporary) == mtp_no_property) { + /* no need to search further */ + break; + } + } + + if (curr_prop & mtp_property_malloc) { + /* Note that the malloc property means not only return newly allocated + * memory, but also that this memory is ALIAS FREE. + * To ensure that, we do NOT allow that the returned memory is somewhere + * stored. */ + curr_prop &= check_stored_result(irg); + } + + if (curr_prop != mtp_no_property + && (top || (curr_prop & mtp_temporary) == 0)) { + /* We use the temporary flag here to mark an optimistic result. 
+ * Set the property only if we are sure that it does NOT base on + * temporary results OR if we are at top-level. */ + add_entity_additional_properties(ent, curr_prop & ~mtp_temporary); + SET_IRG_READY(irg); + } + if (top) + SET_IRG_READY(irg); + CLEAR_IRG_BUSY(irg); + return curr_prop; +} + +/** + * When a function was detected as "const", it might be moved out of loops. + * This might be dangerous if the graph can contain endless loops. + */ +static void check_for_possible_endless_loops(ir_graph *irg) +{ + assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO); + + ir_loop *root_loop = get_irg_loop(irg); + if (root_loop->flags & loop_outer_loop) { + ir_entity *ent = get_irg_entity(irg); + add_entity_additional_properties(ent, mtp_property_has_loop); + } + + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL); +} + +void optimize_funccalls(void) +{ + /* prepare: mark all graphs as not analyzed */ + size_t last_idx = get_irp_last_idx(); + ready_set = rbitset_malloc(last_idx); + busy_set = rbitset_malloc(last_idx); + + /* first step: detect, which functions are nothrow or malloc */ + DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n")); + for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) { + ir_graph *irg = get_irp_irg(i); + unsigned prop = check_nothrow_or_malloc(irg, true); + + if (prop & mtp_property_nothrow) { + DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg)); + } else if (prop & mtp_property_malloc) { + DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg)); + } + } + + /* second step: remove exception edges: this must be done before the + detection of const and pure functions take place. 
*/ + env_t ctx; + handle_nothrow_Calls(&ctx); + + rbitset_clear_all(ready_set, last_idx); + rbitset_clear_all(busy_set, last_idx); + + /* third step: detect, which functions are const or pure */ + DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n")); + for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) { + ir_graph *irg = get_irp_irg(i); + unsigned prop = check_const_or_pure_function(irg, true); + + if (prop & mtp_property_const) { + DB((dbg, LEVEL_2, "%+F has the const property\n", irg)); + check_for_possible_endless_loops(irg); + } else if (prop & mtp_property_pure) { + DB((dbg, LEVEL_2, "%+F has the pure property\n", irg)); + } + } + + handle_const_Calls(&ctx); + + xfree(busy_set); + xfree(ready_set); +} + +void firm_init_funccalls(void) +{ + FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls"); +} + +ir_prog_pass_t *optimize_funccalls_pass(const char *name) +{ + return def_prog_pass(name ? name : "funccall", optimize_funccalls); }