-void optimize_funccalls(int force_run)
-{
- int i, j;
- int change;
- unsigned num_pure = 0;
-
- if (! get_opt_real_func_call())
- return;
-
- /* first step: detect, which functions are const, i.e. do NOT touch any memory */
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
- ir_graph *irg = get_irp_irg(i);
- ir_node *end = get_irg_end(irg);
- ir_node *endbl = get_nodes_block(end);
-
- change = 0;
-
- if (get_irg_additional_properties(irg) & irg_const_function) {
- /* already marked as a const function */
- ++num_pure;
- }
- else {
- /* visit every Return */
- for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
- ir_node *node = get_Block_cfgpred(endbl, j);
- ir_op *op = get_irn_op(node);
- ir_node *mem;
-
- /* Bad nodes usually do NOT produce anything, so it's ok */
- if (op == op_Bad)
- continue;
-
- if (op == op_Return) {
- mem = get_Return_mem(node);
-
- /* Bad nodes usually do NOT produce anything, so it's ok */
- if (is_Bad(mem))
- continue;
-
- change = mem != get_irg_initial_mem(irg);
- if (change)
- break;
- }
- else {
- /* exception found */
- change = 1;
- break;
- }
- }
-
- if (! change) {
- /* check, if a keep-alive exists */
- for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
- ir_node *mem = get_End_keepalive(end, j);
-
- if (mode_M != get_irn_mode(mem))
- continue;
-
- change = mem != get_irg_initial_mem(irg);
- if (change)
- break;
- }
- }
-
- if (! change) {
- /* no memory changes found, it's a const function */
- set_irg_additional_property(irg, irg_const_function);
- ++num_pure;
- }
- }
- }
-
- if (force_run || num_pure > 0) {
- env_t ctx;
-
- ctx.n_calls_removed_SymConst = 0;
- ctx.n_calls_removed_Sel = 0;
-
- /* all calls of pure functions can be transformed into FuncCalls */
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
- ir_graph *irg = get_irp_irg(i);
-
- /* no need to do this on const functions */
- if ((get_irg_additional_properties(irg) & irg_const_function) == 0) {
- ctx.changed = 0;
- irg_walk_graph(irg, NULL, rem_mem_from_const_fkt_calls, &ctx);
-
- if (ctx.changed) {
- /* changes were done */
- set_irg_outs_inconsistent(irg);
- set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
- }
- }
- }
-
- if (get_firm_verbosity()) {
- printf("Detected %d graphs without side effects.\n", num_pure);
- printf("Optimizes %d(SymConst) + %d(Sel) calls to const functions.\n",
- ctx.n_calls_removed_SymConst, ctx.n_calls_removed_Sel);
- }
- }
- else {
- if (get_firm_verbosity()) {
- printf("No graphs without side effects detected\n");
- }
- }
+/**
+ * Walker: collect all Calls to nothrow functions and all memory/exception
+ * Proj's of Calls into the lists of the environment.
+ *
+ * A call is accepted if its callee entity has mtp_property_nothrow, or —
+ * in a closed world with consistent callee info — if every possible
+ * callee of a Sel-addressed call has that property.
+ *
+ * @param node the node visited by the walker
+ * @param env the environment, an env_t *
+ */
+static void collect_nothrow_calls(ir_node *node, void *env) {
+ env_t *ctx = env;
+ ir_node *call, *ptr;
+ ir_entity *ent;
+ unsigned prop;
+
+ if (is_Call(node)) {
+ call = node;
+
+ /* set the link to NULL for all non-const/pure calls */
+ set_irn_link(call, NULL);
+ ptr = get_Call_ptr(call);
+ if (is_Global(ptr)) {
+ /* directly addressed callee: check its declared properties */
+ ent = get_Global_entity(ptr);
+
+ prop = get_entity_additional_properties(ent);
+ if ((prop & mtp_property_nothrow) == 0)
+ return;
+ ++ctx->n_calls_SymConst;
+ } else if (get_opt_closed_world() &&
+ is_Sel(ptr) &&
+ get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ /* If all possible callees are nothrow functions, we can remove the exception edge. */
+ int i, n_callees = get_Call_n_callees(call);
+ if (n_callees == 0) {
+ /* This is kind of strange: dying code or a Call that will raise an exception
+ when executed as there is no implementation to call. So better not
+ optimize. */
+ return;
+ }
+
+ /* note that const function are a subset of pure ones */
+ prop = mtp_property_nothrow;
+ for (i = 0; i < n_callees; ++i) {
+ ent = get_Call_callee(call, i);
+ if (ent == unknown_entity) {
+ /* we don't know which entity is called here */
+ return;
+ }
+ prop &= get_entity_additional_properties(ent);
+ if (prop == mtp_no_property)
+ return;
+ }
+ ++ctx->n_calls_Sel;
+ } else
+ return;
+
+ /* ok, if we get here we found a call to a nothrow function */
+ /* chain the call into the result list through its link field */
+ set_irn_link(call, ctx->nothrow_call_list);
+ ctx->nothrow_call_list = call;
+ } else if (is_Proj(node)) {
+ /*
+ * Collect all memory and exception Proj's from
+ * calls.
+ */
+ call = get_Proj_pred(node);
+ if (! is_Call(call))
+ return;
+
+ /* collect the Proj's in the Proj list */
+ switch (get_Proj_proj(node)) {
+ case pn_Call_M_regular:
+ case pn_Call_X_except:
+ case pn_Call_X_regular:
+ case pn_Call_M_except:
+ set_irn_link(node, ctx->proj_list);
+ ctx->proj_list = node;
+ break;
+ default:
+ break;
+ }
+ }
+} /* collect_nothrow_calls */
+
+/**
+ * Fix the list of collected nothrow Calls: remove their exception flow.
+ *
+ * @param irg the graph that contained calls to nothrow functions
+ * @param call_list the list of all call sites of nothrow functions
+ * @param proj_list the list of all memory/exception Proj's of this call sites
+ */
+static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+ ir_node *call, *next, *proj;
+ int exc_changed = 0;
+ ir_graph *rem = current_ir_graph;
+
+ current_ir_graph = irg;
+
+ /* First step: go through the list of calls and mark them. */
+ for (call = call_list; call; call = next) {
+ next = get_irn_link(call);
+
+ /* current_ir_graph is in memory anyway, so it's a good marker */
+ set_irn_link(call, &current_ir_graph);
+ hook_func_call(irg, call);
+ }
+
+ /* Second step: Remove all exception Proj's */
+ for (proj = proj_list; proj; proj = next) {
+ next = get_irn_link(proj);
+ call = get_Proj_pred(proj);
+
+ /* handle only marked calls */
+ if (get_irn_link(call) != &current_ir_graph)
+ continue;
+
+ /* kill any exception flow */
+ switch (get_Proj_proj(proj)) {
+ case pn_Call_X_except:
+ case pn_Call_M_except:
+ exc_changed = 1;
+ exchange(proj, get_irg_bad(irg));
+ break;
+ case pn_Call_X_regular: {
+ /* the regular control flow successor becomes an unconditional Jmp */
+ ir_node *block = get_nodes_block(call);
+ exc_changed = 1;
+ exchange(proj, new_r_Jmp(irg, block));
+ break;
+ }
+ default:
+ ;
+ }
+ }
+
+ /* changes were done ... */
+ set_irg_outs_inconsistent(irg);
+ set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
+
+ if (exc_changed) {
+ /* ... including exception edges */
+ set_irg_doms_inconsistent(irg);
+ }
+ current_ir_graph = rem;
+} /* fix_nothrow_call_list */
+
+/* marking */
+#define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
+#define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
+#define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
+#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
+#define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
+
+/* forward */
+static unsigned check_const_or_pure_function(ir_graph *irg, int top);
+static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
+
+/**
+ * Return the stronger of two property values, keeping the temporary
+ * flag of either operand alive in the result. mtp_no_property on
+ * either side dominates everything.
+ */
+static unsigned max_property(unsigned a, unsigned b) {
+ unsigned tmp_flag = (a | b) & mtp_temporary;
+ unsigned pa = a & ~mtp_temporary;
+ unsigned pb = b & ~mtp_temporary;
+
+ if (pa == mtp_no_property || pb == mtp_no_property)
+ return mtp_no_property;
+ if (pa < pb)
+ pa = pb;
+ return pa | tmp_flag;
+} /* max_property */
+
+/**
+ * Follow the memory chain starting at node and determine
+ * the mtp_property.
+ *
+ * @return mtp_property_const if only calls of const functions are detected
+ * mtp_property_pure if only Loads and const/pure
+ * calls detected
+ * mtp_no_property else
+ */
+static unsigned _follow_mem(ir_node *node) {
+ unsigned m, mode = mtp_property_const;
+ ir_node *ptr;
+ int i;
+
+ for (;;) {
+ if (mode == mtp_no_property)
+ return mtp_no_property;
+
+ /* already seen this part of the memory chain: nothing new here */
+ if (irn_visited(node))
+ return mode;
+
+ mark_irn_visited(node);
+
+ switch (get_irn_opcode(node)) {
+ case iro_Proj:
+ node = get_Proj_pred(node);
+ break;
+
+ case iro_NoMem:
+ /* finish here */
+ return mode;
+
+ case iro_Phi:
+ case iro_Sync:
+ /* do a dfs search */
+ for (i = get_irn_arity(node) - 1; i >= 0; --i) {
+ m = _follow_mem(get_irn_n(node, i));
+ mode = max_property(mode, m);
+ if (mode == mtp_no_property)
+ return mtp_no_property;
+ }
+ return mode;
+
+ case iro_Load:
+ /* Beware volatile Loads are NOT allowed in pure functions. */
+ if (get_Load_volatility(node) == volatility_is_volatile)
+ return mtp_no_property;
+ mode = max_property(mode, mtp_property_pure);
+ node = get_Load_mem(node);
+ break;
+
+ case iro_Call:
+ /* A call is only tolerable if its either constant or pure. */
+ ptr = get_Call_ptr(node);
+ if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
+ ir_entity *ent = get_SymConst_entity(ptr);
+ ir_graph *irg = get_entity_irg(ent);
+
+ if (irg == current_ir_graph) {
+ /* A self-recursive call. The property did not depend on this call. */
+ } else if (irg == NULL) {
+ /* no graph available: rely on the entity's declared properties */
+ m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
+ mode = max_property(mode, m);
+ } else {
+ /* we have a graph, analyze it. */
+ m = check_const_or_pure_function(irg, /*top=*/0);
+ mode = max_property(mode, m);
+ }
+ } else
+ return mtp_no_property;
+ node = get_Call_mem(node);
+ break;
+
+ default:
+ return mtp_no_property;
+ }
+ }
+} /* _follow_mem */
+
+/**
+ * Follow the memory chain starting at node and combine the property
+ * found there with an already computed property.
+ *
+ * @return mtp_property_const if only calls of const functions are detected
+ * mtp_property_pure if only Loads and const/pure calls detected
+ * mtp_no_property else
+ */
+static unsigned follow_mem(ir_node *node, unsigned mode) {
+ return max_property(mode, _follow_mem(node));
+} /* follow_mem */
+
+/**
+ * Check if a graph represents a const or a pure function.
+ *
+ * @param irg the graph to check
+ * @param top if set, this is the top call
+ *
+ * @return mtp_property_const, mtp_property_pure (possibly or'ed with
+ * mtp_temporary for optimistic intermediate results) or
+ * mtp_no_property
+ */
+static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
+ ir_node *end, *endbl;
+ int j;
+ unsigned prop = get_irg_additional_properties(irg);
+ ir_graph *rem = current_ir_graph;
+
+ if (prop & mtp_property_const) {
+ /* already marked as a const function */
+ return mtp_property_const;
+ }
+ if (prop & mtp_property_pure) {
+ /* already marked as a pure function */
+ return mtp_property_pure;
+ }
+
+ if (IS_IRG_READY(irg)) {
+ /* already checked */
+ return mtp_no_property;
+ }
+ if (IS_IRG_BUSY(irg)) {
+ /* we are still evaluate this method. Be optimistic,
+ return the best possible so far but mark the result as temporary. */
+ return mtp_temporary | mtp_property_const;
+ }
+ SET_IRG_BUSY(irg);
+
+ end = get_irg_end(irg);
+ endbl = get_nodes_block(end);
+ /* start optimistic: const until proven otherwise */
+ prop = mtp_property_const;
+
+ current_ir_graph = irg;
+
+ inc_irg_visited(irg);
+ /* mark the initial mem: recursion of follow_mem stops here */
+ mark_irn_visited(get_irg_initial_mem(irg));
+
+ /* visit every Return */
+ for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
+ ir_node *node = get_Block_cfgpred(endbl, j);
+ ir_opcode code = get_irn_opcode(node);
+ ir_node *mem;
+
+ /* Bad nodes usually do NOT produce anything, so it's ok */
+ if (code == iro_Bad)
+ continue;
+
+ if (code == iro_Return) {
+ mem = get_Return_mem(node);
+
+ /* Bad nodes usually do NOT produce anything, so it's ok */
+ if (is_Bad(mem))
+ continue;
+
+ if (mem != get_irg_initial_mem(irg))
+ prop = max_property(prop, follow_mem(mem, prop));
+ } else {
+ /* Exception found. Cannot be const or pure. */
+ prop = mtp_no_property;
+ break;
+ }
+ if (prop == mtp_no_property)
+ break;
+ }
+
+ if (prop != mtp_no_property) {
+ /* check, if a keep-alive exists */
+ for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
+ ir_node *kept = get_End_keepalive(end, j);
+
+ /* NOTE(review): a kept Block presumably marks an endless loop,
+ which we cannot prove const/pure — confirm against End node docs */
+ if (is_Block(kept)) {
+ prop = mtp_no_property;
+ break;
+ }
+
+ if (mode_M != get_irn_mode(kept))
+ continue;
+
+ prop = max_property(prop, follow_mem(kept, prop));
+ if (prop == mtp_no_property)
+ break;
+ }
+ }
+
+ if (prop != mtp_no_property) {
+ if (top || (prop & mtp_temporary) == 0) {
+ /* We use the temporary flag here to mark optimistic result.
+ Set the property only if we are sure that it does NOT base on
+ temporary results OR if we are at top-level. */
+ set_irg_additional_property(irg, prop & ~mtp_temporary);
+ SET_IRG_READY(irg);
+ }
+ }
+ if (top)
+ SET_IRG_READY(irg);
+ CLEAR_IRG_BUSY(irg);
+ current_ir_graph = rem;
+ return prop;
+} /* check_const_or_pure_function */
+
+/**
+ * Handle calls to const functions.
+ *
+ * @param ctx context
+ */
+static void handle_const_Calls(env_t *ctx) {
+ int i = get_irp_n_irgs();
+
+ ctx->n_calls_SymConst = 0;
+ ctx->n_calls_Sel = 0;
+
+ /* every call to a const function can be transformed */
+ while (--i >= 0) {
+ ir_graph *irg = get_irp_irg(i);
+
+ ctx->const_call_list = NULL;
+ ctx->pure_call_list = NULL;
+ ctx->proj_list = NULL;
+ irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
+
+ if (ctx->const_call_list != NULL) {
+ fix_const_call_list(irg, ctx->const_call_list, ctx->proj_list);
+
+ /* the graph was modified: analysis info is stale now */
+ set_irg_outs_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
+ }
+ }
+} /* handle_const_Calls */
+
+/**
+ * Handle calls to nothrow functions.
+ *
+ * @param ctx context
+ */
+static void handle_nothrow_Calls(env_t *ctx) {
+ int i;
+
+ ctx->n_calls_SymConst = 0;
+ ctx->n_calls_Sel = 0;
+
+ /* all calls of const functions can be transformed */
+ for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ ir_graph *irg = get_irp_irg(i);
+
+ ctx->nothrow_call_list = NULL;
+ ctx->proj_list = NULL;
+ irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
+
+ if (ctx->nothrow_call_list) {
+ fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
+
+ /* this graph was changed, invalidate analysis info */
+ set_irg_outs_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
+ }
+ }