* @author Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <adt/raw_bitset.h>
#include "irvrfy.h"
#include "dbginfo_t.h"
#include "irflag_t.h"
+#include "irloop_t.h"
#include "ircons.h"
+#include "iredges_t.h"
+#include "analyze_irg_args.h"
#include "irhooks.h"
#include "debug.h"
typedef struct _env_t {
unsigned n_calls_SymConst;
unsigned n_calls_Sel;
- ir_node *const_call_list; /**< The list of all const function calls that will be changed. */
- ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
- ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
- ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
+ ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
+ ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
+ ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
+ ir_node *nothrow_call_list; /**< The list of all nothrow function calls that will be changed. */
+ ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
} env_t;
/** If non-null, evaluates entities for being a heap alloc. */
* to lists. Collect all Proj(Call) nodes into a Proj list.
*/
static void collect_const_and_pure_calls(ir_node *node, void *env) {
- env_t *ctx = env;
- ir_node *call, *ptr;
+ env_t *ctx = env;
+ ir_node *call, *ptr;
ir_entity *ent;
- unsigned prop;
+ unsigned and_prop, or_prop, prop;
if (is_Call(node)) {
call = node;
/* set the link to NULL for all non-const/pure calls */
set_irn_link(call, NULL);
ptr = get_Call_ptr(call);
- if (is_SymConst_addr_ent(ptr)) {
- ent = get_SymConst_entity(ptr);
+ if (is_Global(ptr)) {
+ ent = get_Global_entity(ptr);
prop = get_entity_additional_properties(ent);
if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
}
/* note that const function are a subset of pure ones */
- prop = mtp_property_const | mtp_property_pure;
+ and_prop = mtp_property_const | mtp_property_pure;
+ or_prop = 0;
for (i = 0; i < n_callees; ++i) {
ent = get_Call_callee(call, i);
if (ent == unknown_entity) {
/* we don't know which entity is called here */
return;
}
- prop &= get_entity_additional_properties(ent);
- if (prop == mtp_no_property)
+ prop = get_entity_additional_properties(ent);
+ and_prop &= prop;
+			or_prop |= prop;
+ if (and_prop == mtp_no_property)
return;
}
+ prop = and_prop | (or_prop & mtp_property_has_loop);
++ctx->n_calls_Sel;
} else
return;
set_irn_link(call, ctx->pure_call_list);
ctx->pure_call_list = call;
} else {
- set_irn_link(call, ctx->const_call_list);
- ctx->const_call_list = call;
+ if (prop & mtp_property_has_loop) {
+ set_irn_link(call, ctx->nonfloat_const_call_list);
+ ctx->nonfloat_const_call_list = call;
+ } else {
+ set_irn_link(call, ctx->float_const_call_list);
+ ctx->float_const_call_list = call;
+ }
}
} else if (is_Proj(node)) {
/*
/**
* Fix the list of collected Calls.
*
- * @param irg the graph that contained calls to pure functions
- * @param call_list the list of all call sites of const functions
- * @param proj_list the list of all memory/exception Proj's of this call sites
+ * @param irg the graph that contained calls to pure functions
+ * @param ctx context
*/
-static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
ir_node *call, *next, *mem, *proj;
int exc_changed = 0;
ir_graph *rem = current_ir_graph;
current_ir_graph = irg;
- /* First step: fix all calls by removing it's memory input.
- It's original memory input is preserved in their link fields. */
- for (call = call_list; call; call = next) {
+ /* First step: fix all calls by removing their memory input and let
+ * them floating.
+ * The original memory input is preserved in their link fields. */
+ for (call = ctx->float_const_call_list; call != NULL; call = next) {
next = get_irn_link(call);
mem = get_Call_mem(call);
set_Call_mem(call, get_irg_no_mem(irg));
/*
- * Sorrily we cannot simply set the node to 'float'.
+ * Unfortunately we cannot simply set the node to 'float'.
* There is a reason for that:
*
* - The call might be inside a loop/if that is NOT entered
* observable states...
*/
- /* finally, this call can float
- set_irn_pinned(call, op_pin_state_floats); */
+ /* finally, this call can float */
+ set_irn_pinned(call, op_pin_state_floats);
hook_func_call(irg, call);
}
- /* Second step: fix all Proj's */
- for (proj = proj_list; proj; proj = next) {
+ /* Last step: fix all Proj's */
+ for (proj = ctx->proj_list; proj != NULL; proj = next) {
next = get_irn_link(proj);
call = get_Proj_pred(proj);
mem = get_irn_link(call);
/* beware of calls in the pure call list */
- if (! mem || get_irn_op(mem) == op_Call)
+ if (!mem || is_Call(mem))
continue;
assert(get_irn_mode(mem) == mode_M);
/* set the link to NULL for all non-const/pure calls */
set_irn_link(call, NULL);
ptr = get_Call_ptr(call);
- if (is_SymConst_addr_ent(ptr)) {
- ent = get_SymConst_entity(ptr);
+ if (is_Global(ptr)) {
+ ent = get_Global_entity(ptr);
prop = get_entity_additional_properties(ent);
if ((prop & mtp_property_nothrow) == 0)
if (mode == mtp_no_property)
return mtp_no_property;
- if (irn_visited(node))
+ if (irn_visited_else_mark(node))
return mode;
- mark_irn_visited(node);
-
switch (get_irn_opcode(node)) {
case iro_Proj:
node = get_Proj_pred(node);
case iro_Call:
/* A call is only tolerable if its either constant or pure. */
ptr = get_Call_ptr(node);
- if (get_irn_op(ptr) == op_SymConst &&
- get_SymConst_kind(ptr) == symconst_addr_ent) {
+ if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
if (prop != mtp_no_property) {
/* check, if a keep-alive exists */
for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
- ir_node *mem = get_End_keepalive(end, j);
+ ir_node *kept = get_End_keepalive(end, j);
- if (mode_M != get_irn_mode(mem))
+ if (is_Block(kept)) {
+ prop = mtp_no_property;
+ break;
+ }
+
+ if (mode_M != get_irn_mode(kept))
continue;
- prop = max_property(prop, follow_mem(mem, prop));
+ prop = max_property(prop, follow_mem(kept, prop));
if (prop == mtp_no_property)
break;
}
for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
ir_graph *irg = get_irp_irg(i);
- ctx->const_call_list = NULL;
- ctx->pure_call_list = NULL;
- ctx->proj_list = NULL;
+ ctx->float_const_call_list = NULL;
+ ctx->nonfloat_const_call_list = NULL;
+ ctx->pure_call_list = NULL;
+ ctx->proj_list = NULL;
irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
- if (ctx->const_call_list) {
- fix_const_call_list(irg, ctx->const_call_list, ctx->proj_list);
+ if (ctx->float_const_call_list != NULL) {
+ fix_const_call_lists(irg, ctx);
/* this graph was changed, invalidate analysis info */
set_irg_outs_inconsistent(irg);
if (is_alloc_entity != NULL && is_Call(node)) {
ir_node *ptr = get_Call_ptr(node);
- if (is_SymConst_addr_ent(ptr)) {
- ir_entity *ent = get_SymConst_entity(ptr);
+ if (is_Global(ptr)) {
+ ir_entity *ent = get_Global_entity(ptr);
return is_alloc_entity(ent);
}
}
return r | t;
} /** update_property */
+/**
+ * Check if a node is stored, i.e. whether its value might be written
+ * to memory or escape through a call whose callee stores it.
+ * Sel, Cast and Confirm users are followed recursively, because a
+ * value stored through them is effectively the same value.
+ *
+ * Returns 1 if the value might be stored (potential alias), 0 else.
+ * NOTE(review): relies on out-edges (EDGE_KIND_NORMAL) being active;
+ * the only caller, check_stored_result(), ensures this.
+ */
+static int is_stored(const ir_node *n) {
+	const ir_edge_t *edge;
+	const ir_node *ptr;
+
+	foreach_out_edge(n, edge) {
+		const ir_node *succ = get_edge_src_irn(edge);
+
+		switch (get_irn_opcode(succ)) {
+		case iro_Return:
+		case iro_Load:
+		case iro_Cmp:
+			/* ok: these users do not write the value to memory */
+			break;
+		case iro_Store:
+			if (get_Store_value(succ) == n)
+				return 1;
+			/* ok if it's only the address input */
+			break;
+		case iro_Sel:
+		case iro_Cast:
+		case iro_Confirm:
+			/* a value derived from n: check its users as well */
+			if (is_stored(succ))
+				return 1;
+			break;
+		case iro_Call:
+			ptr = get_Call_ptr(succ);
+			if (is_Global(ptr)) {
+				ir_entity *ent = get_Global_entity(ptr);
+				int i;
+
+				/* we know the called entity */
+				for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
+					if (get_Call_param(succ, i) == n) {
+						/* n is the i'th param of the call */
+						if (get_method_param_access(ent, i) & ptr_access_store) {
+							/* n is stored in ent */
+							return 1;
+						}
+					}
+				}
+			} else {
+				/* unknown call address: must assume the value is stored */
+				return 1;
+			}
+			break;
+		default:
+			/* bad, potential alias */
+			return 1;
+		}
+	}
+	return 0;
+} /* is_stored */
+
+/**
+ * Check that the return values of an irg are not stored anywhere.
+ *
+ * The result is a bit mask to AND onto the current property set:
+ * return ~mtp_property_malloc if return values are stored, ~0 else
+ */
+static unsigned check_stored_result(ir_graph *irg) {
+	ir_node *end_blk = get_irg_end_block(irg);
+	int i, j;
+	/* ~0: keep all property bits by default */
+	unsigned res = ~0;
+	/* activate out-edges if needed; remember previous state for restore */
+	int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
+
+	/* visit every Return feeding the end block */
+	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
+		ir_node *pred = get_Block_cfgpred(end_blk, i);
+
+		if (! is_Return(pred))
+			continue;
+		for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
+			const ir_node *irn = get_Return_res(pred, j);
+
+			if (is_stored(irn)) {
+				/* bad, might create an alias */
+				res = ~mtp_property_malloc;
+				goto finish;
+			}
+		}
+	}
+finish:
+	/* restore the previous edge activation state */
+	if (! old_edges)
+		edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
+	return res;
+} /* check_stored_result */
+
/**
* Check if a graph represents a nothrow or a malloc function.
*
* @param top if set, this is the top call
*/
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
- ir_node *end_blk = get_irg_end_block(irg);
- int i, j;
- unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
+ ir_node *end_blk = get_irg_end_block(irg);
+ ir_entity *ent;
+ ir_type *mtp;
+ int i, j;
+ unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
if (IS_IRG_READY(irg)) {
/* already checked */
}
SET_IRG_BUSY(irg);
+ ent = get_irg_entity(irg);
+ mtp = get_entity_type(ent);
+
+ if (get_method_n_ress(mtp) <= 0)
+ curr_prop &= ~mtp_property_malloc;
+
for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(end_blk, i);
if (curr_prop & mtp_property_malloc) {
/* check, if malloc is called here */
for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
- const ir_node *res = get_Return_res(pred, j);
- const ir_node *irn = skip_Proj_const(res);
+ ir_node *res = get_Return_res(pred, j);
+
+ /* skip Confirms and Casts */
+ res = skip_HighLevel_ops(res);
+ /* skip Proj's */
+ while (is_Proj(res))
+ res = get_Proj_pred(res);
if (is_malloc_call_result(res)) {
/* ok, this is a malloc */
} else if (is_Call(res)) {
ir_node *ptr = get_Call_ptr(res);
- if (is_SymConst_addr_ent(ptr)) {
+ if (is_Global(ptr)) {
/* a direct call */
- ir_entity *ent = get_SymConst_entity(ptr);
+ ir_entity *ent = get_Global_entity(ptr);
ir_graph *callee = get_entity_irg(ent);
if (callee == irg) {
/* unknown call */
curr_prop &= ~mtp_property_malloc;
}
+ } else {
+ /* unknown return value */
+ curr_prop &= ~mtp_property_malloc;
}
}
}
if (is_Call(pred)) {
ir_node *ptr = get_Call_ptr(pred);
- if (is_SymConst_addr_ent(ptr)) {
+ if (is_Global(ptr)) {
/* a direct call */
- ir_entity *ent = get_SymConst_entity(ptr);
+ ir_entity *ent = get_Global_entity(ptr);
ir_graph *callee = get_entity_irg(ent);
if (callee == irg) {
/* A self-recursive call. The property did not depend on this call. */
} else if (callee != NULL) {
- unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
+ /* Note: we check here for nothrow only, so do NOT reset the malloc property */
+ unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
curr_prop = update_property(curr_prop, prop);
} else {
- curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
+ if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
+ curr_prop &= ~mtp_property_nothrow;
}
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
break;
}
}
+
+ if (curr_prop & mtp_property_malloc) {
+ /*
+ * Note that the malloc property means not only return newly allocated
+ * memory, but also that this memory is ALIAS FREE.
+ * To ensure that, we do NOT allow that the returned memory is somewhere
+ * stored.
+ */
+ curr_prop &= check_stored_result(irg);
+ }
+
if (curr_prop != mtp_no_property) {
if (top || (curr_prop & mtp_temporary) == 0) {
/* We use the temporary flag here to mark an optimistic result.
return curr_prop;
} /* check_nothrow_or_malloc */
+/**
+ * When a function was detected as "const", it might be moved out of loops.
+ * This might be dangerous if the graph might contain endless loops.
+ *
+ * Marks irg with mtp_property_has_loop when its control flow contains
+ * loops, so calls to it are kept in the non-floating const call list.
+ */
+static void check_for_possible_endless_loops(ir_graph *irg) {
+	ir_loop *root_loop;
+	/* make sure control-flow loop information is computed */
+	assure_cf_loop(irg);
+
+	root_loop = get_irg_loop(irg);
+	/* NOTE(review): assumes loop_outer_loop is set on the root loop iff it
+	 * contains inner loops — confirm against irloop_t.h */
+	if (root_loop->flags & loop_outer_loop)
+		set_irg_additional_property(irg, mtp_property_has_loop);
+}
+
/*
* optimize function calls by handling const functions
*/
-void optimize_funccalls(int force_run)
+void optimize_funccalls(int force_run, check_alloc_entity_func callback)
{
int i, last_idx;
unsigned num_const = 0;
unsigned num_nothrow = 0;
unsigned num_malloc = 0;
+ is_alloc_entity = callback;
+
/* prepare: mark all graphs as not analyzed */
last_idx = get_irp_last_idx();
ready_set = rbitset_malloc(last_idx);
if (prop & mtp_property_const) {
++num_const;
DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
+ check_for_possible_endless_loops(irg);
} else if (prop & mtp_property_pure) {
++num_pure;
DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));