/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* The walker environment for updating function calls.
*/
typedef struct env_t {
- unsigned n_calls_SymConst;
- unsigned n_calls_Sel;
+ size_t n_calls_SymConst;
+ size_t n_calls_Sel;
ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */
} env_t;
-/** If non-null, evaluates entities for being a heap alloc. */
-static check_alloc_entity_func is_alloc_entity = NULL;
-
/** Ready IRG's are marked in the ready set. */
static unsigned *ready_set;
is_Sel(ptr) &&
get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are const functions, we can remove the memory edge. */
- int i, n_callees = get_Call_n_callees(call);
+ size_t i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
/* This is kind of strange: dying code or a Call that will raise an exception
when executed as there is no implementation to call. So better not
}
case pn_Call_X_except:
exc_changed = 1;
- exchange(proj, get_irg_bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
}
/* changes were done ... */
- set_irg_outs_inconsistent(irg);
set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
if (exc_changed) {
/* ... including exception edges */
- set_irg_doms_inconsistent(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
}
} /* fix_const_call_list */
is_Sel(ptr) &&
get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are nothrow functions, we can remove the exception edge. */
- int i, n_callees = get_Call_n_callees(call);
+ size_t i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
/* This is kind of strange: dying code or a Call that will raise an exception
when executed as there is no implementation to call. So better not
switch (get_Proj_proj(proj)) {
case pn_Call_X_except:
exc_changed = 1;
- exchange(proj, get_irg_bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
}
/* changes were done ... */
- set_irg_outs_inconsistent(irg);
set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
if (exc_changed) {
/* ... including exception edges */
- set_irg_doms_inconsistent(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
}
} /* fix_nothrow_call_list */
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
- if (irg == get_irn_irg(node)) {
- /* A self-recursive call. The property did not depend on this call. */
- } else if (irg == NULL) {
+ if (irg == NULL) {
m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
mode = max_property(mode, m);
- } else if (irg != NULL) {
+ } else {
/* we have a graph, analyze it. */
m = check_const_or_pure_function(irg, /*top=*/0);
mode = max_property(mode, m);
{
ir_node *end, *endbl;
int j;
+ ir_entity *entity = get_irg_entity(irg);
+ ir_type *type = get_entity_type(entity);
+ size_t n_params = get_method_n_params(type);
+ size_t i;
+ mtp_additional_properties may_be_const = mtp_property_const;
mtp_additional_properties prop = get_irg_additional_properties(irg);
+ /* libfirm handles aggregate parameters by passing around pointers to
+ * stuff in memory, so if we have compound parameters we are never const */
+ for (i = 0; i < n_params; ++i) {
+ ir_type *param = get_method_param_type(type, i);
+ if (is_compound_type(param)) {
+ prop &= ~mtp_property_const;
+ may_be_const = mtp_no_property;
+ }
+ }
+
if (prop & mtp_property_const) {
/* already marked as a const function */
return mtp_property_const;
return mtp_no_property;
}
if (IS_IRG_BUSY(irg)) {
- /* we are still evaluate this method. Be optimistic,
- return the best possible so far but mark the result as temporary. */
- return mtp_temporary | mtp_property_const;
+ /* We are still evaluating this method.
+ * The function (indirectly) calls itself and thus may not terminate.
+ */
+ return mtp_no_property;
}
SET_IRG_BUSY(irg);
end = get_irg_end(irg);
endbl = get_nodes_block(end);
- prop = mtp_property_const;
+ prop = may_be_const;
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
}
}
- if (prop != mtp_no_property) {
- if (top || (prop & mtp_temporary) == 0) {
- /* We use the temporary flag here to mark optimistic result.
- Set the property only if we are sure that it does NOT base on
- temporary results OR if we are at top-level. */
- add_irg_additional_properties(irg, prop & ~mtp_temporary);
- SET_IRG_READY(irg);
+ if (top) {
+ /* Set the property only if we are at top-level. */
+ if (prop != mtp_no_property) {
+ add_irg_additional_properties(irg, prop);
}
- }
- if (top)
SET_IRG_READY(irg);
+ }
CLEAR_IRG_BUSY(irg);
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
return prop;
*/
static void handle_const_Calls(env_t *ctx)
{
- int i;
+ size_t i, n;
ctx->n_calls_SymConst = 0;
ctx->n_calls_Sel = 0;
/* all calls of const functions can be transformed */
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
ctx->float_const_call_list = NULL;
*/
static void handle_nothrow_Calls(env_t *ctx)
{
- int i;
+ size_t i, n;
ctx->n_calls_SymConst = 0;
ctx->n_calls_Sel = 0;
/* all calls of const functions can be transformed */
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
ctx->nothrow_call_list = NULL;
/* Firm style high-level allocation */
return 1;
}
- if (is_alloc_entity != NULL && is_Call(node)) {
- ir_node *ptr = get_Call_ptr(node);
-
- if (is_Global(ptr)) {
- ir_entity *ent = get_Global_entity(ptr);
- return is_alloc_entity(ent);
- }
- }
+ /* TODO: check mtp_malloc */
return 0;
-} /* is_malloc_call_result */
+}
/**
* Update a property depending on a call property.
ptr = get_Call_ptr(succ);
if (is_Global(ptr)) {
ir_entity *ent = get_Global_entity(ptr);
- int i;
+ size_t i;
/* we know the called entity */
- for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
- if (get_Call_param(succ, i) == n) {
+ for (i = get_Call_n_params(succ); i > 0;) {
+ if (get_Call_param(succ, --i) == n) {
/* n is the i'th param of the call */
if (get_method_param_access(ent, i) & ptr_access_store) {
/* n is store in ent */
static mtp_additional_properties check_stored_result(ir_graph *irg)
{
ir_node *end_blk = get_irg_end_block(irg);
- int i, j;
+ int i;
mtp_additional_properties res = ~mtp_no_property;
int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
ir_node *pred = get_Block_cfgpred(end_blk, i);
+ size_t j;
if (! is_Return(pred))
continue;
- for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
- const ir_node *irn = get_Return_res(pred, j);
+ for (j = get_Return_n_ress(pred); j > 0;) {
+ const ir_node *irn = get_Return_res(pred, --j);
if (is_stored(irn)) {
/* bad, might create an alias */
ir_node *end_blk = get_irg_end_block(irg);
ir_entity *ent;
ir_type *mtp;
- int i, j;
+ int i;
if (IS_IRG_READY(irg)) {
/* already checked */
if (is_Return(pred)) {
if (curr_prop & mtp_property_malloc) {
+ size_t j;
+
/* check, if malloc is called here */
- for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
- ir_node *res = get_Return_res(pred, j);
+ for (j = get_Return_n_ress(pred); j > 0;) {
+ ir_node *res = get_Return_res(pred, --j);
/* skip Confirms and Casts */
res = skip_HighLevel_ops(res);
is_Sel(ptr) &&
get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
/* check if all possible callees are malloc functions. */
- int i, n_callees = get_Call_n_callees(res);
+ size_t i, n_callees = get_Call_n_callees(res);
if (n_callees == 0) {
/* This is kind of strange: dying code or a Call that will raise an exception
when executed as there is no implementation to call. So better not
is_Sel(ptr) &&
get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
/* check if all possible callees are nothrow functions. */
- int i, n_callees = get_Call_n_callees(pred);
+ size_t i, n_callees = get_Call_n_callees(pred);
if (n_callees == 0) {
/* This is kind of strange: dying code or a Call that will raise an exception
when executed as there is no implementation to call. So better not
/*
* optimize function calls by handling const functions
*/
-void optimize_funccalls(int force_run, check_alloc_entity_func callback)
+void optimize_funccalls(void)
{
- int i, last_idx;
- unsigned num_const = 0;
- unsigned num_pure = 0;
- unsigned num_nothrow = 0;
- unsigned num_malloc = 0;
-
- is_alloc_entity = callback;
+ size_t i, n;
+ size_t last_idx;
+ env_t ctx;
+ size_t num_const = 0;
+ size_t num_pure = 0;
+ size_t num_nothrow = 0;
+ size_t num_malloc = 0;
/* prepare: mark all graphs as not analyzed */
last_idx = get_irp_last_idx();
/* first step: detect, which functions are nothrow or malloc */
DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
/* second step: remove exception edges: this must be done before the
detection of const and pure functions take place. */
- if (force_run || num_nothrow > 0) {
- env_t ctx;
-
- handle_nothrow_Calls(&ctx);
- DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
- DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
- ctx.n_calls_SymConst, ctx.n_calls_Sel));
- } else {
- DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
- }
+ handle_nothrow_Calls(&ctx);
+ DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
+ DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
+ ctx.n_calls_SymConst, ctx.n_calls_Sel));
rbitset_clear_all(ready_set, last_idx);
rbitset_clear_all(busy_set, last_idx);
/* third step: detect, which functions are const or pure */
DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
}
}
- if (force_run || num_const > 0) {
- env_t ctx;
+ handle_const_Calls(&ctx);
+ DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n", num_const, num_pure));
+ DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to const functions.\n",
+ ctx.n_calls_SymConst, ctx.n_calls_Sel));
- handle_const_Calls(&ctx);
- DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
- DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
- ctx.n_calls_SymConst, ctx.n_calls_Sel));
- } else {
- DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
- }
xfree(busy_set);
xfree(ready_set);
-} /* optimize_funccalls */
+}
/* initialize the funccall optimization */
void firm_init_funccalls(void)
{
FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
-} /* firm_init_funccalls */
-
-typedef struct pass_t {
- ir_prog_pass_t pass;
- int force_run;
- check_alloc_entity_func callback;
-} pass_t;
-
-/**
- * Wrapper for running optimize_funccalls() as an ir_prog pass.
- */
-static int pass_wrapper(ir_prog *irp, void *context)
-{
- pass_t *pass = (pass_t*)context;
-
- (void)irp;
- optimize_funccalls(pass->force_run, pass->callback);
- return 0;
-} /* pass_wrapper */
+}
/* Creates an ir_prog pass for optimize_funccalls. */
-ir_prog_pass_t *optimize_funccalls_pass(
- const char *name,
- int force_run, check_alloc_entity_func callback)
+ir_prog_pass_t *optimize_funccalls_pass(const char *name)
{
- struct pass_t *pass = XMALLOCZ(struct pass_t);
-
- pass->force_run = force_run;
- pass->callback = callback;
-
- return def_prog_pass_constructor(
- &pass->pass, name ? name : "funccall", pass_wrapper);
-} /* optimize_funccalls_pass */
+ return def_prog_pass(name ? name : "funccall", optimize_funccalls);
+}