/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
* The walker environment for updating function calls.
*/
typedef struct env_t {
- unsigned n_calls_SymConst;
- unsigned n_calls_Sel;
+ size_t n_calls_SymConst;
+ size_t n_calls_Sel;
ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */
ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */
*/
static void collect_const_and_pure_calls(ir_node *node, void *env)
{
- env_t *ctx = env;
- ir_node *call, *ptr;
+ env_t *ctx = (env_t*)env;
+ ir_node *call;
+ ir_node *ptr;
ir_entity *ent;
unsigned and_prop, or_prop, prop;
++ctx->n_calls_SymConst;
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
- get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are const functions, we can remove the memory edge. */
int i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
{
ir_node *call, *next, *mem, *proj;
int exc_changed = 0;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
/* First step: fix all calls by removing their memory input and let
* them floating.
* The original memory input is preserved in their link fields. */
for (call = ctx->float_const_call_list; call != NULL; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
mem = get_Call_mem(call);
set_irn_link(call, mem);
/* Last step: fix all Proj's */
for (proj = ctx->proj_list; proj != NULL; proj = next) {
- next = get_irn_link(proj);
+ next = (ir_node*)get_irn_link(proj);
call = get_Proj_pred(proj);
- mem = get_irn_link(call);
+ mem = (ir_node*)get_irn_link(call);
/* beware of calls in the pure call list */
if (!mem || is_Call(mem))
/* ... including exception edges */
set_irg_doms_inconsistent(irg);
}
- current_ir_graph = rem;
} /* fix_const_call_list */
/**
*/
static void collect_nothrow_calls(ir_node *node, void *env)
{
- env_t *ctx = env;
+ env_t *ctx = (env_t*)env;
ir_node *call, *ptr;
ir_entity *ent;
unsigned prop;
++ctx->n_calls_SymConst;
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
- get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are nothrow functions, we can remove the exception edge. */
int i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
{
ir_node *call, *next, *proj;
int exc_changed = 0;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
/* First step: go through the list of calls and mark them. */
for (call = call_list; call; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
/* current_ir_graph is in memory anyway, so it's a good marker */
set_irn_link(call, &current_ir_graph);
/* Second step: Remove all exception Proj's */
for (proj = proj_list; proj; proj = next) {
- next = get_irn_link(proj);
+ next = (ir_node*)get_irn_link(proj);
call = get_Proj_pred(proj);
/* handle only marked calls */
/* ... including exception edges */
set_irg_doms_inconsistent(irg);
}
- current_ir_graph = rem;
} /* fix_nothrow_call_list */
/* marking */
-#define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
+#define SET_IRG_READY(irg) rbitset_set(ready_set, get_irg_idx(irg))
#define IS_IRG_READY(irg) rbitset_is_set(ready_set, get_irg_idx(irg))
#define SET_IRG_BUSY(irg) rbitset_set(busy_set, get_irg_idx(irg))
#define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
#define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
/* forward */
-static unsigned check_const_or_pure_function(ir_graph *irg, int top);
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
/**
* Calculate the bigger property of two. Handle the temporary flag right.
*/
-static unsigned max_property(unsigned a, unsigned b)
+static mtp_additional_properties max_property(mtp_additional_properties a,
+ mtp_additional_properties b)
{
- unsigned r, t = (a | b) & mtp_temporary;
+ mtp_additional_properties r;
+ mtp_additional_properties t = (a | b) & mtp_temporary;
a &= ~mtp_temporary;
b &= ~mtp_temporary;
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned _follow_mem(ir_node *node)
+static mtp_additional_properties follow_mem_(ir_node *node)
{
- unsigned m, mode = mtp_property_const;
+ mtp_additional_properties mode = mtp_property_const;
+ mtp_additional_properties m;
ir_node *ptr;
int i;
case iro_Sync:
/* do a dfs search */
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
- m = _follow_mem(get_irn_n(node, i));
+ m = follow_mem_(get_irn_n(node, i));
mode = max_property(mode, m);
if (mode == mtp_no_property)
return mtp_no_property;
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
- if (irg == current_ir_graph) {
+ if (irg == get_irn_irg(node)) {
/* A self-recursive call. The property did not depend on this call. */
} else if (irg == NULL) {
m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
return mtp_no_property;
}
}
-} /* _follow_mem */
+}
/**
* Follow the memory chain starting at node and determine
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned follow_mem(ir_node *node, unsigned mode)
+static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
{
- unsigned m;
-
- m = _follow_mem(node);
+ mtp_additional_properties m = follow_mem_(node);
return max_property(mode, m);
-} /* follow_mem */
+}
/**
* Check if a graph represents a const or a pure function.
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_const_or_pure_function(ir_graph *irg, int top)
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
{
ir_node *end, *endbl;
int j;
- unsigned prop = get_irg_additional_properties(irg);
- ir_graph *rem = current_ir_graph;
+ mtp_additional_properties prop = get_irg_additional_properties(irg);
if (prop & mtp_property_const) {
/* already marked as a const function */
endbl = get_nodes_block(end);
prop = mtp_property_const;
- current_ir_graph = irg;
-
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
/* mark the initial mem: recursion of follow_mem() stops here */
/* visit every Return */
for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
ir_node *node = get_Block_cfgpred(endbl, j);
- ir_opcode code = get_irn_opcode(node);
+ unsigned code = get_irn_opcode(node);
ir_node *mem;
/* Bad nodes usually do NOT produce anything, so it's ok */
/* We use the temporary flag here to mark optimistic result.
Set the property only if we are sure that it does NOT base on
temporary results OR if we are at top-level. */
- set_irg_additional_property(irg, prop & ~mtp_temporary);
+ add_irg_additional_properties(irg, prop & ~mtp_temporary);
SET_IRG_READY(irg);
}
}
SET_IRG_READY(irg);
CLEAR_IRG_BUSY(irg);
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
- current_ir_graph = rem;
return prop;
} /* check_const_or_pure_function */
*/
static void handle_const_Calls(env_t *ctx)
{
- int i;
+ size_t i, n;
ctx->n_calls_SymConst = 0;
ctx->n_calls_Sel = 0;
/* all calls of const functions can be transformed */
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
ctx->float_const_call_list = NULL;
*/
static void handle_nothrow_Calls(env_t *ctx)
{
- int i;
+ size_t i, n;
ctx->n_calls_SymConst = 0;
ctx->n_calls_Sel = 0;
/* all calls of const functions can be transformed */
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
ctx->nothrow_call_list = NULL;
/**
* Update a property depending on a call property.
*/
-static unsigned update_property(unsigned orig_prop, unsigned call_prop)
+static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
{
- unsigned t = (orig_prop | call_prop) & mtp_temporary;
- unsigned r = orig_prop & call_prop;
+ mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
+ mtp_additional_properties r = orig_prop & call_prop;
return r | t;
-} /** update_property */
+}
/**
* Check if a node is stored.
*
* return ~mtp_property_malloc if return values are stored, ~0 else
*/
-static unsigned check_stored_result(ir_graph *irg)
+static mtp_additional_properties check_stored_result(ir_graph *irg)
{
ir_node *end_blk = get_irg_end_block(irg);
int i, j;
- unsigned res = ~0;
+ mtp_additional_properties res = ~mtp_no_property;
int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
if (! old_edges)
edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
return res;
-} /* check_stored_result */
+}
/**
* Check if a graph represents a nothrow or a malloc function.
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top)
+static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
{
- ir_node *end_blk = get_irg_end_block(irg);
+ mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
+ ir_node *end_blk = get_irg_end_block(irg);
ir_entity *ent;
ir_type *mtp;
int i, j;
- unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
if (IS_IRG_READY(irg)) {
/* already checked */
if (callee == irg) {
/* A self-recursive call. The property did not depend on this call. */
} else if (callee != NULL) {
- unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
+ mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
curr_prop = update_property(curr_prop, prop);
} else {
curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
/* A self-recursive call. The property did not depend on this call. */
} else if (callee != NULL) {
/* Note: we check here for nothrow only, so do NOT reset the malloc property */
- unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
+ mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
curr_prop = update_property(curr_prop, prop);
} else {
if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
/* We use the temporary flag here to mark an optimistic result.
Set the property only if we are sure that it does NOT base on
temporary results OR if we are at top-level. */
- set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
+ add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
SET_IRG_READY(irg);
}
}
root_loop = get_irg_loop(irg);
if (root_loop->flags & loop_outer_loop)
- set_irg_additional_property(irg, mtp_property_has_loop);
+ add_irg_additional_properties(irg, mtp_property_has_loop);
}
/*
*/
void optimize_funccalls(int force_run, check_alloc_entity_func callback)
{
- int i, last_idx;
- unsigned num_const = 0;
- unsigned num_pure = 0;
- unsigned num_nothrow = 0;
- unsigned num_malloc = 0;
+ size_t i, n;
+ int last_idx;
+ size_t num_const = 0;
+ size_t num_pure = 0;
+ size_t num_nothrow = 0;
+ size_t num_malloc = 0;
is_alloc_entity = callback;
/* first step: detect, which functions are nothrow or malloc */
DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
env_t ctx;
handle_nothrow_Calls(&ctx);
- DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
- DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
+ DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
+ DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
ctx.n_calls_SymConst, ctx.n_calls_Sel));
} else {
DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
/* third step: detect, which functions are const or pure */
DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
env_t ctx;
handle_const_Calls(&ctx);
- DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
+ DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n", num_const, num_pure));
DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
ctx.n_calls_SymConst, ctx.n_calls_Sel));
} else {
FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
} /* firm_init_funccalls */
-struct pass_t {
+typedef struct pass_t {
ir_prog_pass_t pass;
int force_run;
check_alloc_entity_func callback;
-};
+} pass_t;
/**
* Wrapper for running optimize_funccalls() as an ir_prog pass.
*/
static int pass_wrapper(ir_prog *irp, void *context)
{
- struct pass_t *pass = context;
+ pass_t *pass = (pass_t*)context;
(void)irp;
optimize_funccalls(pass->force_run, pass->callback);