*/
static void collect_const_and_pure_calls(ir_node *node, void *env)
{
- env_t *ctx = env;
- ir_node *call, *ptr;
+ env_t *ctx = (env_t*)env;
+ ir_node *call;
+ ir_node *ptr;
ir_entity *ent;
unsigned and_prop, or_prop, prop;
++ctx->n_calls_SymConst;
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
- get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are const functions, we can remove the memory edge. */
int i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
{
ir_node *call, *next, *mem, *proj;
int exc_changed = 0;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
/* First step: fix all calls by removing their memory input and let
* them floating.
* The original memory input is preserved in their link fields. */
for (call = ctx->float_const_call_list; call != NULL; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
mem = get_Call_mem(call);
set_irn_link(call, mem);
/* Last step: fix all Proj's */
for (proj = ctx->proj_list; proj != NULL; proj = next) {
- next = get_irn_link(proj);
+ next = (ir_node*)get_irn_link(proj);
call = get_Proj_pred(proj);
- mem = get_irn_link(call);
+ mem = (ir_node*)get_irn_link(call);
/* beware of calls in the pure call list */
if (!mem || is_Call(mem))
/* ... including exception edges */
set_irg_doms_inconsistent(irg);
}
- current_ir_graph = rem;
} /* fix_const_call_list */
/**
*/
static void collect_nothrow_calls(ir_node *node, void *env)
{
- env_t *ctx = env;
+ env_t *ctx = (env_t*)env;
ir_node *call, *ptr;
ir_entity *ent;
unsigned prop;
++ctx->n_calls_SymConst;
} else if (get_opt_closed_world() &&
is_Sel(ptr) &&
- get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+ get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
/* If all possible callees are nothrow functions, we can remove the exception edge. */
int i, n_callees = get_Call_n_callees(call);
if (n_callees == 0) {
{
ir_node *call, *next, *proj;
int exc_changed = 0;
- ir_graph *rem = current_ir_graph;
-
- current_ir_graph = irg;
/* First step: go through the list of calls and mark them. */
for (call = call_list; call; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
/* current_ir_graph is in memory anyway, so it's a good marker */
	set_irn_link(call, &current_ir_graph);
/* Second step: Remove all exception Proj's */
for (proj = proj_list; proj; proj = next) {
- next = get_irn_link(proj);
+ next = (ir_node*)get_irn_link(proj);
call = get_Proj_pred(proj);
/* handle only marked calls */
/* ... including exception edges */
set_irg_doms_inconsistent(irg);
}
- current_ir_graph = rem;
} /* fix_nothrow_call_list */
/* marking */
#define IS_IRG_BUSY(irg) rbitset_is_set(busy_set, get_irg_idx(irg))
/* forward */
-static unsigned check_const_or_pure_function(ir_graph *irg, int top);
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
/**
* Calculate the bigger property of two. Handle the temporary flag right.
*/
-static unsigned max_property(unsigned a, unsigned b)
+static mtp_additional_properties max_property(mtp_additional_properties a,
+ mtp_additional_properties b)
{
- unsigned r, t = (a | b) & mtp_temporary;
+ mtp_additional_properties r;
+ mtp_additional_properties t = (a | b) & mtp_temporary;
a &= ~mtp_temporary;
b &= ~mtp_temporary;
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned _follow_mem(ir_node *node)
+static mtp_additional_properties follow_mem_(ir_node *node)
{
- unsigned m, mode = mtp_property_const;
+ mtp_additional_properties mode = mtp_property_const;
+ mtp_additional_properties m;
ir_node *ptr;
int i;
case iro_Sync:
/* do a dfs search */
for (i = get_irn_arity(node) - 1; i >= 0; --i) {
- m = _follow_mem(get_irn_n(node, i));
+ m = follow_mem_(get_irn_n(node, i));
mode = max_property(mode, m);
if (mode == mtp_no_property)
return mtp_no_property;
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
- if (irg == current_ir_graph) {
+ if (irg == get_irn_irg(node)) {
/* A self-recursive call. The property did not depend on this call. */
} else if (irg == NULL) {
m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
return mtp_no_property;
}
}
-} /* _follow_mem */
+}
/**
* Follow the memory chain starting at node and determine
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned follow_mem(ir_node *node, unsigned mode)
+static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
{
- unsigned m;
-
- m = _follow_mem(node);
+ mtp_additional_properties m = follow_mem_(node);
return max_property(mode, m);
-} /* follow_mem */
+}
/**
* Check if a graph represents a const or a pure function.
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_const_or_pure_function(ir_graph *irg, int top)
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
{
ir_node *end, *endbl;
int j;
- unsigned prop = get_irg_additional_properties(irg);
- ir_graph *rem = current_ir_graph;
+ mtp_additional_properties prop = get_irg_additional_properties(irg);
if (prop & mtp_property_const) {
/* already marked as a const function */
endbl = get_nodes_block(end);
prop = mtp_property_const;
- current_ir_graph = irg;
-
ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
/* mark the initial mem: recursion of follow_mem() stops here */
/* visit every Return */
for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
ir_node *node = get_Block_cfgpred(endbl, j);
- ir_opcode code = get_irn_opcode(node);
+ unsigned code = get_irn_opcode(node);
ir_node *mem;
/* Bad nodes usually do NOT produce anything, so it's ok */
/* We use the temporary flag here to mark optimistic result.
Set the property only if we are sure that it does NOT base on
temporary results OR if we are at top-level. */
- set_irg_additional_property(irg, prop & ~mtp_temporary);
+ add_irg_additional_properties(irg, prop & ~mtp_temporary);
SET_IRG_READY(irg);
}
}
SET_IRG_READY(irg);
CLEAR_IRG_BUSY(irg);
ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
- current_ir_graph = rem;
return prop;
} /* check_const_or_pure_function */
/**
* Update a property depending on a call property.
*/
-static unsigned update_property(unsigned orig_prop, unsigned call_prop)
+static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
{
- unsigned t = (orig_prop | call_prop) & mtp_temporary;
- unsigned r = orig_prop & call_prop;
+ mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
+ mtp_additional_properties r = orig_prop & call_prop;
return r | t;
-} /** update_property */
+}
/**
* Check if a node is stored.
*
* return ~mtp_property_malloc if return values are stored, ~0 else
*/
-static unsigned check_stored_result(ir_graph *irg)
+static mtp_additional_properties check_stored_result(ir_graph *irg)
{
ir_node *end_blk = get_irg_end_block(irg);
int i, j;
- unsigned res = ~0;
+ mtp_additional_properties res = ~mtp_no_property;
int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
if (! old_edges)
edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
return res;
-} /* check_stored_result */
+}
/**
* Check if a graph represents a nothrow or a malloc function.
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top)
+static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
{
- ir_node *end_blk = get_irg_end_block(irg);
+ mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
+ ir_node *end_blk = get_irg_end_block(irg);
ir_entity *ent;
ir_type *mtp;
int i, j;
- unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
if (IS_IRG_READY(irg)) {
/* already checked */
if (callee == irg) {
/* A self-recursive call. The property did not depend on this call. */
} else if (callee != NULL) {
- unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
+ mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
curr_prop = update_property(curr_prop, prop);
} else {
curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
/* A self-recursive call. The property did not depend on this call. */
} else if (callee != NULL) {
/* Note: we check here for nothrow only, so do NOT reset the malloc property */
- unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
+ mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
curr_prop = update_property(curr_prop, prop);
} else {
if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
/* We use the temporary flag here to mark an optimistic result.
Set the property only if we are sure that it does NOT base on
temporary results OR if we are at top-level. */
- set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
+ add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
SET_IRG_READY(irg);
}
}
root_loop = get_irg_loop(irg);
if (root_loop->flags & loop_outer_loop)
- set_irg_additional_property(irg, mtp_property_has_loop);
+ add_irg_additional_properties(irg, mtp_property_has_loop);
}
/*
FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
} /* firm_init_funccalls */
-struct pass_t {
+typedef struct pass_t {
ir_prog_pass_t pass;
int force_run;
check_alloc_entity_func callback;
-};
+} pass_t;
/**
* Wrapper for running optimize_funccalls() as an ir_prog pass.
*/
static int pass_wrapper(ir_prog *irp, void *context)
{
- struct pass_t *pass = context;
+ pass_t *pass = (pass_t*)context;
(void)irp;
optimize_funccalls(pass->force_run, pass->callback);