/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
*/
static int can_escape(ir_node *n)
{
- int i, j, k;
+ int i;
/* should always be pointer mode or we made some mistake */
assert(mode_is_reference(get_irn_mode(n)));
ir_entity *ent;
if (is_SymConst_addr_ent(ptr)) {
+ size_t j;
ent = get_SymConst_entity(ptr);
/* we know the called entity */
- for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
- if (get_Call_param(succ, j) == n) {
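+ /* the parameter count is unsigned; test j > 0 and pre-decrement in
+  * the body so the index can never wrap below zero */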
+ for (j = get_Call_n_params(succ); j > 0;) {
+ if (get_Call_param(succ, --j) == n) {
/* n is the j'th param of the call */
if (get_method_param_access(ent, j) & ptr_access_store)
/* n is stored in ent */
return 1;
}
}
- }
- else if (is_Sel(ptr)) {
+ } else if (is_Sel(ptr)) {
+ size_t k;
+
/* go through all possible callees */
- for (k = get_Call_n_callees(succ) - 1; k >= 0; --k) {
- ent = get_Call_callee(succ, k);
+ for (k = get_Call_n_callees(succ); k > 0;) {
+ size_t j;
+ ent = get_Call_callee(succ, --k);
if (ent == unknown_entity) {
/* we don't know what will be called, a possible escape */
return 1;
}
- for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
- if (get_Call_param(succ, j) == n) {
+ for (j = get_Call_n_params(succ); j > 0;) {
+ if (get_Call_param(succ, --j) == n) {
/* n is the j'th param of the call */
if (get_method_param_access(ent, j) & ptr_access_store)
/* n is stored in ent */
return 1;
}
}
}
- }
- else /* we don't know what will be called */
+ } else /* we don't know what will be called */
return 1;
break;
case iro_Tuple: {
ir_node *proj;
+ int j, k;
/* Bad: trace the tuple backwards */
for (j = get_irn_arity(succ) - 1; j >= 0; --j)
*/
static void find_allocations(ir_node *alloc, void *ctx)
{
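+ /* the walker passes the environment as void*; an explicit cast also
+  * keeps the file compilable as C++ */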
+ walk_env_t *env = (walk_env_t*)ctx;
int i;
ir_node *adr;
- walk_env_t *env = ctx;
if (! is_Alloc(alloc))
return;
*/
static void find_allocation_calls(ir_node *call, void *ctx)
{
+ walk_env_t *env = (walk_env_t*)ctx;
int i;
ir_node *adr;
ir_entity *ent;
- walk_env_t *env = ctx;
if (! is_Call(call))
return;
/* kill all dead allocs */
for (alloc = env->dead_allocs; alloc; alloc = next) {
- next = get_irn_link(alloc);
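+ /* the list of collected allocations is threaded through the nodes'
+  * link fields */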
+ next = (ir_node*)get_irn_link(alloc);
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
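+ /* Bad nodes carry a mode now; the exception successor is control
+  * flow, hence mode_X */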
+ set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
++env->nr_deads;
}
/* convert all non-escaped heap allocs into frame variables */
ftp = get_irg_frame_type(irg);
for (alloc = env->found_allocs; alloc; alloc = next) {
- next = get_irn_link(alloc);
+ next = (ir_node*)get_irn_link(alloc);
size = get_Alloc_count(alloc);
atp = get_Alloc_type(alloc);
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+ set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
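+ /* the allocation result is replaced by a Sel of the new frame entity */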
set_Tuple_pred(alloc, pn_Alloc_res, sel);
++env->nr_removed;
}
/* if allocs were removed somehow */
- if (env->nr_removed | env->nr_deads) {
- set_irg_outs_inconsistent(irg);
-
- if (env->nr_deads) {
- /* exception control flow might have been changed */
- set_irg_doms_inconsistent(irg);
- }
+ if (env->nr_deads) {
+ /* exception control flow might have been changed */
+ set_irg_doms_inconsistent(irg);
}
}
/* kill all dead allocs */
for (call = env->dead_allocs; call; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));
mem = get_Call_mem(call);
blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
++env->nr_deads;
}
/* convert all non-escaped heap allocs into frame variables */
ftp = get_irg_frame_type(irg);
for (call = env->found_allocs; call; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
}
}
/* Do simple and fast escape analysis for all graphs. */
void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
{
- ir_graph *irg;
- int i;
+ size_t i, n;
struct obstack obst;
walk_env_t *env, *elist;
(void) run_scalar_replace;
env->dead_allocs = NULL;
env->callback = callback;
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
- irg = get_irp_irg(i);
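+ /* get_irp_n_irgs() yields a size_t, so count upwards with an
+  * unsigned index instead of down past zero */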
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
+ ir_graph *irg = get_irp_irg(i);
assure_irg_outs(irg);