/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
*
* A fast and simple Escape analysis.
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include "iroptimize.h"
#include "analyze_irg_args.h"
#include "irgmod.h"
#include "ircons.h"
+#include "irprintf.h"
#include "debug.h"
+#include "error.h"
/**
* walker environment
*/
-typedef struct _walk_env {
+typedef struct walk_env {
ir_node *found_allocs; /**< list of all found non-escaped allocs */
ir_node *dead_allocs; /**< list of all found dead alloc */
check_alloc_entity_func callback; /**< callback that checks a given entity for allocation */
/* these fields are only used in the global escape analysis */
ir_graph *irg; /**< the irg for this environment */
- struct _walk_env *next; /**< for linking environments */
+ struct walk_env *next; /**< for linking environments */
} walk_env_t;
/* Hmm: no ProjX from a Raise? This should be a verification
* error. For now we just assert and return.
*/
- assert(! "No ProjX after Raise found");
- return 1;
+ panic("No ProjX after Raise found");
}
if (get_irn_n_outs(proj) != 1) {
/* Hmm: more than one user of ProjX: This is a verification
* error.
*/
- assert(! "More than one user of ProjX");
- return 1;
+ panic("More than one user of ProjX");
}
n = get_irn_out(proj, 0);
return 0;
}
+/**
+ * Returns the Alloc node if the address @p adr is a simple Sel
+ * (no array indexes) whose pointer comes from an Alloc.
+ * NOTE: the current implementation only reports the dependency
+ * via ir_printf and always returns NULL.
+ */
+static ir_node *is_depend_alloc(ir_node *adr)
+{
+ ir_node *alloc;
+
+ if (!is_Sel(adr))
+ return NULL;
+
+ /* should be a simple Sel */
+ if (get_Sel_n_indexs(adr) != 0)
+ return NULL;
+
+ alloc = skip_Proj(get_Sel_ptr(adr));
+ if (!is_Alloc(alloc))
+ return NULL;
+
+ /* hmm, we depend on this Alloc */
+ ir_printf("depend alloc %+F\n", alloc);
+
+ return NULL;
+}
+
/**
* determine if a value calculated by n "escape", ie
* is stored somewhere we could not track
*/
-static int can_escape(ir_node *n) {
- int i, j, k;
+static int can_escape(ir_node *n)
+{
+ int i;
/* should always be pointer mode or we made some mistake */
assert(mode_is_reference(get_irn_mode(n)));
switch (get_irn_opcode(succ)) {
case iro_Store:
if (get_Store_value(succ) == n) {
+ ir_node *adr = get_Store_ptr(succ);
+
+ /*
+ * if this Alloc depends on another one,
+ * we can enqueue it
+ */
+ if (is_depend_alloc(adr))
+ break;
+
/*
* We are storing n. As long as we do not further
* evaluate things, the pointer 'escape' here
ir_node *ptr = get_Call_ptr(succ);
ir_entity *ent;
- if (get_irn_op(ptr) == op_SymConst &&
- get_SymConst_kind(ptr) == symconst_addr_ent) {
+ if (is_SymConst_addr_ent(ptr)) {
+ size_t j;
ent = get_SymConst_entity(ptr);
/* we know the called entity */
- for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
- if (get_Call_param(succ, j) == n) {
+ for (j = get_Call_n_params(succ); j > 0;) {
+ if (get_Call_param(succ, --j) == n) {
/* n is the j'th param of the call */
if (get_method_param_access(ent, j) & ptr_access_store)
/* n is store in ent */
return 1;
}
}
- }
- else if (get_irn_op(ptr) == op_Sel) {
+ } else if (is_Sel(ptr)) {
+ size_t k;
+
/* go through all possible callees */
- for (k = get_Call_n_callees(succ) - 1; k >= 0; --k) {
- ent = get_Call_callee(succ, k);
+ for (k = get_Call_n_callees(succ); k > 0;) {
+ size_t j;
+ ent = get_Call_callee(succ, --k);
if (ent == unknown_entity) {
/* we don't know what will be called, a possible escape */
return 1;
}
- for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
- if (get_Call_param(succ, j) == n) {
+ for (j = get_Call_n_params(succ); j > 0;) {
+ if (get_Call_param(succ, --j) == n) {
/* n is the j'th param of the call */
if (get_method_param_access(ent, j) & ptr_access_store)
/* n is store in ent */
}
}
}
- }
- else /* we don't know want will called */
+ } else /* we don't know what will be called */
return 1;
break;
case iro_Tuple: {
ir_node *proj;
+ int j, k;
/* Bad: trace the tuple backwards */
for (j = get_irn_arity(succ) - 1; j >= 0; --j)
*/
static void find_allocations(ir_node *alloc, void *ctx)
{
+ walk_env_t *env = (walk_env_t*)ctx;
int i;
ir_node *adr;
- walk_env_t *env = ctx;
if (! is_Alloc(alloc))
return;
*/
static void find_allocation_calls(ir_node *call, void *ctx)
{
+ walk_env_t *env = (walk_env_t*)ctx;
int i;
ir_node *adr;
ir_entity *ent;
- walk_env_t *env = ctx;
if (! is_Call(call))
return;
adr = get_Call_ptr(call);
- if (! is_SymConst(adr) || get_SymConst_kind(adr) != symconst_addr_ent)
+ if (! is_SymConst_addr_ent(adr))
return;
ent = get_SymConst_entity(adr);
if (! env->callback(ent))
/* kill all dead allocs */
for (alloc = env->dead_allocs; alloc; alloc = next) {
- next = get_irn_link(alloc);
+ next = (ir_node*)get_irn_link(alloc);
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));
blk = get_nodes_block(alloc);
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
- set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+ set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
++env->nr_deads;
}
/* convert all non-escaped heap allocs into frame variables */
ftp = get_irg_frame_type(irg);
for (alloc = env->found_allocs; alloc; alloc = next) {
- next = get_irn_link(alloc);
- size = get_Alloc_size(alloc);
+ next = (ir_node*)get_irn_link(alloc);
+ size = get_Alloc_count(alloc);
atp = get_Alloc_type(alloc);
tp = NULL;
/* if the size is a type size and the types matched */
assert(atp == get_SymConst_type(size));
tp = atp;
- }
- else if (is_Const(size)) {
- tarval *tv = get_Const_tarval(size);
+ } else if (is_Const(size)) {
+ ir_tarval *tv = get_Const_tarval(size);
if (tv != tarval_bad && tarval_is_long(tv) &&
get_type_state(atp) == layout_fixed &&
- get_tarval_long(tv) == get_type_size_bytes(atp)) {
+ (unsigned)get_tarval_long(tv) == get_type_size_bytes(atp)) {
/* a already lowered type size */
tp = atp;
}
DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on frame\n", irg, alloc, tp));
snprintf(name, sizeof(name), "%s_NE_%u", get_entity_name(get_irg_entity(irg)), nr++);
+ name[sizeof(name) - 1] = '\0';
ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);
- sel = new_rd_simpleSel(dbg, irg, get_nodes_block(alloc),
- get_irg_no_mem(irg), get_irg_frame(irg), ent);
+ sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
mem = get_Alloc_mem(alloc);
turn_into_tuple(alloc, pn_Alloc_max);
set_Tuple_pred(alloc, pn_Alloc_M, mem);
- set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
- set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+ set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(alloc, pn_Alloc_res, sel);
++env->nr_removed;
/* kill all dead allocs */
for (call = env->dead_allocs; call; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));
mem = get_Call_mem(call);
blk = get_nodes_block(call);
turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M_regular, mem);
- set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, blk));
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
- set_Tuple_pred(call, pn_Call_M_except, mem);
- set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
++env->nr_deads;
}
/* convert all non-escaped heap allocs into frame variables */
ftp = get_irg_frame_type(irg);
for (call = env->found_allocs; call; call = next) {
- next = get_irn_link(call);
+ next = (ir_node*)get_irn_link(call);
}
}
/* Do simple and fast escape analysis for all graphs. */
void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
{
- ir_graph *irg;
- int i;
+ size_t i, n;
struct obstack obst;
walk_env_t *env, *elist;
(void) run_scalar_replace;
obstack_init(&obst);
elist = NULL;
- env = obstack_alloc(&obst, sizeof(*env));
+ env = OALLOC(&obst, walk_env_t);
env->found_allocs = NULL;
env->dead_allocs = NULL;
env->callback = callback;
- for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
- irg = get_irp_irg(i);
+ for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
+ ir_graph *irg = get_irp_irg(i);
assure_irg_outs(irg);
elist = env;
- env = obstack_alloc(&obst, sizeof(*env));
+ env = OALLOC(&obst, walk_env_t);
env->found_allocs = NULL;
env->dead_allocs = NULL;
env->callback = callback;