X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fescape_ana.c;h=ea8dd5c906acd724edf543caa616cc342f6417e4;hb=39d8838df009224ae0b10046947854977d6db4b4;hp=92a6d72b9b08a3390efbdb2c41f57b73a8750c59;hpb=0fbcef83aa6060534172bb13e71cdadb04428806;p=libfirm

diff --git a/ir/opt/escape_ana.c b/ir/opt/escape_ana.c
index 92a6d72b9..ea8dd5c90 100644
--- a/ir/opt/escape_ana.c
+++ b/ir/opt/escape_ana.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -47,11 +47,12 @@
 #include "ircons.h"
 #include "irprintf.h"
 #include "debug.h"
+#include "error.h"
 
 /**
  * walker environment
  */
-typedef struct _walk_env {
+typedef struct walk_env {
 	ir_node                *found_allocs;  /**< list of all found non-escaped allocs */
 	ir_node                *dead_allocs;   /**< list of all found dead alloc */
 	check_alloc_entity_func callback;      /**< callback that checks a given entity for allocation */
@@ -61,7 +62,7 @@ typedef struct _walk_env {
 
 	/* these fields are only used in the global escape analysis */
 	ir_graph               *irg;           /**< the irg for this environment */
-	struct _walk_env       *next;          /**< for linking environments */
+	struct walk_env        *next;          /**< for linking environments */
 } walk_env_t;
 
 
@@ -91,16 +92,14 @@ static int is_method_leaving_raise(ir_node *raise)
 		/* Hmm: no ProjX from a Raise? This should be a verification
 		 * error. For now we just assert and return.
 		 */
-		assert(! "No ProjX after Raise found");
-		return 1;
+		panic("No ProjX after Raise found");
 	}
 
 	if (get_irn_n_outs(proj) != 1) {
 		/* Hmm: more than one user of ProjX: This is a verification
 		 * error.
 		 */
-		assert(! "More than one user of ProjX");
-		return 1;
+		panic("More than one user of ProjX");
 	}
 
 	n = get_irn_out(proj, 0);
@@ -117,7 +116,8 @@ static int is_method_leaving_raise(ir_node *raise)
  * returns an Alloc node if the node adr Select
  * from one
  */
-static ir_node *is_depend_alloc(ir_node *adr) {
+static ir_node *is_depend_alloc(ir_node *adr)
+{
 	ir_node *alloc;
 
 	if (!is_Sel(adr))
@@ -141,8 +141,9 @@ static ir_node *is_depend_alloc(ir_node *adr) {
  * determine if a value calculated by n "escape", ie
  * is stored somewhere we could not track
 */
-static int can_escape(ir_node *n) {
-	int i, j, k;
+static int can_escape(ir_node *n)
+{
+	int i;
 
 	/* should always be pointer mode or we made some mistake */
 	assert(mode_is_reference(get_irn_mode(n)));
@@ -182,30 +183,33 @@ static int can_escape(ir_node *n) {
 		ir_entity *ent;
 
 		if (is_SymConst_addr_ent(ptr)) {
+			size_t j;
 			ent = get_SymConst_entity(ptr);
 
 			/* we know the called entity */
-			for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
-				if (get_Call_param(succ, j) == n) {
+			for (j = get_Call_n_params(succ); j > 0;) {
+				if (get_Call_param(succ, --j) == n) {
 					/* n is the j'th param of the call */
 					if (get_method_param_access(ent, j) & ptr_access_store)
 						/* n is store in ent */
 						return 1;
 				}
 			}
-		}
-		else if (is_Sel(ptr)) {
+		} else if (is_Sel(ptr)) {
+			size_t k;
+
 			/* go through all possible callees */
-			for (k = get_Call_n_callees(succ) - 1; k >= 0; --k) {
-				ent = get_Call_callee(succ, k);
+			for (k = get_Call_n_callees(succ); k > 0;) {
+				size_t j;
+				ent = get_Call_callee(succ, --k);
 
 				if (ent == unknown_entity) {
 					/* we don't know what will be called, a possible escape */
 					return 1;
 				}
 
-				for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
-					if (get_Call_param(succ, j) == n) {
+				for (j = get_Call_n_params(succ); j > 0;) {
+					if (get_Call_param(succ, --j) == n) {
 						/* n is the j'th param of the call */
 						if (get_method_param_access(ent, j) & ptr_access_store)
 							/* n is store in ent */
@@ -213,8 +217,7 @@ static int can_escape(ir_node *n) {
 					}
 				}
 			}
-		}
-		else /* we don't know want will called */
+		} else /* we don't know want will called */
 			return 1;
 
 		break;
@@ -232,6 +235,7 @@ static int can_escape(ir_node *n) {
 
 	case iro_Tuple: {
 		ir_node *proj;
+		int j, k;
 
 		/* Bad: trace the tuple backwards */
 		for (j = get_irn_arity(succ) - 1; j >= 0; --j)
@@ -277,9 +281,9 @@ static int can_escape(ir_node *n) {
  */
 static void find_allocations(ir_node *alloc, void *ctx)
 {
+	walk_env_t *env = (walk_env_t*)ctx;
 	int i;
 	ir_node *adr;
-	walk_env_t *env = ctx;
 
 	if (! is_Alloc(alloc))
 		return;
@@ -320,10 +324,10 @@ static void find_allocations(ir_node *alloc, void *ctx)
  */
 static void find_allocation_calls(ir_node *call, void *ctx)
 {
+	walk_env_t *env = (walk_env_t*)ctx;
 	int i;
 	ir_node *adr;
 	ir_entity *ent;
-	walk_env_t *env = ctx;
 
 	if (! is_Call(call))
 		return;
@@ -384,7 +388,7 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 
 	/* kill all dead allocs */
 	for (alloc = env->dead_allocs; alloc; alloc = next) {
-		next = get_irn_link(alloc);
+		next = (ir_node*)get_irn_link(alloc);
 
 		DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n",
 			irg, alloc));
@@ -392,8 +396,8 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 		mem = get_Alloc_mem(alloc);
 		blk = get_nodes_block(alloc);
 		turn_into_tuple(alloc, pn_Alloc_max);
 		set_Tuple_pred(alloc, pn_Alloc_M, mem);
-		set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
-		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+		set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
+		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
 		++env->nr_deads;
 	}
@@ -401,8 +405,8 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 	/* convert all non-escaped heap allocs into frame variables */
 	ftp = get_irg_frame_type(irg);
 	for (alloc = env->found_allocs; alloc; alloc = next) {
-		next = get_irn_link(alloc);
-		size = get_Alloc_size(alloc);
+		next = (ir_node*)get_irn_link(alloc);
+		size = get_Alloc_count(alloc);
 		atp  = get_Alloc_type(alloc);
 
 		tp = NULL;
@@ -410,9 +414,8 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 			/* if the size is a type size and the types matched */
 			assert(atp == get_SymConst_type(size));
 			tp = atp;
-		}
-		else if (is_Const(size)) {
-			tarval *tv = get_Const_tarval(size);
+		} else if (is_Const(size)) {
+			ir_tarval *tv = get_Const_tarval(size);
 
 			if (tv != tarval_bad && tarval_is_long(tv) &&
 				get_type_state(atp) == layout_fixed &&
@@ -433,14 +436,13 @@ static void transform_allocs(ir_graph *irg, walk_env_t *env)
 		name[sizeof(name) - 1] = '\0';
 		ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);
 
-		sel = new_rd_simpleSel(dbg, irg, get_nodes_block(alloc),
-			get_irg_no_mem(irg), get_irg_frame(irg), ent);
+		sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
 		mem = get_Alloc_mem(alloc);
 
 		turn_into_tuple(alloc, pn_Alloc_max);
 		set_Tuple_pred(alloc, pn_Alloc_M, mem);
-		set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
-		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
+		set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
+		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
 		set_Tuple_pred(alloc, pn_Alloc_res, sel);
 
 		++env->nr_removed;
@@ -479,19 +481,17 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
 	/* kill all dead allocs */
 	for (call = env->dead_allocs; call; call = next) {
-		next = get_irn_link(call);
+		next = (ir_node*)get_irn_link(call);
 
 		DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n",
 			irg, call));
 
 		mem = get_Call_mem(call);
 		blk = get_nodes_block(call);
 		turn_into_tuple(call, pn_Call_max);
-		set_Tuple_pred(call, pn_Call_M_regular, mem);
-		set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, blk));
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
-		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
-		set_Tuple_pred(call, pn_Call_M_except, mem);
-		set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
+		set_Tuple_pred(call, pn_Call_M, mem);
+		set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
+		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
 
 		++env->nr_deads;
 	}
@@ -499,7 +499,7 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
 	/* convert all non-escaped heap allocs into frame variables */
 	ftp = get_irg_frame_type(irg);
 	for (call = env->found_allocs; call; call = next) {
-		next = get_irn_link(call);
+		next = (ir_node*)get_irn_link(call);
 	}
 }
 
@@ -539,8 +539,7 @@ void escape_enalysis_irg(ir_graph *irg, check_alloc_entity_func callback)
 /* Do simple and fast escape analysis for all graphs. */
 void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
 {
-	ir_graph *irg;
-	int i;
+	size_t i, n;
 	struct obstack obst;
 	walk_env_t *env, *elist;
 	(void) run_scalar_replace;
@@ -561,13 +560,13 @@ void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
 	obstack_init(&obst);
 	elist = NULL;
 
-	env = obstack_alloc(&obst, sizeof(*env));
+	env = OALLOC(&obst, walk_env_t);
 	env->found_allocs = NULL;
 	env->dead_allocs  = NULL;
 	env->callback     = callback;
 
-	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
-		irg = get_irp_irg(i);
+	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
+		ir_graph *irg = get_irp_irg(i);
 
 		assure_irg_outs(irg);
 
@@ -587,7 +586,7 @@ void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
 
 			elist = env;
 
-			env = obstack_alloc(&obst, sizeof(*env));
+			env = OALLOC(&obst, walk_env_t);
 			env->found_allocs = NULL;
 			env->dead_allocs  = NULL;
 			env->callback     = callback;