3 * File name: ir/opt/escape_ana.c
4 * Purpose: escape analysis and optimization
9 * Copyright: (c) 1999-2005 Universität Karlsruhe
10 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
13 /** @file escape_ana.c
22 #include "irgraph_t.h"
25 #include "analyze_irg_args.h"
28 #include "escape_ana.h"
/*
 * Walker environment: per-graph result lists of the Alloc-collecting
 * walker.  Both lists are singly linked, threaded through the nodes'
 * link fields (see set_irn_link in find_allocations).
 * NOTE(review): the closing "} walk_env_t;" is outside this excerpt.
 */
33 typedef struct _walk_env {
34 ir_node *found_allocs; /**< list of all found non-escaped allocs */
35 ir_node *dead_allocs; /**< list of all found dead allocs (result unused) */
36 unsigned nr_changed; /**< number of changed allocs */
37 unsigned nr_deads; /**< number of dead allocs */
39 /* these fields are only used in the global escape analysis */
40 ir_graph *irg; /**< the irg for this environment */
41 struct _walk_env *next; /**< for linking environments */
 * determine if a value calculated by n "escapes", i.e.
 * is stored somewhere we could not track
49 static int do_escape(ir_node *n) {
52 /* should always be pointer mode or we made some mistake */
53 assert(mode_is_reference(get_irn_mode(n)));
/* walk all users of n via the out edges (requires consistent outs) */
55 for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
56 ir_node *succ = get_irn_out(n, i);
57 ir_op *op = get_irn_op(succ);
/* case 1: n is the *value* operand of a Store (not the address) */
60 if (get_Store_value(succ) == n) {
62 * We are storing n. As long as we do not further
63 * evaluate things, the pointer 'escapes' here
/* case 2: Conv loses the pointer property; stop tracking */
68 else if (op == op_Conv) {
70 * Should not happen, but if it does we leave the pointer
71 * path and do not track further
/* case 3: n is passed as an argument to a Call */
75 else if (op == op_Call) { /* most complicated case */
76 ir_node *ptr = get_Call_ptr(succ);
/* direct call through a SymConst address: the callee is known */
79 if (get_irn_op(ptr) == op_SymConst &&
80 get_SymConst_kind(ptr) == symconst_addr_ent) {
81 ent = get_SymConst_entity(ptr);
83 /* we know the called entity */
/* NOTE(review): off-by-one -- loop starts at get_Call_n_params(succ)
 * instead of get_Call_n_params(succ) - 1 (contrast the outs loop
 * above), so the first iteration indexes one past the last
 * parameter.  Confirm and fix. */
84 for (j = get_Call_n_params(succ); j >= 0; --j) {
85 if (get_Call_param(succ, j) == n) {
86 /* n is the j'th param of the call */
87 if (get_method_param_access(ent, j) & ptr_access_store)
88 /* n is stored in ent */
/* indirect call: consult the callee information */
94 /* go through all possible callees */
95 for (k = get_Call_n_callees(succ) - 1; k >= 0; --k) {
96 ent = get_Call_callee(succ, k);
/* NOTE(review): same off-by-one as above -- should start at
 * get_Call_n_params(succ) - 1. */
98 for (j = get_Call_n_params(succ); j >= 0; --j) {
99 if (get_Call_param(succ, j) == n) {
100 /* n is the j'th param of the call */
101 if (get_method_param_access(ent, j) & ptr_access_store)
102 /* n is stored in ent */
/* a non-pointer user cannot leak the address further */
110 if (! mode_is_reference(get_irn_mode(succ)))
 * walker: search for Alloc nodes and follow the usages
122 static void find_allocations(ir_node *alloc, void *ctx)
126 walk_env_t *env = ctx;
/* only Alloc nodes are of interest */
128 if (get_irn_op(alloc) != op_Alloc)
131 /* we are searching only for heap allocations */
132 if (get_Alloc_where(alloc) != heap_alloc)
/* find the pointer result Proj of this Alloc
 * NOTE(review): off-by-one -- loop starts at get_irn_n_outs(alloc)
 * instead of get_irn_n_outs(alloc) - 1 (contrast do_escape), so the
 * first iteration reads one out edge past the end.  Confirm and fix. */
136 for (i = get_irn_n_outs(alloc); i >= 0; --i) {
137 ir_node *proj = get_irn_out(alloc, i);
139 if (get_Proj_proj(proj) == pn_Alloc_res) {
147 * bad: no-one wants the result, should NOT happen but
148 * if it does we could delete it.
/* prepend to the dead list, threaded through the link field */
150 set_irn_link(alloc, env->dead_allocs);
151 env->dead_allocs = alloc;
/* the result pointer never escapes: remember this Alloc for rewriting */
156 if (! do_escape(adr)) {
157 set_irn_link(alloc, env->found_allocs);
158 env->found_allocs = alloc;
 * do the necessary graph transformations
165 static void transform_allocs(ir_graph *irg, walk_env_t *env)
167 ir_node *alloc, *next, *mem, *sel;
174 /* kill all dead allocs */
175 for (alloc = env->dead_allocs; alloc; alloc = next) {
176 next = get_irn_link(alloc);
/* replace the Alloc by a Tuple that forwards the memory and
 * routes the exception exit to Bad (it cannot raise anymore) */
178 mem = get_Alloc_mem(alloc);
179 turn_into_tuple(alloc, pn_Alloc_max);
180 set_Tuple_pred(alloc, pn_Alloc_M, mem);
181 set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
186 /* convert all non-escaped heap allocs into frame variables */
187 ftp = get_irg_frame_type(irg);
/* NOTE(review): this loop walks env->dead_allocs, but the comment (and
 * the fact that the dead list was already rewritten into Tuples above,
 * while env->found_allocs is never consumed) says it should walk
 * env->found_allocs.  Confirm and fix. */
188 for (alloc = env->dead_allocs; alloc; alloc = next) {
189 next = get_irn_link(alloc);
190 dbg = get_irn_dbg_info(alloc);
/* create a fresh frame entity for the allocated object */
192 snprintf(name, sizeof(name), "_not_escaped_%u", nr++);
193 ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);
195 sel = new_rd_simpleSel(dbg, irg, get_nodes_block(alloc),
196 get_irg_no_mem(irg), get_irg_frame(irg), ent);
197 mem = get_Alloc_mem(alloc);
/* replace the Alloc by a Tuple: memory passes through, no exception,
 * the result is the address of the new frame entity */
199 turn_into_tuple(alloc, pn_Alloc_max);
200 set_Tuple_pred(alloc, pn_Alloc_M, mem);
201 set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
202 set_Tuple_pred(alloc, pn_Alloc_res, sel);
/* anything rewritten invalidates outs (and dominance) info */
207 if (env->nr_changed | env->nr_deads) {
208 set_irg_outs_inconsistent(irg);
211 set_irg_dom_inconsistent(irg);
/* Do simple and fast escape analysis for one graph. */
/* NOTE(review): "enalysis" is a typo for "analysis", but this is the
 * exported name -- rename only together with the public header and all
 * callers. */
216 void escape_enalysis_irg(ir_graph *irg)
/* the intra-procedural variant still needs callee info for Calls */
220 if (get_irg_callee_info_state(irg) != irg_callee_info_consistent) {
221 /* no way yet to calculate this for one irg */
222 assert(! "need callee info");
/* do_escape/find_allocations walk the out edges; recompute if stale */
226 if (get_irg_outs_state(irg) != outs_consistent)
227 compute_irg_outs(irg);
229 env.found_allocs = NULL;
230 env.dead_allocs = NULL;
/* collect interesting Allocs, then rewrite them */
234 irg_walk_graph(irg, NULL, find_allocations, &env);
236 transform_allocs(irg, &env);
/* Do simple and fast escape analysis for all graphs. */
240 void escape_analysis(int run_scalar_replace)
245 walk_env_t *env, *elist;
/* the analysis needs consistent callee information program-wide */
247 if (get_irp_callee_info_state() != irg_callee_info_consistent) {
248 assert(! "need callee info");
253 * We trade memory for speed: we first collect all info in a
254 * list of environments, then do the transformation.
255 * Doing it this way, no analysis info gets invalid while we run
/* environments live on the obstack until all graphs are transformed */
261 env = obstack_alloc(&obst, sizeof(*env));
262 env->found_allocs = NULL;
263 env->dead_allocs = NULL;
265 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
266 irg = get_irp_irg(i);
268 if (get_irg_outs_state(irg) != outs_consistent)
269 compute_irg_outs(irg);
/* NOTE(review): env is already a walk_env_t *, so &env passes a
 * walk_env_t ** to the walker, whose ctx is read back as
 * walk_env_t * in find_allocations.  Contrast the per-graph variant,
 * where env is a stack struct and &env is correct.  This should
 * almost certainly be plain `env` -- confirm and fix. */
271 irg_walk_graph(irg, NULL, find_allocations, &env);
/* this graph has work to do: keep its environment and get a new one */
273 if (env->found_allocs || env->dead_allocs) {
281 env = obstack_alloc(&obst, sizeof(*env));
282 env->found_allocs = NULL;
283 env->dead_allocs = NULL;
/* second pass: transform all collected graphs */
287 for (env = elist; env; env = env->next) {
288 transform_allocs(env->irg, env);
/* releases every environment allocated above */
291 obstack_free(&obst, NULL);