3 * File name: ir/opt/escape_ana.c
4 * Purpose: escape analysis and optimization
9 * Copyright: (c) 1999-2005 Universität Karlsruhe
10 * Licence: This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
16 * A fast and simple escape analysis.
23 #include "irgraph_t.h"
28 #include "analyze_irg_args.h"
31 #include "escape_ana.h"
/* Per-graph walker environment: collects the Alloc nodes found during the
 * out-edge walk plus statistics about what was transformed.
 * The alloc lists are singly linked through the nodes' link fields
 * (see set_irn_link() usage in find_allocations()). */
37 typedef struct _walk_env {
38 ir_node *found_allocs; /**< list of all found non-escaped allocs */
39 ir_node *dead_allocs; /**< list of all found dead allocs */
40 unsigned nr_removed; /**< number of removed allocs (placed on frame) */
41 unsigned nr_changed; /**< number of changed allocs (allocated on stack now) */
42 unsigned nr_deads; /**< number of dead allocs */
44 /* these fields are only used in the global escape analysis */
45 ir_graph *irg; /**< the irg for this environment */
46 struct _walk_env *next; /**< for linking environments */
/* debug module handle, only present in debug builds */
51 DEBUG_ONLY(firm_dbg_module_t *dbgHandle;)
54 * checks whether a Raise leaves a method
/* Returns non-zero iff the exception control flow of the given Raise node
 * reaches the graph's end block, i.e. the exception leaves the current
 * method. Relies on consistent out edges (get_irn_n_outs/get_irn_out). */
56 static int is_method_leaving_raise(ir_node *raise)
/* scan all out edges of the Raise, looking for its X (exception flow) Proj */
62 for (i = get_irn_n_outs(raise) - 1; i >= 0; --i) {
63 ir_node *succ = get_irn_out(raise, i);
65 /* there should be only one ProjX node */
66 if (get_Proj_proj(succ) == pn_Raise_X) {
73 /* Hmm: no ProjX from a Raise? This should be a verification
74 * error. For now we just assert and return.
76 assert(! "No ProjX after Raise found");
/* a ProjX must have exactly one user (its target block) */
80 if (get_irn_n_outs(proj) != 1) {
81 /* Hmm: more than one user of ProjX: This is a verification
84 assert(! "More than one user of ProjX");
88 n = get_irn_out(proj, 0);
89 assert(is_Block(n) && "Argh: user of ProjX is no block");
/* the ProjX's target is the end block: the Raise leaves the method */
91 if (n == get_irg_end_block(get_irn_irg(n)))
94 /* ok, we get here so the raise will not leave the function */
99 * determine if a value calculated by n "escapes", ie
100 * is stored somewhere we could not track
/* Recursively walks the out edges of the pointer value n and returns
 * non-zero as soon as any use could make the pointed-to object visible
 * outside the current method (stored away, passed to an unknown callee,
 * returned, or raised out of the method). Conservative: unknown uses
 * count as escaping. */
102 static int can_escape(ir_node *n) {
105 /* should always be pointer mode or we made some mistake */
106 assert(mode_is_reference(get_irn_mode(n)));
/* inspect every user of n */
108 for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
109 ir_node *succ = get_irn_out(n, i);
111 switch (get_irn_opcode(succ)) {
113 if (get_Store_value(succ) == n) {
115 * We are storing n. As long as we do not further
116 * evaluate things, the pointer 'escapes' here
124 * Should not happen, but if it does we leave the pointer
125 * path and do not track further
129 case iro_Call: { /* most complicated case */
130 ir_node *ptr = get_Call_ptr(succ);
/* direct call: the callee entity is statically known */
133 if (get_irn_op(ptr) == op_SymConst &&
134 get_SymConst_kind(ptr) == symconst_addr_ent) {
135 ent = get_SymConst_entity(ptr);
137 /* we know the called entity */
138 for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
139 if (get_Call_param(succ, j) == n) {
140 /* n is the j'th param of the call */
141 if (get_method_param_access(ent, j) & ptr_access_store)
142 /* n is stored in ent */
147 else if (get_irn_op(ptr) == op_Sel) {
148 /* go through all possible callees */
149 for (k = get_Call_n_callees(succ) - 1; k >= 0; --k) {
150 ent = get_Call_callee(succ, k);
152 if (ent == unknown_entity) {
153 /* we don't know what will be called, a possible escape */
157 for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
158 if (get_Call_param(succ, j) == n) {
159 /* n is the j'th param of the call */
160 if (get_method_param_access(ent, j) & ptr_access_store)
161 /* n is stored in ent */
167 else /* we don't know what will be called */
174 /* Bad: the allocated object is returned */
178 /* Hmm: if we do NOT leave the method, it's local */
179 return is_method_leaving_raise(succ);
184 /* Bad: trace the tuple backwards */
185 for (j = get_irn_arity(succ) - 1; j >= 0; --j)
186 if (get_irn_n(succ, j) == n)
/* NOTE(review): every other out-edge loop in this file starts at
 * get_irn_n_outs(x) - 1 (see lines 62, 108, 240 above/below); starting
 * at get_irn_n_outs(succ) indexes one past the last out edge on the
 * first iteration — looks like an off-by-one, confirm against upstream. */
192 for (k = get_irn_n_outs(succ); k >= 0; --k) {
193 proj = get_irn_out(succ, k);
/* j still holds the input position of n in the Tuple; the matching
 * Proj number selects the same value on the output side */
195 if (get_Proj_proj(proj) == j) {
196 /* we found the right Proj */
203 * If we haven't found the right Proj, succ is still
204 * the Tuple and the search will end here.
/* non-pointer result: cannot carry the address any further */
214 if (! mode_is_reference(get_irn_mode(succ)))
/* follow the pointer value through this user recursively */
217 if (can_escape(succ))
224 * walker: search for Alloc nodes and follow the usages
/* irg_walk_graph() callback: classifies every heap Alloc in the graph as
 * either dead (result unused) or non-escaping, and prepends it to the
 * matching list in the walk_env_t passed via ctx. Escaping allocs are
 * left alone. */
226 static void find_allocations(ir_node *alloc, void *ctx)
230 walk_env_t *env = ctx;
/* only Alloc nodes are of interest */
232 if (get_irn_op(alloc) != op_Alloc)
235 /* we searching only for heap allocations */
236 if (get_Alloc_where(alloc) != heap_alloc)
/* find the Proj for the allocation result among the out edges */
240 for (i = get_irn_n_outs(alloc) - 1; i >= 0; --i) {
241 ir_node *proj = get_irn_out(alloc, i);
243 if (get_Proj_proj(proj) == pn_Alloc_res) {
251 * bad: no-one wants the result, should NOT happen but
252 * if it does we could delete it.
/* record as dead: linked through the node's link field */
254 set_irn_link(alloc, env->dead_allocs);
255 env->dead_allocs = alloc;
/* result is used but never escapes: candidate for frame/stack placement */
260 if (! can_escape(adr)) {
261 set_irn_link(alloc, env->found_allocs);
262 env->found_allocs = alloc;
267 * do the necessary graph transformations
/* Applies the analysis results collected in env to irg:
 * - dead heap Allocs are turned into Tuples (effectively deleted),
 * - non-escaping Allocs whose type/size is statically known become
 *   frame entities accessed via a simpleSel,
 * - the remaining non-escaping Allocs are re-flagged as stack_alloc.
 * Invalidates out/dominance info if anything was changed. */
269 static void transform_allocs(ir_graph *irg, walk_env_t *env)
271 ir_node *alloc, *next, *mem, *sel, *size;
272 ir_type *ftp, *atp, *tp;
278 /* kill all dead allocs */
279 for (alloc = env->dead_allocs; alloc; alloc = next) {
280 next = get_irn_link(alloc);
282 DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));
/* replace the Alloc by a Tuple forwarding its memory and routing the
 * exception projection to Bad */
284 mem = get_Alloc_mem(alloc);
285 turn_into_tuple(alloc, pn_Alloc_max);
286 set_Tuple_pred(alloc, pn_Alloc_M, mem);
287 set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
292 /* convert all non-escaped heap allocs into frame variables */
293 ftp = get_irg_frame_type(irg);
294 for (alloc = env->found_allocs; alloc; alloc = next) {
295 next = get_irn_link(alloc);
296 size = get_Alloc_size(alloc);
297 atp = get_Alloc_type(alloc);
/* try to recover the allocated type from the size operand */
300 if (get_irn_op(size) == op_SymConst && get_SymConst_kind(size) == symconst_type_size) {
301 /* if the size is a type size and the types matched */
302 assert(atp == get_SymConst_type(size));
305 else if (is_Const(size)) {
306 tarval *tv = get_Const_tarval(size);
308 if (tv != tarval_bad && tarval_is_long(tv) &&
309 get_type_state(atp) == layout_fixed &&
310 get_tarval_long(tv) == get_type_size_bytes(atp)) {
311 /* an already lowered type size */
316 if (tp && tp != firm_unknown_type) {
317 /* we could determine the type, so we could place it on the frame */
318 dbg = get_irn_dbg_info(alloc);
320 DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on frame\n", irg, alloc, tp));
/* create a uniquely named entity in the frame type for this alloc */
322 snprintf(name, sizeof(name), "%s_NE_%u", get_entity_name(get_irg_entity(irg)), nr++);
323 ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);
325 sel = new_rd_simpleSel(dbg, irg, get_nodes_block(alloc),
326 get_irg_no_mem(irg), get_irg_frame(irg), ent);
327 mem = get_Alloc_mem(alloc);
/* route the Alloc's users to the frame Sel instead */
329 turn_into_tuple(alloc, pn_Alloc_max);
330 set_Tuple_pred(alloc, pn_Alloc_M, mem);
331 set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
332 set_Tuple_pred(alloc, pn_Alloc_res, sel);
338 * We could not determine the type or it is variable size.
339 * At least, we could place it on the stack
/* NOTE(review): the format string below has three %+F conversions but
 * only two arguments (irg, alloc) — the type argument is missing;
 * compare the "placed on frame" DBG above. Confirm against upstream. */
341 DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on stack\n", irg, alloc));
342 set_Alloc_where(alloc, stack_alloc);
348 /* if allocs were removed somehow */
/* bitwise OR intentionally used as a cheap logical OR of counters */
349 if (env->nr_removed | env->nr_deads) {
350 set_irg_outs_inconsistent(irg);
353 /* exception control flow might have been changed */
354 set_irg_doms_inconsistent(irg);
359 /* Do simple and fast escape analysis for one graph. */
/* Public entry point for a single graph. NOTE(review): the name carries a
 * historical typo ("enalysis"); it is part of the public API and must not
 * be renamed here — callers elsewhere depend on it.
 * Requires consistent callee info; computes out edges on demand. */
360 void escape_enalysis_irg(ir_graph *irg)
364 if (get_irg_callee_info_state(irg) != irg_callee_info_consistent) {
365 /* no way yet to calculate this for one irg */
366 assert(! "need callee info");
/* the analysis works on out edges; recompute them if stale */
370 if (get_irg_outs_state(irg) != outs_consistent)
371 compute_irg_outs(irg);
373 env.found_allocs = NULL;
374 env.dead_allocs = NULL;
/* collect candidates, then rewrite the graph in a second phase */
379 irg_walk_graph(irg, NULL, find_allocations, &env);
381 transform_allocs(irg, &env);
384 /* Do simple and fast escape analysis for all graphs. */
/* Public entry point for the whole program: runs find_allocations() over
 * every irg first (collecting per-graph results in obstack-allocated
 * environments) and only afterwards applies the transformations, so no
 * analysis information is invalidated while the analysis is still running. */
385 void escape_analysis(int run_scalar_replace)
390 walk_env_t *env, *elist;
392 if (get_irp_callee_info_state() != irg_callee_info_consistent) {
393 assert(! "need callee info");
397 FIRM_DBG_REGISTER(dbgHandle, "firm.opt.escape_ana");
400 * We trade memory for speed: we first collect all info in a
401 * list of environments, then do the transformation.
402 * Doing it this way, no analysis info gets invalid while we run
/* environments live on this obstack and are freed in one go below */
408 env = obstack_alloc(&obst, sizeof(*env));
409 env->found_allocs = NULL;
410 env->dead_allocs = NULL;
412 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
413 irg = get_irp_irg(i);
415 if (get_irg_outs_state(irg) != outs_consistent)
416 compute_irg_outs(irg);
418 irg_walk_graph(irg, NULL, find_allocations, env);
/* only keep environments that actually found something; the current
 * env is then consumed and a fresh one is allocated for the next irg */
420 if (env->found_allocs || env->dead_allocs) {
428 env = obstack_alloc(&obst, sizeof(*env));
429 env->found_allocs = NULL;
430 env->dead_allocs = NULL;
/* second phase: transform all graphs that had candidates */
434 for (env = elist; env; env = env->next) {
435 transform_allocs(env->irg, env);
/* release all environments at once */
438 obstack_free(&obst, NULL);