/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/*
 * File name:   ir/opt/escape_ana.c
 * Purpose:     escape analysis and optimization
 * Author:      Michael Beck
 * Copyright:   (c) 1999-2005 Universität Karlsruhe
 */

/**
 * A fast and simple Escape analysis.
 */
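/*
 * Illustration (not from the original sources): the analysis looks for
 * heap allocations whose address never leaves the allocating method.
 * In a Java-like source
 *
 *     void f() { T t = new T(); t.x = 3; }   // t does not escape f
 *     T    g() { return new T(); }           // escapes through the Return
 *
 * the allocation in f() can be placed on f's frame, the one in g() cannot.
 */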
#include <stdio.h>

#include "iroptimize.h"
#include "irgraph_t.h"
#include "irgwalk.h"
#include "irouts.h"
#include "irgmod.h"
#include "ircons.h"
#include "obst.h"
#include "debug.h"
#include "analyze_irg_args.h"
typedef struct _walk_env {
    ir_node *found_allocs;            /**< list of all found non-escaped allocs */
    ir_node *dead_allocs;             /**< list of all found dead allocs */
    check_alloc_entity_func callback; /**< callback that checks a given entity for allocation */
    unsigned nr_removed;              /**< number of removed allocs (placed on frame) */
    unsigned nr_changed;              /**< number of changed allocs (allocated on stack now) */
    unsigned nr_deads;                /**< number of dead allocs */

    /* these fields are only used in the global escape analysis */
    ir_graph *irg;                    /**< the irg for this environment */
    struct _walk_env *next;           /**< for linking environments */
} walk_env_t;

/** the debug handle */
DEBUG_ONLY(firm_dbg_module_t *dbgHandle;)
/**
 * checks whether a Raise leaves a method
 */
static int is_method_leaving_raise(ir_node *raise)
{
    int i;
    ir_node *proj = NULL;
    ir_node *n;

    for (i = get_irn_n_outs(raise) - 1; i >= 0; --i) {
        ir_node *succ = get_irn_out(raise, i);

        /* there should be only one ProjX node */
        if (get_Proj_proj(succ) == pn_Raise_X) {
            proj = succ;
            break;
        }
    }

    if (proj == NULL) {
        /* Hmm: no ProjX from a Raise? This should be a verification
         * error. For now we just assert and return. */
        assert(! "No ProjX after Raise found");
        return 1;
    }

    if (get_irn_n_outs(proj) != 1) {
        /* Hmm: more than one user of ProjX: This is a verification
         * error. */
        assert(! "More than one user of ProjX");
        return 1;
    }

    n = get_irn_out(proj, 0);
    assert(is_Block(n) && "Argh: user of ProjX is no block");

    if (n == get_irg_end_block(get_irn_irg(n)))
        return 1;

    /* ok, we get here so the raise will not leave the function */
    return 0;
}
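/*
 * Illustration of the pattern checked above: the expected out structure
 * of a Raise is
 *
 *     Raise --> Proj(pn_Raise_X) --> Block
 *
 * and the Raise leaves the method iff that block is the end block.
 */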
/**
 * determine if a value calculated by n "escapes", i.e.
 * is stored somewhere we could not track
 */
static int can_escape(ir_node *n) {
    int i, j, k;

    /* should always be pointer mode or we made some mistake */
    assert(mode_is_reference(get_irn_mode(n)));

    for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
        ir_node *succ = get_irn_out(n, i);

        switch (get_irn_opcode(succ)) {
        case iro_Store:
            if (get_Store_value(succ) == n) {
                /*
                 * We are storing n. As long as we do not further
                 * evaluate things, the pointer escapes here.
                 */
                return 1;
            }
            break;

        case iro_Conv:
            /*
             * Should not happen, but if it does we leave the pointer
             * path and do not track further.
             */
            return 1;

        case iro_Call: { /* most complicated case */
            ir_node *ptr = get_Call_ptr(succ);
            ir_entity *ent;

            if (get_irn_op(ptr) == op_SymConst &&
                get_SymConst_kind(ptr) == symconst_addr_ent) {
                ent = get_SymConst_entity(ptr);

                /* we know the called entity */
                for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
                    if (get_Call_param(succ, j) == n) {
                        /* n is the j'th param of the call */
                        if (get_method_param_access(ent, j) & ptr_access_store)
                            /* n is stored in ent */
                            return 1;
                    }
                }
            }
            else if (get_irn_op(ptr) == op_Sel) {
                /* go through all possible callees */
                for (k = get_Call_n_callees(succ) - 1; k >= 0; --k) {
                    ent = get_Call_callee(succ, k);

                    if (ent == unknown_entity) {
                        /* we don't know what will be called, a possible escape */
                        return 1;
                    }

                    for (j = get_Call_n_params(succ) - 1; j >= 0; --j) {
                        if (get_Call_param(succ, j) == n) {
                            /* n is the j'th param of the call */
                            if (get_method_param_access(ent, j) & ptr_access_store)
                                /* n is stored in ent */
                                return 1;
                        }
                    }
                }
            }
            else /* we don't know what will be called */
                return 1;

            break;
        }

        case iro_Return:
            /* Bad: the allocated object is returned */
            return 1;

        case iro_Raise:
            /* Hmm: if we do NOT leave the method, it's local */
            if (is_method_leaving_raise(succ))
                return 1;
            break;

        case iro_Tuple: {
            ir_node *proj;

            /* Bad: trace the Tuple backwards */
            for (j = get_irn_arity(succ) - 1; j >= 0; --j)
                if (get_irn_n(succ, j) == n)
                    break;

            assert(j >= 0);

            for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
                proj = get_irn_out(succ, k);

                if (get_Proj_proj(proj) == j) {
                    /* we found the right Proj */
                    succ = proj;
                    break;
                }
            }

            /*
             * If we haven't found the right Proj, succ is still
             * the Tuple and the search will end here.
             */
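            /* Example: if succ == Tuple(a, n, b), then n is input 1 of
             * the Tuple, so we continue the walk at the Proj with proj
             * number 1, which stands for n's value behind the Tuple. */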
            break;
        }

        default:
            break;
        }

        /* the pointer is only propagated through reference-mode users */
        if (! mode_is_reference(get_irn_mode(succ)))
            continue;

        if (can_escape(succ))
            return 1;
    }
    return 0;
}
/**
 * walker: search for Alloc nodes and follow the usages
 */
static void find_allocations(ir_node *alloc, void *ctx)
{
    int i;
    ir_node *adr;
    walk_env_t *env = ctx;

    if (! is_Alloc(alloc))
        return;

    /* we search only for heap allocations */
    if (get_Alloc_where(alloc) != heap_alloc)
        return;

    adr = NULL;
    for (i = get_irn_n_outs(alloc) - 1; i >= 0; --i) {
        ir_node *proj = get_irn_out(alloc, i);

        if (get_Proj_proj(proj) == pn_Alloc_res) {
            adr = proj;
            break;
        }
    }

    if (! adr) {
        /*
         * bad: no-one wants the result, should NOT happen but
         * if it does we could delete it.
         */
        set_irn_link(alloc, env->dead_allocs);
        env->dead_allocs = alloc;
        return;
    }

    if (! can_escape(adr)) {
        set_irn_link(alloc, env->found_allocs);
        env->found_allocs = alloc;
    }
}
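/*
 * Note on the bookkeeping idiom used by both walkers: candidates are
 * chained into intrusive lists through their link fields,
 *
 *     set_irn_link(alloc, env->found_allocs);
 *     env->found_allocs = alloc;
 *
 * so the transformation phase can traverse them later via get_irn_link()
 * without any additional allocation.
 */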
/**
 * walker: search for allocation Call nodes and follow the usages
 */
static void find_allocation_calls(ir_node *call, void *ctx)
{
    int i, j;
    ir_node *adr;
    ir_entity *ent;
    walk_env_t *env = ctx;

    if (! is_Call(call))
        return;
    adr = get_Call_ptr(call);
    if (! is_SymConst(adr) || get_SymConst_kind(adr) != symconst_addr_ent)
        return;
    ent = get_SymConst_entity(adr);
    if (! env->callback(ent))
        return;

    adr = NULL;
    for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
        ir_node *res_proj = get_irn_out(call, i);

        if (get_Proj_proj(res_proj) == pn_Call_T_result) {
            for (j = get_irn_n_outs(res_proj) - 1; j >= 0; --j) {
                ir_node *proj = get_irn_out(res_proj, j);

                if (get_Proj_proj(proj) == 0) {
                    /* found first result */
                    adr = proj;
                    break;
                }
            }
            break;
        }
    }

    if (! adr) {
        /*
         * bad: no-one wants the result, should NOT happen but
         * if it does we could delete it.
         */
        set_irn_link(call, env->dead_allocs);
        env->dead_allocs = call;
        return;
    }

    if (! can_escape(adr)) {
        set_irn_link(call, env->found_allocs);
        env->found_allocs = call;
    }
}
/**
 * Do the necessary graph transformations to transform
 * the Alloc nodes.
 */
static void transform_allocs(ir_graph *irg, walk_env_t *env)
{
    ir_node *alloc, *next, *mem, *sel, *size, *blk;
    ir_type *ftp, *atp, *tp;
    ir_entity *ent;
    char name[128];
    unsigned nr = 0;
    dbg_info *dbg;

    /* kill all dead allocs */
    for (alloc = env->dead_allocs; alloc; alloc = next) {
        next = get_irn_link(alloc);

        DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));

        mem = get_Alloc_mem(alloc);
        blk = get_nodes_block(alloc);
        turn_into_tuple(alloc, pn_Alloc_max);
        set_Tuple_pred(alloc, pn_Alloc_M, mem);
        set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
        set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));

        ++env->nr_deads;
    }

    /* convert all non-escaped heap allocs into frame variables */
    ftp = get_irg_frame_type(irg);
    for (alloc = env->found_allocs; alloc; alloc = next) {
        next = get_irn_link(alloc);
        size = get_Alloc_size(alloc);
        atp  = get_Alloc_type(alloc);

        tp = NULL;
        if (is_SymConst(size) && get_SymConst_kind(size) == symconst_type_size) {
            /* if the size is a type size and the types matched */
            assert(atp == get_SymConst_type(size));
            tp = atp;
        }
        else if (is_Const(size)) {
            tarval *tv = get_Const_tarval(size);

            if (tv != tarval_bad && tarval_is_long(tv) &&
                get_type_state(atp) == layout_fixed &&
                get_tarval_long(tv) == get_type_size_bytes(atp)) {
                /* an already lowered type size */
                tp = atp;
            }
        }

        if (tp && tp != firm_unknown_type) {
            /* we could determine the type, so we could place it on the frame */
            dbg = get_irn_dbg_info(alloc);

            DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on frame\n", irg, alloc, tp));

            snprintf(name, sizeof(name), "%s_NE_%u", get_entity_name(get_irg_entity(irg)), nr++);
            ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);

            sel = new_rd_simpleSel(dbg, irg, get_nodes_block(alloc),
                get_irg_no_mem(irg), get_irg_frame(irg), ent);
            mem = get_Alloc_mem(alloc);
            blk = get_nodes_block(alloc);

            turn_into_tuple(alloc, pn_Alloc_max);
            set_Tuple_pred(alloc, pn_Alloc_M, mem);
            set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(irg, blk));
            set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg));
            set_Tuple_pred(alloc, pn_Alloc_res, sel);

            ++env->nr_removed;
        }
        else {
            /*
             * We could not determine the type or it is variable size.
             * At least, we could place it on the stack
             */
            DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F placed on stack\n", irg, alloc));
            set_Alloc_where(alloc, stack_alloc);

            ++env->nr_changed;
        }
    }

    /* if allocs were removed somehow */
    if (env->nr_removed | env->nr_deads) {
        set_irg_outs_inconsistent(irg);

        if (env->nr_deads) {
            /* exception control flow might have been changed */
            set_irg_doms_inconsistent(irg);
        }
    }
}
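/*
 * Sketch of the rewiring done above for a non-escaped Alloc (the entity
 * name "f_NE_0" is just an example produced by the snprintf above):
 *
 *     before:  Proj(Alloc(mem, size, tp, heap_alloc), pn_Alloc_res)
 *     after:   Alloc turned into Tuple(mem, Jmp, Bad, Sel(frame, f_NE_0))
 *
 * All former Projs of the Alloc now select the corresponding Tuple
 * predecessors, so the result Proj yields the address of the frame entity.
 */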
/**
 * Do the necessary graph transformations to transform
 * the allocation Calls.
 */
static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
{
    ir_node *call, *next, *mem, *blk;
    ir_type *ftp;

    /* kill all dead allocs */
    for (call = env->dead_allocs; call; call = next) {
        next = get_irn_link(call);

        DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));

        mem = get_Call_mem(call);
        blk = get_nodes_block(call);
        turn_into_tuple(call, pn_Call_max);
        set_Tuple_pred(call, pn_Call_M_regular, mem);
        set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(irg, blk));
        set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
        set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
        set_Tuple_pred(call, pn_Call_M_except, mem);
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));

        ++env->nr_deads;
    }

    /* convert all non-escaped heap allocs into frame variables */
    ftp = get_irg_frame_type(irg);
    for (call = env->found_allocs; call; call = next) {
        next = get_irn_link(call);
    }
}
/* Do simple and fast escape analysis for one graph. */
void escape_enalysis_irg(ir_graph *irg, check_alloc_entity_func callback)
{
    walk_env_t env;

    if (get_irg_callee_info_state(irg) != irg_callee_info_consistent) {
        /* no way yet to calculate this for one irg */
        assert(! "need callee info");
        return;
    }

    if (get_irg_outs_state(irg) != outs_consistent)
        compute_irg_outs(irg);

    env.found_allocs = NULL;
    env.dead_allocs  = NULL;
    env.callback     = callback;
    env.nr_removed   = 0;
    env.nr_changed   = 0;
    env.nr_deads     = 0;

    if (callback) {
        /* search for Calls */
        irg_walk_graph(irg, NULL, find_allocation_calls, &env);
        transform_alloc_calls(irg, &env);
    } else {
        /* search for Alloc nodes */
        irg_walk_graph(irg, NULL, find_allocations, &env);
        transform_allocs(irg, &env);
    }
}
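/*
 * A minimal usage sketch (assumptions: callee info has already been made
 * consistent, e.g. by a preceding call graph analysis; "is_own_allocator"
 * is a hypothetical callback, not part of libFirm):
 *
 *     static int is_own_allocator(ir_entity *ent) {
 *         return strcmp(get_entity_name(ent), "my_malloc") == 0;
 *     }
 *
 *     escape_enalysis_irg(irg, NULL);              // check Alloc nodes
 *     escape_enalysis_irg(irg, is_own_allocator);  // check allocation Calls
 */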
/* Do simple and fast escape analysis for all graphs. */
void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
{
    ir_graph *irg;
    int i;
    struct obstack obst;
    walk_env_t *env, *elist;
    (void) run_scalar_replace;

    if (get_irp_callee_info_state() != irg_callee_info_consistent) {
        assert(! "need callee info");
        return;
    }

    FIRM_DBG_REGISTER(dbgHandle, "firm.opt.escape_ana");

    /*
     * We trade memory for speed: we first collect all info in a
     * list of environments, then do the transformation.
     * Doing it this way, no analysis info gets invalid while we run
     * the transformations.
     */
    obstack_init(&obst);
    elist = NULL;

    env = obstack_alloc(&obst, sizeof(*env));
    env->found_allocs = NULL;
    env->dead_allocs  = NULL;
    env->callback     = callback;

    for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
        irg = get_irp_irg(i);

        assure_irg_outs(irg);

        if (callback) {
            /* search for Calls */
            irg_walk_graph(irg, NULL, find_allocation_calls, env);
        } else {
            /* search for Alloc nodes */
            irg_walk_graph(irg, NULL, find_allocations, env);
        }

        if (env->found_allocs || env->dead_allocs) {
            env->irg  = irg;
            env->next = elist;
            elist     = env;

            env = obstack_alloc(&obst, sizeof(*env));
            env->found_allocs = NULL;
            env->dead_allocs  = NULL;
            env->callback     = callback;
        }
    }

    if (callback) {
        for (env = elist; env; env = env->next) {
            transform_alloc_calls(env->irg, env);
        }
    } else {
        for (env = elist; env; env = env->next) {
            transform_allocs(env->irg, env);
        }
    }

    obstack_free(&obst, NULL);
}
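/*
 * Whole-program usage sketch (same assumptions as above; the first
 * parameter is currently unused, see the (void) cast above):
 *
 *     escape_analysis(0, NULL);               // Alloc-based analysis
 *     escape_analysis(0, is_own_allocator);   // Call-based analysis
 */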