/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @author  Michael Beck
 * @brief   A fast and simple escape analysis.
 */
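
/*
 * Overview (editor's summary of the code below): the analysis walks each
 * graph and collects heap Alloc nodes, or allocation-like Calls selected
 * by a user callback, whose result provably does not escape the method.
 * Dead allocations are deleted; the remaining ones are turned into frame
 * entities, i.e. stack allocations.  Conceptually, for source like
 *
 *     T *p = new T();    // heap allocation
 *     p->x = 1;          // only local, non-escaping uses
 *     return p->x;       // p itself is never stored, returned or raised
 *
 * the Alloc is replaced by a Sel of a fresh entity on the stack frame.
 */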

#include <stdio.h>

#include "iroptimize.h"

#include "irgraph_t.h"
#include "irnode_t.h"
#include "type_t.h"
#include "irgwalk.h"
#include "irouts.h"
#include "analyze_irg_args.h"
#include "irgmod.h"
#include "ircons.h"
#include "irprintf.h"
#include "debug.h"
#include "error.h"
#include "obst.h"

/**
 * walker environment
 */
typedef struct walk_env {
	ir_node *found_allocs;            /**< list of all found non-escaped allocs */
	ir_node *dead_allocs;             /**< list of all found dead allocs */
	check_alloc_entity_func callback; /**< callback that checks a given entity for allocation */
	unsigned nr_removed;              /**< number of removed allocs (placed on frame) */
	unsigned nr_changed;              /**< number of changed allocs (allocated on stack now) */
	unsigned nr_deads;                /**< number of dead allocs */

	/* these fields are only used in the global escape analysis */
	ir_graph *irg;                    /**< the irg for this environment */
	struct walk_env *next;            /**< for linking environments */
} walk_env_t;

/** the debug handle */
DEBUG_ONLY(static firm_dbg_module_t *dbgHandle;)

/**
 * Checks whether a Raise leaves the method.
 */
static int is_method_leaving_raise(ir_node *raise)
{
	int      i;
	ir_node *proj = NULL;
	ir_node *n;

	for (i = get_irn_n_outs(raise) - 1; i >= 0; --i) {
		ir_node *succ = get_irn_out(raise, i);

		/* there should be only one ProjX node */
		if (get_Proj_proj(succ) == pn_Raise_X) {
			proj = succ;
			break;
		}
	}

	if (proj == NULL) {
		/* Hmm: no ProjX from a Raise? This should be a verification
		 * error. For now we just panic. */
		panic("No ProjX after Raise found");
	}

	if (get_irn_n_outs(proj) != 1) {
		/* Hmm: more than one user of ProjX: this is a verification
		 * error as well. */
		panic("More than one user of ProjX");
	}

	n = get_irn_out(proj, 0);
	assert(is_Block(n) && "user of ProjX is no block");

	if (n == get_irg_end_block(get_irn_irg(n)))
		return 1;

	/* if we get here, the Raise does not leave the function */
	return 0;
}
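
/*
 * Editor's note: can_escape() below uses this check for its Raise case.
 * An exception object only escapes if the Raise actually leaves the
 * method, i.e. its ProjX ends at the end block, rather than at a handler
 * block within the same method.
 */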

/**
 * Would return the Alloc node if the address adr is a simple Sel
 * from an Alloc'ed object, else NULL.  Currently it always returns
 * NULL to stay conservative: the referenced Alloc might itself escape.
 */
static ir_node *is_depend_alloc(ir_node *adr)
{
	ir_node *alloc;

	if (!is_Sel(adr))
		return NULL;

	/* should be a simple Sel */
	if (get_Sel_n_indexs(adr) != 0)
		return NULL;

	alloc = skip_Proj(get_Sel_ptr(adr));
	if (!is_Alloc(alloc))
		return NULL;

	/* hmm, we depend on this Alloc */
	ir_printf("depend alloc %+F\n", alloc);

	return NULL;
}
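
/*
 * Editor's example for the dependency case: in
 *
 *     o = new Outer();    // Alloc A1
 *     o.inner = new T();  // Store whose address is a Sel from A1
 *
 * the Store address selects from another Alloc.  is_depend_alloc() is
 * meant to recognize this pattern so can_escape() could avoid treating
 * such a store as an immediate escape; as noted above, it is disabled
 * conservatively for now.
 */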

/**
 * Determines whether a value computed by n can "escape", i.e.
 * is stored somewhere we cannot track.
 */
static int can_escape(ir_node *n)
{
	int i;

	/* should always be pointer mode or we made some mistake */
	assert(mode_is_reference(get_irn_mode(n)));

	for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
		ir_node *succ = get_irn_out(n, i);

		switch (get_irn_opcode(succ)) {
		case iro_Store:
			if (get_Store_value(succ) == n) {
				ir_node *adr = get_Store_ptr(succ);

				/* if this Alloc depends on another one,
				 * we can enqueue it */
				if (is_depend_alloc(adr))
					break;

				/* We are storing n.  As long as we do not evaluate
				 * things any further, the pointer escapes here. */
				return 1;
			}
			break;

		case iro_Conv:
			/* Should not happen, but if it does we leave the pointer
			 * path and do not track any further. */
			return 1;

		case iro_Call: { /* most complicated case */
			ir_node *ptr = get_Call_ptr(succ);
			ir_entity *ent;

			if (is_SymConst_addr_ent(ptr)) {
				size_t j;

				/* we know the called entity */
				ent = get_SymConst_entity(ptr);

				for (j = get_Call_n_params(succ); j > 0;) {
					if (get_Call_param(succ, --j) == n) {
						/* n is the j'th param of the call */
						if (get_method_param_access(ent, j) & ptr_access_store)
							/* n is stored in ent */
							return 1;
					}
				}
			} else if (is_Sel(ptr)) {
				size_t j, k;

				/* go through all possible callees */
				for (k = get_Call_n_callees(succ); k > 0;) {
					ent = get_Call_callee(succ, --k);

					if (is_unknown_entity(ent)) {
						/* we don't know what will be called, a possible escape */
						return 1;
					}

					for (j = get_Call_n_params(succ); j > 0;) {
						if (get_Call_param(succ, --j) == n) {
							/* n is the j'th param of the call */
							if (get_method_param_access(ent, j) & ptr_access_store)
								/* n is stored in ent */
								return 1;
						}
					}
				}
			} else { /* we don't know what will be called */
				return 1;
			}

			break;
		}

		case iro_Return:
			/* Bad: the allocated object is returned */
			return 1;

		case iro_Raise:
			/* Hmm: if we do NOT leave the method, it's local */
			if (is_method_leaving_raise(succ))
				return 1;
			break;

		case iro_Tuple: {
			ir_node *proj;
			int j, k;

			/* Bad: trace the Tuple backwards */
			for (j = get_irn_arity(succ) - 1; j >= 0; --j)
				if (get_irn_n(succ, j) == n)
					break;

			assert(j >= 0);

			for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
				proj = get_irn_out(succ, k);

				if (get_Proj_proj(proj) == j) {
					/* we found the right Proj */
					succ = proj;
					break;
				}
			}

			/* If we haven't found the right Proj, succ is still
			 * the Tuple and the search will end here. */
			break;
		}

		default:
			break;
		}

		if (! mode_is_reference(get_irn_mode(succ)))
			continue;

		if (can_escape(succ))
			return 1;
	}
	return 0;
}
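
/*
 * Editor's example for the Call case above: given
 *
 *     static void callee(T *q) { global = q; }  // parameter is stored
 *     ...
 *     callee(p);
 *
 * get_method_param_access(callee, 0) contains ptr_access_store, so
 * can_escape(p) reports an escape.  Parameters that are only read do
 * not make the pointer escape.
 */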

/**
 * walker: searches for Alloc nodes and follows their usages
 */
static void find_allocations(ir_node *alloc, void *ctx)
{
	walk_env_t *env = (walk_env_t*)ctx;
	int i;
	ir_node *adr;

	if (! is_Alloc(alloc))
		return;

	/* we search only for heap allocations */
	if (get_Alloc_where(alloc) != heap_alloc)
		return;

	adr = NULL;
	for (i = get_irn_n_outs(alloc) - 1; i >= 0; --i) {
		ir_node *proj = get_irn_out(alloc, i);

		if (get_Proj_proj(proj) == pn_Alloc_res) {
			adr = proj;
			break;
		}
	}

	if (! adr) {
		/* bad: no-one wants the result, should NOT happen, but
		 * if it does we can delete it. */
		set_irn_link(alloc, env->dead_allocs);
		env->dead_allocs = alloc;

		return;
	}

	if (! can_escape(adr)) {
		set_irn_link(alloc, env->found_allocs);
		env->found_allocs = alloc;
	}
}
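
/*
 * Editor's note: both walkers thread their result nodes through the link
 * fields (set_irn_link), building intrusive singly linked lists headed by
 * found_allocs and dead_allocs that the transformation phase consumes.
 */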

/**
 * walker: searches for allocation Call nodes and follows their usages
 */
static void find_allocation_calls(ir_node *call, void *ctx)
{
	walk_env_t *env = (walk_env_t*)ctx;
	int        i;
	ir_node    *adr;
	ir_entity  *ent;

	if (! is_Call(call))
		return;
	adr = get_Call_ptr(call);
	if (! is_SymConst_addr_ent(adr))
		return;
	ent = get_SymConst_entity(adr);
	if (! env->callback(ent))
		return;

	adr = NULL;
	for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
		ir_node *res_proj = get_irn_out(call, i);

		if (get_Proj_proj(res_proj) == pn_Call_T_result) {
			int j;

			for (j = get_irn_n_outs(res_proj) - 1; j >= 0; --j) {
				ir_node *proj = get_irn_out(res_proj, j);

				if (get_Proj_proj(proj) == 0) {
					/* found the first result */
					adr = proj;
					break;
				}
			}
			break;
		}
	}

	if (! adr) {
		/* bad: no-one wants the result, should NOT happen, but
		 * if it does we can delete it. */
		set_irn_link(call, env->dead_allocs);
		env->dead_allocs = call;

		return;
	}

	if (! can_escape(adr)) {
		set_irn_link(call, env->found_allocs);
		env->found_allocs = call;
	}
}

/**
 * Do the necessary graph transformations to transform the
 * Alloc nodes.
 */
static void transform_allocs(ir_graph *irg, walk_env_t *env)
{
	ir_node   *alloc, *next, *mem, *sel, *size, *blk;
	ir_type   *ftp, *atp, *tp;
	ir_entity *ent;
	char       name[128];
	unsigned   nr = 0;
	dbg_info  *dbg;

	/* kill all dead allocs */
	for (alloc = env->dead_allocs; alloc; alloc = next) {
		next = (ir_node*)get_irn_link(alloc);

		DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));

		mem = get_Alloc_mem(alloc);
		blk = get_nodes_block(alloc);
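		/* turn_into_tuple() exchanges the Alloc for a Tuple node; the
		 * set_Tuple_pred() calls below reroute every former Proj of the
		 * Alloc: memory is passed through, regular control flow becomes
		 * a fresh Jmp, and the exception edge becomes Bad, as a deleted
		 * allocation can no longer raise. */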
		turn_into_tuple(alloc, pn_Alloc_max+1);
		set_Tuple_pred(alloc, pn_Alloc_M, mem);
		set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
		set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));

		++env->nr_deads;
	}

	/* convert all non-escaped heap allocs into frame variables */
	ftp = get_irg_frame_type(irg);
	for (alloc = env->found_allocs; alloc; alloc = next) {
		next = (ir_node*)get_irn_link(alloc);
		size = get_Alloc_count(alloc);
		atp  = get_Alloc_type(alloc);

		tp = NULL;
		if (is_SymConst(size) && get_SymConst_kind(size) == symconst_type_size) {
			/* the size is a type size and the types match */
			assert(atp == get_SymConst_type(size));
			tp = atp;
		} else if (is_Const(size)) {
			ir_tarval *tv = get_Const_tarval(size);

			if (tv != tarval_bad && tarval_is_long(tv) &&
			    get_type_state(atp) == layout_fixed &&
			    (unsigned)get_tarval_long(tv) == get_type_size_bytes(atp)) {
				/* an already lowered type size */
				tp = atp;
			}
		}

		if (tp && !is_unknown_type(tp)) {
			/* we could determine the type, so we can place it on the frame */
			dbg = get_irn_dbg_info(alloc);
			blk = get_nodes_block(alloc);

			DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on frame\n", irg, alloc, tp));

			snprintf(name, sizeof(name), "%s_NE_%u", get_entity_name(get_irg_entity(irg)), nr++);
			name[sizeof(name) - 1] = '\0';
			ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);

			sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
			mem = get_Alloc_mem(alloc);

			turn_into_tuple(alloc, pn_Alloc_max+1);
			set_Tuple_pred(alloc, pn_Alloc_M, mem);
			set_Tuple_pred(alloc, pn_Alloc_X_regular, new_r_Jmp(blk));
			set_Tuple_pred(alloc, pn_Alloc_X_except, new_r_Bad(irg, mode_X));
			set_Tuple_pred(alloc, pn_Alloc_res, sel);

			++env->nr_removed;
		} else {
			/* We could not determine the type or it has variable size.
			 * At least we can place it on the stack. */
			DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F placed on stack\n", irg, alloc));
			set_Alloc_where(alloc, stack_alloc);

			++env->nr_changed;
		}
	}

	/* invalidate analysis info if allocs were removed or killed */
	if (env->nr_removed || env->nr_deads) {
		confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
	}
}

/**
 * Do the necessary graph transformations to transform the
 * Call nodes.
 */
static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
{
	ir_node *call, *next, *mem, *blk;

	/* kill all dead allocation calls */
	for (call = env->dead_allocs; call; call = next) {
		next = (ir_node*)get_irn_link(call);

		DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));

		mem = get_Call_mem(call);
		blk = get_nodes_block(call);
		turn_into_tuple(call, pn_Call_max+1);
		set_Tuple_pred(call, pn_Call_M, mem);
		set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));

		++env->nr_deads;
	}

	/* TODO: transforming the non-escaped allocation calls is not
	 * implemented yet; the list is only traversed */
	for (call = env->found_allocs; call; call = next) {
		next = (ir_node*)get_irn_link(call);
	}

	confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
}

/* Do simple and fast escape analysis for one graph. */
void escape_enalysis_irg(ir_graph *irg, check_alloc_entity_func callback)
{
	walk_env_t env;

	if (get_irg_callee_info_state(irg) != irg_callee_info_consistent) {
		/* no way yet to calculate this for one irg */
		assert(! "need callee info");
		return;
	}

	assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);

	env.found_allocs = NULL;
	env.dead_allocs  = NULL;
	env.callback     = callback;
	env.nr_removed   = 0;
	env.nr_changed   = 0;
	env.nr_deads     = 0;

	if (callback) {
		/* search for Calls */
		irg_walk_graph(irg, NULL, find_allocation_calls, &env);
		transform_alloc_calls(irg, &env);
	} else {
		/* search for Alloc nodes */
		irg_walk_graph(irg, NULL, find_allocations, &env);
		transform_allocs(irg, &env);
	}
}

/* Do simple and fast escape analysis for all graphs. */
void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
{
	size_t i, n;
	struct obstack obst;
	walk_env_t *env, *elist;
	(void) run_scalar_replace;

	if (get_irp_callee_info_state() != irg_callee_info_consistent) {
		assert(! "need callee info");
		return;
	}

	FIRM_DBG_REGISTER(dbgHandle, "firm.opt.escape_ana");

	/*
	 * We trade memory for speed: we first collect all info in a
	 * list of environments, then do the transformation.
	 * Doing it this way, no analysis info gets invalid while we run
	 * over the graphs.
	 */
	obstack_init(&obst);
	elist = NULL;

	env = OALLOC(&obst, walk_env_t);
	env->found_allocs = NULL;
	env->dead_allocs  = NULL;
	env->callback     = callback;

	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);

		assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);

		if (callback) {
			/* search for Calls */
			irg_walk_graph(irg, NULL, find_allocation_calls, env);
		} else {
			/* search for Alloc nodes */
			irg_walk_graph(irg, NULL, find_allocations, env);
		}

		if (env->found_allocs || env->dead_allocs) {
			env->irg  = irg;
			env->next = elist;
			elist     = env;

			env = OALLOC(&obst, walk_env_t);
			env->found_allocs = NULL;
			env->dead_allocs  = NULL;
			env->callback     = callback;
		}
	}

	if (callback) {
		for (env = elist; env; env = env->next) {
			transform_alloc_calls(env->irg, env);
		}
	} else {
		for (env = elist; env; env = env->next) {
			transform_allocs(env->irg, env);
		}
	}

	obstack_free(&obst, NULL);
}
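
/*
 * Usage sketch (editor's addition; is_alloc_entity is a hypothetical
 * filter, any check_alloc_entity_func works):
 *
 *     static int is_alloc_entity(ir_entity *ent)
 *     {
 *         // treat functions named "my_malloc" as allocators
 *         return strcmp(get_entity_name(ent), "my_malloc") == 0;
 *     }
 *
 *     // callee info must be consistent (asserted above), then:
 *     escape_analysis(0, NULL);             // analyze Alloc nodes
 *     escape_analysis(0, is_alloc_entity);  // or allocation-like Calls
 */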