2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
10 * @brief A fast and simple Escape analysis.
14 #include "iroptimize.h"
16 #include "irgraph_t.h"
21 #include "analyze_irg_args.h"
32 typedef struct walk_env {
33 ir_node *found_allocs; /**< list of all found non-escaped allocs */
34 ir_node *dead_allocs; /**< list of all found dead alloc */
35 check_alloc_entity_func callback; /**< callback that checks a given entity for allocation */
36 unsigned nr_removed; /**< number of removed allocs (placed of frame) */
37 unsigned nr_changed; /**< number of changed allocs (allocated on stack now) */
38 unsigned nr_deads; /**< number of dead allocs */
40 /* these fields are only used in the global escape analysis */
41 ir_graph *irg; /**< the irg for this environment */
42 struct walk_env *next; /**< for linking environments */
/** the debug handle of this pass; registered in escape_analysis() */
DEBUG_ONLY(static firm_dbg_module_t *dbgHandle;)
49 * checks whether a Raise leaves a method
51 static int is_method_leaving_raise(ir_node *raise)
57 for (i = get_irn_n_outs(raise) - 1; i >= 0; --i) {
58 ir_node *succ = get_irn_out(raise, i);
60 /* there should be only one ProjX node */
61 if (get_Proj_proj(succ) == pn_Raise_X) {
68 /* Hmm: no ProjX from a Raise? This should be a verification
69 * error. For now we just assert and return.
71 panic("No ProjX after Raise found");
74 if (get_irn_n_outs(proj) != 1) {
75 /* Hmm: more than one user of ProjX: This is a verification
78 panic("More than one user of ProjX");
81 n = get_irn_out(proj, 0);
82 assert(is_Block(n) && "Argh: user of ProjX is no block");
84 if (n == get_irg_end_block(get_irn_irg(n)))
87 /* ok, we get here so the raise will not leave the function */
92 * returns an Alloc node if the node adr Select
95 static ir_node *is_depend_alloc(ir_node *adr)
102 /* should be a simple Sel */
103 if (get_Sel_n_indexs(adr) != 0)
106 alloc = skip_Proj(get_Sel_ptr(adr));
107 if (!is_Alloc(alloc))
110 /* hmm, we depend on this Alloc */
111 ir_printf("depend alloc %+F\n", alloc);
117 * determine if a value calculated by n "escape", ie
118 * is stored somewhere we could not track
120 static int can_escape(ir_node *n)
124 /* should always be pointer mode or we made some mistake */
125 assert(mode_is_reference(get_irn_mode(n)));
127 for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
128 ir_node *succ = get_irn_out(n, i);
130 switch (get_irn_opcode(succ)) {
132 if (get_Store_value(succ) == n) {
133 ir_node *adr = get_Store_ptr(succ);
136 * if this Alloc depends on another one,
139 if (is_depend_alloc(adr))
143 * We are storing n. As long as we do not further
144 * evaluate things, the pointer 'escape' here
152 * Should not happen, but if it does we leave the pointer
153 * path and do not track further
157 case iro_Call: { /* most complicated case */
158 ir_node *ptr = get_Call_ptr(succ);
161 if (is_SymConst_addr_ent(ptr)) {
163 ent = get_SymConst_entity(ptr);
165 /* we know the called entity */
166 for (j = get_Call_n_params(succ); j > 0;) {
167 if (get_Call_param(succ, --j) == n) {
168 /* n is the j'th param of the call */
169 if (get_method_param_access(ent, j) & ptr_access_store)
170 /* n is store in ent */
174 } else if (is_Sel(ptr)) {
177 /* go through all possible callees */
178 for (k = get_Call_n_callees(succ); k > 0;) {
180 ent = get_Call_callee(succ, --k);
182 if (is_unknown_entity(ent)) {
183 /* we don't know what will be called, a possible escape */
187 for (j = get_Call_n_params(succ); j > 0;) {
188 if (get_Call_param(succ, --j) == n) {
189 /* n is the j'th param of the call */
190 if (get_method_param_access(ent, j) & ptr_access_store)
191 /* n is store in ent */
196 } else /* we don't know want will called */
203 /* Bad: the allocate object is returned */
207 /* Hmm: if we do NOT leave the method, it's local */
208 if (is_method_leaving_raise(succ))
216 /* Bad: trace the tuple backwards */
217 for (j = get_irn_arity(succ) - 1; j >= 0; --j)
218 if (get_irn_n(succ, j) == n)
224 for (k = get_irn_n_outs(succ); k >= 0; --k) {
225 proj = get_irn_out(succ, k);
227 if (get_Proj_proj(proj) == j) {
228 /* we found the right Proj */
235 * If we haven't found the right Proj, succ is still
236 * the Tuple and the search will end here.
246 if (! mode_is_reference(get_irn_mode(succ)))
249 if (can_escape(succ))
256 * walker: search for Alloc nodes and follow the usages
258 static void find_allocations(ir_node *alloc, void *ctx)
260 walk_env_t *env = (walk_env_t*)ctx;
264 if (! is_Alloc(alloc))
267 /* we searching only for heap allocations */
268 if (get_Alloc_where(alloc) != heap_alloc)
272 for (i = get_irn_n_outs(alloc) - 1; i >= 0; --i) {
273 ir_node *proj = get_irn_out(alloc, i);
275 if (get_Proj_proj(proj) == pn_Alloc_res) {
283 * bad: no-one wants the result, should NOT happen but
284 * if it does we could delete it.
286 set_irn_link(alloc, env->dead_allocs);
287 env->dead_allocs = alloc;
292 if (! can_escape(adr)) {
293 set_irn_link(alloc, env->found_allocs);
294 env->found_allocs = alloc;
299 * walker: search for allocation Call nodes and follow the usages
301 static void find_allocation_calls(ir_node *call, void *ctx)
303 walk_env_t *env = (walk_env_t*)ctx;
310 adr = get_Call_ptr(call);
311 if (! is_SymConst_addr_ent(adr))
313 ent = get_SymConst_entity(adr);
314 if (! env->callback(ent))
318 for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
319 ir_node *res_proj = get_irn_out(call, i);
321 if (get_Proj_proj(res_proj) == pn_Call_T_result) {
322 for (i = get_irn_n_outs(res_proj) - 1; i >= 0; --i) {
323 ir_node *proj = get_irn_out(res_proj, i);
325 if (get_Proj_proj(proj) == 0) {
326 /* found first result */
337 * bad: no-one wants the result, should NOT happen but
338 * if it does we could delete it.
340 set_irn_link(call, env->dead_allocs);
341 env->dead_allocs = call;
346 if (! can_escape(adr)) {
347 set_irn_link(call, env->found_allocs);
348 env->found_allocs = call;
353 * Do the necessary graph transformations to transform
356 static void transform_allocs(ir_graph *irg, walk_env_t *env)
358 ir_node *alloc, *next, *mem, *sel, *size, *blk;
359 ir_type *ftp, *atp, *tp;
365 /* kill all dead allocs */
366 for (alloc = env->dead_allocs; alloc; alloc = next) {
367 next = (ir_node*)get_irn_link(alloc);
369 DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, alloc));
371 mem = get_Alloc_mem(alloc);
372 blk = get_nodes_block(alloc);
373 ir_node *const in[] = {
375 [pn_Alloc_X_regular] = new_r_Jmp(blk),
376 [pn_Alloc_X_except] = new_r_Bad(irg, mode_X),
378 turn_into_tuple(alloc, ARRAY_SIZE(in), in);
383 /* convert all non-escaped heap allocs into frame variables */
384 ftp = get_irg_frame_type(irg);
385 for (alloc = env->found_allocs; alloc; alloc = next) {
386 next = (ir_node*)get_irn_link(alloc);
387 size = get_Alloc_count(alloc);
388 atp = get_Alloc_type(alloc);
391 if (is_SymConst(size) && get_SymConst_kind(size) == symconst_type_size) {
392 /* if the size is a type size and the types matched */
393 assert(atp == get_SymConst_type(size));
395 } else if (is_Const(size)) {
396 ir_tarval *tv = get_Const_tarval(size);
398 if (tv != tarval_bad && tarval_is_long(tv) &&
399 get_type_state(atp) == layout_fixed &&
400 (unsigned)get_tarval_long(tv) == get_type_size_bytes(atp)) {
401 /* a already lowered type size */
406 if (tp && !is_unknown_type(tp)) {
407 /* we could determine the type, so we could place it on the frame */
408 dbg = get_irn_dbg_info(alloc);
409 blk = get_nodes_block(alloc);
411 DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on frame\n", irg, alloc, tp));
413 snprintf(name, sizeof(name), "%s_NE_%u", get_entity_name(get_irg_entity(irg)), nr++);
414 name[sizeof(name) - 1] = '\0';
415 ent = new_d_entity(ftp, new_id_from_str(name), get_Alloc_type(alloc), dbg);
417 sel = new_rd_simpleSel(dbg, get_nodes_block(alloc), get_irg_no_mem(irg), get_irg_frame(irg), ent);
418 mem = get_Alloc_mem(alloc);
420 ir_node *const in[] = {
422 [pn_Alloc_res] = sel,
423 [pn_Alloc_X_regular] = new_r_Jmp(blk),
424 [pn_Alloc_X_except] = new_r_Bad(irg, mode_X),
426 turn_into_tuple(alloc, ARRAY_SIZE(in), in);
432 * We could not determine the type or it is variable size.
433 * At least, we could place it on the stack
435 DBG((dbgHandle, LEVEL_DEFAULT, "%+F allocation of %+F type %+F placed on stack\n", irg, alloc));
436 set_Alloc_where(alloc, stack_alloc);
442 /* if allocs were removed somehow */
443 if (env->nr_removed && env->nr_deads) {
444 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
449 * Do the necessary graph transformations to transform
452 static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
454 ir_node *call, *next, *mem, *blk;
456 /* kill all dead allocs */
457 for (call = env->dead_allocs; call; call = next) {
458 next = (ir_node*)get_irn_link(call);
460 DBG((dbgHandle, LEVEL_1, "%+F allocation of %+F unused, deleted.\n", irg, call));
462 mem = get_Call_mem(call);
463 blk = get_nodes_block(call);
464 ir_node *const in[] = {
466 [pn_Call_T_result] = new_r_Bad(irg, mode_T),
467 [pn_Call_X_regular] = new_r_Jmp(blk),
468 [pn_Call_X_except] = new_r_Bad(irg, mode_X),
470 turn_into_tuple(call, ARRAY_SIZE(in), in);
475 /* convert all non-escaped heap allocs into frame variables */
476 for (call = env->found_allocs; call; call = next) {
477 next = (ir_node*)get_irn_link(call);
480 confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE);
484 /* Do simple and fast escape analysis for one graph. */
485 void escape_enalysis_irg(ir_graph *irg, check_alloc_entity_func callback)
489 if (get_irg_callee_info_state(irg) != irg_callee_info_consistent) {
490 /* no way yet to calculate this for one irg */
491 assert(! "need callee info");
495 if (irg_has_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS))
496 compute_irg_outs(irg);
498 env.found_allocs = NULL;
499 env.dead_allocs = NULL;
500 env.callback = callback;
506 /* search for Calls */
507 irg_walk_graph(irg, NULL, find_allocation_calls, &env);
508 transform_alloc_calls(irg, &env);
510 /* search for Alloc nodes */
511 irg_walk_graph(irg, NULL, find_allocations, &env);
512 transform_allocs(irg, &env);
516 /* Do simple and fast escape analysis for all graphs. */
517 void escape_analysis(int run_scalar_replace, check_alloc_entity_func callback)
521 walk_env_t *env, *elist;
522 (void) run_scalar_replace;
524 if (get_irp_callee_info_state() != irg_callee_info_consistent) {
525 assert(! "need callee info");
529 FIRM_DBG_REGISTER(dbgHandle, "firm.opt.escape_ana");
532 * We treat memory for speed: we first collect all info in a
533 * list of environments, than do the transformation.
534 * Doing it this way, no analysis info gets invalid while we run
540 env = OALLOC(&obst, walk_env_t);
541 env->found_allocs = NULL;
542 env->dead_allocs = NULL;
543 env->callback = callback;
545 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
546 ir_graph *irg = get_irp_irg(i);
548 assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
551 /* search for Calls */
552 irg_walk_graph(irg, NULL, find_allocation_calls, env);
554 /* search for Alloc nodes */
555 irg_walk_graph(irg, NULL, find_allocations, env);
558 if (env->found_allocs || env->dead_allocs) {
566 env = OALLOC(&obst, walk_env_t);
567 env->found_allocs = NULL;
568 env->dead_allocs = NULL;
569 env->callback = callback;
574 for (env = elist; env; env = env->next) {
575 transform_alloc_calls(env->irg, env);
578 for (env = elist; env; env = env->next) {
579 transform_allocs(env->irg, env);
583 obstack_free(&obst, NULL);