2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Tail-recursion call optimization.
24 * @author Michael Beck
33 #include "iroptimize.h"
34 #include "scalar_replace.h"
41 #include "irgraph_t.h"
49 #include "opt_manage.h"
51 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* NOTE(review): this excerpt is gappy — the embedded original line numbers
 * jump (54, 56, 57, ...), so the doc-comment opener and the closing
 * "} collect_t;" of this typedef are not visible here. */
54 * the environment for collecting data
56 typedef struct collect_t {
57 ir_node *proj_X; /**< initial exec proj */
58 ir_node *block; /**< old first block */
59 int blk_idx; /**< cfgpred index of the initial exec in block */
60 ir_node *proj_m; /**< memory from start proj's */
/* proj_data is used as an intrusive singly-linked list: collect_data() chains
 * the parameter Projs through their irn_link (see set_irn_link below). */
61 ir_node *proj_data; /**< linked list of all parameter access proj's */
65 * walker for collecting data, fills a collect_t environment
/* Purpose: pre-/post-order graph walker callback. For each visited node it
 * records, into the collect_t env:
 *   - the Proj(ProjT(Start)) parameter accesses (chained via irn_link),
 *   - the ProjX(Start) initial-exec projection,
 *   - the first block (the one whose cfgpred is the initial exec).
 * NOTE(review): several lines are missing from this excerpt (original
 * numbering jumps) — in particular the `case` labels of the switch and the
 * closing braces; presumably a `case iro_Proj:` precedes line 75 and a
 * `case iro_Block:` precedes line 96 — confirm against the full file. */
67 static void collect_data(ir_node *node, void *env)
69 collect_t *data = (collect_t*)env;
73 switch (get_irn_opcode(node)) {
75 pred = get_Proj_pred(node);
77 opcode = get_irn_opcode(pred);
78 if (opcode == iro_Proj) {
79 ir_node *start = get_Proj_pred(pred);
81 if (is_Start(start)) {
82 if (get_Proj_proj(pred) == pn_Start_T_args) {
83 /* found Proj(ProjT(Start)) */
/* chain this parameter Proj onto the env's intrusive list */
84 set_irn_link(node, data->proj_data);
85 data->proj_data = node;
88 } else if (opcode == iro_Start) {
89 if (get_Proj_proj(node) == pn_Start_X_initial_exec) {
90 /* found ProjX(Start) */
96 int i, n_pred = get_Block_n_cfgpreds(node);
99 * the first block has the initial exec as cfg predecessor
/* skip the start block itself; only a successor block can have the
 * initial exec as a control-flow predecessor */
101 if (node != get_irg_start_block(get_irn_irg(node))) {
102 for (i = 0; i < n_pred; ++i) {
103 if (get_Block_cfgpred(node, i) == data->proj_X) {
/* Classification of how a recursive call's result feeds the Return.
 * NOTE(review): the closing "} tail_rec_variants;" is not visible in this
 * excerpt (original numbering jumps past it). */
117 typedef enum tail_rec_variants {
118 TR_DIRECT, /**< direct return value, i.e. return func(). */
119 TR_ADD, /**< additive return value, i.e. return x +/- func() */
120 TR_MUL, /**< multiplicative return value, i.e. return x * func() or return -func() */
121 TR_BAD, /**< any other transformation */
122 TR_UNKNOWN /**< during construction */
/* Environment shared between do_tailrec() (analysis) and do_opt_tail_rec()
 * (transformation). `rets` is an intrusive list chained via irn_link.
 * NOTE(review): the closing "} tr_env;" is not visible in this excerpt. */
125 typedef struct tr_env {
126 int n_tail_calls; /**< number of tail calls found */
127 int n_ress; /**< number of return values */
128 tail_rec_variants *variants; /**< return value variants */
129 ir_node *rets; /**< list of returns that can be transformed */
134 * do the graph reconstruction for tail-recursion elimination
136 * @param irg the graph that will reconstructed
137 * @param env tail recursion environment
/* Overall plan (as far as visible in this gappy excerpt):
 *  1. walk the graph to collect Start projections (collect_data),
 *  2. build a new loop-header block whose cfgpreds are the old initial
 *     exec plus one Jmp per transformed Return,
 *  3. build Phis merging the initial memory/arguments with the tail
 *     calls' memory/arguments, and reroute all old Projs to them,
 *  4. if any result variant is not TR_DIRECT, use the SSA-construction
 *     API (ssa_cons_start/finish) to accumulate the result across
 *     iterations (0 for TR_ADD, 1 for TR_MUL as neutral elements),
 *  5. turn each tail Call into a Tuple so the Return's consumers see
 *     the rerouted values, and kill the transformed Returns.
 * NOTE(review): many lines are missing here (original numbering jumps);
 * braces, some declarations (e.g. `data`, `p`, `n`, `args`, `modes`) and
 * several statements are not visible — consult the full file before
 * editing. */
139 static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
141 ir_node *end_block = get_irg_end_block(irg);
142 ir_node *block, *jmp, *call, *calls;
145 ir_node ***call_params;
147 int i, j, n_params, n_locs;
/* remember the optimization flag; presumably CSE is switched off and
 * restored via `rem` in lines not visible here — confirm in full file */
149 int rem = get_optimize();
150 ir_entity *ent = get_irg_entity(irg);
151 ir_type *method_tp = get_entity_type(ent);
153 assert(env->n_tail_calls > 0);
155 /* we add new blocks and change the control flow */
156 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
157 | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
159 /* we must build some new nodes WITHOUT CSE */
162 /* collect needed data */
166 data.proj_m = get_irg_initial_mem(irg);
167 data.proj_data = NULL;
168 irg_walk_graph(irg, NULL, collect_data, &data);
170 /* check number of arguments */
/* end_block->link holds the list of tail calls (built in do_tailrec) */
171 call = (ir_node*)get_irn_link(end_block);
172 n_params = get_Call_n_params(call);
174 assert(data.proj_X && "Could not find initial exec from Start");
175 assert(data.block && "Could not find first block");
176 assert(data.proj_m && "Could not find initial memory");
177 assert((data.proj_data || n_params == 0) && "Could not find Proj(ProjT(Start)) of non-void function");
179 /* allocate in's for phi and block construction */
180 NEW_ARR_A(ir_node *, in, env->n_tail_calls + 1);
182 /* build a new header block for the loop we create */
/* in[0] is the original entry edge; the tail-call Jmps follow */
184 in[i++] = data.proj_X;
186 /* turn Return's into Jmp's */
187 for (p = env->rets; p; p = n) {
188 ir_node *block = get_nodes_block(p);
190 n = (ir_node*)get_irn_link(p);
191 in[i++] = new_r_Jmp(block);
193 // exchange(p, new_r_Bad(irg));
195 /* we might generate an endless loop, so add
196 * the block to the keep-alive list */
197 add_End_keepalive(get_irg_end(irg), block);
199 assert(i == env->n_tail_calls + 1);
/* the new loop header; the old first block becomes its successor */
202 block = new_r_Block(irg, i, in);
203 jmp = new_r_Jmp(block);
205 /* the old first block is now the second one */
206 set_Block_cfgpred(data.block, data.blk_idx, jmp);
208 /* allocate phi's, position 0 contains the memory phi */
209 NEW_ARR_A(ir_node *, phis, n_params + 1);
211 /* build the memory phi */
213 in[i] = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M);
214 set_irg_initial_mem(irg, in[i]);
/* append each tail call's memory as a further Phi input */
217 for (calls = call; calls != NULL; calls = (ir_node*)get_irn_link(calls)) {
218 in[i] = get_Call_mem(calls);
221 assert(i == env->n_tail_calls + 1);
223 phis[0] = new_r_Phi(block, env->n_tail_calls + 1, in, mode_M);
225 /* build the data Phi's */
230 NEW_ARR_A(ir_node **, call_params, env->n_tail_calls);
232 /* collect all parameters */
233 for (i = 0, calls = call; calls != NULL;
234 calls = (ir_node*)get_irn_link(calls)) {
235 call_params[i] = get_Call_param_arr(calls);
239 /* build new Proj's and Phi's */
240 args = get_irg_args(irg);
241 for (i = 0; i < n_params; ++i) {
242 ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i));
/* Phi input 0: the incoming argument; inputs 1..n: each call's argument */
244 in[0] = new_r_Proj(args, mode, i);
245 for (j = 0; j < env->n_tail_calls; ++j)
246 in[j + 1] = call_params[j][i];
248 phis[i + 1] = new_r_Phi(block, env->n_tail_calls + 1, in, mode);
253 * ok, we are here, so we have build and collected all needed Phi's
254 * now exchange all Projs into links to Phi
256 exchange(data.proj_m, phis[0]);
257 for (p = data.proj_data; p; p = n) {
258 long proj = get_Proj_proj(p);
260 assert(0 <= proj && proj < n_params);
261 n = (ir_node*)get_irn_link(p);
262 exchange(p, phis[proj + 1]);
265 /* tail recursion was done, all info is invalid */
266 clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
267 | IR_GRAPH_STATE_CONSISTENT_LOOPINFO
268 | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
269 set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
273 /* check if we need new values */
/* n_locs presumably counts the non-TR_DIRECT results; its increment is in
 * a line not visible here — confirm against the full file */
275 for (i = 0; i < env->n_ress; ++i) {
276 if (env->variants[i] != TR_DIRECT) {
283 ir_node *start_block;
287 NEW_ARR_A(ir_node *, in, env->n_ress);
288 NEW_ARR_A(ir_mode *, modes, env->n_ress);
289 ssa_cons_start(irg, env->n_ress);
291 start_block = get_irg_start_block(irg);
292 set_r_cur_block(irg, start_block);
294 /* set the neutral elements for the iteration start */
295 for (i = 0; i < env->n_ress; ++i) {
296 ir_type *tp = get_method_res_type(method_tp, i);
297 ir_mode *mode = get_type_mode(tp);
/* accumulator seed: 0 for additive, 1 for multiplicative variants */
300 if (env->variants[i] == TR_ADD) {
301 set_r_value(irg, i, new_r_Const(irg, get_mode_null(mode)));
302 } else if (env->variants[i] == TR_MUL) {
303 set_r_value(irg, i, new_r_Const(irg, get_mode_one(mode)));
306 mature_immBlock(start_block);
308 /* no: we can kill all returns */
309 for (p = env->rets; p; p = n) {
310 ir_node *block = get_nodes_block(p);
311 ir_node *call, *mem, *jmp, *tuple;
313 set_r_cur_block(irg, block);
314 n = (ir_node*)get_irn_link(p);
/* the Return's memory comes (through Projs) from the tail Call */
316 call = skip_Proj(get_Return_mem(p));
317 assert(is_Call(call));
319 mem = get_Call_mem(call);
321 /* create a new jump, free of CSE */
323 jmp = new_r_Jmp(block);
326 for (i = 0; i < env->n_ress; ++i) {
327 ir_mode *mode = modes[i];
328 if (env->variants[i] != TR_DIRECT) {
329 in[i] = get_r_value(irg, i, mode);
331 in[i] = new_r_Bad(irg, mode);
334 /* create a new tuple for the return values */
335 tuple = new_r_Tuple(block, env->n_ress, in);
/* replace the Call by a Tuple so its Projs now yield mem/jmp/results */
337 turn_into_tuple(call, pn_Call_max+1);
338 set_Tuple_pred(call, pn_Call_M, mem);
339 set_Tuple_pred(call, pn_Call_X_regular, jmp);
340 set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
341 set_Tuple_pred(call, pn_Call_T_result, tuple);
343 for (i = 0; i < env->n_ress; ++i) {
344 ir_node *res = get_Return_res(p, i);
345 if (env->variants[i] != TR_DIRECT) {
346 set_r_value(irg, i, res);
/* the Return itself is dead now */
350 exchange(p, new_r_Bad(irg, mode_X));
353 /* finally fix all other returns */
/* non-recursive Returns must combine their value with the accumulator */
354 end_block = get_irg_end_block(irg);
355 for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
356 ir_node *ret = get_Block_cfgpred(end_block, i);
359 /* search all Returns of a block */
360 if (! is_Return(ret))
363 block = get_nodes_block(ret);
364 set_r_cur_block(irg, block);
365 for (j = 0; j < env->n_ress; ++j) {
366 ir_node *pred = get_Return_res(ret, j);
/* NOTE(review): the case labels (presumably TR_ADD / TR_MUL / default)
 * are among the lines missing from this excerpt */
369 switch (env->variants[j]) {
374 n = get_r_value(irg, j, modes[j]);
375 n = new_r_Add(block, n, pred, modes[j]);
376 set_Return_res(ret, j, n);
380 n = get_r_value(irg, j, modes[j]);
381 n = new_r_Mul(block, n, pred, modes[j]);
382 set_Return_res(ret, j, n);
386 assert(!"unexpected tail recursion variant");
390 ssa_cons_finish(irg);
/* else-branch: all results are TR_DIRECT, simply kill the Returns */
392 ir_node *bad = new_r_Bad(irg, mode_X);
394 /* no: we can kill all returns */
395 for (p = env->rets; p; p = n) {
396 n = (ir_node*)get_irn_link(p);
403 * Check the lifetime of locals in the given graph.
404 * Tail recursion can only be done, if we can prove that
405 * the lifetime of locals end with the recursive call.
406 * We do this by checking that no address of a local variable is
407 * stored or transmitted as an argument to a call.
409 * @return non-zero if it's ok to do tail recursion
/* NOTE(review): lines are missing from this excerpt (declarations of `i`
 * and `irg_frame`, the Sel/opcode check presumably guarding line 424, and
 * the return statements). Visible logic: iterate all out-edges of the
 * frame pointer and reject the graph if any frame entity's address
 * escapes (is_address_taken). */
411 static int check_lifetime_of_locals(ir_graph *irg)
415 ir_type *frame_tp = get_irg_frame_type(irg);
417 irg_frame = get_irg_frame(irg);
418 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
419 ir_node *succ = get_irn_out(irg_frame, i);
422 /* Check if we have compound arguments.
423 For now, we cannot handle them, */
424 if (get_entity_owner(get_Sel_entity(succ)) != frame_tp)
427 if (is_address_taken(succ))
435 * Examine irn and detect the recursion variant.
/* Classifies how `irn` (a Return result) depends on the recursive `call`:
 *   TR_DIRECT  - the call result itself (or outside-the-block value),
 *   TR_ADD     - call result combined via Add/Sub (call not on Sub's rhs),
 *   TR_MUL     - call result combined via Mul or negated via Minus,
 *   TR_BAD     - any other, untransformable, shape.
 * Recurses through Add/Sub/Mul/Minus operands; values from a different
 * block than the call are treated as loop-invariant and ignored.
 * NOTE(review): many lines are missing here (case labels, returns,
 * closing braces, the combination of va/vb into res) — the visible
 * structure only sketches the algorithm; consult the full file. */
437 static tail_rec_variants find_variant(ir_node *irn, ir_node *call)
440 tail_rec_variants va, vb, res;
/* skip_Proj twice: Proj(ProjT(Call)) reaches the Call itself */
442 if (skip_Proj(skip_Proj(irn)) == call) {
446 switch (get_irn_opcode(irn)) {
449 a = get_Add_left(irn);
450 if (get_nodes_block(a) != get_nodes_block(call)) {
451 /* we are outside, ignore */
454 va = find_variant(a, call);
458 b = get_Add_right(irn);
459 if (get_nodes_block(b) != get_nodes_block(call)) {
460 /* we are outside, ignore */
463 vb = find_variant(b, call);
470 else if (va == TR_UNKNOWN)
472 else if (vb == TR_UNKNOWN)
475 /* they are different but none is TR_UNKNOWN -> incompatible */
478 if (res == TR_DIRECT || res == TR_ADD)
484 /* try additive, but return value must be left */
485 a = get_Sub_left(irn);
486 if (get_nodes_block(a) != get_nodes_block(call)) {
487 /* we are outside, ignore */
490 va = find_variant(a, call);
494 b = get_Sub_right(irn);
495 if (get_nodes_block(b) != get_nodes_block(call)) {
496 /* we are outside, ignore */
499 vb = find_variant(b, call);
/* for Sub, the call result may not appear on the right-hand side:
 * x - func() is not an additive accumulation of func() */
500 if (vb != TR_UNKNOWN)
504 if (res == TR_DIRECT || res == TR_ADD)
510 /* try multiplicative */
511 a = get_Mul_left(irn);
512 if (get_nodes_block(a) != get_nodes_block(call)) {
513 /* we are outside, ignore */
516 va = find_variant(a, call);
520 b = get_Mul_right(irn);
521 if (get_nodes_block(b) != get_nodes_block(call)) {
522 /* we are outside, ignore */
525 vb = find_variant(b, call);
532 else if (va == TR_UNKNOWN)
534 else if (vb == TR_UNKNOWN)
537 /* they are different but none is TR_UNKNOWN -> incompatible */
540 if (res == TR_DIRECT || res == TR_MUL)
546 /* try multiplicative */
/* -func() counts as multiplicative (multiply by -1) */
547 a = get_Minus_op(irn);
548 res = find_variant(a, call);
549 if (res == TR_DIRECT)
551 if (res == TR_MUL || res == TR_UNKNOWN)
563 * convert simple tail-calls into loops
/* Analysis driver: scans every Return reaching the end block, checks that
 * its memory comes from a recursive self-call in the same block with a
 * matching call type, classifies each result via find_variant(), and links
 * the qualifying Calls (via end_block->link) and Returns (via env.rets)
 * for do_opt_tail_rec().
 * NOTE(review): lines are missing from this excerpt (declarations of
 * `rem`, `ent`, `end_block`, `env`, `j`, `ress`; several `continue`s and
 * closing braces; the n_tail_calls increment; the returned
 * ir_graph_state_t value) — consult the full file before editing. */
565 static ir_graph_state_t do_tailrec(ir_graph *irg)
569 int i, n_ress, n_tail_calls = 0;
570 ir_node *rets = NULL;
571 ir_type *mtd_type, *call_type;
575 FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
/* locals whose address escapes forbid the transformation */
577 if (! check_lifetime_of_locals(irg))
580 rem = current_ir_graph;
581 current_ir_graph = irg;
583 ent = get_irg_entity(irg);
584 mtd_type = get_entity_type(ent);
585 n_ress = get_method_n_ress(mtd_type);
591 NEW_ARR_A(tail_rec_variants, env.variants, n_ress);
/* start optimistic: every result is a direct tail value */
593 for (i = 0; i < n_ress; ++i)
594 env.variants[i] = TR_DIRECT;
/* irn_link is used for the call/return lists below */
597 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
599 end_block = get_irg_end_block(irg);
600 set_irn_link(end_block, NULL);
602 for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
603 ir_node *ret = get_Block_cfgpred(end_block, i);
604 ir_node *call, *call_ptr;
608 /* search all Returns of a block */
609 if (! is_Return(ret))
612 /* check, if it's a Return self() */
613 call = skip_Proj(get_Return_mem(ret));
617 /* the call must be in the same block as the return */
618 if (get_nodes_block(call) != get_nodes_block(ret))
621 /* check if it's a recursive call */
622 call_ptr = get_Call_ptr(call);
624 if (! is_SymConst_addr_ent(call_ptr))
627 ent = get_SymConst_entity(call_ptr);
628 if (!ent || get_entity_irg(ent) != irg)
632 * Check, that the types match. At least in C
635 mtd_type = get_entity_type(ent);
636 call_type = get_Call_type(call);
638 if (mtd_type != call_type) {
640 * Hmm, the types did not match, bad.
641 * This can happen in C when no prototype is given
642 * or K&R style is used.
644 DB((dbg, LEVEL_3, " tail recursion fails because of call type mismatch: %+F != %+F\n", mtd_type, call_type));
648 /* ok, mem is routed to a recursive call, check return args */
649 ress = get_Return_res_arr(ret);
650 for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) {
651 tail_rec_variants var = find_variant(ress[j], call);
654 /* cannot be transformed */
/* merge the per-return variant into the graph-wide one; TR_DIRECT is
 * the neutral element, any other conflict aborts for this return */
657 if (var == TR_DIRECT)
658 var = env.variants[j];
659 else if (env.variants[j] == TR_DIRECT)
660 env.variants[j] = var;
661 if (env.variants[j] != var) {
663 DB((dbg, LEVEL_3, " tail recursion fails for %d return value of %+F\n", j, ret));
670 /* here, we have found a call */
671 set_irn_link(call, get_irn_link(end_block));
672 set_irn_link(end_block, call);
675 /* link all returns, we will need this */
676 set_irn_link(ret, rets);
680 /* now, end_block->link contains the list of all tail calls */
681 if (n_tail_calls > 0) {
682 DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n",
683 get_entity_ld_name(get_irg_entity(irg)), n_tail_calls));
685 hook_tail_rec(irg, n_tail_calls);
687 env.n_tail_calls = n_tail_calls;
689 do_opt_tail_rec(irg, &env);
691 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
692 current_ir_graph = rem;
698 * This tail recursion optimization works best
699 * if the Returns are normalized.
/* Pass descriptor for the opt_manage framework: the listed states are the
 * preconditions required before do_tailrec runs.
 * NOTE(review): the initializer's name/function fields and closing "};"
 * are not visible in this excerpt. */
701 static optdesc_t opt_tailrec = {
703 IR_GRAPH_STATE_MANY_RETURNS | IR_GRAPH_STATE_NO_BADS | IR_GRAPH_STATE_CONSISTENT_OUTS,
/* Public entry point: run the tail-recursion optimization (with its
 * precondition management) on a single graph.
 * NOTE(review): the closing brace is not visible in this excerpt. */
707 int opt_tail_rec_irg(ir_graph *irg) {
708 perform_irg_optimization(irg, &opt_tailrec);
709 return 1; /* conservatively report changes */
/* Wrap opt_tail_rec_irg as a named graph pass (default name "tailrec").
 * NOTE(review): the surrounding braces are not visible in this excerpt. */
712 ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name)
714 return def_graph_pass_ret(name ? name : "tailrec", opt_tail_rec_irg);
718 * optimize tail recursion away
/* Whole-program driver: runs opt_tail_rec_irg on every graph in the
 * program and reports how many applications were counted.
 * NOTE(review): declarations of `i` and `n` and some braces are among the
 * lines missing from this excerpt. Since opt_tail_rec_irg always returns
 * 1, n_opt_applications counts processed graphs, not actual changes. */
720 void opt_tail_recursion(void)
723 size_t n_opt_applications = 0;
725 FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
727 DB((dbg, LEVEL_1, "Performing tail recursion ...\n"));
728 for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
729 ir_graph *irg = get_irp_irg(i);
731 if (opt_tail_rec_irg(irg))
732 ++n_opt_applications;
735 DB((dbg, LEVEL_1, "Done for %zu of %zu graphs.\n",
736 n_opt_applications, get_irp_n_irgs()));
/* Wrap opt_tail_recursion as a named program pass (default name "tailrec").
 * NOTE(review): the surrounding braces are not visible in this excerpt. */
739 ir_prog_pass_t *opt_tail_recursion_pass(const char *name)
741 return def_prog_pass(name ? name : "tailrec", opt_tail_recursion);