2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Tail-recursion call optimization.
24 * @author Michael Beck
33 #include "iroptimize.h"
34 #include "scalar_replace.h"
41 #include "irgraph_t.h"
/** Debug module handle; registered as "firm.opt.tailrec" in opt_tail_recursion(). */
49 DEBUG_ONLY(static firm_dbg_module_t *dbg);
52 * the environment for collecting data
54 typedef struct _collect_t {
55 ir_node *proj_X; /**< initial exec proj */
56 ir_node *block; /**< old first block */
57 int blk_idx; /**< cfgpred index of the initial exec in block */
58 ir_node *proj_m; /**< memory from start proj's */
59 ir_node *proj_data; /**< linked list of all parameter access proj's */
/* NOTE(review): the parameter-access Projs in proj_data are chained through
 * their irn_link field by collect_data() (see set_irn_link there). */
63 * walker for collecting data, fills a collect_t environment
/*
 * Fills the collect_t environment passed in env while walking the graph:
 *  - Proj(Proj_T_args(Start)) nodes are chained into data->proj_data via irn_link,
 *  - the ProjX(Start) initial-exec node and the first block (the block that has
 *    the initial exec as cfg predecessor) are recorded as well.
 * NOTE(review): registered as the post-walker of irg_walk_graph() in
 * do_opt_tail_rec(); several switch cases are outside this fragment — confirm
 * against the full file.
 */
65 static void collect_data(ir_node *node, void *env) {
66 collect_t *data = env;
70 switch (get_irn_opcode(node)) {
72 pred = get_Proj_pred(node);
74 op = get_irn_op(pred);
76 ir_node *start = get_Proj_pred(pred);
78 if (is_Start(start)) {
79 if (get_Proj_proj(pred) == pn_Start_T_args) {
80 /* found Proj(ProjT(Start)): a parameter access, remember it */
81 set_irn_link(node, data->proj_data);
82 data->proj_data = node;
85 } else if (op == op_Start) {
86 if (get_Proj_proj(node) == pn_Start_X_initial_exec) {
87 /* found ProjX(Start) */
93 int i, n_pred = get_Block_n_cfgpreds(node);
96 * the first block has the initial exec as cfg predecessor
98 if (node != get_irg_start_block(current_ir_graph)) {
99 for (i = 0; i < n_pred; ++i) {
100 if (get_Block_cfgpred(node, i) == data->proj_X) {
/** Classification of how the recursive call's result feeds the Return value. */
114 typedef enum tail_rec_variants {
115 TR_DIRECT, /**< direct return value, i.e. return func(). */
116 TR_ADD, /**< additive return value, i.e. return x +/- func() */
117 TR_MUL, /**< multiplicative return value, i.e. return x * func() or return -func() */
118 TR_BAD, /**< any other transformation */
119 TR_UNKNOWN /**< during construction */
/** Environment handed from opt_tail_rec_irg() to do_opt_tail_rec(). */
122 typedef struct tr_env {
123 int n_tail_calls; /**< number of tail calls found */
124 int n_ress; /**< number of return values */
125 tail_rec_variants *variants; /**< return value variants */
126 ir_node *rets; /**< list of returns that can be transformed */
/* NOTE(review): the rets list is chained through irn_link (see
 * set_irn_link(ret, rets) in opt_tail_rec_irg()). */
131 * do the graph reconstruction for tail-recursion elimination
133 * @param irg the graph that will be reconstructed
134 * @param env environment with the collected tail calls (env->rets is the
135 *            irn_link-chained list of Returns, env->n_tail_calls their count)
137 static void do_opt_tail_rec(ir_graph *irg, tr_env *env) {
138 ir_node *end_block = get_irg_end_block(irg);
139 ir_node *block, *jmp, *call, *calls;
142 ir_node ***call_params;
144 int i, j, n_params, n_locs;
146 int rem = get_optimize();
147 ir_entity *ent = get_irg_entity(irg);
148 ir_type *method_tp = get_entity_type(ent);
149 ir_graph *old = current_ir_graph;
151 current_ir_graph = irg;
153 assert(env->n_tail_calls > 0);
155 /* we add new nodes, so the outs are inconsistent */
156 set_irg_outs_inconsistent(irg);
158 /* we add new blocks and change the control flow */
159 set_irg_doms_inconsistent(irg);
160 set_irg_extblk_inconsistent(irg);
162 /* we add a new loop */
163 set_irg_loopinfo_inconsistent(irg);
165 /* calls are removed */
166 set_trouts_inconsistent();
168 /* we must build some new nodes WITHOUT CSE */
/* --- phase 1: locate Start projections and the first block --- */
171 /* collect needed data */
175 data.proj_m = get_irg_initial_mem(irg);
176 data.proj_data = NULL;
177 irg_walk_graph(irg, NULL, collect_data, &data);
179 /* check number of arguments */
/* the tail calls were chained onto end_block's irn_link by opt_tail_rec_irg() */
180 call = get_irn_link(end_block);
181 n_params = get_Call_n_params(call);
183 assert(data.proj_X && "Could not find initial exec from Start");
184 assert(data.block && "Could not find first block");
185 assert(data.proj_m && "Could not find initial memory");
186 assert((data.proj_data || n_params == 0) && "Could not find Proj(ProjT(Start)) of non-void function");
188 /* allocate in's for phi and block construction */
189 NEW_ARR_A(ir_node *, in, env->n_tail_calls + 1);
/* --- phase 2: replace every tail-recursive Return by a Jmp back to a new
 * loop header block --- */
193 /* turn Return's into Jmp's */
194 for (i = 1, p = env->rets; p; p = n) {
195 ir_node *block = get_nodes_block(p);
198 in[i++] = new_r_Jmp(irg, block);
200 // exchange(p, new_r_Bad(irg));
202 /* we might generate an endless loop, so add
203 * the block to the keep-alive list */
204 add_End_keepalive(get_irg_end(irg), block);
207 /* create a new block at start */
208 block = new_r_Block(irg, env->n_tail_calls + 1, in);
209 jmp = new_r_Jmp(irg, block);
211 /* the old first block is now the second one */
212 set_Block_cfgpred(data.block, data.blk_idx, jmp);
214 /* allocate phi's, position 0 contains the memory phi */
215 NEW_ARR_A(ir_node *, phis, n_params + 1);
217 /* build the memory phi */
/* in[0] is a fresh initial-memory Proj; in[1..] come from the tail calls */
219 in[i] = new_r_Proj(irg, get_irg_start_block(irg), get_irg_start(irg), mode_M, pn_Start_M);
220 set_irg_initial_mem(irg, in[i]);
223 for (calls = call; calls; calls = get_irn_link(calls)) {
224 in[i] = get_Call_mem(calls);
227 assert(i == env->n_tail_calls + 1);
229 phis[0] = new_r_Phi(irg, block, env->n_tail_calls + 1, in, mode_M);
231 /* build the data Phi's */
237 NEW_ARR_A(ir_node **, call_params, env->n_tail_calls);
239 /* collect all parameters */
240 for (i = 0, calls = call; calls; calls = get_irn_link(calls)) {
241 call_params[i] = get_Call_param_arr(calls);
245 /* build new Proj's and Phi's */
246 args = get_irg_args(irg);
247 args_bl = get_nodes_block(args);
248 for (i = 0; i < n_params; ++i) {
249 ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i));
/* Phi input 0: the original argument; inputs 1..: the call arguments */
251 in[0] = new_r_Proj(irg, args_bl, args, mode, i);
252 for (j = 0; j < env->n_tail_calls; ++j)
253 in[j + 1] = call_params[j][i];
255 phis[i + 1] = new_r_Phi(irg, block, env->n_tail_calls + 1, in, mode);
260 * ok, we are here, so we have build and collected all needed Phi's
261 * now exchange all Projs into links to Phi
263 exchange(data.proj_m, phis[0]);
264 for (p = data.proj_data; p; p = n) {
265 long proj = get_Proj_proj(p);
267 assert(0 <= proj && proj < n_params);
269 exchange(p, phis[proj + 1]);
272 /* tail recursion was done, all info is invalid */
273 set_irg_doms_inconsistent(irg);
274 set_irg_outs_inconsistent(irg);
275 set_irg_extblk_inconsistent(irg);
276 set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
277 set_trouts_inconsistent();
278 set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
/* --- phase 3: for non-direct variants (TR_ADD/TR_MUL) introduce accumulator
 * values via incremental SSA construction --- */
282 /* check if we need new values */
284 for (i = 0; i < env->n_ress; ++i) {
285 if (env->variants[i] != TR_DIRECT)
290 ir_node *bad, *start_block;
294 NEW_ARR_A(ir_node *, in, n_locs);
295 NEW_ARR_A(ir_mode *, modes, n_locs);
296 ssa_cons_start(irg, n_locs);
298 start_block = get_irg_start_block(irg);
299 set_cur_block(start_block);
301 for (i = 0; i < env->n_ress; ++i) {
302 ir_type *tp = get_method_res_type(method_tp, i);
303 ir_mode *mode = get_type_mode(tp);
/* seed the accumulator with the neutral element: 0 for addition, 1 for
 * multiplication */
306 if (env->variants[i] == TR_ADD) {
307 set_value(i, new_Const(mode, get_mode_null(mode)));
308 } else if (env->variants[i] == TR_MUL) {
309 set_value(i, new_Const(mode, get_mode_one(mode)));
312 mature_immBlock(start_block);
314 /* no: we can kill all returns */
315 bad = get_irg_bad(irg);
317 for (p = env->rets; p; p = n) {
318 ir_node *block = get_nodes_block(p);
319 ir_node *call, *mem, *jmp, *tuple;
321 set_cur_block(block);
324 call = skip_Proj(get_Return_mem(p));
325 assert(is_Call(call));
327 mem = get_Call_mem(call);
329 /* create a new jump, free of CSE */
334 for (i = 0; i < env->n_ress; ++i) {
335 if (env->variants[i] != TR_DIRECT) {
336 in[i] = get_value(i, modes[i]);
341 /* create a new tuple for the return values */
342 tuple = new_Tuple(env->n_ress, in);
/* dissolve the Call node: route its Proj users to the replacement preds */
344 turn_into_tuple(call, pn_Call_max);
345 set_Tuple_pred(call, pn_Call_M, mem);
346 set_Tuple_pred(call, pn_Call_X_regular, jmp);
347 set_Tuple_pred(call, pn_Call_X_except, bad);
348 set_Tuple_pred(call, pn_Call_T_result, tuple);
349 set_Tuple_pred(call, pn_Call_M_except, mem);
350 set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
352 for (i = 0; i < env->n_ress; ++i) {
353 ir_node *res = get_Return_res(p, i);
354 if (env->variants[i] != TR_DIRECT) {
/* --- phase 4: rewrite the remaining (non-recursive) Returns to fold in the
 * accumulator --- */
362 /* finally fix all other returns */
363 end_block = get_irg_end_block(irg);
364 for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
365 ir_node *ret = get_Block_cfgpred(end_block, i);
367 /* search all Returns of a block */
368 if (! is_Return(ret))
371 set_cur_block(get_nodes_block(ret));
372 for (j = 0; j < env->n_ress; ++j) {
373 ir_node *pred = get_Return_res(ret, j);
376 switch (env->variants[j]) {
381 n = get_value(j, modes[j]);
382 n = new_Add(n, pred, modes[j]);
383 set_Return_res(ret, j, n);
387 n = get_value(j, modes[j]);
388 n = new_Mul(n, pred, modes[j]);
389 set_Return_res(ret, j, n);
393 assert(!"unexpected tail recursion variant");
397 ssa_cons_finish(irg);
399 ir_node *bad = get_irg_bad(irg);
401 /* no: we can kill all returns */
402 for (p = env->rets; p; p = n) {
407 current_ir_graph = old;
411 * Check the lifetime of locals in the given graph.
412 * Tail recursion can only be done, if we can prove that
413 * the lifetime of locals end with the recursive call.
414 * We do this by checking that no address of a local variable is
415 * stored or transmitted as an argument to a call.
417 * @return non-zero if it's ok to do tail recursion
419 static int check_lifetime_of_locals(ir_graph *irg) {
420 ir_node *irg_frame, *irg_val_param_base;
/* reject if any Sel on the frame has its address taken (address escapes) */
423 irg_frame = get_irg_frame(irg);
424 for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
425 ir_node *succ = get_irn_out(irg_frame, i);
427 if (is_Sel(succ) && is_address_taken(succ))
431 /* Check if we have compound arguments.
432 For now, we cannot handle them, */
433 irg_val_param_base = get_irg_value_param_base(irg);
434 if (get_irn_n_outs(irg_val_param_base) > 0)
441 * Examine irn and detect the recursion variant.
/*
 * Classifies how the Return value irn depends on the recursive call:
 * TR_DIRECT (the call result itself), TR_ADD (x + call()), TR_MUL
 * (x * call() or -call()), TR_BAD (anything else), TR_UNKNOWN (no call
 * involved). Recurses over Add/Sub/Mul/Minus operands; operands outside the
 * call's MacroBlock are treated as opaque (no recursion into them).
 */
443 static tail_rec_variants find_variant(ir_node *irn, ir_node *call) {
445 tail_rec_variants va, vb, res;
/* irn is (a Proj of a Proj of) the call itself -> direct use of the result */
447 if (skip_Proj(skip_Proj(irn)) == call) {
451 switch (get_irn_opcode(irn)) {
/* Add: additive if exactly one side involves the call */
454 a = get_Add_left(irn);
455 if (get_irn_MacroBlock(a) != get_irn_MacroBlock(call)) {
456 /* we are outside, ignore */
459 va = find_variant(a, call);
463 b = get_Add_right(irn);
464 if (get_irn_MacroBlock(b) != get_irn_MacroBlock(call)) {
465 /* we are outside, ignore */
468 vb = find_variant(b, call);
475 else if (va == TR_UNKNOWN)
477 else if (vb == TR_UNKNOWN)
480 /* they are different but none is TR_UNKNOWN -> incompatible */
483 if (res == TR_DIRECT || res == TR_ADD)
489 /* try additive, but return value must be left */
490 a = get_Sub_left(irn);
491 if (get_irn_MacroBlock(a) != get_irn_MacroBlock(call)) {
492 /* we are outside, ignore */
495 va = find_variant(a, call);
/* the right operand must not involve the call (x - call() is not
 * expressible with a pre-seeded additive accumulator) */
499 b = get_Sub_right(irn);
500 if (get_irn_MacroBlock(b) != get_irn_MacroBlock(call)) {
501 /* we are outside, ignore */
504 vb = find_variant(b, call);
505 if (vb != TR_UNKNOWN)
509 if (res == TR_DIRECT || res == TR_ADD)
/* Mul: multiplicative if exactly one side involves the call */
515 /* try multiplicative */
516 a = get_Mul_left(irn);
517 if (get_irn_MacroBlock(a) != get_irn_MacroBlock(call)) {
518 /* we are outside, ignore */
521 va = find_variant(a, call);
525 b = get_Mul_right(irn);
526 if (get_irn_MacroBlock(b) != get_irn_MacroBlock(call)) {
527 /* we are outside, ignore */
530 vb = find_variant(b, call);
537 else if (va == TR_UNKNOWN)
539 else if (vb == TR_UNKNOWN)
542 /* they are different but none is TR_UNKNOWN -> incompatible */
545 if (res == TR_DIRECT || res == TR_MUL)
/* Minus: -call() is multiplicative (accumulate the sign as a factor) */
551 /* try multiplicative */
552 a = get_Minus_op(irn);
553 res = find_variant(a, call);
554 if (res == TR_DIRECT)
556 if (res == TR_MUL || res == TR_UNKNOWN)
568 * convert simple tail-calls into loops
/*
 * Scans all Returns of irg for "return self(...)" patterns (optionally behind
 * an additive/multiplicative wrapper, see find_variant()), chains the matching
 * Calls on end_block's irn_link and the Returns on the rets list, then hands
 * the collected data to do_opt_tail_rec().
 * Returns non-zero when the transformation was applied.
 * NOTE(review): requires IR_RESOURCE_IRN_LINK to be reserved by the caller
 * (done in opt_tail_recursion()).
 */
570 int opt_tail_rec_irg(ir_graph *irg) {
573 int i, n_ress, n_tail_calls = 0;
574 ir_node *rets = NULL;
575 ir_type *mtd_type, *call_type;
578 assure_irg_outs(irg);
/* bail out if a local's address can escape the recursive call */
580 if (! check_lifetime_of_locals(irg))
584 ent = get_irg_entity(irg);
585 mtd_type = get_entity_type(ent);
586 n_ress = get_method_n_ress(mtd_type);
/* start optimistically: every result is assumed directly returned */
592 NEW_ARR_A(tail_rec_variants, env.variants, n_ress);
594 for (i = 0; i < n_ress; ++i)
595 env.variants[i] = TR_DIRECT;
599 * This tail recursion optimization works best
600 * if the Returns are normalized.
602 normalize_n_returns(irg);
604 end_block = get_irg_end_block(irg);
605 set_irn_link(end_block, NULL);
607 for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
608 ir_node *ret = get_Block_cfgpred(end_block, i);
609 ir_node *call, *call_ptr;
613 /* search all Returns of a block */
614 if (! is_Return(ret))
617 /* check, if it's a Return self() */
618 call = skip_Proj(get_Return_mem(ret));
622 /* the call must be in the same block as the return */
623 if (get_nodes_block(call) != get_nodes_block(ret))
626 /* check if it's a recursive call */
627 call_ptr = get_Call_ptr(call);
629 if (! is_Global(call_ptr))
632 ent = get_Global_entity(call_ptr);
633 if (!ent || get_entity_irg(ent) != irg)
637 * Check, that the types match. At least in C
640 mtd_type = get_entity_type(ent);
641 call_type = get_Call_type(call);
643 if (mtd_type != call_type) {
645 * Hmm, the types did not match, bad.
646 * This can happen in C when no prototype is given
647 * or K&R style is used.
650 printf("Warning: Tail recursion fails because of different method and call types:\n");
652 dump_type(call_type);
657 /* ok, mem is routed to a recursive call, check return args */
658 ress = get_Return_res_arr(ret);
659 for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) {
660 tail_rec_variants var = find_variant(ress[j], call);
663 /* cannot be transformed */
/* merge this Return's variant with the one seen so far; a TR_DIRECT on
 * either side is compatible with anything, otherwise they must agree */
666 if (var == TR_DIRECT)
667 var = env.variants[j];
668 else if (env.variants[j] == TR_DIRECT)
669 env.variants[j] = var;
670 if (env.variants[j] != var) {
678 /* here, we have found a call */
679 set_irn_link(call, get_irn_link(end_block));
680 set_irn_link(end_block, call);
683 /* link all returns, we will need this */
684 set_irn_link(ret, rets);
688 /* now, end_block->link contains the list of all tail calls */
689 if (n_tail_calls <= 0)
692 DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n",
693 get_entity_ld_name(get_irg_entity(irg)), n_tail_calls));
695 hook_tail_rec(irg, n_tail_calls);
697 env.n_tail_calls = n_tail_calls;
699 do_opt_tail_rec(irg, &env);
705 * optimize tail recursion away
/*
 * Runs opt_tail_rec_irg() on every graph of the program, reserving the
 * irn_link resource around each run, and reports the number of transformed
 * graphs via the debug module.
 */
707 void opt_tail_recursion(void) {
709 int n_opt_applications = 0;
712 FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
714 for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
715 irg = get_irp_irg(i);
717 current_ir_graph = irg;
/* opt_tail_rec_irg() chains Calls/Returns through irn_link, so reserve it */
719 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
720 if (opt_tail_rec_irg(irg))
721 ++n_opt_applications;
723 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
726 DB((dbg, LEVEL_1, "Performed tail recursion for %d of %d graphs\n",
727 n_opt_applications, get_irp_n_irgs()));