X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Ftailrec.c;h=e1945ea418a54fc3862e187615907984842f791a;hb=0aa3f54e9f29fbed49bb4781c28ab1157499a92e;hp=b0402da9d39cb9d12f5e4d15926df9c4c1a69d47;hpb=1ec30d95387eb392ba5a1adc7958ebd91383d59c;p=libfirm diff --git a/ir/opt/tailrec.c b/ir/opt/tailrec.c index b0402da9d..e1945ea41 100644 --- a/ir/opt/tailrec.c +++ b/ir/opt/tailrec.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -22,18 +22,16 @@ * @brief Tail-recursion call optimization. * @date 08.06.2004 * @author Michael Beck - * @version $Id$ */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +#include "config.h" #include #include +#include "debug.h" #include "iroptimize.h" #include "scalar_replace.h" -#include "array.h" +#include "array_t.h" #include "irprog_t.h" #include "irgwalk.h" #include "irgmod.h" @@ -45,12 +43,15 @@ #include "trouts.h" #include "irouts.h" #include "irhooks.h" -#include "xmalloc.h" +#include "ircons_t.h" +#include "irpass.h" + +DEBUG_ONLY(static firm_dbg_module_t *dbg;) /** * the environment for collecting data */ -typedef struct _collect_t { +typedef struct collect_t { ir_node *proj_X; /**< initial exec proj */ ir_node *block; /**< old first block */ int blk_idx; /**< cfgpred index of the initial exec in block */ @@ -61,27 +62,28 @@ typedef struct _collect_t { /** * walker for collecting data, fills a collect_t environment */ -static void collect_data(ir_node *node, void *env) { - collect_t *data = env; +static void collect_data(ir_node *node, void *env) +{ + collect_t *data = (collect_t*)env; ir_node *pred; - ir_op *op; + ir_opcode opcode; switch (get_irn_opcode(node)) { case iro_Proj: pred = get_Proj_pred(node); - op = get_irn_op(pred); - if (op == op_Proj) { + opcode = (ir_opcode)get_irn_opcode(pred); + if (opcode == iro_Proj) { ir_node *start = get_Proj_pred(pred); - if (get_irn_op(start) == op_Start) { + if (is_Start(start)) { if (get_Proj_proj(pred) == pn_Start_T_args) { /* found Proj(ProjT(Start)) */ set_irn_link(node, data->proj_data); data->proj_data = node; } } - } else if (op == op_Start) { + } else if (opcode == iro_Start) { if (get_Proj_proj(node) == pn_Start_X_initial_exec) { /* found ProjX(Start) */ data->proj_X = node; @@ -94,7 +96,7 @@ static void collect_data(ir_node *node, void *env) { /* * the first block has the initial exec as cfg predecessor */ - if (node != get_irg_start_block(current_ir_graph)) { + if (node != get_irg_start_block(get_irn_irg(node))) { for (i = 0; i < n_pred; ++i) { if (get_Block_cfgpred(node, i) == data->proj_X) { data->block = node; @@ -110,40 +112,46 @@ static void collect_data(ir_node *node, void *env) { } } +typedef enum tail_rec_variants { + TR_DIRECT, /**< direct return value, i.e. return func(). */ + TR_ADD, /**< additive return value, i.e. return x +/- func() */ + TR_MUL, /**< multiplicative return value, i.e. 
return x * func() or return -func() */ + TR_BAD, /**< any other transformation */ + TR_UNKNOWN /**< during construction */ +} tail_rec_variants; + +typedef struct tr_env { + int n_tail_calls; /**< number of tail calls found */ + int n_ress; /**< number of return values */ + tail_rec_variants *variants; /**< return value variants */ + ir_node *rets; /**< list of returns that can be transformed */ +} tr_env; + + /** * do the graph reconstruction for tail-recursion elimination * - * @param irg the graph that will reconstructed - * @param rets linked list of all rets - * @param n_tail_calls number of tail-recursion calls + * @param irg the graph that will reconstructed + * @param env tail recursion environment */ -static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { +static void do_opt_tail_rec(ir_graph *irg, tr_env *env) +{ ir_node *end_block = get_irg_end_block(irg); ir_node *block, *jmp, *call, *calls; ir_node **in; ir_node **phis; ir_node ***call_params; ir_node *p, *n; - int i, j, n_params; + int i, j, n_params, n_locs; collect_t data; int rem = get_optimize(); ir_entity *ent = get_irg_entity(irg); ir_type *method_tp = get_entity_type(ent); - assert(n_tail_calls); - - /* we add new nodes, so the outs are inconsistent */ - set_irg_outs_inconsistent(irg); + assert(env->n_tail_calls > 0); /* we add new blocks and change the control flow */ - set_irg_doms_inconsistent(irg); - set_irg_extblk_inconsistent(irg); - - /* we add a new loop */ - set_irg_loopinfo_inconsistent(irg); - - /* calls are removed */ - set_trouts_inconsistent(); + clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE); /* we must build some new nodes WITHOUT CSE */ set_optimize(0); @@ -157,7 +165,7 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { irg_walk_graph(irg, NULL, collect_data, &data); /* check number of arguments */ - call = get_irn_link(end_block); + call = (ir_node*)get_irn_link(end_block); n_params = get_Call_n_params(call); assert(data.proj_X && "Could not find initial exec from Start"); @@ -166,27 +174,30 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { assert((data.proj_data || n_params == 0) && "Could not find Proj(ProjT(Start)) of non-void function"); /* allocate in's for phi and block construction */ - NEW_ARR_A(ir_node *, in, n_tail_calls + 1); + NEW_ARR_A(ir_node *, in, env->n_tail_calls + 1); - in[0] = data.proj_X; + /* build a new header block for the loop we create */ + i = 0; + in[i++] = data.proj_X; /* turn Return's into Jmp's */ - for (i = 1, p = rets; p; p = n) { + for (p = env->rets; p; p = n) { ir_node *block = get_nodes_block(p); - n = get_irn_link(p); - in[i++] = new_r_Jmp(irg, block); + n = (ir_node*)get_irn_link(p); + in[i++] = new_r_Jmp(block); - exchange(p, new_r_Bad(irg)); + // exchange(p, new_r_Bad(irg)); /* we might generate an endless loop, so add - * the block to the keep-alive list */ + * the block to the keep-alive list */ add_End_keepalive(get_irg_end(irg), block); } + assert(i == env->n_tail_calls + 1); - /* create a new block at start */ - block = new_r_Block(irg, n_tail_calls + 1, in); - jmp = new_r_Jmp(irg, block); + /* now create it */ + block = new_r_Block(irg, i, in); + jmp = new_r_Jmp(block); /* the old first block is now the second one */ set_Block_cfgpred(data.block, data.blk_idx, jmp); @@ -196,43 +207,42 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { /* build the memory phi */ i = 0; - in[i] = new_r_Proj(irg, get_irg_start_block(irg), 
get_irg_start(irg), mode_M, pn_Start_M); + in[i] = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M); set_irg_initial_mem(irg, in[i]); ++i; - for (calls = call; calls; calls = get_irn_link(calls)) { + for (calls = call; calls != NULL; calls = (ir_node*)get_irn_link(calls)) { in[i] = get_Call_mem(calls); ++i; } - assert(i == n_tail_calls + 1); + assert(i == env->n_tail_calls + 1); - phis[0] = new_r_Phi(irg, block, n_tail_calls + 1, in, mode_M); + phis[0] = new_r_Phi(block, env->n_tail_calls + 1, in, mode_M); /* build the data Phi's */ if (n_params > 0) { ir_node *calls; ir_node *args; - ir_node *args_bl; - NEW_ARR_A(ir_node **, call_params, n_tail_calls); + NEW_ARR_A(ir_node **, call_params, env->n_tail_calls); /* collect all parameters */ - for (i = 0, calls = call; calls; calls = get_irn_link(calls)) { + for (i = 0, calls = call; calls != NULL; + calls = (ir_node*)get_irn_link(calls)) { call_params[i] = get_Call_param_arr(calls); ++i; } /* build new Proj's and Phi's */ args = get_irg_args(irg); - args_bl = get_nodes_block(args); for (i = 0; i < n_params; ++i) { ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i)); - in[0] = new_r_Proj(irg, args_bl, args, mode, i); - for (j = 0; j < n_tail_calls; ++j) + in[0] = new_r_Proj(args, mode, i); + for (j = 0; j < env->n_tail_calls; ++j) in[j + 1] = call_params[j][i]; - phis[i + 1] = new_r_Phi(irg, block, n_tail_calls + 1, in, mode); + phis[i + 1] = new_r_Phi(block, env->n_tail_calls + 1, in, mode); } } @@ -240,27 +250,147 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { * ok, we are here, so we have build and collected all needed Phi's * now exchange all Projs into links to Phi */ - for (p = data.proj_m; p; p = n) { - n = get_irn_link(p); - exchange(p, phis[0]); - } + exchange(data.proj_m, phis[0]); for (p = data.proj_data; p; p = n) { long proj = get_Proj_proj(p); assert(0 <= proj && proj < n_params); - n = get_irn_link(p); + n = (ir_node*)get_irn_link(p); exchange(p, phis[proj + 1]); } /* tail recursion was done, all info is invalid */ - set_irg_doms_inconsistent(irg); - set_irg_outs_inconsistent(irg); - set_irg_extblk_inconsistent(irg); - set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent); - set_trouts_inconsistent(); + clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE + | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO); set_irg_callee_info_state(irg, irg_callee_info_inconsistent); set_optimize(rem); + + /* check if we need new values */ + n_locs = 0; + for (i = 0; i < env->n_ress; ++i) { + if (env->variants[i] != TR_DIRECT) { + ++n_locs; + break; + } + } + + if (n_locs > 0) { + ir_node *start_block; + ir_node **in; + ir_mode **modes; + + NEW_ARR_A(ir_node *, in, env->n_ress); + NEW_ARR_A(ir_mode *, modes, env->n_ress); + ssa_cons_start(irg, env->n_ress); + + start_block = get_irg_start_block(irg); + set_r_cur_block(irg, start_block); + + /* set the neutral elements for the iteration start */ + for (i = 0; i < env->n_ress; ++i) { + ir_type *tp = get_method_res_type(method_tp, i); + ir_mode *mode = get_type_mode(tp); + + modes[i] = mode; + if (env->variants[i] == TR_ADD) { + set_r_value(irg, i, new_r_Const(irg, get_mode_null(mode))); + } else if (env->variants[i] == TR_MUL) { + set_r_value(irg, i, new_r_Const(irg, get_mode_one(mode))); + } + } + mature_immBlock(start_block); + + /* no: we can kill all returns */ + for (p = env->rets; p; p = n) { + ir_node *block = get_nodes_block(p); + ir_node *jmp, *tuple; + + set_r_cur_block(irg, block); + n = (ir_node*)get_irn_link(p); + + 
ir_node *const call = skip_Proj(get_Return_mem(p)); + ir_node *const mem = get_Call_mem(call); + + /* create a new jump, free of CSE */ + set_optimize(0); + jmp = new_r_Jmp(block); + set_optimize(rem); + + for (i = 0; i < env->n_ress; ++i) { + ir_mode *mode = modes[i]; + if (env->variants[i] != TR_DIRECT) { + in[i] = get_r_value(irg, i, mode); + } else { + in[i] = new_r_Bad(irg, mode); + } + } + /* create a new tuple for the return values */ + tuple = new_r_Tuple(block, env->n_ress, in); + + turn_into_tuple(call, pn_Call_max+1); + set_Tuple_pred(call, pn_Call_M, mem); + set_Tuple_pred(call, pn_Call_X_regular, jmp); + set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X)); + set_Tuple_pred(call, pn_Call_T_result, tuple); + + for (i = 0; i < env->n_ress; ++i) { + ir_node *res = get_Return_res(p, i); + if (env->variants[i] != TR_DIRECT) { + set_r_value(irg, i, res); + } + } + + exchange(p, new_r_Bad(irg, mode_X)); + } + + /* finally fix all other returns */ + end_block = get_irg_end_block(irg); + for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) { + ir_node *ret = get_Block_cfgpred(end_block, i); + ir_node *block; + + /* search all Returns of a block */ + if (! is_Return(ret)) + continue; + + block = get_nodes_block(ret); + set_r_cur_block(irg, block); + for (j = 0; j < env->n_ress; ++j) { + ir_node *pred = get_Return_res(ret, j); + ir_node *n; + + switch (env->variants[j]) { + case TR_DIRECT: + continue; + + case TR_ADD: + n = get_r_value(irg, j, modes[j]); + n = new_r_Add(block, n, pred, modes[j]); + set_Return_res(ret, j, n); + break; + + case TR_MUL: + n = get_r_value(irg, j, modes[j]); + n = new_r_Mul(block, n, pred, modes[j]); + set_Return_res(ret, j, n); + break; + + default: + assert(!"unexpected tail recursion variant"); + } + } + } + ssa_cons_finish(irg); + } else { + ir_node *bad = new_r_Bad(irg, mode_X); + + /* no: we can kill all returns */ + for (p = env->rets; p; p = n) { + n = (ir_node*)get_irn_link(p); + exchange(p, bad); + } + } } /** @@ -272,42 +402,200 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { * * @return non-zero if it's ok to do tail recursion */ -static int check_lifetime_of_locals(ir_graph *irg) { - ir_node *irg_frame = get_irg_frame(irg); +static int check_lifetime_of_locals(ir_graph *irg) +{ + ir_node *irg_frame; int i; + ir_type *frame_tp = get_irg_frame_type(irg); - if (get_irg_outs_state(irg) != outs_consistent) - compute_irg_outs(irg); - + irg_frame = get_irg_frame(irg); for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) { ir_node *succ = get_irn_out(irg_frame, i); - if (is_Sel(succ) && is_address_taken(succ)) - return 0; + if (is_Sel(succ)) { + /* Check if we have compound arguments. + For now, we cannot handle them, */ + if (get_entity_owner(get_Sel_entity(succ)) != frame_tp) + return 0; + + if (is_address_taken(succ)) + return 0; + } } return 1; } +/** + * Examine irn and detect the recursion variant. 
+ */ +static tail_rec_variants find_variant(ir_node *irn, ir_node *call) +{ + ir_node *a, *b; + tail_rec_variants va, vb, res; + + if (skip_Proj(skip_Proj(irn)) == call) { + /* found it */ + return TR_DIRECT; + } + switch (get_irn_opcode(irn)) { + case iro_Add: + /* try additive */ + a = get_Add_left(irn); + if (get_nodes_block(a) != get_nodes_block(call)) { + /* we are outside, ignore */ + va = TR_UNKNOWN; + } else { + va = find_variant(a, call); + if (va == TR_BAD) + return TR_BAD; + } + b = get_Add_right(irn); + if (get_nodes_block(b) != get_nodes_block(call)) { + /* we are outside, ignore */ + vb = TR_UNKNOWN; + } else { + vb = find_variant(b, call); + if (vb == TR_BAD) + return TR_BAD; + } + if (va == vb) { + res = va; + } + else if (va == TR_UNKNOWN) + res = vb; + else if (vb == TR_UNKNOWN) + res = va; + else { + /* they are different but none is TR_UNKNOWN -> incompatible */ + return TR_BAD; + } + if (res == TR_DIRECT || res == TR_ADD) + return TR_ADD; + /* not compatible */ + return TR_BAD; + + case iro_Sub: + /* try additive, but return value must be left */ + a = get_Sub_left(irn); + if (get_nodes_block(a) != get_nodes_block(call)) { + /* we are outside, ignore */ + va = TR_UNKNOWN; + } else { + va = find_variant(a, call); + if (va == TR_BAD) + return TR_BAD; + } + b = get_Sub_right(irn); + if (get_nodes_block(b) != get_nodes_block(call)) { + /* we are outside, ignore */ + vb = TR_UNKNOWN; + } else { + vb = find_variant(b, call); + if (vb != TR_UNKNOWN) + return TR_BAD; + } + res = va; + if (res == TR_DIRECT || res == TR_ADD) + return res; + /* not compatible */ + return TR_BAD; + + case iro_Mul: + /* try multiplicative */ + a = get_Mul_left(irn); + if (get_nodes_block(a) != get_nodes_block(call)) { + /* we are outside, ignore */ + va = TR_UNKNOWN; + } else { + va = find_variant(a, call); + if (va == TR_BAD) + return TR_BAD; + } + b = get_Mul_right(irn); + if (get_nodes_block(b) != get_nodes_block(call)) { + /* we are outside, ignore */ + vb = TR_UNKNOWN; + } else { + vb = find_variant(b, call); + if (vb == TR_BAD) + return TR_BAD; + } + if (va == vb) { + res = va; + } + else if (va == TR_UNKNOWN) + res = vb; + else if (vb == TR_UNKNOWN) + res = va; + else { + /* they are different but none is TR_UNKNOWN -> incompatible */ + return TR_BAD; + } + if (res == TR_DIRECT || res == TR_MUL) + return TR_MUL; + /* not compatible */ + return TR_BAD; + + case iro_Minus: + /* try multiplicative */ + a = get_Minus_op(irn); + res = find_variant(a, call); + if (res == TR_DIRECT) + return TR_MUL; + if (res == TR_MUL || res == TR_UNKNOWN) + return res; + /* not compatible */ + return TR_BAD; + + default: + return TR_UNKNOWN; + } +} + + /* * convert simple tail-calls into loops */ -int opt_tail_rec_irg(ir_graph *irg) { - ir_node *end_block; - int i, n_tail_calls = 0; - ir_node *rets = NULL; - ir_type *mtd_type, *call_type; +void opt_tail_rec_irg(ir_graph *irg) +{ + tr_env env; + ir_node *end_block; + int i, n_ress, n_tail_calls = 0; + ir_node *rets = NULL; + ir_type *mtd_type, *call_type; + ir_entity *ent; + ir_graph *rem; + + assure_irg_properties(irg, + IR_GRAPH_PROPERTY_MANY_RETURNS + | IR_GRAPH_PROPERTY_NO_BADS + | IR_GRAPH_PROPERTY_CONSISTENT_OUTS); + + FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec"); + + if (! check_lifetime_of_locals(irg)) { + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL); + return; + } - if (! get_opt_tail_recursion() || ! get_opt_optimize()) - return 0; + rem = current_ir_graph; + current_ir_graph = irg; - if (! 
check_lifetime_of_locals(irg)) - return 0; + ent = get_irg_entity(irg); + mtd_type = get_entity_type(ent); + n_ress = get_method_n_ress(mtd_type); - /* - * This tail recursion optimization works best - * if the Returns are normalized. - */ - normalize_n_returns(irg); + env.variants = NULL; + env.n_ress = n_ress; + + if (n_ress > 0) { + NEW_ARR_A(tail_rec_variants, env.variants, n_ress); + + for (i = 0; i < n_ress; ++i) + env.variants[i] = TR_DIRECT; + } + + ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); end_block = get_irg_end_block(irg); set_irn_link(end_block, NULL); @@ -315,7 +603,6 @@ int opt_tail_rec_irg(ir_graph *irg) { for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) { ir_node *ret = get_Block_cfgpred(end_block, i); ir_node *call, *call_ptr; - ir_entity *ent; int j; ir_node **ress; @@ -328,36 +615,24 @@ int opt_tail_rec_irg(ir_graph *irg) { if (! is_Call(call)) continue; + /* the call must be in the same block as the return */ + if (get_nodes_block(call) != get_nodes_block(ret)) + continue; + /* check if it's a recursive call */ call_ptr = get_Call_ptr(call); - if (get_irn_op(call_ptr) != op_SymConst) - continue; - - if (get_SymConst_kind(call_ptr) != symconst_addr_ent) + if (! is_SymConst_addr_ent(call_ptr)) continue; ent = get_SymConst_entity(call_ptr); if (!ent || get_entity_irg(ent) != irg) continue; - /* ok, mem is routed to a recursive call, check return args */ - ress = get_Return_res_arr(ret); - for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) { - ir_node *irn = skip_Proj(skip_Proj(ress[j])); - - if (irn != call) { - /* not routed to a call */ - break; - } - } - if (j >= 0) - continue; - /* - * Check, that the types match. At least in C - * this might fail. - */ + * Check, that the types match. At least in C + * this might fail. + */ mtd_type = get_entity_type(ent); call_type = get_Call_type(call); @@ -367,14 +642,33 @@ int opt_tail_rec_irg(ir_graph *irg) { * This can happen in C when no prototype is given * or K&R style is used. */ -#if 0 - printf("Warning: Tail recursion fails because of different method and call types:\n"); - dump_type(mtd_type); - dump_type(call_type); -#endif - return 0; + DB((dbg, LEVEL_3, " tail recursion fails because of call type mismatch: %+F != %+F\n", mtd_type, call_type)); + continue; } + /* ok, mem is routed to a recursive call, check return args */ + ress = get_Return_res_arr(ret); + for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) { + tail_rec_variants var = find_variant(ress[j], call); + + if (var >= TR_BAD) { + /* cannot be transformed */ + break; + } + if (var == TR_DIRECT) { + var = env.variants[j]; + } else if (env.variants[j] == TR_DIRECT) { + env.variants[j] = var; + } + if (env.variants[j] != var) { + /* not compatible */ + DB((dbg, LEVEL_3, " tail recursion fails for %d return value of %+F\n", j, ret)); + break; + } + } + if (j >= 0) + continue; + /* here, we have found a call */ set_irn_link(call, get_irn_link(end_block)); set_irn_link(end_block, call); @@ -386,37 +680,45 @@ int opt_tail_rec_irg(ir_graph *irg) { } /* now, end_block->link contains the list of all tail calls */ - if (! 
n_tail_calls) - return 0; - - if (get_opt_tail_recursion_verbose() && get_firm_verbosity() > 1) - printf(" Performing tail recursion for graph %s and %d Calls\n", - get_entity_ld_name(get_irg_entity(irg)), n_tail_calls); - - hook_tail_rec(irg, n_tail_calls); - do_opt_tail_rec(irg, rets, n_tail_calls); + if (n_tail_calls > 0) { + DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n", + get_entity_ld_name(get_irg_entity(irg)), n_tail_calls)); + + hook_tail_rec(irg, n_tail_calls); + + env.n_tail_calls = n_tail_calls; + env.rets = rets; + do_opt_tail_rec(irg, &env); + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_NONE); + } else { + confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL); + } + ir_free_resources(irg, IR_RESOURCE_IRN_LINK); + current_ir_graph = rem; +} - return n_tail_calls; +ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name) +{ + return def_graph_pass(name ? name : "tailrec", opt_tail_rec_irg); } /* * optimize tail recursion away */ -void opt_tail_recursion(void) { - int i; - int n_opt_applications = 0; - ir_graph *irg; +void opt_tail_recursion(void) +{ + size_t i, n; - if (! get_opt_tail_recursion() || ! get_opt_optimize()) - return; - - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { - irg = get_irp_irg(i); + FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec"); - if (opt_tail_rec_irg(irg)) - ++n_opt_applications; + DB((dbg, LEVEL_1, "Performing tail recursion ...\n")); + for (i = 0, n = get_irp_n_irgs(); i < n; ++i) { + ir_graph *irg = get_irp_irg(i); + opt_tail_rec_irg(irg); } +} - if (get_opt_tail_recursion_verbose()) - printf("Performed tail recursion for %d of %d graphs\n", n_opt_applications, get_irp_n_irgs()); +ir_prog_pass_t *opt_tail_recursion_pass(const char *name) +{ + return def_prog_pass(name ? name : "tailrec", opt_tail_recursion); }
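
Illustration only, not part of the patch above: the new tail_rec_variants classification that find_variant() computes corresponds to the following source-level return shapes (taken from the enum comments: "return func()", "return x +/- func()", "return x * func() or return -func()"). The functions gcd, sum and fac below are made-up examples, and whether the pass actually fires on them depends on the frontend producing the expected Firm graph.

/* TR_DIRECT: the Return uses the Call result unchanged. */
static unsigned gcd(unsigned a, unsigned b)
{
	if (b == 0)
		return a;
	return gcd(b, a % b);
}

/* TR_ADD: return x + func(...). */
static int sum(int n)
{
	if (n == 0)
		return 0;
	return n + sum(n - 1);
}

/* TR_MUL: return x * func(...), or return -func(...). */
static int fac(int n)
{
	if (n <= 1)
		return 1;
	return n * fac(n - 1);
}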
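
Also for illustration, a rough C rendering (my reading of do_opt_tail_rec(), not code from this patch) of what the transformed graph computes for the TR_MUL example above: the per-result accumulator is seeded with the neutral element (get_mode_one() for TR_MUL, get_mode_null() for TR_ADD) so the first iteration is a no-op, it is updated where the recursive Call used to be, and it is folded into the surviving Return with new_r_Mul()/new_r_Add().

/* Sketch of the loop the pass builds for fac(); fac_transformed is a
 * hypothetical name used only for this note. */
static int fac_transformed(int n)
{
	int acc = 1;               /* neutral element, cf. get_mode_one(mode) */
	for (;;) {
		if (n <= 1)
			return acc * 1;    /* remaining Return: new_r_Mul(acc, old result) */
		acc = acc * n;         /* former tail-call Return now updates the
		                          accumulator via set_r_value() */
		n = n - 1;             /* call argument feeds the parameter Phi in the
		                          new loop header block */
	}
}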
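
Finally, a hedged sketch of the kind of function the (pre-existing) check_lifetime_of_locals() guard is meant to reject. A local whose address escapes stays on the frame, and reusing a single frame across loop iterations is not safe in general, so the pass conservatively skips such graphs. This assumes the frontend keeps c as an address-taken frame entity; read_sum is a hypothetical example name.

#include <stdio.h>

/* Tail-recursive in the TR_ADD sense, but &c escapes into the fscanf()
 * call, so the Sel of the frame entity for c is address-taken and
 * check_lifetime_of_locals() returns 0 for this graph. */
static int read_sum(FILE *f)
{
	int c;
	if (fscanf(f, "%d", &c) != 1)
		return 0;
	return c + read_sum(f);
}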