X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Ftailrec.c;h=22b04b10924a68215c00062a3b43cde5a955e6d3;hb=9d3c8631459f431c313160dab5778e8a7b88dd92;hp=0d10cdc9dccc4b79a52677ef13b7580857601647;hpb=83a16976c37f75e5d3a1f8a2c0a02c291a7b88ee;p=libfirm diff --git a/ir/opt/tailrec.c b/ir/opt/tailrec.c index 0d10cdc9d..22b04b109 100644 --- a/ir/opt/tailrec.c +++ b/ir/opt/tailrec.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -24,9 +24,7 @@ * @author Michael Beck * @version $Id$ */ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +#include "config.h" #include #include @@ -34,7 +32,7 @@ #include "debug.h" #include "iroptimize.h" #include "scalar_replace.h" -#include "array.h" +#include "array_t.h" #include "irprog_t.h" #include "irgwalk.h" #include "irgmod.h" @@ -46,14 +44,15 @@ #include "trouts.h" #include "irouts.h" #include "irhooks.h" -#include "xmalloc.h" +#include "ircons_t.h" +#include "irpass.h" DEBUG_ONLY(static firm_dbg_module_t *dbg); /** * the environment for collecting data */ -typedef struct _collect_t { +typedef struct collect_t { ir_node *proj_X; /**< initial exec proj */ ir_node *block; /**< old first block */ int blk_idx; /**< cfgpred index of the initial exec in block */ @@ -64,7 +63,8 @@ typedef struct _collect_t { /** * walker for collecting data, fills a collect_t environment */ -static void collect_data(ir_node *node, void *env) { +static void collect_data(ir_node *node, void *env) +{ collect_t *data = env; ir_node *pred; ir_op *op; @@ -77,7 +77,7 @@ static void collect_data(ir_node *node, void *env) { if (op == op_Proj) { ir_node *start = get_Proj_pred(pred); - if (get_irn_op(start) == op_Start) { + if (is_Start(start)) { if (get_Proj_proj(pred) == pn_Start_T_args) { /* found Proj(ProjT(Start)) */ set_irn_link(node, data->proj_data); @@ -97,7 +97,7 @@ static void collect_data(ir_node *node, void *env) { /* * the first block has the initial exec as cfg predecessor */ - if (node != get_irg_start_block(current_ir_graph)) { + if (node != get_irg_start_block(get_irn_irg(node))) { for (i = 0; i < n_pred; ++i) { if (get_Block_cfgpred(node, i) == data->proj_X) { data->block = node; @@ -113,6 +113,22 @@ static void collect_data(ir_node *node, void *env) { } } +typedef enum tail_rec_variants { + TR_DIRECT, /**< direct return value, i.e. return func(). */ + TR_ADD, /**< additive return value, i.e. return x +/- func() */ + TR_MUL, /**< multiplicative return value, i.e. 
return x * func() or return -func() */ + TR_BAD, /**< any other transformation */ + TR_UNKNOWN /**< during construction */ +} tail_rec_variants; + +typedef struct tr_env { + int n_tail_calls; /**< number of tail calls found */ + int n_ress; /**< number of return values */ + tail_rec_variants *variants; /**< return value variants */ + ir_node *rets; /**< list of returns that can be transformed */ +} tr_env; + + /** * do the graph reconstruction for tail-recursion elimination * @@ -120,20 +136,21 @@ static void collect_data(ir_node *node, void *env) { * @param rets linked list of all rets * @param n_tail_calls number of tail-recursion calls */ -static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { +static void do_opt_tail_rec(ir_graph *irg, tr_env *env) +{ ir_node *end_block = get_irg_end_block(irg); ir_node *block, *jmp, *call, *calls; ir_node **in; ir_node **phis; ir_node ***call_params; ir_node *p, *n; - int i, j, n_params; + int i, j, n_params, n_locs; collect_t data; int rem = get_optimize(); ir_entity *ent = get_irg_entity(irg); ir_type *method_tp = get_entity_type(ent); - assert(n_tail_calls); + assert(env->n_tail_calls > 0); /* we add new nodes, so the outs are inconsistent */ set_irg_outs_inconsistent(irg); @@ -169,18 +186,18 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { assert((data.proj_data || n_params == 0) && "Could not find Proj(ProjT(Start)) of non-void function"); /* allocate in's for phi and block construction */ - NEW_ARR_A(ir_node *, in, n_tail_calls + 1); + NEW_ARR_A(ir_node *, in, env->n_tail_calls + 1); in[0] = data.proj_X; /* turn Return's into Jmp's */ - for (i = 1, p = rets; p; p = n) { + for (i = 1, p = env->rets; p; p = n) { ir_node *block = get_nodes_block(p); n = get_irn_link(p); - in[i++] = new_r_Jmp(irg, block); + in[i++] = new_r_Jmp(block); - exchange(p, new_r_Bad(irg)); + // exchange(p, new_r_Bad(irg)); /* we might generate an endless loop, so add * the block to the keep-alive list */ @@ -188,8 +205,8 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { } /* create a new block at start */ - block = new_r_Block(irg, n_tail_calls + 1, in); - jmp = new_r_Jmp(irg, block); + block = new_r_Block(irg, env->n_tail_calls + 1, in); + jmp = new_r_Jmp(block); /* the old first block is now the second one */ set_Block_cfgpred(data.block, data.blk_idx, jmp); @@ -199,7 +216,7 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { /* build the memory phi */ i = 0; - in[i] = new_r_Proj(irg, get_irg_start_block(irg), get_irg_start(irg), mode_M, pn_Start_M); + in[i] = new_r_Proj(get_irg_start(irg), mode_M, pn_Start_M); set_irg_initial_mem(irg, in[i]); ++i; @@ -207,9 +224,9 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { in[i] = get_Call_mem(calls); ++i; } - assert(i == n_tail_calls + 1); + assert(i == env->n_tail_calls + 1); - phis[0] = new_r_Phi(irg, block, n_tail_calls + 1, in, mode_M); + phis[0] = new_r_Phi(block, env->n_tail_calls + 1, in, mode_M); /* build the data Phi's */ if (n_params > 0) { @@ -217,7 +234,7 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { ir_node *args; ir_node *args_bl; - NEW_ARR_A(ir_node **, call_params, n_tail_calls); + NEW_ARR_A(ir_node **, call_params, env->n_tail_calls); /* collect all parameters */ for (i = 0, calls = call; calls; calls = get_irn_link(calls)) { @@ -231,11 +248,11 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { for (i = 0; 
i < n_params; ++i) { ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i)); - in[0] = new_r_Proj(irg, args_bl, args, mode, i); - for (j = 0; j < n_tail_calls; ++j) + in[0] = new_r_Proj(args, mode, i); + for (j = 0; j < env->n_tail_calls; ++j) in[j + 1] = call_params[j][i]; - phis[i + 1] = new_r_Phi(irg, block, n_tail_calls + 1, in, mode); + phis[i + 1] = new_r_Phi(block, env->n_tail_calls + 1, in, mode); } } @@ -243,10 +260,7 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { * ok, we are here, so we have build and collected all needed Phi's * now exchange all Projs into links to Phi */ - for (p = data.proj_m; p; p = n) { - n = get_irn_link(p); - exchange(p, phis[0]); - } + exchange(data.proj_m, phis[0]); for (p = data.proj_data; p; p = n) { long proj = get_Proj_proj(p); @@ -259,11 +273,138 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { set_irg_doms_inconsistent(irg); set_irg_outs_inconsistent(irg); set_irg_extblk_inconsistent(irg); - set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent); + set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent); set_trouts_inconsistent(); set_irg_callee_info_state(irg, irg_callee_info_inconsistent); set_optimize(rem); + + /* check if we need new values */ + n_locs = 0; + for (i = 0; i < env->n_ress; ++i) { + if (env->variants[i] != TR_DIRECT) + ++n_locs; + } + + if (n_locs > 0) { + ir_node *bad, *start_block; + ir_node **in; + ir_mode **modes; + + NEW_ARR_A(ir_node *, in, n_locs); + NEW_ARR_A(ir_mode *, modes, n_locs); + ssa_cons_start(irg, n_locs); + + start_block = get_irg_start_block(irg); + set_cur_block(start_block); + + for (i = 0; i < env->n_ress; ++i) { + ir_type *tp = get_method_res_type(method_tp, i); + ir_mode *mode = get_type_mode(tp); + + modes[i] = mode; + if (env->variants[i] == TR_ADD) { + set_value(i, new_r_Const(irg, get_mode_null(mode))); + } else if (env->variants[i] == TR_MUL) { + set_value(i, new_r_Const(irg, get_mode_one(mode))); + } + } + mature_immBlock(start_block); + + /* no: we can kill all returns */ + bad = get_irg_bad(irg); + + for (p = env->rets; p; p = n) { + ir_node *block = get_nodes_block(p); + ir_node *call, *mem, *jmp, *tuple; + + set_cur_block(block); + n = get_irn_link(p); + + call = skip_Proj(get_Return_mem(p)); + assert(is_Call(call)); + + mem = get_Call_mem(call); + + /* create a new jump, free of CSE */ + set_optimize(0); + jmp = new_r_Jmp(block); + set_optimize(rem); + + for (i = 0; i < env->n_ress; ++i) { + if (env->variants[i] != TR_DIRECT) { + in[i] = get_value(i, modes[i]); + } else { + in[i] = bad; + } + } + /* create a new tuple for the return values */ + tuple = new_r_Tuple(block, env->n_ress, in); + + turn_into_tuple(call, pn_Call_max); + set_Tuple_pred(call, pn_Call_M, mem); + set_Tuple_pred(call, pn_Call_X_regular, jmp); + set_Tuple_pred(call, pn_Call_X_except, bad); + set_Tuple_pred(call, pn_Call_T_result, tuple); + set_Tuple_pred(call, pn_Call_P_value_res_base, bad); + + for (i = 0; i < env->n_ress; ++i) { + ir_node *res = get_Return_res(p, i); + if (env->variants[i] != TR_DIRECT) { + set_value(i, res); + } + } + + exchange(p, bad); + } + + /* finally fix all other returns */ + end_block = get_irg_end_block(irg); + for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) { + ir_node *ret = get_Block_cfgpred(end_block, i); + ir_node *block; + + /* search all Returns of a block */ + if (! 
is_Return(ret)) + continue; + + block = get_nodes_block(ret); + set_cur_block(block); + for (j = 0; j < env->n_ress; ++j) { + ir_node *pred = get_Return_res(ret, j); + ir_node *n; + + switch (env->variants[j]) { + case TR_DIRECT: + continue; + + case TR_ADD: + n = get_value(j, modes[j]); + n = new_r_Add(block, n, pred, modes[j]); + set_Return_res(ret, j, n); + break; + + case TR_MUL: + n = get_value(j, modes[j]); + n = new_r_Mul(block, n, pred, modes[j]); + set_Return_res(ret, j, n); + break; + + default: + assert(!"unexpected tail recursion variant"); + } + } + } + ssa_cons_finish(irg); + } else { + ir_node *bad = get_irg_bad(irg); + + /* no: we can kill all returns */ + for (p = env->rets; p; p = n) { + n = get_irn_link(p); + exchange(p, bad); + } + } } /** @@ -275,41 +416,190 @@ static void do_opt_tail_rec(ir_graph *irg, ir_node *rets, int n_tail_calls) { * * @return non-zero if it's ok to do tail recursion */ -static int check_lifetime_of_locals(ir_graph *irg) { - ir_node *irg_frame, *irg_val_param_base; +static int check_lifetime_of_locals(ir_graph *irg) +{ + ir_node *irg_frame; int i; + ir_type *frame_tp = get_irg_frame_type(irg); irg_frame = get_irg_frame(irg); for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) { ir_node *succ = get_irn_out(irg_frame, i); - if (is_Sel(succ) && is_address_taken(succ)) - return 0; + if (is_Sel(succ)) { + /* Check if we have compound arguments. + For now, we cannot handle them, */ + if (get_entity_owner(get_Sel_entity(succ)) != frame_tp) + return 0; + + if (is_address_taken(succ)) + return 0; + } } + return 1; +} - /* Check if we have compound arguments. - For now, we cannot handle them, */ - irg_val_param_base = get_irg_value_param_base(irg); - if (get_irn_n_outs(irg_val_param_base) > 0) - return 0; +/** + * Examine irn and detect the recursion variant. 
+ */ +static tail_rec_variants find_variant(ir_node *irn, ir_node *call) +{ + ir_node *a, *b; + tail_rec_variants va, vb, res; + + if (skip_Proj(skip_Proj(irn)) == call) { + /* found it */ + return TR_DIRECT; + } + switch (get_irn_opcode(irn)) { + case iro_Add: + /* try additive */ + a = get_Add_left(irn); + if (get_nodes_block(a) != get_nodes_block(call)) { + /* we are outside, ignore */ + va = TR_UNKNOWN; + } else { + va = find_variant(a, call); + if (va == TR_BAD) + return TR_BAD; + } + b = get_Add_right(irn); + if (get_nodes_block(b) != get_nodes_block(call)) { + /* we are outside, ignore */ + vb = TR_UNKNOWN; + } else { + vb = find_variant(b, call); + if (vb == TR_BAD) + return TR_BAD; + } + if (va == vb) { + res = va; + } + else if (va == TR_UNKNOWN) + res = vb; + else if (vb == TR_UNKNOWN) + res = va; + else { + /* they are different but none is TR_UNKNOWN -> incompatible */ + return TR_BAD; + } + if (res == TR_DIRECT || res == TR_ADD) + return TR_ADD; + /* not compatible */ + return TR_BAD; + + case iro_Sub: + /* try additive, but return value must be left */ + a = get_Sub_left(irn); + if (get_nodes_block(a) != get_nodes_block(call)) { + /* we are outside, ignore */ + va = TR_UNKNOWN; + } else { + va = find_variant(a, call); + if (va == TR_BAD) + return TR_BAD; + } + b = get_Sub_right(irn); + if (get_nodes_block(b) != get_nodes_block(call)) { + /* we are outside, ignore */ + vb = TR_UNKNOWN; + } else { + vb = find_variant(b, call); + if (vb != TR_UNKNOWN) + return TR_BAD; + } + res = va; + if (res == TR_DIRECT || res == TR_ADD) + return res; + /* not compatible */ + return TR_BAD; + + case iro_Mul: + /* try multiplicative */ + a = get_Mul_left(irn); + if (get_nodes_block(a) != get_nodes_block(call)) { + /* we are outside, ignore */ + va = TR_UNKNOWN; + } else { + va = find_variant(a, call); + if (va == TR_BAD) + return TR_BAD; + } + b = get_Mul_right(irn); + if (get_nodes_block(b) != get_nodes_block(call)) { + /* we are outside, ignore */ + vb = TR_UNKNOWN; + } else { + vb = find_variant(b, call); + if (vb == TR_BAD) + return TR_BAD; + } + if (va == vb) { + res = va; + } + else if (va == TR_UNKNOWN) + res = vb; + else if (vb == TR_UNKNOWN) + res = va; + else { + /* they are different but none is TR_UNKNOWN -> incompatible */ + return TR_BAD; + } + if (res == TR_DIRECT || res == TR_MUL) + return TR_MUL; + /* not compatible */ + return TR_BAD; + + case iro_Minus: + /* try multiplicative */ + a = get_Minus_op(irn); + res = find_variant(a, call); + if (res == TR_DIRECT) + return TR_MUL; + if (res == TR_MUL || res == TR_UNKNOWN) + return res; + /* not compatible */ + return TR_BAD; - return 1; + default: + return TR_UNKNOWN; + } } + /* * convert simple tail-calls into loops */ -int opt_tail_rec_irg(ir_graph *irg) { - ir_node *end_block; - int i, n_tail_calls = 0; - ir_node *rets = NULL; - ir_type *mtd_type, *call_type; +int opt_tail_rec_irg(ir_graph *irg) +{ + tr_env env; + ir_node *end_block; + int i, n_ress, n_tail_calls = 0; + ir_node *rets = NULL; + ir_type *mtd_type, *call_type; + ir_entity *ent; + + FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec"); assure_irg_outs(irg); if (! 
check_lifetime_of_locals(irg)) return 0; + ent = get_irg_entity(irg); + mtd_type = get_entity_type(ent); + n_ress = get_method_n_ress(mtd_type); + + env.variants = NULL; + env.n_ress = n_ress; + + if (n_ress > 0) { + NEW_ARR_A(tail_rec_variants, env.variants, n_ress); + + for (i = 0; i < n_ress; ++i) + env.variants[i] = TR_DIRECT; + } + /* * This tail recursion optimization works best * if the Returns are normalized. @@ -322,7 +612,6 @@ int opt_tail_rec_irg(ir_graph *irg) { for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) { ir_node *ret = get_Block_cfgpred(end_block, i); ir_node *call, *call_ptr; - ir_entity *ent; int j; ir_node **ress; @@ -342,26 +631,13 @@ int opt_tail_rec_irg(ir_graph *irg) { /* check if it's a recursive call */ call_ptr = get_Call_ptr(call); - if (! is_SymConst(call_ptr) || get_SymConst_kind(call_ptr) != symconst_addr_ent) + if (! is_Global(call_ptr)) continue; - ent = get_SymConst_entity(call_ptr); + ent = get_Global_entity(call_ptr); if (!ent || get_entity_irg(ent) != irg) continue; - /* ok, mem is routed to a recursive call, check return args */ - ress = get_Return_res_arr(ret); - for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) { - ir_node *irn = skip_Proj(skip_Proj(ress[j])); - - if (irn != call) { - /* not routed to a call */ - break; - } - } - if (j >= 0) - continue; - /* * Check, that the types match. At least in C * this might fail. @@ -383,6 +659,27 @@ int opt_tail_rec_irg(ir_graph *irg) { continue; } + /* ok, mem is routed to a recursive call, check return args */ + ress = get_Return_res_arr(ret); + for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) { + tail_rec_variants var = find_variant(ress[j], call); + + if (var >= TR_BAD) { + /* cannot be transformed */ + break; + } + if (var == TR_DIRECT) + var = env.variants[j]; + else if (env.variants[j] == TR_DIRECT) + env.variants[j] = var; + if (env.variants[j] != var) { + /* not compatible */ + break; + } + } + if (j >= 0) + continue; + /* here, we have found a call */ set_irn_link(call, get_irn_link(end_block)); set_irn_link(end_block, call); @@ -394,22 +691,31 @@ int opt_tail_rec_irg(ir_graph *irg) { } /* now, end_block->link contains the list of all tail calls */ - if (! n_tail_calls) + if (n_tail_calls <= 0) return 0; DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n", get_entity_ld_name(get_irg_entity(irg)), n_tail_calls)); hook_tail_rec(irg, n_tail_calls); - do_opt_tail_rec(irg, rets, n_tail_calls); + + env.n_tail_calls = n_tail_calls; + env.rets = rets; + do_opt_tail_rec(irg, &env); return n_tail_calls; } +ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name) +{ + return def_graph_pass_ret(name ? name : "tailrec", opt_tail_rec_irg); +} + /* * optimize tail recursion away */ -void opt_tail_recursion(void) { +void opt_tail_recursion(void) +{ int i; int n_opt_applications = 0; ir_graph *irg; @@ -419,12 +725,18 @@ void opt_tail_recursion(void) { for (i = get_irp_n_irgs() - 1; i >= 0; --i) { irg = get_irp_irg(i); - current_ir_graph = irg; - + ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK); if (opt_tail_rec_irg(irg)) ++n_opt_applications; + + ir_free_resources(irg, IR_RESOURCE_IRN_LINK); } DB((dbg, LEVEL_1, "Performed tail recursion for %d of %d graphs\n", n_opt_applications, get_irp_n_irgs())); } + +ir_prog_pass_t *opt_tail_recursion_pass(const char *name) +{ + return def_prog_pass(name ? name : "tailrec", opt_tail_recursion); +}
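
The variant machinery added above (TR_DIRECT/TR_ADD/TR_MUL) lets the pass turn returns that merely add to or multiply the recursive call result into loops, using an accumulator value seeded with the neutral element of the operation (get_mode_null() for TR_ADD, get_mode_one() for TR_MUL). As a rough source-level sketch of the idea only — the pass rewrites the Firm graph directly, and the C functions and names below are invented for illustration, not taken from the patch — a TR_MUL return like the one in fac() behaves, after the transformation, like the accumulator loop in fac_loop():

#include <stdio.h>

/* Sketch of the TR_MUL case recognized by find_variant(): the recursive
 * result only feeds a Mul before being returned. */
static int fac(int n)
{
	if (n <= 1)
		return 1;              /* non-recursive return */
	return n * fac(n - 1);     /* call result only used by a Mul: TR_MUL */
}

/* What the transformed graph behaves like: the accumulator starts at the
 * neutral element of '*' (a TR_ADD variant would start at 0), every former
 * tail call folds its partial result into it, and every remaining return
 * is combined with the accumulator. */
static int fac_loop(int n)
{
	int acc = 1;               /* get_mode_one() for TR_MUL */
	while (n > 1) {            /* former recursive case */
		acc *= n;              /* fold the partial result into the accumulator */
		n   -= 1;              /* former call argument becomes a Phi input */
	}
	return acc * 1;            /* former base-case return, multiplied with the accumulator */
}

int main(void)
{
	printf("%d %d\n", fac(5), fac_loop(5));   /* both print 120 */
	return 0;
}

Note that find_variant() accepts the recursive result only in the left operand of a Sub, and that mixing additive and multiplicative combinations around the same call is rejected as TR_BAD.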