X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Ftailrec.c;h=a294bde2eb422f0f6a6aee2ab9d582b4e6aab7c0;hb=8e4e49e66d1d578b31a5ffce9bb6ff94ba985dfb;hp=fc580fbf09e128810bdebdeaa740c640471bb9a4;hpb=d304e6e0053ecf1de3f541121ee70d7542bf9f84;p=libfirm

diff --git a/ir/opt/tailrec.c b/ir/opt/tailrec.c
index fc580fbf0..a294bde2e 100644
--- a/ir/opt/tailrec.c
+++ b/ir/opt/tailrec.c
@@ -22,7 +22,6 @@
  * @brief Tail-recursion call optimization.
  * @date 08.06.2004
  * @author Michael Beck
- * @version $Id$
  */
 #include "config.h"
 
@@ -46,8 +45,9 @@
 #include "irhooks.h"
 #include "ircons_t.h"
 #include "irpass.h"
+#include "opt_manage.h"
 
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
 /**
  * the environment for collecting data
@@ -152,11 +152,8 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 	assert(env->n_tail_calls > 0);
 
 	/* we add new blocks and change the control flow */
-	set_irg_doms_inconsistent(irg);
-	set_irg_extblk_inconsistent(irg);
-
-	/* calls are removed */
-	set_trouts_inconsistent();
+	clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+	                   | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
 
 	/* we must build some new nodes WITHOUT CSE */
 	set_optimize(0);
@@ -228,7 +225,6 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 	if (n_params > 0) {
 		ir_node *calls;
 		ir_node *args;
-		ir_node *args_bl;
 
 		NEW_ARR_A(ir_node **, call_params, env->n_tail_calls);
 
@@ -241,7 +237,6 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 
 		/* build new Proj's and Phi's */
 		args    = get_irg_args(irg);
-		args_bl = get_nodes_block(args);
 		for (i = 0; i < n_params; ++i) {
 			ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i));
 
@@ -267,10 +262,9 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 	}
 
 	/* tail recursion was done, all info is invalid */
-	set_irg_doms_inconsistent(irg);
-	set_irg_extblk_inconsistent(irg);
-	set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
-	set_trouts_inconsistent();
+	clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+	                   | IR_GRAPH_STATE_CONSISTENT_LOOPINFO
+	                   | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
 	set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
 
 	set_optimize(rem);
@@ -339,7 +333,7 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
 
 		/* create a new tuple for the return values */
 		tuple = new_r_Tuple(block, env->n_ress, in);
-		turn_into_tuple(call, pn_Call_max);
+		turn_into_tuple(call, pn_Call_max+1);
 		set_Tuple_pred(call, pn_Call_M, mem);
 		set_Tuple_pred(call, pn_Call_X_regular, jmp);
 		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
@@ -567,7 +561,7 @@ static tail_rec_variants find_variant(ir_node *irn, ir_node *call)
 /*
  * convert simple tail-calls into loops
  */
-int opt_tail_rec_irg(ir_graph *irg)
+static ir_graph_state_t do_tailrec(ir_graph *irg)
 {
 	tr_env env;
 	ir_node *end_block;
@@ -579,8 +573,6 @@ int opt_tail_rec_irg(ir_graph *irg)
 
 	FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
 
-	assure_irg_outs(irg);
-
 	if (! check_lifetime_of_locals(irg))
 		return 0;
 
@@ -601,12 +593,6 @@ int opt_tail_rec_irg(ir_graph *irg)
 		env.variants[i] = TR_DIRECT;
 	}
 
-	/*
-	 * This tail recursion optimization works best
-	 * if the Returns are normalized.
-	 */
-	normalize_n_returns(irg);
-
 	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 
 	end_block = get_irg_end_block(irg);
@@ -634,10 +620,10 @@ int opt_tail_rec_irg(ir_graph *irg)
 
 		/* check if it's a recursive call */
 		call_ptr = get_Call_ptr(call);
 
-		if (! is_Global(call_ptr))
+		if (! is_SymConst_addr_ent(call_ptr))
 			continue;
 
-		ent = get_Global_entity(call_ptr);
+		ent = get_SymConst_entity(call_ptr);
 		if (!ent || get_entity_irg(ent) != irg)
 			continue;
@@ -668,7 +654,7 @@ int opt_tail_rec_irg(ir_graph *irg)
 			break;
 		}
 		if (var == TR_DIRECT)
-			var = env.variants[j];
+			var = env.variants[j];
 		else if (env.variants[j] == TR_DIRECT)
 			env.variants[j] = var;
 		if (env.variants[j] != var) {
@@ -703,7 +689,23 @@ int opt_tail_rec_irg(ir_graph *irg)
 	}
 	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
 	current_ir_graph = rem;
-	return n_tail_calls;
+	return 0;
+}
+
+
+/*
+ * This tail recursion optimization works best
+ * if the Returns are normalized.
+ */
+static optdesc_t opt_tailrec = {
+	"tail-recursion",
+	IR_GRAPH_STATE_MANY_RETURNS | IR_GRAPH_STATE_NO_BADS | IR_GRAPH_STATE_CONSISTENT_OUTS,
+	do_tailrec,
+};
+
+int opt_tail_rec_irg(ir_graph *irg) {
+	perform_irg_optimization(irg, &opt_tailrec);
+	return 1; /* conservatively report changes */
 }
 
 ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name)
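
As background for the hunks above: tail-recursion optimization rewrites a self-recursive call that is the last action before a Return into a jump back to the function entry, so the recursion becomes a loop and the call arguments become Phi inputs at the new loop header (this is what do_opt_tail_rec() builds with its Proj/Phi construction and the Tuple rewiring of the Call). A minimal C-level sketch of the idea follows; the function names are made up for illustration and are not taken from the libfirm sources.

#include <stdio.h>

/* Tail-recursive form: the recursive call is the last thing that happens
 * before the return, so it can be replaced by a jump to the entry. */
static unsigned long fac_rec(unsigned long n, unsigned long acc)
{
	if (n <= 1)
		return acc;
	return fac_rec(n - 1, acc * n);   /* tail call */
}

/* Loop form after the transformation: the parameters become
 * loop-carried variables instead of fresh call arguments. */
static unsigned long fac_loop(unsigned long n, unsigned long acc)
{
	while (n > 1) {
		acc *= n;
		n   -= 1;
	}
	return acc;
}

int main(void)
{
	/* both compute 10! = 3628800 */
	printf("%lu %lu\n", fac_rec(10, 1), fac_loop(10, 1));
	return 0;
}

In the Firm graph, find_variant() and the TR_DIRECT classification decide which Returns are simple enough for exactly this rewrite before do_opt_tail_rec() performs it.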