+ * Examine irn and detect the recursion variant.
+ */
+static tail_rec_variants find_variant(ir_node *irn, ir_node *call)
+{
+ ir_node *a, *b;
+ tail_rec_variants va, vb, res;
+
+ /* The Call yields a result tuple, so a returned call result is
+  * Proj(Proj(call)); skipping both Projs and landing on the call
+  * itself means irn IS the unmodified recursive result. */
+ if (skip_Proj(skip_Proj(irn)) == call) {
+ /* found it */
+ return TR_DIRECT;
+ }
+ switch (get_irn_opcode(irn)) {
+ case iro_Add:
+ /* try additive */
+ a = get_Add_left(irn);
+ /* operands computed outside the call's block cannot contain the
+  * recursive result; treat them as opaque (TR_UNKNOWN). */
+ if (get_nodes_block(a) != get_nodes_block(call)) {
+ /* we are outside, ignore */
+ va = TR_UNKNOWN;
+ } else {
+ va = find_variant(a, call);
+ if (va == TR_BAD)
+ return TR_BAD;
+ }
+ b = get_Add_right(irn);
+ if (get_nodes_block(b) != get_nodes_block(call)) {
+ /* we are outside, ignore */
+ vb = TR_UNKNOWN;
+ } else {
+ vb = find_variant(b, call);
+ if (vb == TR_BAD)
+ return TR_BAD;
+ }
+ /* merge the two operand variants: TR_UNKNOWN is neutral, equal
+  * variants stay, any other mix is incompatible. */
+ if (va == vb) {
+ res = va;
+ }
+ else if (va == TR_UNKNOWN)
+ res = vb;
+ else if (vb == TR_UNKNOWN)
+ res = va;
+ else {
+ /* they are different but none is TR_UNKNOWN -> incompatible */
+ return TR_BAD;
+ }
+ /* a direct result under an Add becomes an additive variant */
+ if (res == TR_DIRECT || res == TR_ADD)
+ return TR_ADD;
+ /* not compatible */
+ return TR_BAD;
+
+ case iro_Sub:
+ /* try additive, but return value must be left */
+ a = get_Sub_left(irn);
+ if (get_nodes_block(a) != get_nodes_block(call)) {
+ /* we are outside, ignore */
+ va = TR_UNKNOWN;
+ } else {
+ va = find_variant(a, call);
+ if (va == TR_BAD)
+ return TR_BAD;
+ }
+ b = get_Sub_right(irn);
+ if (get_nodes_block(b) != get_nodes_block(call)) {
+ /* we are outside, ignore */
+ vb = TR_UNKNOWN;
+ } else {
+ vb = find_variant(b, call);
+ /* note the asymmetry to the Add case: the recursive result may
+  * only occur on the LEFT of a Sub, so anything other than
+  * TR_UNKNOWN on the right (TR_DIRECT/TR_ADD/TR_MUL/TR_BAD)
+  * makes the whole expression untransformable. */
+ if (vb != TR_UNKNOWN)
+ return TR_BAD;
+ }
+ res = va;
+ /* unlike Add, a TR_DIRECT left operand stays TR_DIRECT here;
+  * presumably the later transformation treats x - c like x + (-c)
+  * without needing an explicit accumulator -- TODO confirm against
+  * do_opt_tail_rec. */
+ if (res == TR_DIRECT || res == TR_ADD)
+ return res;
+ /* not compatible */
+ return TR_BAD;
+
+ case iro_Mul:
+ /* try multiplicative */
+ a = get_Mul_left(irn);
+ if (get_nodes_block(a) != get_nodes_block(call)) {
+ /* we are outside, ignore */
+ va = TR_UNKNOWN;
+ } else {
+ va = find_variant(a, call);
+ if (va == TR_BAD)
+ return TR_BAD;
+ }
+ b = get_Mul_right(irn);
+ if (get_nodes_block(b) != get_nodes_block(call)) {
+ /* we are outside, ignore */
+ vb = TR_UNKNOWN;
+ } else {
+ vb = find_variant(b, call);
+ if (vb == TR_BAD)
+ return TR_BAD;
+ }
+ /* same merge rule as in the Add case */
+ if (va == vb) {
+ res = va;
+ }
+ else if (va == TR_UNKNOWN)
+ res = vb;
+ else if (vb == TR_UNKNOWN)
+ res = va;
+ else {
+ /* they are different but none is TR_UNKNOWN -> incompatible */
+ return TR_BAD;
+ }
+ if (res == TR_DIRECT || res == TR_MUL)
+ return TR_MUL;
+ /* not compatible */
+ return TR_BAD;
+
+ case iro_Minus:
+ /* try multiplicative: -x is treated as x * (-1), so a direct
+  * result becomes a multiplicative variant */
+ a = get_Minus_op(irn);
+ res = find_variant(a, call);
+ if (res == TR_DIRECT)
+ return TR_MUL;
+ if (res == TR_MUL || res == TR_UNKNOWN)
+ return res;
+ /* not compatible */
+ return TR_BAD;
+
+ default:
+ /* any other node does not involve the recursive call */
+ return TR_UNKNOWN;
+ }
+}
+
+
+/*
+ * convert simple tail-calls into loops
+ */
+static ir_graph_state_t do_tailrec(ir_graph *irg)
+{
+ tr_env env;
+ ir_node *end_block;
+ int i, n_ress, n_tail_calls = 0;
+ ir_node *rets = NULL;
+ ir_type *mtd_type, *call_type;
+ ir_entity *ent;
+ ir_graph *rem;
+
+ FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
+
+ if (! check_lifetime_of_locals(irg))
+ return 0;
+
+ rem = current_ir_graph;
+ current_ir_graph = irg;
+
+ ent = get_irg_entity(irg);
+ mtd_type = get_entity_type(ent);
+ n_ress = get_method_n_ress(mtd_type);
+
+ env.variants = NULL;
+ env.n_ress = n_ress;
+
+ if (n_ress > 0) {
+ NEW_ARR_A(tail_rec_variants, env.variants, n_ress);
+
+ for (i = 0; i < n_ress; ++i)
+ env.variants[i] = TR_DIRECT;
+ }
+
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
+
+ end_block = get_irg_end_block(irg);
+ set_irn_link(end_block, NULL);
+
+ for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
+ ir_node *ret = get_Block_cfgpred(end_block, i);
+ ir_node *call, *call_ptr;
+ int j;
+ ir_node **ress;
+
+ /* search all Returns of a block */
+ if (! is_Return(ret))
+ continue;
+
+ /* check, if it's a Return self() */
+ call = skip_Proj(get_Return_mem(ret));
+ if (! is_Call(call))
+ continue;
+
+ /* the call must be in the same block as the return */
+ if (get_nodes_block(call) != get_nodes_block(ret))
+ continue;
+
+ /* check if it's a recursive call */
+ call_ptr = get_Call_ptr(call);
+
+ if (! is_SymConst_addr_ent(call_ptr))
+ continue;
+
+ ent = get_SymConst_entity(call_ptr);
+ if (!ent || get_entity_irg(ent) != irg)
+ continue;
+
+ /*
+ * Check, that the types match. At least in C
+ * this might fail.
+ */
+ mtd_type = get_entity_type(ent);
+ call_type = get_Call_type(call);
+
+ if (mtd_type != call_type) {
+ /*
+ * Hmm, the types did not match, bad.
+ * This can happen in C when no prototype is given
+ * or K&R style is used.
+ */
+ DB((dbg, LEVEL_3, " tail recursion fails because of call type mismatch: %+F != %+F\n", mtd_type, call_type));
+ continue;
+ }
+
+ /* ok, mem is routed to a recursive call, check return args */
+ ress = get_Return_res_arr(ret);
+ for (j = get_Return_n_ress(ret) - 1; j >= 0; --j) {
+ tail_rec_variants var = find_variant(ress[j], call);
+
+ if (var >= TR_BAD) {
+ /* cannot be transformed */
+ break;
+ }
+ if (var == TR_DIRECT)
+ var = env.variants[j];
+ else if (env.variants[j] == TR_DIRECT)
+ env.variants[j] = var;
+ if (env.variants[j] != var) {
+ /* not compatible */
+ DB((dbg, LEVEL_3, " tail recursion fails for %d return value of %+F\n", j, ret));
+ break;
+ }
+ }
+ if (j >= 0)
+ continue;
+
+ /* here, we have found a call */
+ set_irn_link(call, get_irn_link(end_block));
+ set_irn_link(end_block, call);
+ ++n_tail_calls;
+
+ /* link all returns, we will need this */
+ set_irn_link(ret, rets);
+ rets = ret;
+ }
+
+ /* now, end_block->link contains the list of all tail calls */
+ if (n_tail_calls > 0) {
+ DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n",
+ get_entity_ld_name(get_irg_entity(irg)), n_tail_calls));
+
+ hook_tail_rec(irg, n_tail_calls);
+
+ env.n_tail_calls = n_tail_calls;
+ env.rets = rets;
+ do_opt_tail_rec(irg, &env);
+ }
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+ current_ir_graph = rem;
+ return 0;
+}
+
+
+/*
+ * This tail recursion optimization works best
+ * if the Returns are normalized.
+ */
+/* Optimization descriptor: name, the graph states required before the
+ * pass runs (normalized Returns, no Bad nodes, consistent out edges),
+ * and the worker function. */
+static optdesc_t opt_tailrec = {
+ "tail-recursion",
+ IR_GRAPH_STATE_MANY_RETURNS | IR_GRAPH_STATE_NO_BADS | IR_GRAPH_STATE_CONSISTENT_OUTS,
+ do_tailrec,
+};
+
+/* Public entry point: run the tail-recursion optimization on irg via the
+ * generic optimization driver. Always reports a change since the driver
+ * does not propagate one. */
+int opt_tail_rec_irg(ir_graph *irg) {
+ perform_irg_optimization(irg, &opt_tailrec);
+ return 1; /* conservatively report changes */
+}
+
+/* Create a pass-manager wrapper for opt_tail_rec_irg.
+ * @param name  pass name, or NULL for the default "tailrec" */
+ir_graph_pass_t *opt_tail_rec_irg_pass(const char *name)
+{
+ return def_graph_pass_ret(name ? name : "tailrec", opt_tail_rec_irg);
+}
+
+/*