+ phis[0] = new_r_Phi(block, env->n_tail_calls + 1, in, mode_M);
+
+ /* build the data Phi's */
+ if (n_params > 0) {
+ ir_node *calls;
+ ir_node *args;
+ ir_node *args_bl;
+
+ NEW_ARR_A(ir_node **, call_params, env->n_tail_calls);
+
+ /* collect all parameters */
+ for (i = 0, calls = call; calls; calls = get_irn_link(calls)) {
+ call_params[i] = get_Call_param_arr(calls);
+ ++i;
+ }
+
+ /* build new Proj's and Phi's */
+ args = get_irg_args(irg);
+ args_bl = get_nodes_block(args);
+ for (i = 0; i < n_params; ++i) {
+ ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i));
+
+ in[0] = new_r_Proj(args_bl, args, mode, i);
+ for (j = 0; j < env->n_tail_calls; ++j)
+ in[j + 1] = call_params[j][i];
+
+ phis[i + 1] = new_r_Phi(block, env->n_tail_calls + 1, in, mode);
+ }
+ }
+
+ /*
+ * ok, we are here, so we have build and collected all needed Phi's
+ * now exchange all Projs into links to Phi
+ */
+ exchange(data.proj_m, phis[0]);
+ for (p = data.proj_data; p; p = n) {
+ long proj = get_Proj_proj(p);
+
+ assert(0 <= proj && proj < n_params);
+ n = get_irn_link(p);
+ exchange(p, phis[proj + 1]);
+ }
+
+ /* tail recursion was done, all info is invalid */
+ set_irg_doms_inconsistent(irg);
+ set_irg_outs_inconsistent(irg);
+ set_irg_extblk_inconsistent(irg);
+ set_irg_loopinfo_state(current_ir_graph, loopinfo_cf_inconsistent);
+ set_trouts_inconsistent();
+ set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
+
+ set_optimize(rem);
+
+ /* check if we need new values */
+ n_locs = 0;
+ for (i = 0; i < env->n_ress; ++i) {
+ if (env->variants[i] != TR_DIRECT)
+ ++n_locs;
+ }
+
+ if (n_locs > 0) {
+ ir_node *bad, *start_block;
+ ir_node **in;
+ ir_mode **modes;
+
+ NEW_ARR_A(ir_node *, in, n_locs);
+ NEW_ARR_A(ir_mode *, modes, n_locs);
+ ssa_cons_start(irg, n_locs);
+
+ start_block = get_irg_start_block(irg);
+ set_cur_block(start_block);
+
+ for (i = 0; i < env->n_ress; ++i) {
+ ir_type *tp = get_method_res_type(method_tp, i);
+ ir_mode *mode = get_type_mode(tp);
+
+ modes[i] = mode;
+ if (env->variants[i] == TR_ADD) {
+ set_value(i, new_Const(get_mode_null(mode)));
+ } else if (env->variants[i] == TR_MUL) {
+ set_value(i, new_Const(get_mode_one(mode)));
+ }
+ }
+ mature_immBlock(start_block);
+
+ /* no: we can kill all returns */
+ bad = get_irg_bad(irg);
+
+ for (p = env->rets; p; p = n) {
+ ir_node *block = get_nodes_block(p);
+ ir_node *call, *mem, *jmp, *tuple;
+
+ set_cur_block(block);
+ n = get_irn_link(p);
+
+ call = skip_Proj(get_Return_mem(p));
+ assert(is_Call(call));
+
+ mem = get_Call_mem(call);
+
+ /* create a new jump, free of CSE */
+ set_optimize(0);
+ jmp = new_Jmp();
+ set_optimize(rem);
+
+ for (i = 0; i < env->n_ress; ++i) {
+ if (env->variants[i] != TR_DIRECT) {
+ in[i] = get_value(i, modes[i]);
+ } else {
+ in[i] = bad;
+ }
+ }
+ /* create a new tuple for the return values */
+ tuple = new_Tuple(env->n_ress, in);
+
+ turn_into_tuple(call, pn_Call_max);
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, jmp);
+ set_Tuple_pred(call, pn_Call_X_except, bad);
+ set_Tuple_pred(call, pn_Call_T_result, tuple);
+ set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
+
+ for (i = 0; i < env->n_ress; ++i) {
+ ir_node *res = get_Return_res(p, i);
+ if (env->variants[i] != TR_DIRECT) {
+ set_value(i, res);
+ }
+ }
+
+ exchange(p, bad);
+ }
+
+ /* finally fix all other returns */
+ end_block = get_irg_end_block(irg);
+ for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
+ ir_node *ret = get_Block_cfgpred(end_block, i);
+
+ /* search all Returns of a block */
+ if (! is_Return(ret))
+ continue;
+
+ set_cur_block(get_nodes_block(ret));
+ for (j = 0; j < env->n_ress; ++j) {
+ ir_node *pred = get_Return_res(ret, j);
+ ir_node *n;
+
+ switch (env->variants[j]) {
+ case TR_DIRECT:
+ continue;
+
+ case TR_ADD:
+ n = get_value(j, modes[j]);
+ n = new_Add(n, pred, modes[j]);
+ set_Return_res(ret, j, n);
+ break;
+
+ case TR_MUL:
+ n = get_value(j, modes[j]);
+ n = new_Mul(n, pred, modes[j]);
+ set_Return_res(ret, j, n);
+ break;
+
+ default:
+ assert(!"unexpected tail recursion variant");
+ }
+ }
+ }
+ ssa_cons_finish(irg);
+ } else {
+ ir_node *bad = get_irg_bad(irg);
+
+ /* no: we can kill all returns */
+ for (p = env->rets; p; p = n) {
+ n = get_irn_link(p);
+ exchange(p, bad);
+ }
+ }
+ current_ir_graph = old;
+}
+
+/**
+ * Check the lifetime of locals in the given graph.
+ * Tail recursion can only be done, if we can prove that
+ * the lifetime of locals end with the recursive call.
+ * We do this by checking that no address of a local variable is
+ * stored or transmitted as an argument to a call.
+ *
+ * @return non-zero if it's ok to do tail recursion
+ */
+static int check_lifetime_of_locals(ir_graph *irg) {
+ ir_node *irg_frame;
+ int i;
+ ir_type *frame_tp = get_irg_frame_type(irg);
+
+ /* walk all users (outs) of the frame pointer node */
+ irg_frame = get_irg_frame(irg);
+ for (i = get_irn_n_outs(irg_frame) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(irg_frame, i);
+
+ if (is_Sel(succ)) {
+ /* Check if we have compound arguments.
+ For now, we cannot handle them. */
+ if (get_entity_owner(get_Sel_entity(succ)) != frame_tp)
+ return 0;
+
+ /* the address of a frame entity escapes (stored or passed to a
+ call) -- its lifetime may outlive the recursive call, so tail
+ recursion is not safe here */
+ if (is_address_taken(succ))
+ return 0;
+ }
+ }
+ /* no escaping local addresses found: tail recursion is allowed */
+ return 1;
+}