+
+ /* Count the return values that need an accumulator: TR_DIRECT results
+  * are passed through unchanged, while TR_ADD/TR_MUL results must be
+  * folded over every recursion step. */
+ n_locs = 0;
+ for (i = 0; i < env->n_ress; ++i) {
+ if (env->variants[i] != TR_DIRECT)
+ ++n_locs;
+ }
+
+ if (n_locs > 0) {
+ ir_node *bad, *start_block;
+ ir_node **in;
+ ir_mode **modes;
+
+ /* BUGFIX: in[], modes[] and the SSA construction are indexed by the
+  * result position i (0 .. n_ress-1) below, not by the accumulator
+  * count, so all three must be sized env->n_ress; with only n_locs
+  * entries they overflow whenever some result is TR_DIRECT. */
+ NEW_ARR_A(ir_node *, in, env->n_ress);
+ NEW_ARR_A(ir_mode *, modes, env->n_ress);
+ ssa_cons_start(irg, env->n_ress);
+
+ start_block = get_irg_start_block(irg);
+ set_cur_block(start_block);
+
+ /* Seed every accumulator in the start block with the neutral element
+  * of its operation: 0 for TR_ADD, 1 for TR_MUL. */
+ for (i = 0; i < env->n_ress; ++i) {
+ ir_type *tp = get_method_res_type(method_tp, i);
+ ir_mode *mode = get_type_mode(tp);
+
+ modes[i] = mode;
+ if (env->variants[i] == TR_ADD) {
+ set_value(i, new_Const(mode, get_mode_null(mode)));
+ } else if (env->variants[i] == TR_MUL) {
+ set_value(i, new_Const(mode, get_mode_one(mode)));
+ }
+ }
+ mature_immBlock(start_block);
+
+ bad = get_irg_bad(irg);
+
+ /* Rewrite every tail-recursive Return: the recursive Call is turned
+  * into a Tuple that routes the memory through and jumps back into
+  * the function body instead of calling. */
+ for (p = env->rets; p; p = n) {
+ ir_node *block = get_nodes_block(p);
+ ir_node *call, *mem, *jmp, *tuple;
+
+ set_cur_block(block);
+ n = get_irn_link(p); /* pick up the list successor before p is exchanged */
+
+ call = skip_Proj(get_Return_mem(p));
+ assert(is_Call(call));
+
+ mem = get_Call_mem(call);
+
+ /* create a new jump, free of CSE */
+ set_optimize(0);
+ jmp = new_Jmp();
+ set_optimize(rem); /* rem presumably holds the saved optimize flag — set outside this hunk */
+
+ /* Accumulated results get the current accumulator value; TR_DIRECT
+  * positions are never read from the tuple and are set to Bad. */
+ for (i = 0; i < env->n_ress; ++i) {
+ if (env->variants[i] != TR_DIRECT) {
+ in[i] = get_value(i, modes[i]);
+ } else {
+ in[i] = bad;
+ }
+ }
+ /* create a new tuple for the return values */
+ tuple = new_Tuple(env->n_ress, in);
+
+ turn_into_tuple(call, pn_Call_max);
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, jmp);
+ set_Tuple_pred(call, pn_Call_X_except, bad);
+ set_Tuple_pred(call, pn_Call_T_result, tuple);
+ set_Tuple_pred(call, pn_Call_M_except, mem);
+ set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
+
+ /* The value this (former) Return would have returned becomes the
+  * accumulator value for the next iteration. */
+ for (i = 0; i < env->n_ress; ++i) {
+ ir_node *res = get_Return_res(p, i);
+ if (env->variants[i] != TR_DIRECT) {
+ set_value(i, res);
+ }
+ }
+
+ exchange(p, bad);
+ }
+
+ /* finally fix all other (non-recursive) returns: combine the
+  * accumulator with the returned value */
+ end_block = get_irg_end_block(irg);
+ for (i = get_Block_n_cfgpreds(end_block) - 1; i >= 0; --i) {
+ ir_node *ret = get_Block_cfgpred(end_block, i);
+
+ /* search all Returns of a block */
+ if (! is_Return(ret))
+ continue;
+
+ set_cur_block(get_nodes_block(ret));
+ for (j = 0; j < env->n_ress; ++j) {
+ ir_node *pred = get_Return_res(ret, j);
+ ir_node *acc; /* renamed from 'n': avoid shadowing the list cursor above */
+
+ switch (env->variants[j]) {
+ case TR_DIRECT:
+ continue;
+
+ case TR_ADD:
+ /* result = accumulator + returned value */
+ acc = get_value(j, modes[j]);
+ acc = new_Add(acc, pred, modes[j]);
+ set_Return_res(ret, j, acc);
+ break;
+
+ case TR_MUL:
+ /* result = accumulator * returned value */
+ acc = get_value(j, modes[j]);
+ acc = new_Mul(acc, pred, modes[j]);
+ set_Return_res(ret, j, acc);
+ break;
+
+ default:
+ assert(!"unexpected tail recursion variant");
+ }
+ }
+ }
+ ssa_cons_finish(irg);
+ } else {
+ ir_node *bad = get_irg_bad(irg);
+
+ /* no accumulators needed: we can simply kill all recursive returns */
+ for (p = env->rets; p; p = n) {
+ n = get_irn_link(p);
+ exchange(p, bad);
+ }
+ }
+ current_ir_graph = old;