#include "ircons_t.h"
#include "irpass.h"
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/**
* the environment for collecting data
{
collect_t *data = (collect_t*)env;
ir_node *pred;
- ir_op *op;
+ ir_opcode opcode;
switch (get_irn_opcode(node)) {
case iro_Proj:
pred = get_Proj_pred(node);
- op = get_irn_op(pred);
- if (op == op_Proj) {
+ opcode = get_irn_opcode(pred);
+ if (opcode == iro_Proj) {
ir_node *start = get_Proj_pred(pred);
if (is_Start(start)) {
data->proj_data = node;
}
}
- } else if (op == op_Start) {
+ } else if (opcode == iro_Start) {
if (get_Proj_proj(node) == pn_Start_X_initial_exec) {
/* found ProjX(Start) */
data->proj_X = node;
/**
* do the graph reconstruction for tail-recursion elimination
*
- * @param irg the graph that will reconstructed
- * @param rets linked list of all rets
- * @param n_tail_calls number of tail-recursion calls
+ * @param irg   the graph that will be reconstructed
+ * @param env tail recursion environment
*/
static void do_opt_tail_rec(ir_graph *irg, tr_env *env)
{
assert(env->n_tail_calls > 0);
- /* we add new nodes, so the outs are inconsistent */
- set_irg_outs_inconsistent(irg);
-
/* we add new blocks and change the control flow */
- set_irg_doms_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
-
- /* we add a new loop */
- set_irg_loopinfo_inconsistent(irg);
-
- /* calls are removed */
- set_trouts_inconsistent();
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+ | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
/* we must build some new nodes WITHOUT CSE */
set_optimize(0);
/* allocate in's for phi and block construction */
NEW_ARR_A(ir_node *, in, env->n_tail_calls + 1);
- in[0] = data.proj_X;
+ /* build a new header block for the loop we create */
+ i = 0;
+ in[i++] = data.proj_X;
/* turn Return's into Jmp's */
- for (i = 1, p = env->rets; p; p = n) {
+ for (p = env->rets; p; p = n) {
ir_node *block = get_nodes_block(p);
n = (ir_node*)get_irn_link(p);
* the block to the keep-alive list */
add_End_keepalive(get_irg_end(irg), block);
}
+ assert(i == env->n_tail_calls + 1);
- /* create a new block at start */
- block = new_r_Block(irg, env->n_tail_calls + 1, in);
+ /* now create it */
+ block = new_r_Block(irg, i, in);
jmp = new_r_Jmp(block);
/* the old first block is now the second one */
if (n_params > 0) {
ir_node *calls;
ir_node *args;
- ir_node *args_bl;
NEW_ARR_A(ir_node **, call_params, env->n_tail_calls);
/* build new Proj's and Phi's */
args = get_irg_args(irg);
- args_bl = get_nodes_block(args);
for (i = 0; i < n_params; ++i) {
ir_mode *mode = get_type_mode(get_method_param_type(method_tp, i));
}
/* tail recursion was done, all info is invalid */
- set_irg_doms_inconsistent(irg);
- set_irg_outs_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
- set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent);
- set_trouts_inconsistent();
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+ | IR_GRAPH_STATE_CONSISTENT_LOOPINFO
+ | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
set_optimize(rem);
/* check if we need new values */
n_locs = 0;
for (i = 0; i < env->n_ress; ++i) {
- if (env->variants[i] != TR_DIRECT)
+ if (env->variants[i] != TR_DIRECT) {
++n_locs;
+ break;
+ }
}
if (n_locs > 0) {
- ir_node *bad, *start_block;
+ ir_node *start_block;
ir_node **in;
ir_mode **modes;
- NEW_ARR_A(ir_node *, in, n_locs);
- NEW_ARR_A(ir_mode *, modes, n_locs);
- ssa_cons_start(irg, n_locs);
+ NEW_ARR_A(ir_node *, in, env->n_ress);
+ NEW_ARR_A(ir_mode *, modes, env->n_ress);
+ ssa_cons_start(irg, env->n_ress);
start_block = get_irg_start_block(irg);
- set_cur_block(start_block);
+ set_r_cur_block(irg, start_block);
+ /* set the neutral elements for the iteration start */
for (i = 0; i < env->n_ress; ++i) {
ir_type *tp = get_method_res_type(method_tp, i);
ir_mode *mode = get_type_mode(tp);
modes[i] = mode;
if (env->variants[i] == TR_ADD) {
- set_value(i, new_r_Const(irg, get_mode_null(mode)));
+ set_r_value(irg, i, new_r_Const(irg, get_mode_null(mode)));
} else if (env->variants[i] == TR_MUL) {
- set_value(i, new_r_Const(irg, get_mode_one(mode)));
+ set_r_value(irg, i, new_r_Const(irg, get_mode_one(mode)));
}
}
mature_immBlock(start_block);
/* no: we can kill all returns */
- bad = get_irg_bad(irg);
-
for (p = env->rets; p; p = n) {
ir_node *block = get_nodes_block(p);
ir_node *call, *mem, *jmp, *tuple;
- set_cur_block(block);
+ set_r_cur_block(irg, block);
n = (ir_node*)get_irn_link(p);
call = skip_Proj(get_Return_mem(p));
set_optimize(rem);
for (i = 0; i < env->n_ress; ++i) {
+ ir_mode *mode = modes[i];
if (env->variants[i] != TR_DIRECT) {
- in[i] = get_value(i, modes[i]);
+ in[i] = get_r_value(irg, i, mode);
} else {
- in[i] = bad;
+ in[i] = new_r_Bad(irg, mode);
}
}
/* create a new tuple for the return values */
tuple = new_r_Tuple(block, env->n_ress, in);
- turn_into_tuple(call, pn_Call_max);
- set_Tuple_pred(call, pn_Call_M, mem);
- set_Tuple_pred(call, pn_Call_X_regular, jmp);
- set_Tuple_pred(call, pn_Call_X_except, bad);
- set_Tuple_pred(call, pn_Call_T_result, tuple);
+ turn_into_tuple(call, pn_Call_max+1);
+ set_Tuple_pred(call, pn_Call_M, mem);
+ set_Tuple_pred(call, pn_Call_X_regular, jmp);
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
+ set_Tuple_pred(call, pn_Call_T_result, tuple);
for (i = 0; i < env->n_ress; ++i) {
ir_node *res = get_Return_res(p, i);
if (env->variants[i] != TR_DIRECT) {
- set_value(i, res);
+ set_r_value(irg, i, res);
}
}
- exchange(p, bad);
+ exchange(p, new_r_Bad(irg, mode_X));
}
/* finally fix all other returns */
continue;
block = get_nodes_block(ret);
- set_cur_block(block);
+ set_r_cur_block(irg, block);
for (j = 0; j < env->n_ress; ++j) {
ir_node *pred = get_Return_res(ret, j);
ir_node *n;
continue;
case TR_ADD:
- n = get_value(j, modes[j]);
+ n = get_r_value(irg, j, modes[j]);
n = new_r_Add(block, n, pred, modes[j]);
set_Return_res(ret, j, n);
break;
case TR_MUL:
- n = get_value(j, modes[j]);
+ n = get_r_value(irg, j, modes[j]);
n = new_r_Mul(block, n, pred, modes[j]);
set_Return_res(ret, j, n);
break;
}
ssa_cons_finish(irg);
} else {
- ir_node *bad = get_irg_bad(irg);
+ ir_node *bad = new_r_Bad(irg, mode_X);
/* no: we can kill all returns */
for (p = env->rets; p; p = n) {
ir_node *rets = NULL;
ir_type *mtd_type, *call_type;
ir_entity *ent;
+ ir_graph *rem;
FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
if (! check_lifetime_of_locals(irg))
return 0;
+ rem = current_ir_graph;
+ current_ir_graph = irg;
+
ent = get_irg_entity(irg);
mtd_type = get_entity_type(ent);
- n_ress = get_method_n_ress(mtd_type);
+ n_ress = get_method_n_ress(mtd_type);
env.variants = NULL;
env.n_ress = n_ress;
*/
normalize_n_returns(irg);
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
+
end_block = get_irg_end_block(irg);
set_irn_link(end_block, NULL);
* This can happen in C when no prototype is given
* or K&R style is used.
*/
-#if 0
- printf("Warning: Tail recursion fails because of different method and call types:\n");
- dump_type(mtd_type);
- dump_type(call_type);
-#endif
+ DB((dbg, LEVEL_3, " tail recursion fails because of call type mismatch: %+F != %+F\n", mtd_type, call_type));
continue;
}
env.variants[j] = var;
if (env.variants[j] != var) {
/* not compatible */
+ DB((dbg, LEVEL_3, " tail recursion fails for %d return value of %+F\n", j, ret));
break;
}
}
}
/* now, end_block->link contains the list of all tail calls */
- if (n_tail_calls <= 0)
- return 0;
-
- DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n",
- get_entity_ld_name(get_irg_entity(irg)), n_tail_calls));
+ if (n_tail_calls > 0) {
+ DB((dbg, LEVEL_2, " Performing tail recursion for graph %s and %d Calls\n",
+ get_entity_ld_name(get_irg_entity(irg)), n_tail_calls));
- hook_tail_rec(irg, n_tail_calls);
-
- env.n_tail_calls = n_tail_calls;
- env.rets = rets;
- do_opt_tail_rec(irg, &env);
+ hook_tail_rec(irg, n_tail_calls);
+ env.n_tail_calls = n_tail_calls;
+ env.rets = rets;
+ do_opt_tail_rec(irg, &env);
+ }
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+ current_ir_graph = rem;
return n_tail_calls;
}
FIRM_DBG_REGISTER(dbg, "firm.opt.tailrec");
+ DB((dbg, LEVEL_1, "Performing tail recursion ...\n"));
for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
ir_graph *irg = get_irp_irg(i);
- ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
if (opt_tail_rec_irg(irg))
++n_opt_applications;
-
- ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
}
- DB((dbg, LEVEL_1, "Performed tail recursion for %zu of %zu graphs\n",
+ DB((dbg, LEVEL_1, "Done for %zu of %zu graphs.\n",
n_opt_applications, get_irp_n_irgs()));
}