From ba92afeb4aa45a53e85e64e8f9513bb9c14dac19 Mon Sep 17 00:00:00 2001 From: Michael Beck Date: Tue, 1 Jun 2004 09:43:41 +0000 Subject: [PATCH] used enum values for Tuple creation [r2982] --- ir/ir/irgopt.c | 43 ++++++++++++++++++++++--------------------- ir/ir/iropt.c | 38 +++++++++++++++++++------------------- 2 files changed, 41 insertions(+), 40 deletions(-) diff --git a/ir/ir/irgopt.c b/ir/ir/irgopt.c index cce8004b6..531d4082b 100644 --- a/ir/ir/irgopt.c +++ b/ir/ir/irgopt.c @@ -723,11 +723,12 @@ void inline_method(ir_node *call, ir_graph *called_graph) { post_bl = get_nodes_Block(call); set_irg_current_block(current_ir_graph, post_bl); /* XxMxPxP of Start + parameter of Call */ - in[0] = new_Jmp(); - in[1] = get_Call_mem(call); - in[2] = get_irg_frame(current_ir_graph); - in[3] = get_irg_globals(current_ir_graph); - in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call)); + in[pn_Start_X_initial_exec] = new_Jmp(); + in[pn_Start_M] = get_Call_mem(call); + in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph); + in[pn_Start_P_globals] = get_irg_globals(current_ir_graph); + in[pn_Start_T_arg] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call)); + /* in[pn_Start_P_value_arg_base] = ??? 
*/ pre_call = new_Tuple(5, in); post_call = call; @@ -844,7 +845,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) { } } phi = new_Phi(n_ret, cf_pred, mode_M); - set_Tuple_pred(call, 0, phi); + set_Tuple_pred(call, pn_Call_M_regular, phi); /* Conserve Phi-list for further inlinings -- but might be optimized */ if (get_nodes_Block(phi) == post_bl) { set_irn_link(phi, get_irn_link(post_bl)); @@ -855,23 +856,23 @@ void inline_method(ir_node *call, ir_graph *called_graph) { for (j = 0; j < n_res; j++) { n_ret = 0; for (i = 0; i < arity; i++) { - ret = intern_get_irn_n(end_bl, i); - if (intern_get_irn_op(ret) == op_Return) { - cf_pred[n_ret] = get_Return_res(ret, j); - n_ret++; - } + ret = intern_get_irn_n(end_bl, i); + if (intern_get_irn_op(ret) == op_Return) { + cf_pred[n_ret] = get_Return_res(ret, j); + n_ret++; + } } phi = new_Phi(n_ret, cf_pred, intern_get_irn_mode(cf_pred[0])); res_pred[j] = phi; /* Conserve Phi-list for further inlinings -- but might be optimized */ if (get_nodes_Block(phi) == post_bl) { - set_irn_link(phi, get_irn_link(post_bl)); - set_irn_link(post_bl, phi); + set_irn_link(phi, get_irn_link(post_bl)); + set_irn_link(post_bl, phi); } } - set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred)); + set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred)); } else { - set_Tuple_pred(call, 2, new_Bad()); + set_Tuple_pred(call, pn_Call_T_result, new_Bad()); } /* Finally the exception control flow. We have two (three) possible situations: @@ -895,7 +896,7 @@ void inline_method(ir_node *call, ir_graph *called_graph) { } if (n_exc > 0) { new_Block(n_exc, cf_pred); /* watch it: current_block is changed! 
*/ - set_Tuple_pred(call, 1, new_Jmp()); + set_Tuple_pred(call, pn_Call_X_except, new_Jmp()); /* The Phi for the memories with the exception objects */ n_exc = 0; for (i = 0; i < arity; i++) { @@ -913,10 +914,10 @@ void inline_method(ir_node *call, ir_graph *called_graph) { n_exc++; } } - set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M)); + set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M)); } else { - set_Tuple_pred(call, 1, new_Bad()); - set_Tuple_pred(call, 3, new_Bad()); + set_Tuple_pred(call, pn_Call_X_except, new_Bad()); + set_Tuple_pred(call, pn_Call_M_except, new_Bad()); } } else { ir_node *main_end_bl; @@ -942,8 +943,8 @@ void inline_method(ir_node *call, ir_graph *called_graph) { for (i = 0; i < n_exc; ++i) end_preds[main_end_bl_arity + i] = cf_pred[i]; set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds); - set_Tuple_pred(call, 1, new_Bad()); - set_Tuple_pred(call, 3, new_Bad()); + set_Tuple_pred(call, pn_Call_X_except, new_Bad()); + set_Tuple_pred(call, pn_Call_M_except, new_Bad()); free(end_preds); } free(res_pred); diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 96eb4f3fc..f103ddd56 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -697,9 +697,9 @@ static ir_node *equivalent_node_Div(ir_node *n) /* Turn Div into a tuple (mem, bad, a) */ ir_node *mem = get_Div_mem(n); turn_into_tuple(n, 3); - set_Tuple_pred(n, 0, mem); - set_Tuple_pred(n, 1, new_Bad()); - set_Tuple_pred(n, 2, a); + set_Tuple_pred(n, pn_Div_M, mem); + set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */ + set_Tuple_pred(n, pn_Div_res, a); } return n; } @@ -883,8 +883,8 @@ static ir_node *equivalent_node_Store(ir_node *n) doesn't change the memory -- a write after read. 
*/ a = get_Store_mem(n); turn_into_tuple(n, 2); - set_Tuple_pred(n, 0, a); - set_Tuple_pred(n, 1, new_Bad()); DBG_OPT_WAR; + set_Tuple_pred(n, pn_Store_M, a); + set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR; } return n; } @@ -1023,9 +1023,9 @@ static ir_node *transform_node_Div(ir_node *n) ir_node *mem = get_Div_mem(n); turn_into_tuple(n, 3); - set_Tuple_pred(n, 0, mem); - set_Tuple_pred(n, 1, new_Bad()); - set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta)); + set_Tuple_pred(n, pn_Div_M, mem); + set_Tuple_pred(n, pn_Div_X_except, new_Bad()); + set_Tuple_pred(n, pn_Div_res, new_Const(get_tarval_mode(ta), ta)); } return n; } @@ -1038,9 +1038,9 @@ static ir_node *transform_node_Mod(ir_node *n) /* Turn Mod into a tuple (mem, bad, value) */ ir_node *mem = get_Mod_mem(n); turn_into_tuple(n, 3); - set_Tuple_pred(n, 0, mem); - set_Tuple_pred(n, 1, new_Bad()); - set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta)); + set_Tuple_pred(n, pn_Mod_M, mem); + set_Tuple_pred(n, pn_Mod_X_except, new_Bad()); + set_Tuple_pred(n, pn_Mod_res, new_Const(get_tarval_mode(ta), ta)); } return n; } @@ -1087,10 +1087,10 @@ static ir_node *transform_node_DivMod(ir_node *n) if (evaluated) { /* replace by tuple */ ir_node *mem = get_DivMod_mem(n); turn_into_tuple(n, 4); - set_Tuple_pred(n, 0, mem); - set_Tuple_pred(n, 1, new_Bad()); /* no exception */ - set_Tuple_pred(n, 2, a); - set_Tuple_pred(n, 3, b); + set_Tuple_pred(n, pn_DivMod_M, mem); + set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */ + set_Tuple_pred(n, pn_DivMod_res_div, a); + set_Tuple_pred(n, pn_DivMod_res_mod, b); assert(get_nodes_Block(n)); } @@ -1113,11 +1113,11 @@ static ir_node *transform_node_Cond(ir_node *n) jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n)); turn_into_tuple(n, 2); if (ta == tarval_b_true) { - set_Tuple_pred(n, 0, new_Bad()); - set_Tuple_pred(n, 1, jmp); + set_Tuple_pred(n, pn_Cond_false, new_Bad()); + set_Tuple_pred(n, pn_Cond_true, jmp); } else { - 
set_Tuple_pred(n, 0, jmp); - set_Tuple_pred(n, 1, new_Bad()); + set_Tuple_pred(n, pn_Cond_false, jmp); + set_Tuple_pred(n, pn_Cond_true, new_Bad()); } /* We might generate an endless loop, so keep it alive. */ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n)); -- 2.20.1