- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
- (get_Block_n_cfgpreds(nn) == 1) &&
- (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
- exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
+ (get_Block_n_cfgpreds(nn) == 1) &&
+ (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+ ir_node *old = get_nodes_Block(get_Block_cfgpred(nn, 0));
+ if (nn == old) {
+ /* Jmp jumps into the block it is in -- handle the self cycle. */
+ assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
+ exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
+ } else {
+ exchange(nn, old);
+ }
+ }
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
ir_node *ka; /* keep alive */
int i, irn_arity;
oe = get_irg_end(current_ir_graph);
/* copy the end node by hand, allocate dynamic in array! */
ne = new_ir_node(get_irn_dbg_info(oe),
ir_node *ka; /* keep alive */
int i, irn_arity;
oe = get_irg_end(current_ir_graph);
/* copy the end node by hand, allocate dynamic in array! */
ne = new_ir_node(get_irn_dbg_info(oe),
/* copy the live nodes */
irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
/* copy_preds for the end node ... */
set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));
/* copy the live nodes */
irg_walk(get_nodes_Block(oe), copy_node, copy_preds, NULL);
/* copy_preds for the end node ... */
set_nodes_Block(ne, get_new_node(get_nodes_Block(oe)));
/*- ... and now the keep alives. -*/
/* First pick the not marked block nodes and walk them. We must pick these
first, as otherwise we will overlook blocks reachable from Phis. */
irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
/*- ... and now the keep alives. -*/
/* First pick the not marked block nodes and walk them. We must pick these
first, as otherwise we will overlook blocks reachable from Phis. */
irn_arity = get_irn_arity(oe);
for (i = 0; i < irn_arity; i++) {
/* We must keep the block alive and copy everything reachable */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
irg_walk(ka, copy_node, copy_preds, NULL);
/* We must keep the block alive and copy everything reachable */
set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
irg_walk(ka, copy_node, copy_preds, NULL);
- /* We didn't copy the Phi yet. */
- set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
- irg_walk(ka, copy_node, copy_preds, NULL);
+ /* We didn't copy the Phi yet. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+ irg_walk(ka, copy_node, copy_preds, NULL);
- set_irn_link(get_irg_frame (current_ir_graph), NULL);
- set_irn_link(get_irg_globals(current_ir_graph), NULL);
- set_irn_link(get_irg_args (current_ir_graph), NULL);
+ set_irn_link(get_irg_frame (current_ir_graph), NULL);
+ set_irn_link(get_irg_globals (current_ir_graph), NULL);
+ set_irn_link(get_irg_args (current_ir_graph), NULL);
+ set_irn_link(get_irg_initial_mem(current_ir_graph), NULL);
if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
copy_node (get_irg_args(current_ir_graph), NULL);
copy_preds(get_irg_args(current_ir_graph), NULL);
}
if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
copy_node (get_irg_args(current_ir_graph), NULL);
copy_preds(get_irg_args(current_ir_graph), NULL);
}
- get_new_node(get_irg_start_block(current_ir_graph)));
- set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
- set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
- set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+ get_new_node(get_irg_start_block(current_ir_graph)));
+ set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+ set_irg_globals (current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+ set_irg_initial_mem(current_ir_graph, get_new_node(get_irg_initial_mem(current_ir_graph)));
+ set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+
if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
copy_node(get_irg_bad(current_ir_graph), NULL);
copy_preds(get_irg_bad(current_ir_graph), NULL);
}
set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
copy_node(get_irg_bad(current_ir_graph), NULL);
copy_preds(get_irg_bad(current_ir_graph), NULL);
}
set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
* Relinks Bad predecessors from Blocks and Phis called by walker
* remove_bad_predecessors(). If n is a Block, call
* relink_bad_block_predecessors(). If n is a Phi node, call also the relinking
* Relinks Bad predecessors from Blocks and Phis called by walker
* remove_bad_predecessors(). If n is a Block, call
* relink_bad_block_predecessors(). If n is a Phi node, call also the relinking
+static void find_addr(ir_node *node, void *env)
+{
+ if (get_irn_opcode(node) == iro_Proj) {
+ if (get_Proj_proj(node) == pn_Start_P_value_arg_base)
+ *(int *)env = 0;
+ }
+}
+
+/*
+ * currently, we cannot inline two cases:
+ * - call with compound arguments
+ * - graphs that take the address of a parameter
+ *
+ * check these condition here
+ */
+static int can_inline(ir_node *call, ir_graph *called_graph)
+{
+ type *call_type = get_Call_type(call);
+ int params, ress, i, res;
+
+ assert(is_method_type(call_type));
+
+ params = get_method_n_params(call_type);
+ ress = get_method_n_ress(call_type);
+
+ /* check params */
+ for (i = 0; i < params; ++i) {
+ type *p_type = get_method_param_type(call_type, i);
+
+ if (is_compound_type(p_type))
+ return 0;
+ }
+
+ /* check res */
+ for (i = 0; i < ress; ++i) {
+ type *r_type = get_method_res_type(call_type, i);
+
+ if (is_compound_type(r_type))
+ return 0;
+ }
+
+ res = 1;
+ irg_walk_graph(called_graph, find_addr, NULL, &res);
+
+ return res;
+}
ir_node **cf_pred;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
ir_node **cf_pred;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
- if ( !(get_irg_inline_property(called_graph) == irg_inline_forced) && (!get_opt_optimize() || !get_opt_inline() ||
- (get_irg_inline_property(called_graph) == irg_inline_forbidden))) return;
+ if ( (prop != irg_inline_forced) && (!get_opt_optimize() || !get_opt_inline() ||
+ (prop == irg_inline_forbidden))) return 0;
+
+
+ /*
+ * currently, we cannot inline two cases:
+ * - call with compound arguments
+ * - graphs that take the address of a parameter
+ */
+ if (! can_inline(call, called_graph))
+ return 0;
- in[0] = new_Jmp();
- in[1] = get_Call_mem(call);
- in[2] = get_irg_frame(current_ir_graph);
- in[3] = get_irg_globals(current_ir_graph);
- in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
+ in[pn_Start_X_initial_exec] = new_Jmp();
+ in[pn_Start_M] = get_Call_mem(call);
+ in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph);
+ in[pn_Start_P_globals] = get_irg_globals(current_ir_graph);
+ in[pn_Start_T_args] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
+ /* in[pn_Start_P_value_arg_base] = ??? */
- set_irn_visited(get_irg_start(called_graph),
- get_irg_visited(current_ir_graph));
- set_irn_link(get_irg_start_block(called_graph),
- get_nodes_Block(pre_call));
- set_irn_visited(get_irg_start_block(called_graph),
- get_irg_visited(current_ir_graph));
+ set_irn_visited(get_irg_start(called_graph), get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_start_block(called_graph), get_nodes_Block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
+ set_irn_visited(get_irg_bad(called_graph), get_irg_visited(current_ir_graph));
entities. */
/* @@@ endless loops are not copied!! -- they should be, I think... */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
entities. */
/* @@@ endless loops are not copied!! -- they should be, I think... */
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
- -1: Block of Tuple.
- 0: Phi of all Memories of Return statements.
- 1: Jmp from new Block that merges the control flow from all exception
- predecessors of the old end block.
- 2: Tuple of all arguments.
- 3: Phi of Exception memories.
+ -1: Block of Tuple.
+ 0: Phi of all Memories of Return statements.
+ 1: Jmp from new Block that merges the control flow from all exception
+ predecessors of the old end block.
+ 2: Tuple of all arguments.
+ 3: Phi of Exception memories.
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
}
phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
}
phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
- ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (get_irn_op(ret) == op_Call) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
- n_exc++;
- }
+ ir_node *ret;
+ ret = skip_Proj(get_irn_n(end_bl, i));
+ if (get_irn_op(ret) == op_Call) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
+ n_exc++;
+ } else if (is_fragile_op(ret)) {
+ /* We rely that all cfops have the memory output at the same position. */
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
+ n_exc++;
+ } else if (get_irn_op(ret) == op_Raise) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
+ n_exc++;
+ }
- set_Tuple_pred(call, 1, new_Bad());
- set_Tuple_pred(call, 3, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, 1, new_Bad());
- set_Tuple_pred(call, 3, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- cf_op = get_Proj_pred(cf_op);
- if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
- // There are unoptimized tuples from inlineing before when no exc
- assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
- cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(get_irn_op(cf_op) == op_Jmp);
- break;
- }
+ cf_op = get_Proj_pred(cf_op);
+ if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
+ /* There are unoptimized tuples from earlier inlining when there was no exc */
+ assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
+ cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
+ assert(get_irn_op(cf_op) == op_Jmp);
+ break;
+ }
/* Find Call nodes to inline.
(We can not inline during a walk of the graph, as inlineing the same
method several times changes the visited flag of the walked graph:
after the first inlineing visited of the callee equals visited of
the caller. With the next inlineing both are increased.) */
/* Find Call nodes to inline.
(We can not inline during a walk of the graph, as inlineing the same
method several times changes the visited flag of the walked graph:
after the first inlineing visited of the callee equals visited of
the caller. With the next inlineing both are increased.) */
-// printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)),
-// get_entity_name(get_irg_entity(callee)));
- inline_method(call, callee);
- did_inline = 1;
- env->n_call_nodes--;
- eset_insert_all(env->call_nodes, callee_env->call_nodes);
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- callee_env->n_callers--;
+/* printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */
+/* get_entity_name(get_irg_entity(callee))); */
+ if (inline_method(call, callee)) {
+ did_inline = 1;
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes);
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ }
-// printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)),
-// get_entity_name(get_irg_entity(callee)));
- inline_method(call, callee);
- did_inline = 1;
- env->n_call_nodes--;
- eset_insert_all(env->call_nodes, callee_env->call_nodes);
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- callee_env->n_callers--;
+/* printf(" %s: Inlineing %s.\n", get_entity_name(get_irg_entity(current_ir_graph)), */
+/* get_entity_name(get_irg_entity(callee))); */
+ if (inline_method(call, callee)) {
+ did_inline = 1;
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes);
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ }
- env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
- env->n_callers_orig, env->n_callers,
- get_entity_name(get_irg_entity(current_ir_graph)));
+ env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+ env->n_callers_orig, env->n_callers,
+ get_entity_name(get_irg_entity(current_ir_graph)));
/* Because all loops contain at least one pinned node, now all
our inputs are either pinned or place_early has already
been finished on them. We do not have any unfinished inputs! */
dep_block = get_nodes_Block(dep);
if ((!is_Bad(dep_block)) &&
/* Because all loops contain at least one pinned node, now all
our inputs are either pinned or place_early has already
been finished on them. We do not have any unfinished inputs! */
dep_block = get_nodes_Block(dep);
if ((!is_Bad(dep_block)) &&
- (get_irn_op(n) != op_Const) &&
- (get_irn_op(n) != op_SymConst)) {
- ir_node *dca = NULL; /* deepest common ancestor in the
- dominator tree of all nodes'
- blocks depending on us; our final
- placement has to dominate DCA. */
+ (get_irn_op(n) != op_Const) &&
+ (get_irn_op(n) != op_SymConst)) {
+ ir_node *dca = NULL; /* deepest common ancestor in the
+ dominator tree of all nodes'
+ blocks depending on us; our final
+ placement has to dominate DCA. */
} else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
ir_node *b = get_nodes_Block(n);
ir_node *new_node = equivalent_node(b);
while (irn_not_visited(b) && (!is_Bad(new_node)) && (new_node != b)) {
/* We would have to run gigo if new is bad, so we
} else if (get_opt_optimize() && (get_irn_mode(n) == mode_X)) {
/* We will soon visit a block. Optimize it before visiting! */
ir_node *b = get_nodes_Block(n);
ir_node *new_node = equivalent_node(b);
while (irn_not_visited(b) && (!is_Bad(new_node)) && (new_node != b)) {
/* We would have to run gigo if new is bad, so we
- ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (get_Block_block_visited(b_pred) + 1
- < get_irg_block_visited(current_ir_graph)) {
- for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
- ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
- if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
- }
- } else {
- if (is_pred_of(b_pred, pred)) dispensable = 0;
- }
+ ir_node *b_pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (get_Block_block_visited(b_pred) + 1
+ < get_irg_block_visited(current_ir_graph)) {
+ for (j = 0; j < get_Block_n_cfgpreds(b_pred); j++) {
+ ir_node *b_pred_pred = get_nodes_Block(get_Block_cfgpred(b_pred, j));
+ if (is_pred_of(b_pred_pred, pred)) dispensable = 0;
+ }
+ } else {
+ if (is_pred_of(b_pred, pred)) dispensable = 0;
+ }
if (is_Bad(get_Block_cfgpred(b, i))) {
printf(" removing Bad %i\n ", i);
} else if (get_Block_block_visited(pred) +1
if (is_Bad(get_Block_cfgpred(b, i))) {
printf(" removing Bad %i\n ", i);
} else if (get_Block_block_visited(pred) +1
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
pred = get_nodes_Block(get_Block_cfgpred(b, i));
if (is_Bad(get_Block_cfgpred(b, i))) {
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- ir_node *phi_pred = get_Phi_pred(phi, i);
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- if (get_nodes_Block(phi_pred) == pred) {
- assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
- in[n_preds] = get_Phi_pred(phi_pred, j);
- } else {
- in[n_preds] = phi_pred;
- }
- n_preds++;
- }
- /* The Phi_pred node is replaced now if it is a Phi.
- In Schleifen kann offenbar der entfernte Phi Knoten legal verwendet werden.
- Daher muss der Phiknoten durch den neuen ersetzt werden.
- Weiter muss der alte Phiknoten entfernt werden (durch ersetzen oder
- durch einen Bad) damit er aus den keep_alive verschwinden kann.
- Man sollte also, falls keine Schleife vorliegt, exchange mit new_Bad
- aufrufen. */
- if (get_nodes_Block(phi_pred) == pred) {
- /* remove the Phi as it might be kept alive. Further there
- might be other users. */
- exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */
- }
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ ir_node *phi_pred = get_Phi_pred(phi, i);
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ if (get_nodes_Block(phi_pred) == pred) {
+ assert(get_irn_op(phi_pred) == op_Phi); /* Block is empty!! */
+ in[n_preds] = get_Phi_pred(phi_pred, j);
+ } else {
+ in[n_preds] = phi_pred;
+ }
+ n_preds++;
+ }
+ /* The Phi_pred node is replaced now if it is a Phi.
+ In Schleifen kann offenbar der entfernte Phi Knoten legal verwendet werden.
+ Daher muss der Phiknoten durch den neuen ersetzt werden.
+ Weiter muss der alte Phiknoten entfernt werden (durch ersetzen oder
+ durch einen Bad) damit er aus den keep_alive verschwinden kann.
+ Man sollte also, falls keine Schleife vorliegt, exchange mit new_Bad
+ aufrufen. */
+ if (get_nodes_Block(phi_pred) == pred) {
+ /* remove the Phi as it might be kept alive. Further there
+ might be other users. */
+ exchange(phi_pred, phi); /* geht, ist aber doch semantisch falsch! Warum?? */
+ }
- if (get_irn_op(phi) == op_Phi) {
- set_nodes_Block(phi, b);
-
- n_preds = 0;
- for (i = 0; i < k; i++) {
- pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
- muss Rueckwaertskante sein! (An allen vier in[n_preds] = phi
- Anweisungen.) Trotzdem tuts bisher!! */
- in[n_preds] = phi;
- n_preds++;
- }
- } else {
- in[n_preds] = phi;
- n_preds++;
- }
- }
- for (i = 0; i < get_Phi_n_preds(phi); i++) {
- in[n_preds] = get_Phi_pred(phi, i);
- n_preds++;
- }
- for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
- pred = get_nodes_Block(get_Block_cfgpred(b, i));
- if (is_Bad(get_Block_cfgpred(b, i))) {
- /* Do nothing */
- } else if (get_Block_block_visited(pred) +1
- < get_irg_block_visited(current_ir_graph)) {
- /* It's an empty block and not yet visited. */
- for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- in[n_preds] = phi;
- n_preds++;
- }
- } else {
- in[n_preds] = phi;
- n_preds++;
- }
- }
- set_irn_in(phi, n_preds, in);
- }
- phi = get_irn_link(phi);
+ if (get_irn_op(phi) == op_Phi) {
+ set_nodes_Block(phi, b);
+
+ n_preds = 0;
+ for (i = 0; i < k; i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ /* @@@ Hier brauche ich Schleifeninformation!!! Kontrollflusskante
+ muss Rueckwaertskante sein! (An allen vier in[n_preds] = phi
+ Anweisungen.) Trotzdem tuts bisher!! */
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ for (i = 0; i < get_Phi_n_preds(phi); i++) {
+ in[n_preds] = get_Phi_pred(phi, i);
+ n_preds++;
+ }
+ for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
+ pred = get_nodes_Block(get_Block_cfgpred(b, i));
+ if (is_Bad(get_Block_cfgpred(b, i))) {
+ /* Do nothing */
+ } else if (get_Block_block_visited(pred) +1
+ < get_irg_block_visited(current_ir_graph)) {
+ /* It's an empty block and not yet visited. */
+ for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ } else {
+ in[n_preds] = phi;
+ n_preds++;
+ }
+ }
+ set_irn_in(phi, n_preds, in);
+ }
+ phi = get_irn_link(phi);
/* It's an empty block and not yet visited. */
assert(get_Block_n_cfgpreds(b) > 1);
/* Else it should be optimized by equivalent_node. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
/* It's an empty block and not yet visited. */
assert(get_Block_n_cfgpreds(b) > 1);
/* Else it should be optimized by equivalent_node. */
for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
- set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
- get_irg_block_visited(current_ir_graph)-1);
- irg_block_walk(ka, optimize_blocks, NULL, NULL);
- mark_irn_visited(ka);
- ARR_APP1 (ir_node *, in, ka);
+ set_irg_block_visited(current_ir_graph, /* Don't walk all the way to Start. */
+ get_irg_block_visited(current_ir_graph)-1);
+ irg_block_walk(ka, optimize_blocks, NULL, NULL);
+ mark_irn_visited(ka);
+ ARR_APP1 (ir_node *, in, ka);
- (op_Proj == get_irn_op(pre)) &&
- op_Raise != get_irn_op(skip_Proj(pre))) {
-
- /* set predecessor array for new block */
- in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
- /* set predecessor of new block */
- in[0] = pre;
- block = new_Block(1, in);
- /* insert new jmp node to new block */
- switch_block(block);
- jmp = new_Jmp();
- switch_block(n);
- /* set successor of new block */
- set_irn_n(n, i, jmp);
+ (op_Proj == get_irn_op(pre)) &&
+ op_Raise != get_irn_op(skip_Proj(pre))) {
+
+ /* set predecessor array for new block */
+ in = NEW_ARR_D (ir_node *, current_ir_graph->obst, 1);
+ /* set predecessor of new block */
+ in[0] = pre;
+ block = new_Block(1, in);
+ /* insert new jmp node to new block */
+ switch_block(block);
+ jmp = new_Jmp();
+ switch_block(n);
+ /* set successor of new block */
+ set_irn_n(n, i, jmp);