nn = get_new_node(n);
if (is_Block(n)) {
+ /* copy the macro block header */
+ ir_node *mbh = get_Block_MacroBlock(n);
+
+ if (mbh == n) {
+ /* this block is a macroblock header */
+ set_irn_n(nn, -1, nn);
+ } else {
+ /* get the macro block header */
+ set_irn_n(nn, -1, get_new_node(mbh));
+ }
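+ /* Either way the copy keeps the invariant that input -1 of a block
+    points to its macroblock header: a header points to itself, every
+    other block to the (already copied) header. */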
+
/* Don't copy Bad nodes. */
j = 0;
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
if (! is_Bad(get_irn_n(n, i))) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
}
}
- } else if (get_irn_op(n) == op_Phi) {
+ } else if (is_Phi(n)) {
/* Don't copy a node if the corresponding predecessor in its block is Bad.
   The Block itself should not be Bad. */
block = get_nodes_block(n);
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
- xfree (graveyard_obst); /* ... then free it. */
+ xfree(graveyard_obst); /* ... then free it. */
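+ /* All reachable nodes have been copied onto a fresh obstack by now, so
+    the graveyard only holds dead nodes and can be released in one shot
+    instead of being freed node by node. */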
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
} else {
set_Tuple_pred(call, pn_Call_T_result, new_Bad());
}
+ /* handle the regular call */
+ set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
/* For now, we cannot inline calls with value_base */
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
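+ /* After inlining, the Call node itself is exchanged for a Tuple, so every
+    pn_Call_* projection must be rerouted; projections that can no longer
+    occur are wired to Bad. */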
ir_node *ret, *irn;
ret = get_irn_n(end_bl, i);
irn = skip_Proj(ret);
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+ if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
++n_exc;
}
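+ /* Fragile ops and Raise nodes reaching the inlined end block are the
+    points where exception control flow leaves the callee; collect them
+    here, they are appended to the caller's end block below. */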
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
} else {
ir_node *main_end_bl;
int main_end_bl_arity;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
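+ /* The collected exception predecessors now follow the caller's original
+    end block inputs, so unwinding paths of the inlined body terminate the
+    caller's graph directly. */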
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
current_ir_graph = rem;
}
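+/** Environment for the critical edge walker below. */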
+typedef struct cf_env {
+ char changed; /**< flag indicating that the control flow graph has changed. */
+} cf_env;
+
/**
* Called by walker of remove_critical_cf_edges().
*
 * Inserts an empty block on each edge between a block of multiple
 * predecessors and a block of multiple successors.
*
* @param n IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of the walker, a cf_env.
*/
static void walk_critical_cf_edges(ir_node *n, void *env) {
int arity, i;
ir_node *pre, *block, *jmp;
- int *changed = env;
+ cf_env *cenv = env;
ir_graph *irg = get_irn_irg(n);
/* Block has multiple predecessors */
pre = get_irn_n(n, i);
cfop = get_irn_op(skip_Proj(pre));
- /* Predecessor has multiple successors. Insert new control flow edge but
- ignore exception edges. */
- if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
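+ /* A fragile op can throw: besides its exception edge it keeps a regular
+    out edge that may be critical, so treat it like a fork. A Raise only
+    raises, its edge needs no splitting. */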
+ if (is_op_fragile(cfop)) {
+ if (cfop != op_Raise)
+ goto insert;
+ continue;
+ }
+ if (is_op_forking(cfop)) {
+ /* Predecessor has multiple successors. Insert a new control flow edge. */
+insert:
/* set predecessor of new block */
block = new_r_Block(irg, 1, &pre);
/* insert new jmp node to new block */
jmp = new_r_Jmp(irg, block);
/* set successor of new block */
set_irn_n(n, i, jmp);
- *changed = 1;
+ cenv->changed = 1;
} /* predecessor has multiple successors */
} /* for all predecessors */
} /* n is a multi-entry block */
}
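+/*
+ * Illustration (not part of the original code): a critical edge runs from
+ * a forking block directly into a joining block. Splitting it inserts an
+ * otherwise empty block that ends in a Jmp:
+ *
+ *   before:   Cond ------------------> Block (several cfg preds)
+ *   after:    Cond --> [Block, Jmp] --> Block (several cfg preds)
+ *
+ * The new block gives later phases a place that belongs to exactly one
+ * predecessor/successor pair.
+ */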
void remove_critical_cf_edges(ir_graph *irg) {
- int changed = 0;
+ cf_env env;
+
+ env.changed = 0;
- irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
- if (changed) {
+ irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+ if (env.changed) {
/* control flow changed */
set_irg_outs_inconsistent(irg);
set_irg_extblk_inconsistent(irg);