nn = get_new_node(n);
if (is_Block(n)) {
+ /* copy the macro block header */
+ ir_node *mbh = get_Block_MacroBlock(n);
+
+ if (mbh == n) {
+ /* this block is a macroblock header */
+ set_irn_n(nn, -1, nn);
+ } else {
+ /* get the macro block header */
+ set_irn_n(nn, -1, get_new_node(mbh));
+ }
+
/* Don't copy Bad nodes. */
j = 0;
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
if (! is_Bad(get_irn_n(n, i))) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
exchange(nn, old);
}
}
- } else if (get_irn_op(n) == op_Phi) {
+ } else if (is_Phi(n)) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_block(n);
- set_nodes_block(nn, get_new_node(block));
+ set_irn_n(nn, -1, get_new_node(block));
j = 0;
irn_arity = get_irn_arity(n);
for (i = 0; i < irn_arity; i++) {
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
- xfree (graveyard_obst); /* ... then free it. */
+ xfree(graveyard_obst); /* ... then free it. */
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
} else {
set_Tuple_pred(call, pn_Call_T_result, new_Bad());
}
+ /* handle the regular call */
+ set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
/* For now, we cannot inline calls with value_base */
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
ir_node *ret, *irn;
ret = get_irn_n(end_bl, i);
irn = skip_Proj(ret);
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+ if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
++n_exc;
}
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
} else {
ir_node *main_end_bl;
int main_end_bl_arity;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
assert(irn_not_visited(n));
mark_irn_visited(n);
-#ifndef CAN_PLACE_PROJS
- while (is_Proj(n)) {
- n = get_Proj_pred(n);
- mark_irn_visited(n);
- }
-#endif
-
/* Place floating nodes. */
if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_nodes_block(n);
+ ir_node *curr_block = get_irn_n(n, -1);
int in_dead_block = is_Block_unreachable(curr_block);
int depth = 0;
ir_node *b = NULL; /* The block to place this node in */
*/
if (! in_dead_block) {
if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_nodes_block(pred)))
+ is_Block_unreachable(get_irn_n(pred, -1)))
set_nodes_block(pred, curr_block);
}
place_floats_early(pred, worklist);
/* Because all loops contain at least one op_pin_state_pinned node, now all
our inputs are either op_pin_state_pinned or place_early() has already
been finished on them. We do not have any unfinished inputs! */
- pred_block = get_nodes_block(pred);
+ pred_block = get_irn_n(pred, -1);
if ((!is_Block_dead(pred_block)) &&
(get_Block_dom_depth(pred_block) > depth)) {
b = pred_block;
depth = get_Block_dom_depth(pred_block);
}
/* Avoid that the node is placed in the Start block */
- if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)
+ if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
&& get_irg_phase_state(current_ir_graph) != phase_backend) {
b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
assert(b != get_irg_start_block(current_ir_graph));
}
} else if (is_Phi(n)) {
ir_node *pred;
- ir_node *curr_block = get_nodes_block(n);
+ ir_node *curr_block = get_irn_n(n, -1);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* Phi nodes: move nodes from dead blocks into the effective use
* of the Phi-input if the Phi is not in a bad block.
*/
- pred = get_nodes_block(n);
+ pred = get_irn_n(n, -1);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_nodes_block(pred))) {
+ is_Block_unreachable(get_irn_n(pred, -1))) {
set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
}
waitq_put(worklist, pred);
}
} else {
ir_node *pred;
- ir_node *curr_block = get_nodes_block(n);
+ ir_node *curr_block = get_irn_n(n, -1);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* All other nodes: move nodes from dead blocks into the same block.
*/
- pred = get_nodes_block(n);
+ pred = get_irn_n(n, -1);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_nodes_block(pred))) {
+ is_Block_unreachable(get_irn_n(pred, -1))) {
set_nodes_block(pred, curr_block);
}
waitq_put(worklist, pred);
}
if (! block)
- block = get_nodes_block(producer);
+ block = get_irn_n(producer, -1);
} else {
assert(is_no_Block(consumer));
block = get_nodes_block(consumer);
dca = get_deepest_common_ancestor(succ, dca);
} else {
/* ignore if succ is in dead code */
- succ_blk = get_nodes_block(succ);
+ succ_blk = get_irn_n(succ, -1);
if (is_Block_unreachable(succ_blk))
continue;
dca = consumer_dom_dca(dca, succ, node);
return dca;
}
-#ifdef CAN_PLACE_PROJS
static void set_projs_block(ir_node *node, ir_node *block)
{
int i;
set_nodes_block(succ, block);
}
}
-#endif
/**
* Find the latest legal block for N and place N into the
(get_irn_mode(n) != mode_X)) {
/* Remember the early_blk placement of this block to move it
out of loop no further than the early_blk placement. */
- early_blk = get_nodes_block(n);
+ early_blk = get_irn_n(n, -1);
/*
* BEWARE: Here we also get code, that is live, but
if (dca != NULL) {
set_nodes_block(n, dca);
move_out_of_loops(n, early_blk);
-#ifdef CAN_PLACE_PROJS
if(get_irn_mode(n) == mode_T) {
set_projs_block(n, get_nodes_block(n));
}
-#endif
}
}
}
current_ir_graph = rem;
}
+typedef struct cf_env {
+ char changed; /**< flag indicating that the CF graph has changed. */
+} cf_env;
+
/**
* Called by walker of remove_critical_cf_edges().
*
* predecessors and a block of multiple successors.
*
* @param n IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker.
*/
static void walk_critical_cf_edges(ir_node *n, void *env) {
int arity, i;
ir_node *pre, *block, *jmp;
- int *changed = env;
+ cf_env *cenv = env;
ir_graph *irg = get_irn_irg(n);
/* Block has multiple predecessors */
pre = get_irn_n(n, i);
cfop = get_irn_op(skip_Proj(pre));
- /* Predecessor has multiple successors. Insert new control flow edge but
- ignore exception edges. */
- if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
+ if (is_op_fragile(cfop)) {
+ if (cfop != op_Raise)
+ goto insert;
+ continue;
+ }
+ if (is_op_forking(cfop)) {
+ /* Predecessor has multiple successors. Insert a new control flow edge. */
+insert:
/* set predecessor of new block */
block = new_r_Block(irg, 1, &pre);
/* insert new jmp node to new block */
jmp = new_r_Jmp(irg, block);
/* set successor of new block */
set_irn_n(n, i, jmp);
- *changed = 1;
+ cenv->changed = 1;
} /* predecessor has multiple successors */
} /* for all predecessors */
} /* n is a multi-entry block */
}
void remove_critical_cf_edges(ir_graph *irg) {
- int changed = 0;
+ cf_env env;
+
+ env.changed = 0;
- irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
- if (changed) {
+ irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+ if (env.changed) {
/* control flow changed */
set_irg_outs_inconsistent(irg);
set_irg_extblk_inconsistent(irg);