/* compute the number of good predecessors */
res = irn_arity = get_irn_arity(b);
for (i = 0; i < irn_arity; i++)
- if (get_irn_opcode(get_irn_n(b, i)) == iro_Bad) res--;
+ if (is_Bad(get_irn_n(b, i))) res--;
/* Save the result in the block visited flag, offset by irg_v. */
set_Block_block_visited(b, irg_v + res);
return res;
nn = get_new_node(n);
if (is_Block(n)) {
+ /* copy the macro block header */
+ ir_node *mbh = get_Block_MacroBlock(n);
+
+ if (mbh == n) {
+ /* this block is a macroblock header */
+ set_irn_n(nn, -1, nn);
+ } else {
+ /* get the macro block header */
+ set_irn_n(nn, -1, get_new_node(mbh));
+ }
+
/* Don't copy Bad nodes. */
j = 0;
irn_arity = get_irn_arity(n);
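+ /* copy only the good predecessors, compacting the in array */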
for (i = 0; i < irn_arity; i++) {
if (! is_Bad(get_irn_n(n, i))) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
/*if (is_backedge(n, i)) set_backedge(nn, j);*/
j++;
}
that the fields in ir_graph are set properly. */
if ((get_opt_control_flow_straightening()) &&
(get_Block_n_cfgpreds(nn) == 1) &&
- (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+ is_Jmp(get_Block_cfgpred(nn, 0))) {
ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
if (nn == old) {
/* Jmp jumps into the block it is in -- deal with this self cycle. */
exchange(nn, old);
}
}
- } else if (get_irn_op(n) == op_Phi) {
+ } else if (is_Phi(n)) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_block(n);
}
/* Now the new node is complete. We can add it to the hash table for CSE.
@@@ inlining aborts if we identify End. Why? */
- if (get_irn_op(nn) != op_End)
+ if (!is_End(nn))
add_identities(current_ir_graph->value_table, nn);
}
/* Free memory from old unoptimized obstack */
obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
- xfree (graveyard_obst); /* ... then free it. */
+ xfree(graveyard_obst); /* ... then free it. */
/* inform statistics that the run is over */
hook_dead_node_elim(irg, 0);
/* If the link field of the block is NULL, look for Bad predecessors;
   otherwise this has already been done. */
- if (get_irn_op(n) == op_Block &&
- get_irn_link(n) == NULL) {
-
+ if (is_Block(n) && get_irn_link(n) == NULL) {
/* save old predecessors in link field (position 0 is the block operand) */
set_irn_link(n, get_irn_in(n));
int i, old_irn_arity, new_irn_arity;
/* relink bad predecessors of a block */
- if (get_irn_op(n) == op_Block)
+ if (is_Block(n))
relink_bad_block_predecessors(n, env);
/* If Phi node relink its block and its predecessors */
- if (get_irn_op(n) == op_Phi) {
-
+ if (is_Phi(n)) {
/* Relink predecessors of phi's block */
block = get_nodes_block(n);
if (get_irn_link(block) == NULL)
ir_type *frame_tp = (ir_type *)env;
copy_node(n, NULL);
- if (get_irn_op(n) == op_Sel) {
+ if (is_Sel(n)) {
- nn = get_new_node (n);
+ nn = get_new_node(n);
assert(is_Sel(nn));
if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
}
- } else if (get_irn_op(n) == op_Block) {
+ } else if (is_Block(n)) {
- nn = get_new_node (n);
+ nn = get_new_node(n);
nn->attr.block.irg = current_ir_graph;
}
*/
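+/**
+ * Walker: checks whether a node is a Proj on the value argument base
+ * of the Start node.  Graphs that access their value_arg_base cannot
+ * be inlined, so *allow_inline is reset.
+ */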
static void find_addr(ir_node *node, void *env) {
int *allow_inline = env;
- if (is_Proj(node) && get_irn_op(get_Proj_pred(node)) == op_Start) {
- if (get_Proj_proj(node) == pn_Start_P_value_arg_base)
- *allow_inline = 0;
+ if (is_Proj(node) &&
+ is_Start(get_Proj_pred(node)) &&
+ get_Proj_proj(node) == pn_Start_P_value_arg_base) {
+ *allow_inline = 0;
}
}
}
enum exc_mode {
- exc_handler = 0, /**< There is a handler. */
- exc_to_end = 1, /**< Branches to End. */
- exc_no_handler = 2 /**< Exception handling not represented. */
+ exc_handler = 0, /**< There is a handler. */
+ exc_to_end = 1, /**< Branches to End. */
+ exc_no_handler = 2 /**< Exception handling not represented. */
};
/* Inlines a method at the given call site. */
n_ret = 0;
for (i = 0; i < arity; i++) {
ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
+ if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
}
} else {
set_Tuple_pred(call, pn_Call_T_result, new_Bad());
}
+ /* handle the regular call */
+ set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
/* For now, we cannot inline calls with value_base */
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
ir_node *ret, *irn;
ret = get_irn_n(end_bl, i);
irn = skip_Proj(ret);
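+ /* collect exception control flow predecessors: fragile ops and Raise nodes */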
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+ if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
++n_exc;
}
/* We rely on the fact that all cfops have the memory output at the same position. */
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
+ } else if (is_Raise(ret)) {
cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
n_exc++;
}
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
} else {
ir_node *main_end_bl;
int main_end_bl_arity;
ir_node *ret = get_irn_n(end_bl, i);
ir_node *irn = skip_Proj(ret);
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+ if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
n_exc++;
}
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
}
/**
- * Returns TRUE if the number of callers in 0 in the irg's environment,
+ * Returns TRUE if the number of callers is 0 in the irg's environment,
- * hence this irg is a leave.
+ * hence this irg is a leaf.
*/
INLINE static int is_leave(ir_graph *irg) {
}
/**
- * Returns TRUE if the number of callers is smaller size in the irg's environment.
+ * Returns TRUE if the number of nodes in the callee is
+ * smaller than size in the irg's environment.
*/
INLINE static int is_smaller(ir_graph *callee, int size) {
inline_irg_env *env = get_irg_link(callee);
/* Place floating nodes. */
if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_irn_n(n, -1);
+ ir_node *curr_block = get_nodes_block(n);
int in_dead_block = is_Block_unreachable(curr_block);
int depth = 0;
ir_node *b = NULL; /* The block to place this node in */
*/
if (! in_dead_block) {
if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1)))
+ is_Block_unreachable(get_nodes_block(pred)))
set_nodes_block(pred, curr_block);
}
place_floats_early(pred, worklist);
/* Because all loops contain at least one op_pin_state_pinned node, now all
our inputs are either op_pin_state_pinned or place_early() has already
been finished on them. We do not have any unfinished inputs! */
- pred_block = get_irn_n(pred, -1);
+ pred_block = get_nodes_block(pred);
if ((!is_Block_dead(pred_block)) &&
(get_Block_dom_depth(pred_block) > depth)) {
b = pred_block;
depth = get_Block_dom_depth(pred_block);
}
/* Avoid that the node is placed in the Start block */
- if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
- && get_irg_phase_state(current_ir_graph) != phase_backend) {
+ if (depth == 1 &&
+ get_Block_dom_depth(get_nodes_block(n)) > 1 &&
+ get_irg_phase_state(current_ir_graph) != phase_backend) {
b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
assert(b != get_irg_start_block(current_ir_graph));
depth = 2;
*/
irn_arity = get_irn_arity(n);
- if (get_irn_op(n) == op_End) {
+ if (is_End(n)) {
/*
* Simplest case: End node. Predecessors are keep-alives,
* no need to move out of dead block.
}
} else if (is_Phi(n)) {
ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
+ ir_node *curr_block = get_nodes_block(n);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* Phi nodes: move nodes from dead blocks into the effective use
* of the Phi-input if the Phi is not in a bad block.
*/
- pred = get_irn_n(n, -1);
+ pred = get_nodes_block(n);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
+ is_Block_unreachable(get_nodes_block(pred))) {
set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
}
waitq_put(worklist, pred);
}
} else {
ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
+ ir_node *curr_block = get_nodes_block(n);
int in_dead_block = is_Block_unreachable(curr_block);
/*
* All other nodes: move nodes from dead blocks into the same block.
*/
- pred = get_irn_n(n, -1);
+ pred = get_nodes_block(n);
if (irn_not_visited(pred))
waitq_put(worklist, pred);
if (irn_not_visited(pred)) {
if (! in_dead_block &&
get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
+ is_Block_unreachable(get_nodes_block(pred))) {
set_nodes_block(pred, curr_block);
}
waitq_put(worklist, pred);
* I.e., DCA is the block where we might place PRODUCER.
* A data flow edge points from producer to consumer.
*/
-static ir_node *
-consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer) {
- ir_node *block = NULL;
-
- /* Compute the latest block into which we can place a node so that it is
+static ir_node *consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer)
+{
+ /* Compute the latest block into which we can place a node so that it is
before consumer. */
- if (get_irn_op(consumer) == op_Phi) {
+ if (is_Phi(consumer)) {
/* our consumer is a Phi-node, the effective use is in all those
blocks through which the Phi-node reaches producer */
- int i, irn_arity;
ir_node *phi_block = get_nodes_block(consumer);
- irn_arity = get_irn_arity(consumer);
+ int arity = get_irn_arity(consumer);
+ int i;
- for (i = 0; i < irn_arity; i++) {
- if (get_irn_n(consumer, i) == producer) {
- ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+ for (i = 0; i < arity; i++) {
+ if (get_Phi_pred(consumer, i) == producer) {
+ ir_node *new_block = get_Block_cfgpred_block(phi_block, i);
- if (! is_Block_unreachable(new_block))
- block = calc_dca(block, new_block);
+ if (!is_Block_unreachable(new_block))
+ dca = calc_dca(dca, new_block);
}
}
-
- if (! block)
- block = get_irn_n(producer, -1);
} else {
- assert(is_no_Block(consumer));
- block = get_nodes_block(consumer);
+ dca = calc_dca(dca, get_nodes_block(consumer));
}
- /* Compute the deepest common ancestor of block and dca. */
- return calc_dca(dca, block);
+ return dca;
}
/* FIXME: the name clashes here with the function from ana/field_temperature.c
for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
ir_node *succ = get_irn_out(node, i);
- ir_node *succ_blk;
if (is_End(succ)) {
/*
continue;
}
- if(is_Proj(succ)) {
+ if (is_Proj(succ)) {
dca = get_deepest_common_ancestor(succ, dca);
} else {
/* ignore if succ is in dead code */
- succ_blk = get_irn_n(succ, -1);
+ ir_node *succ_blk = get_nodes_block(succ);
if (is_Block_unreachable(succ_blk))
continue;
dca = consumer_dom_dca(dca, succ, node);
mark_irn_visited(n);
/* no need to place block nodes, control nodes are already placed. */
- if ((get_irn_op(n) != op_Block) &&
+ if (!is_Block(n) &&
(!is_cfop(n)) &&
(get_irn_mode(n) != mode_X)) {
/* Remember the early_blk placement of this block to move it
out of loop no further than the early_blk placement. */
- early_blk = get_irn_n(n, -1);
+ early_blk = get_nodes_block(n);
/*
* BEWARE: Here we also get code that is live, but
producer of one of their inputs in the same block anyway. */
for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+ if (irn_not_visited(succ) && !is_Phi(succ))
place_floats_late(succ, worklist);
}
if (1 || get_irg_loopinfo_state(irg) != loopinfo_consistent) {
free_loop_information(irg);
- construct_backedges(irg);
+ construct_cf_backedges(irg);
}
/* Place all floating nodes as early as possible. This guarantees
current_ir_graph = rem;
}
+typedef struct cf_env {
+ char changed; /**< flag indicating that the cf graph has changed. */
+} cf_env;
+
/**
* Called by walker of remove_critical_cf_edges().
*
* predecessors and a block of multiple successors.
*
* @param n IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker.
*/
static void walk_critical_cf_edges(ir_node *n, void *env) {
int arity, i;
ir_node *pre, *block, *jmp;
- int *changed = env;
+ cf_env *cenv = env;
ir_graph *irg = get_irn_irg(n);
/* Block has multiple predecessors */
pre = get_irn_n(n, i);
cfop = get_irn_op(skip_Proj(pre));
- /* Predecessor has multiple successors. Insert new control flow edge but
- ignore exception edges. */
- if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
+ if (is_op_fragile(cfop)) {
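+ /* all fragile ops except Raise get a new edge; Raise keeps its edges untouched */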
+ if (cfop != op_Raise)
+ goto insert;
+ continue;
+ }
+ if (is_op_forking(cfop)) {
+ /* Predecessor has multiple successors. Insert a new control flow edge. */
+insert:
/* set predecessor of new block */
block = new_r_Block(irg, 1, &pre);
/* insert new jmp node to new block */
jmp = new_r_Jmp(irg, block);
/* set successor of new block */
set_irn_n(n, i, jmp);
- *changed = 1;
+ cenv->changed = 1;
} /* predecessor has multiple successors */
} /* for all predecessors */
} /* n is a multi-entry block */
}
void remove_critical_cf_edges(ir_graph *irg) {
- int changed = 0;
+ cf_env env;
+
+ env.changed = 0;
- irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
- if (changed) {
+ irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+ if (env.changed) {
/* control flow changed */
set_irg_outs_inconsistent(irg);
set_irg_extblk_inconsistent(irg);