ir_node *nn, *block;
int new_arity;
ir_op *op = get_irn_op(n);
+ (void) env;
/* The end node loses its flexible in array. This doesn't matter,
as dead node elimination builds End by hand, inlining doesn't use
exchange(nn, old);
}
}
- } else if (is_Phi(n)) {
+ } else if (is_Phi(n) && get_irn_arity(n) > 0) {
/* Don't copy node if corresponding predecessor in block is Bad.
The Block itself should not be Bad. */
block = get_nodes_block(n);
dead_node_elimination(ir_graph *irg) {
if (get_opt_optimize() && get_opt_dead_node_elimination()) {
ir_graph *rem;
+#ifdef INTERPROCEDURAL_VIEW
int rem_ipview = get_interprocedural_view();
+#endif
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
/* Remember external state of current_ir_graph. */
rem = current_ir_graph;
current_ir_graph = irg;
+#ifdef INTERPROCEDURAL_VIEW
set_interprocedural_view(0);
+#endif
assert(get_irg_phase_state(irg) != phase_building);
hook_dead_node_elim(irg, 0);
current_ir_graph = rem;
+#ifdef INTERPROCEDURAL_VIEW
set_interprocedural_view(rem_ipview);
+#endif
}
}
}
/**
- * Returns TRUE if the number of callers in 0 in the irg's environment,
+ * Returns TRUE if the number of callers is 0 in the irg's environment,
* hence this irg is a leave.
*/
INLINE static int is_leave(ir_graph *irg) {
}
/**
- * Returns TRUE if the number of callers is smaller size in the irg's environment.
+ * Returns TRUE if the number of nodes in the callee is
+ * smaller than size in the irg's environment.
*/
INLINE static int is_smaller(ir_graph *callee, int size) {
inline_irg_env *env = get_irg_link(callee);
optimize_graph_df(irg);
optimize_cf(irg);
}
- if (env->got_inline || (env->n_callers_orig != env->n_callers))
+ if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
env->n_callers_orig, env->n_callers,
get_entity_name(get_irg_entity(irg))));
+ }
}
obstack_free(&obst, NULL);
*/
static ir_node *consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer)
{
- ir_node *block = NULL;
-
- /* Compute the latest block into which we can place a node so that it is
+ /* Compute the last block into which we can place a node so that it is
before consumer. */
if (is_Phi(consumer)) {
/* our consumer is a Phi-node, the effective use is in all those
int i;
for (i = 0; i < arity; i++) {
- if (get_irn_n(consumer, i) == producer) {
- ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+ if (get_Phi_pred(consumer, i) == producer) {
+ ir_node *new_block = get_Block_cfgpred_block(phi_block, i);
if (!is_Block_unreachable(new_block))
- block = calc_dca(block, new_block);
+ dca = calc_dca(dca, new_block);
}
}
-
- if (!block)
- block = get_nodes_block(producer);
} else {
- assert(!is_Block(consumer));
- block = get_nodes_block(consumer);
+ dca = calc_dca(dca, get_nodes_block(consumer));
}
- /* Compute the deepest common ancestor of block and dca. */
- return calc_dca(dca, block);
+ return dca;
}
/* FIXME: the name clashes here with the function from ana/field_temperature.c
if (1 || get_irg_loopinfo_state(irg) != loopinfo_consistent) {
free_loop_information(irg);
- construct_backedges(irg);
+ construct_cf_backedges(irg);
}
/* Place all floating nodes as early as possible. This guarantees
goto insert;
continue;
}
- if (is_op_forking(cfop)) {
+ /* we don't want to place nodes in the start block, so handle it like forking */
+ if (is_op_forking(cfop) || cfop == op_Start) {
/* Predecessor has multiple successors. Insert new control flow edges. */
insert:
/* set predecessor of new block */