# include "pset.h"
# include "pdeq.h" /* For code placement */
# include "irouts.h"
+# include "irloop.h"
+# include "irbackedge_t.h"
/* Defined in iropt.c */
pset *new_identities (void);
}
}
+/* Allocate fresh backedge information for a newly copied node.
+   Block, Phi and Filter nodes carry a per-predecessor backedge flag
+   array; after copying, that array must live on the current graph's
+   obstack and match the node's arity.  All other opcodes carry no
+   backedge info, so nothing is done for them. */
+static INLINE void new_backedge_info(ir_node *n) {
+  switch(get_irn_opcode(n)) {
+  case iro_Block:
+    /* cg_backedge (interprocedural backedges) starts out unset. */
+    n->attr.block.cg_backedge = NULL;
+    n->attr.block.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
+    break;
+  case iro_Phi:
+    n->attr.phi_backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
+    break;
+  case iro_Filter:
+    n->attr.filter.backedge = new_backedge_arr(current_ir_graph->obst, get_irn_arity(n));
+    break;
+  default: ;
+  }
+}
+
/* Copies the node to the new obstack. The Ins of the new node point to
the predecessors on the old obstack. For block/phi nodes not all
predecessors might be copied. n->link points to the new node.
For Phi and Block nodes the function allocates in-arrays with an arity
only for useful predecessors. The arity is determined by counting
the non-bad predecessors of the block. */
-void
+static void
copy_node (ir_node *n, void *env) {
ir_node *nn, *block;
int new_arity;
if (get_irn_opcode(n) == iro_Block) {
block = NULL;
new_arity = compute_new_arity(n);
+ n->attr.block.graph_arr = NULL;
} else {
block = get_nodes_Block(n);
if (get_irn_opcode(n) == iro_Phi) {
was allocated on the old obstack the pointers now are dangling. This
frees e.g. the memory of the graph_arr allocated in new_immBlock. */
copy_attrs(n, nn);
+ new_backedge_info(nn);
set_new_node(n, nn);
/* printf("\n old node: "); DDMSG2(n);
/* Copies new predecessors of old node to new node remembered in link.
Spare the Bad predecessors of Phi and Block nodes. */
-void
+static void
copy_preds (ir_node *n, void *env) {
ir_node *nn, *block;
int i, j;
for (i = 0; i < get_irn_arity(n); i++)
if (get_irn_opcode(get_irn_n(n, i)) != iro_Bad) {
set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ //if (is_backedge(n, i)) set_backedge(nn, j);
j++;
}
/* repair the block visited flag from above misuse. Repair it in both
in array contained Bads. Now it's possible.
We don't call optimize_in_place as it requires
that the fields in ir_graph are set properly. */
- if (get_Block_n_cfgpreds(nn) == 1
- && get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)
+ if ((get_opt_control_flow()) &&
+ (get_Block_n_cfgpreds(nn) == 1) &&
+ (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp))
exchange(nn, get_nodes_Block(get_Block_cfgpred(nn, 0)));
} else if (get_irn_opcode(n) == iro_Phi) {
/* Don't copy node if corresponding predecessor in block is Bad.
for (i = 0; i < get_irn_arity(n); i++)
if (get_irn_opcode(get_irn_n(block, i)) != iro_Bad) {
set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ //if (is_backedge(n, i)) set_backedge(nn, j);
j++;
}
/* If the pre walker reached this Phi after the post walker visited the
}
/* Copies the graph recursively, compacts the keepalive of the end node. */
-void
+static void
copy_graph () {
ir_node *oe, *ne; /* old end, new end */
ir_node *ka; /* keep alive */
graph. */
void
copy_graph_env () {
+ ir_node *old_end;
/* Not all nodes remembered in current_ir_graph might be reachable
from the end node. Assure their link is set to NULL, so that
we can test whether new nodes have been computed. */
copy_graph();
/* fix the fields in current_ir_graph */
- free_End(get_irg_end(current_ir_graph));
- set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
+ old_end = get_irg_end(current_ir_graph);
+ set_irg_end (current_ir_graph, get_new_node(old_end));
+ free_End(old_end);
set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
copy_node (get_irg_frame(current_ir_graph), NULL);
assert(get_irg_phase_state(current_ir_graph) != phase_building);
free_outs(current_ir_graph);
+  /* @@@ so far we lose loops when copying */
+ set_irg_loop(current_ir_graph, NULL);
+
if (get_optimize() && get_opt_dead_node_elimination()) {
/* A quiet place, where the old obstack can rest in peace,
then updates the entity if it's a local one. env must be a pointer
to the frame type of the procedure. The new entities must be in
the link field of the entities. */
-void
+static INLINE void
copy_node_inline (ir_node *n, void *env) {
ir_node *new;
type *frame_tp = (type *)env;
/** Check preconditions **/
assert(get_irn_op(call) == op_Call);
- assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
+ /* assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph))); */
+ /*
+ @@@ TODO does not work for InterfaceIII.java after cgana
+ assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
+ get_Call_type(call)));
+ */
assert(get_type_tpop(get_Call_type(call)) == type_method);
if (called_graph == current_ir_graph) return;
/** Part the Call node into two nodes. Pre_call collects the parameters of
- the procedure and later replaces the Start node of the called graph.
- Post_call is the old Call node and collects the results of the called
- graph. Both will end up being a tuple. **/
+ the procedure and later replaces the Start node of the called graph.
+ Post_call is the old Call node and collects the results of the called
+ graph. Both will end up being a tuple. **/
post_bl = get_nodes_Block(call);
set_irg_current_block(current_ir_graph, post_bl);
/* XxMxPxP of Start + parameter of Call */
}
}
if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* whatch it: current_block is changed! */
+ new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
set_Tuple_pred(call, 1, new_Jmp());
/* The Phi for the memories with the exception objects */
n_exc = 0;
if (!(get_optimize() && get_opt_inline())) return;
- /*DDME(get_irg_ent(current_ir_graph));*/
+ //DDME(get_irg_ent(current_ir_graph));
current_ir_graph = irg;
/* Handle graph state */
tv = get_Const_tarval(get_Call_ptr(calls[i]));
callee = get_entity_irg(tv->u.p.ent);
if ((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) {
- /*printf(" inlineing "); DDME(tv->u.p.ent);*/
inline_method(calls[i], callee);
}
}
return dca;
}
-#if 0
-/* @@@ Needs loop informations. Will implement later interprocedural. */
+/* Returns the loop nesting depth of the loop information attached to n.
+   NOTE(review): assumes get_irn_loop(n) is non-NULL here — loop info
+   must have been constructed before calling this. */
+INLINE int get_irn_loop_depth(ir_node *n) {
+  return get_loop_depth(get_irn_loop(n));
+}
+
+/* Move n to a block with less loop depth than its current block. The
+   new block must be dominated by early. */
static void
-move_out_of_loops (ir_node *n, ir_node *dca)
+move_out_of_loops (ir_node *n, ir_node *early)
{
-  assert(dca);
+  ir_node *best, *dca;
+  assert(n && early);
+
  /* Find the region deepest in the dominator tree dominating
     dca with the least loop nesting depth, but still dominated
     by our early placement. */
-  ir_node *best = dca;
-  while (dca != get_nodes_Block(n)) {
+  dca = get_nodes_Block(n);
+  best = dca;
+  while (dca != early) {
    dca = get_Block_idom(dca);
    if (!dca) break; /* should we put assert(dca)? */
-    if (get_Block_loop_depth(dca) < get_Block_loop_depth(best)) {
+    if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
      best = dca;
    }
  }
+  /* best is now the block with the smallest loop depth on the
+     dominator chain from n's current block up to early. */
-  if (get_Block_dom_depth(best) >= get_Block_dom_depth(get_nodes_Block(n)))
+  if (best != get_nodes_Block(n)) {
+    /* debug output
+    printf("Moving out of loop: "); DDMN(n);
+    printf("  Outermost block: "); DDMN(early);
+    printf("  Best block: "); DDMN(best);
+    printf("  Innermost block: "); DDMN(get_nodes_Block(n));
+    */
    set_nodes_Block(n, best);
+  }
}
-#endif
/* Find the latest legal block for N and place N into the
`optimal' Block between the latest and earliest legal block.
place_floats_late (ir_node *n)
{
int i;
+ ir_node *early;
assert (irn_not_visited(n)); /* no multiple placement */
/* no need to place block nodes, control nodes are already placed. */
- if ((get_irn_op(n) != op_Block) && (!is_cfop(n)) && (get_irn_mode(n) != mode_X)) {
+ if ((get_irn_op(n) != op_Block) &&
+ (!is_cfop(n)) &&
+ (get_irn_mode(n) != mode_X)) {
+    /* Remember the early placement of this block to move it
+       out of loops no further than the early placement. */
+ early = get_nodes_Block(n);
/* Assure that our users are all placed, except the Phi-nodes.
--- Each dataflow cycle contains at least one Phi-node. We
have to break the `user has to be placed before the
place_floats_late (succ);
}
- /* We have to determine the final block of this node... except for constants. */
+ /* We have to determine the final block of this node... except for
+ constants. */
if ((get_op_pinned(get_irn_op(n)) == floats) &&
(get_irn_op(n) != op_Const) &&
(get_irn_op(n) != op_SymConst)) {
dca = consumer_dom_dca (dca, get_irn_out(n, i), n);
}
set_nodes_Block(n, dca);
-#if 0
- move_out_of_loops (n, dca);
-#endif
+
+ move_out_of_loops (n, early);
}
}
if (get_irg_dom_state(irg) != dom_consistent)
compute_doms(irg);
+ construct_backedges(irg);
+
/* Place all floating nodes as early as possible. This guarantees
a legal code placement. */
worklist = new_pdeq ();
ir_node *pred, *phi;
ir_node **in;
+  /* Count the number of predecessors if this block is merged with
+     predecessor blocks that are empty. */
max_preds = 0;
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
- pred = get_Block_cfgpred(b, i);
max_preds += test_whether_dispensable(b, i);
}
in = (ir_node **) malloc(max_preds * sizeof(ir_node *));
-
/** Debug output **
printf(" working on "); DDMN(b);
for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
printf(" removing pred %i ", i); DDMN(pred);
} else { printf(" Nothing to do for "); DDMN(pred); }
}
- ** end Debug output **/
+ ** end Debug output **/
/** Fix the Phi nodes **/
phi = get_irn_link(b);