+ obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
+ xfree (graveyard_obst); /* ... then free it. */
+ }
+
+ current_ir_graph = rem;
+}
+
/* Relink bad predecessors of a block and store the old in array in the
   link field. This function is called by relink_bad_predecessors().
   The in array referenced by the link field starts with the block operand
   at position 0. If the block has bad predecessors, create a new in array
   without the bad preds; otherwise leave the in array untouched. */
+static void relink_bad_block_predecessors(ir_node *n, void *env) {
+ ir_node **new_in, *irn;
+ int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
+
+ /* if link field of block is NULL, look for bad predecessors otherwise
+ this is allready done */
+ if (get_irn_op(n) == op_Block &&
+ get_irn_link(n) == NULL) {
+
+ /* save old predecessors in link field (position 0 is the block operand)*/
+ set_irn_link(n, (void *)get_irn_in(n));
+
+ /* count predecessors without bad nodes */
+ old_irn_arity = get_irn_arity(n);
+ for (i = 0; i < old_irn_arity; i++)
+ if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;
+
+ /* arity changing: set new predecessors without bad nodes */
+ if (new_irn_arity < old_irn_arity) {
+ /* get new predecessor array without Block predecessor */
+ new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));
+
+ /* set new predeseccors in array */
+ new_in[0] = NULL;
+ new_irn_n = 1;
+ for (i = 1; i < old_irn_arity; i++) {
+ irn = get_irn_n(n, i);
+ if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
+ }
+ n->in = new_in;
+ } /* ir node has bad predecessors */
+
+ } /* Block is not relinked */
+}
+
/* Relinks Bad predecessors of Blocks and Phis, called by the walker in
   remove_bad_predecessors(). If n is a Block, call
   relink_bad_block_predecessors(). If n is a Phi node, also call the
   relinking function for the Phi's Block. If this block has bad
   predecessors, relink the preds of the Phi node. */
static void relink_bad_predecessors(ir_node *n, void *env) {
  ir_node *block, **old_in;
  int i, old_irn_arity, new_irn_arity;

  /* relink bad predecessors of a block */
  if (get_irn_op(n) == op_Block)
    relink_bad_block_predecessors(n, env);

  /* If Phi node relink its block and its predecessors */
  if (get_irn_op(n) == op_Phi) {

    /* Relink predecessors of phi's block first, so that the block's old
       in array is available in its link field below. */
    block = get_nodes_Block(n);
    if (get_irn_link(block) == NULL)
      relink_bad_block_predecessors(block, env);

    old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
    old_irn_arity = ARR_LEN(old_in);

    /* Relink Phi predecessors if count of predecessors changed,
       i.e. the block's in array was compacted. */
    if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
      /* set new predecessors in array
         n->in[0] remains the same block.
         NOTE: old_in and n->in are raw in arrays -- index 0 is the block
         operand, the real predecessors start at index 1; a position i held
         a Bad control predecessor iff old_in[i] is Bad. */
      new_irn_arity = 1;
      for(i = 1; i < old_irn_arity; i++)
        if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];

      /* shrink the Phi's in array to the compacted length */
      ARR_SETLEN(ir_node *, n->in, new_irn_arity);
    }

  } /* n is a Phi node */
}
+
/* Removes Bad predecessors from Blocks and the corresponding
   inputs to Phi nodes as in dead_node_elimination but without
   copying the graph.
   On walking up, set the link field to NULL; on walking down, call
   relink_bad_predecessors(). (This function stores the old in array
   in the link field and sets a new in array if the arity of the
   predecessors changes.) */
void remove_bad_predecessors(ir_graph *irg) {
  /* init_link clears the link fields on the way down; on the way up
     relink_bad_predecessors() compacts the in arrays of Blocks and Phis. */
  irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
}
+
+
/**********************************************************************/
/*  Functionality for inlining                                        */
/**********************************************************************/
+
/* Copy node for inlining. Copies the node by calling copy_node and
   then updates the entity if it is a local one. env must be a pointer
   to the frame type of the procedure. The new entities must be in
   the link field of the entities. */
+static INLINE void
+copy_node_inline (ir_node *n, void *env) {
+ ir_node *new;
+ type *frame_tp = (type *)env;
+
+ copy_node(n, NULL);
+ if (get_irn_op(n) == op_Sel) {
+ new = get_new_node (n);
+ assert(get_irn_op(new) == op_Sel);
+ if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
+ set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
+ }
+ }
+}
+
+void inline_method(ir_node *call, ir_graph *called_graph) {
+ ir_node *pre_call;
+ ir_node *post_call, *post_bl;
+ ir_node *in[5];
+ ir_node *end, *end_bl;
+ ir_node **res_pred;
+ ir_node **cf_pred;
+ ir_node *ret, *phi;
+ ir_node *cf_op = NULL, *bl;
+ int arity, n_ret, n_exc, n_res, i, j, rem_opt;
+ type *called_frame;
+
+ if (!get_optimize() || !get_opt_inline()) return;
+ /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
+ rem_opt = get_optimize();
+ set_optimize(0);
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_pinned(current_ir_graph) == pinned);
+ assert(get_irg_pinned(called_graph) == pinned);
+ if (get_irg_outs_state(current_ir_graph) == outs_consistent)
+ set_irg_outs_inconsistent(current_ir_graph);
+
+ /* -- Check preconditions -- */
+ assert(get_irn_op(call) == op_Call);
+ /* @@@ does not work for InterfaceIII.java after cgana
+ assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
+ assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
+ get_Call_type(call)));
+ */
+ assert(get_type_tpop(get_Call_type(call)) == type_method);
+ if (called_graph == current_ir_graph) {
+ set_optimize(rem_opt);
+ return;
+ }
+
/* -- Build the start tuple: pre_call supplies the start values for
   the procedure and later replaces the Start node of the called graph.
   Post_call is the old Call node and collects the results of the called
   graph. Both will end up being a tuple. -- */
+ post_bl = get_nodes_Block(call);
+ set_irg_current_block(current_ir_graph, post_bl);
+ /* XxMxPxP of Start + parameter of Call */
+ in[0] = new_Jmp();
+ in[1] = get_Call_mem(call);
+ in[2] = get_irg_frame(current_ir_graph);
+ in[3] = get_irg_globals(current_ir_graph);
+ in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
+ pre_call = new_Tuple(5, in);
+ post_call = call;
+
+ /* --
+ The new block gets the ins of the old block, pre_call and all its
+ predecessors and all Phi nodes. -- */
+ part_block(pre_call);
+
+ /* -- Prepare state for dead node elimination -- */
+ /* Visited flags in calling irg must be >= flag in called irg.
+ Else walker and arity computation will not work. */
+ if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
+ set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
+ if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
+ set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
+ /* Set pre_call as new Start node in link field of the start node of
+ calling graph and pre_calls block as new block for the start block
+ of calling graph.
+ Further mark these nodes so that they are not visited by the
+ copying. */
+ set_irn_link(get_irg_start(called_graph), pre_call);
+ set_irn_visited(get_irg_start(called_graph),
+ get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_start_block(called_graph),
+ get_nodes_Block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph),
+ get_irg_visited(current_ir_graph));
+
+ /* Initialize for compaction of in arrays */
+ inc_irg_block_visited(current_ir_graph);
+
+ /* -- Replicate local entities of the called_graph -- */
+ /* copy the entities. */
+ called_frame = get_irg_frame_type(called_graph);
+ for (i = 0; i < get_class_n_members(called_frame); i++) {
+ entity *new_ent, *old_ent;
+ old_ent = get_class_member(called_frame, i);
+ new_ent = copy_entity_own(old_ent, get_cur_frame_type());
+ set_entity_link(old_ent, new_ent);
+ }
+
+ /* visited is > than that of called graph. With this trick visited will
+ remain unchanged so that an outer walker, e.g., searching the call nodes
+ to inline, calling this inline will not visit the inlined nodes. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+
+ /* -- Performing dead node elimination inlines the graph -- */
+ /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
+ entities. */
+ /* @@@ endless loops are not copied!! -- they should be, I think... */
+ irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
+ get_irg_frame_type(called_graph));
+
+ /* Repair called_graph */
+ set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
+ set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
+ set_Block_block_visited(get_irg_start_block(called_graph), 0);
+
+ /* -- Merge the end of the inlined procedure with the call site -- */
+ /* We will turn the old Call node into a Tuple with the following
+ predecessors:
+ -1: Block of Tuple.
+ 0: Phi of all Memories of Return statements.
+ 1: Jmp from new Block that merges the control flow from all exception
+ predecessors of the old end block.
+ 2: Tuple of all arguments.
+ 3: Phi of Exception memories.
+ */
+
+ /* -- Precompute some values -- */
+ end_bl = get_new_node(get_irg_end_block(called_graph));
+ end = get_new_node(get_irg_end(called_graph));
+ arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ n_res = get_method_n_ress(get_Call_type(call));
+
+ res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
+ cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+
+ set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
+
+ /* -- archive keepalives -- */
+ for (i = 0; i < get_irn_arity(end); i++)
+ add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+ /* The new end node will die, but the in array is not on the obstack ... */
+ free_End(end);
+
/* -- Replace the
   Return nodes by Jump nodes. -- */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
+ n_ret++;
+ }
+ }
+ set_irn_in(post_bl, n_ret, cf_pred);
+
/* -- Build a Tuple for all results of the method: the old Call node is
   turned into a tuple. -- */
+ turn_into_tuple(post_call, 4);
+ /* First the Memory-Phi */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_mem(ret);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, mode_M);
+ set_Tuple_pred(call, 0, phi);
+ /* Conserve Phi-list for further inlinings -- but might be optimized */
+ if (get_nodes_Block(phi) == post_bl) {
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
+ }
+ /* Now the real results */
+ if (n_res > 0) {
+ for (j = 0; j < n_res; j++) {
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
+ res_pred[j] = phi;
+ /* Conserve Phi-list for further inlinings -- but might be optimized */
+ if (get_nodes_Block(phi) == post_bl) {
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
+ }
+ }
+ set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
+ } else {
+ set_Tuple_pred(call, 2, new_Bad());
+ }
+ /* Finally the exception control flow. We need to add a Phi node to
+ collect the memory containing the exception objects. Further we need
+ to add another block to get a correct representation of this Phi. To
+ this block we add a Jmp that resolves into the X output of the Call
+ when the Call is turned into a tuple. */
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ cf_pred[n_exc] = ret;
+ n_exc++;
+ }
+ }
+ if (n_exc > 0) {
+ new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
+ set_Tuple_pred(call, 1, new_Jmp());
+ /* The Phi for the memories with the exception objects */
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = skip_Proj(get_irn_n(end_bl, i));
+ if (get_irn_op(ret) == op_Call) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
+ n_exc++;
+ } else if (is_fragile_op(ret)) {
+ /* We rely that all cfops have the memory output at the same position. */
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
+ n_exc++;
+ } else if (get_irn_op(ret) == op_Raise) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
+ n_exc++;
+ }
+ }
+ set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
+ } else {
+ set_Tuple_pred(call, 1, new_Bad());
+ set_Tuple_pred(call, 3, new_Bad());