+ }
+
+ /* Now pick the Phis. Here we will keep all! */
+ irn_arity = get_irn_arity(oe);
+ for (i = 0; i < irn_arity; i++) {
+ ka = get_irn_n(oe, i);
+ if ((get_irn_op(ka) == op_Phi)) {
+ if (get_irn_visited(ka) < get_irg_visited(current_ir_graph)) {
+ /* We didn't copy the Phi yet. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+ irg_walk(ka, copy_node, copy_preds, NULL);
+ }
+ add_End_keepalive(ne, get_new_node(ka));
+ }
+ }
+}
+
+/**
+ * Copies the graph reachable from current_ir_graph->end to the obstack
+ * in current_ir_graph and fixes the environment.
+ * Then fixes the fields in current_ir_graph containing nodes of the
+ * graph.
+ *
+ * NOTE(review): assumes copy_graph() leaves each old node's copy in its
+ * link field, retrievable via get_new_node() -- confirm against copy_node().
+ */
+static void
+copy_graph_env (void) {
+ ir_node *old_end;
+ /* Not all nodes remembered in current_ir_graph might be reachable
+ from the end node. Assure their link is set to NULL, so that
+ we can test whether new nodes have been computed. */
+ set_irn_link(get_irg_frame (current_ir_graph), NULL);
+ set_irn_link(get_irg_globals(current_ir_graph), NULL);
+ set_irn_link(get_irg_args (current_ir_graph), NULL);
+
+ /* we use the block walk flag for removing Bads from Blocks ins. */
+ inc_irg_block_visited(current_ir_graph);
+
+ /* copy the graph */
+ copy_graph();
+
+ /* fix the fields in current_ir_graph */
+ old_end = get_irg_end(current_ir_graph);
+ set_irg_end (current_ir_graph, get_new_node(old_end));
+ free_End(old_end);
+ set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
+ /* Anchor nodes whose link is still NULL were not reached from End:
+ copy them (and their predecessors) by hand before re-anchoring. */
+ if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
+ copy_node (get_irg_frame(current_ir_graph), NULL);
+ copy_preds(get_irg_frame(current_ir_graph), NULL);
+ }
+ if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
+ copy_node (get_irg_globals(current_ir_graph), NULL);
+ copy_preds(get_irg_globals(current_ir_graph), NULL);
+ }
+ if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
+ copy_node (get_irg_args(current_ir_graph), NULL);
+ copy_preds(get_irg_args(current_ir_graph), NULL);
+ }
+ set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
+
+ set_irg_start_block(current_ir_graph,
+ get_new_node(get_irg_start_block(current_ir_graph)));
+ set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+ set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+ set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+ if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
+ copy_node(get_irg_bad(current_ir_graph), NULL);
+ copy_preds(get_irg_bad(current_ir_graph), NULL);
+ }
+ set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
+ /* GL removed: we need unknown with mode for analyses.
+ if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
+ copy_node(get_irg_unknown(current_ir_graph), NULL);
+ copy_preds(get_irg_unknown(current_ir_graph), NULL);
+ }
+ set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
+ */
+}
+
+/**
+ * Copies all reachable nodes to a new obstack. Removes bad inputs
+ * from block nodes and the corresponding inputs from Phi nodes.
+ * Merges single exit blocks with single entry blocks and removes
+ * 1-input Phis.
+ * Adds all new nodes to a new hash table for cse. Does not
+ * perform cse, so the hash table might contain common subexpressions.
+ *
+ * Restores current_ir_graph on exit; a no-op (apart from freeing outs
+ * and loop information) when the optimization is globally disabled.
+ */
+/* Amroq calls this emigrate() */
+void
+dead_node_elimination(ir_graph *irg) {
+ ir_graph *rem;
+ struct obstack *graveyard_obst = NULL;
+ struct obstack *rebirth_obst = NULL;
+
+ /* Remember external state of current_ir_graph. */
+ rem = current_ir_graph;
+ current_ir_graph = irg;
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_callee_info_state(current_ir_graph) == irg_callee_info_none);
+ free_outs(current_ir_graph);
+
+ /* @@@ so far we lose the loop information when copying */
+ free_loop_information(current_ir_graph);
+
+ if (get_opt_optimize() && get_opt_dead_node_elimination()) {
+
+ /* A quiet place, where the old obstack can rest in peace,
+ until it will be cremated. */
+ graveyard_obst = irg->obst;
+
+ /* A new obstack, where the reachable nodes will be copied to. */
+ rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
+ current_ir_graph->obst = rebirth_obst;
+ obstack_init (current_ir_graph->obst);
+
+ /* We also need a new hash table for cse */
+ del_identities (irg->value_table);
+ irg->value_table = new_identities ();
+
+ /* Copy the graph from the old to the new obstack */
+ copy_graph_env();
+
+ /* Free memory from old unoptimized obstack */
+ obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
+ xfree (graveyard_obst); /* ... then free it. */
+ }
+
+ current_ir_graph = rem;
+}
+
+/**
+ * Relink bad predecessors of a block and store the old in array to the
+ * link field. This function is called by relink_bad_predecessors().
+ * The array in the link field starts with the block operand at position 0.
+ * If block has bad predecessors, create a new in array without bad preds.
+ * Otherwise leave the in array untouched.
+ */
+static void relink_bad_block_predecessors(ir_node *n, void *env) {
+ ir_node **new_in, *irn;
+ int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
+
+ /* if link field of block is NULL, look for bad predecessors otherwise
+ this is already done */
+ if (get_irn_op(n) == op_Block &&
+ get_irn_link(n) == NULL) {
+
+ /* save old predecessors in link field (position 0 is the block operand)*/
+ set_irn_link(n, (void *)get_irn_in(n));
+
+ /* count predecessors without bad nodes */
+ old_irn_arity = get_irn_arity(n);
+ for (i = 0; i < old_irn_arity; i++)
+ if (!is_Bad(get_irn_n(n, i))) new_irn_arity++;
+
+ /* arity changing: set new predecessors without bad nodes */
+ if (new_irn_arity < old_irn_arity) {
+ /* get new predecessor array without Block predecessor */
+ new_in = NEW_ARR_D (ir_node *, current_ir_graph->obst, (new_irn_arity+1));
+
+ /* set new predecessors in array */
+ new_in[0] = NULL;
+ new_irn_n = 1;
+ /* NOTE(review): the counting loop above visits indices 0..old_irn_arity-1,
+ while this rebuild loop starts at 1, so get_irn_n(n, 0) is never copied
+ into new_in. Looks like an off-by-one -- confirm get_irn_n() index
+ semantics for Block nodes before relying on this. */
+ for (i = 1; i < old_irn_arity; i++) {
+ irn = get_irn_n(n, i);
+ if (!is_Bad(irn)) new_in[new_irn_n++] = irn;
+ }
+ n->in = new_in;
+ } /* ir node has bad predecessors */
+
+ } /* Block is not relinked */
+}
+
+/**
+ * Relinks Bad predecessors from Blocks and Phis, called by walker
+ * remove_bad_predecessors(). If n is a Block, call
+ * relink_bad_block_predecessors(). If n is a Phi node, also call the
+ * relinking function on the Phi's Block. If that block had bad
+ * predecessors, compact the Phi's inputs accordingly.
+ */
+static void relink_bad_predecessors(ir_node *n, void *env) {
+ ir_node *block, **old_in;
+ int i, old_irn_arity, new_irn_arity;
+
+ /* relink bad predecessors of a block */
+ if (get_irn_op(n) == op_Block)
+ relink_bad_block_predecessors(n, env);
+
+ /* If Phi node relink its block and its predecessors */
+ if (get_irn_op(n) == op_Phi) {
+
+ /* Relink predecessors of phi's block */
+ block = get_nodes_Block(n);
+ if (get_irn_link(block) == NULL)
+ relink_bad_block_predecessors(block, env);
+
+ /* The block's link field holds its ORIGINAL in array (block operand
+ at index 0), saved by relink_bad_block_predecessors() above. */
+ old_in = (ir_node **)get_irn_link(block); /* Of Phi's Block */
+ old_irn_arity = ARR_LEN(old_in);
+
+ /* Relink Phi predecessors only if the predecessor count changed */
+ if (old_irn_arity != ARR_LEN(get_irn_in(block))) {
+ /* set new predecessors in array
+ n->in[0] remains the same block */
+ new_irn_arity = 1;
+ /* Keep exactly those Phi inputs whose corresponding control-flow
+ predecessor of the block was not Bad; compact them in place. */
+ for(i = 1; i < old_irn_arity; i++)
+ if (!is_Bad((ir_node *)old_in[i])) n->in[new_irn_arity++] = n->in[i];
+
+ ARR_SETLEN(ir_node *, n->in, new_irn_arity);
+ }
+
+ } /* n is a Phi node */
+}
+
+/**
+ * Removes Bad predecessors from Blocks and the corresponding
+ * inputs to Phi nodes as in dead_node_elimination but without
+ * copying the graph.
+ * On walking up set the link field to NULL, on walking down call
+ * relink_bad_predecessors() (this function stores the old in array
+ * in the link field and sets a new in array if the arity of the
+ * predecessors changes).
+ */
+void remove_bad_predecessors(ir_graph *irg) {
+ irg_walk_graph(irg, init_link, relink_bad_predecessors, NULL);
+}
+
+
+/*--------------------------------------------------------------------*/
+/* Functionality for inlining */
+/*--------------------------------------------------------------------*/
+
+/**
+ * Copy node for inlineing. Updates attributes that change when
+ * inlineing but not for dead node elimination.
+ *
+ * Copies the node via copy_node(). Afterwards, a Sel node whose entity
+ * belongs to the inlined procedure's frame type (passed via env) is
+ * redirected to the replicated entity stored in the entity's link
+ * field, and a Block node is re-assigned to the calling graph.
+ */
+static INLINE void
+copy_node_inline (ir_node *n, void *env) {
+ type *frame_tp = (type *)env;
+
+ copy_node(n, NULL);
+ if (get_irn_op(n) == op_Sel) {
+ ir_node *nn = get_new_node (n);
+ assert(get_irn_op(nn) == op_Sel);
+ /* Sel of a local entity: point the copy at the replicated entity. */
+ if (get_entity_owner(get_Sel_entity(n)) == frame_tp)
+ set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
+ } else if (get_irn_op(n) == op_Block) {
+ /* The copied block belongs to the calling graph now. */
+ get_new_node (n)->attr.block.irg = current_ir_graph;
+ }
+}
+
+
+void inline_method(ir_node *call, ir_graph *called_graph) {
+ ir_node *pre_call;
+ ir_node *post_call, *post_bl;
+ ir_node *in[5];
+ ir_node *end, *end_bl;
+ ir_node **res_pred;
+ ir_node **cf_pred;
+ ir_node *ret, *phi;
+ int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
+ int exc_handling;
+ type *called_frame;
+
+ if ( !(get_irg_inline_property(called_graph) == irg_inline_forced) && (!get_opt_optimize() || !get_opt_inline() ||
+ (get_irg_inline_property(called_graph) == irg_inline_forbidden))) return;
+
+ /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
+ rem_opt = get_opt_optimize();
+ set_optimize(0);
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_pinned(current_ir_graph) == pinned);
+ assert(get_irg_pinned(called_graph) == pinned);
+ if (get_irg_outs_state(current_ir_graph) == outs_consistent)
+ set_irg_outs_inconsistent(current_ir_graph);
+
+ /* -- Check preconditions -- */
+ assert(get_irn_op(call) == op_Call);
+ /* @@@ does not work for InterfaceIII.java after cgana
+ assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph)));
+ assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
+ get_Call_type(call)));
+ */
+ assert(get_type_tpop(get_Call_type(call)) == type_method);
+ if (called_graph == current_ir_graph) {
+ set_optimize(rem_opt);
+ return;
+ }
+
+ /* -- Decide how to handle exception control flow: Is there a handler
+ for the Call node, or do we branch directly to End on an exception?
+ exc_handling: 0 There is a handler.
+ 1 Branches to End.
+ 2 Exception handling not represented in Firm. -- */
+ {
+ ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+ for (proj = (ir_node *)get_irn_link(call); proj; proj = (ir_node *)get_irn_link(proj)) {
+ assert(get_irn_op(proj) == op_Proj);
+ if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
+ if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
+ }
+ if (Mproj) { assert(Xproj); exc_handling = 0; } // Mproj
+ else if (Xproj) { exc_handling = 1; } //!Mproj && Xproj
+ else { exc_handling = 2; } //!Mproj && !Xproj
+ }
+
+
+ /* --
+ the procedure and later replaces the Start node of the called graph.
+ Post_call is the old Call node and collects the results of the called
+ graph. Both will end up being a tuple. -- */
+ post_bl = get_nodes_Block(call);
+ set_irg_current_block(current_ir_graph, post_bl);
+ /* XxMxPxP of Start + parameter of Call */
+ in[0] = new_Jmp();
+ in[1] = get_Call_mem(call);
+ in[2] = get_irg_frame(current_ir_graph);
+ in[3] = get_irg_globals(current_ir_graph);
+ in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
+ pre_call = new_Tuple(5, in);
+ post_call = call;
+
+ /* --
+ The new block gets the ins of the old block, pre_call and all its
+ predecessors and all Phi nodes. -- */
+ part_block(pre_call);
+
+ /* -- Prepare state for dead node elimination -- */
+ /* Visited flags in calling irg must be >= flag in called irg.
+ Else walker and arity computation will not work. */
+ if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
+ set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
+ if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
+ set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
+ /* Set pre_call as new Start node in link field of the start node of
+ calling graph and pre_calls block as new block for the start block
+ of calling graph.
+ Further mark these nodes so that they are not visited by the
+ copying. */
+ set_irn_link(get_irg_start(called_graph), pre_call);
+ set_irn_visited(get_irg_start(called_graph),
+ get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_start_block(called_graph),
+ get_nodes_Block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph),
+ get_irg_visited(current_ir_graph));
+
+ /* Initialize for compaction of in arrays */
+ inc_irg_block_visited(current_ir_graph);
+
+ /* -- Replicate local entities of the called_graph -- */
+ /* copy the entities. */
+ called_frame = get_irg_frame_type(called_graph);
+ for (i = 0; i < get_class_n_members(called_frame); i++) {
+ entity *new_ent, *old_ent;
+ old_ent = get_class_member(called_frame, i);
+ new_ent = copy_entity_own(old_ent, get_cur_frame_type());
+ set_entity_link(old_ent, new_ent);
+ }
+
+ /* visited is > than that of called graph. With this trick visited will
+ remain unchanged so that an outer walker, e.g., searching the call nodes
+ to inline, calling this inline will not visit the inlined nodes. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+
+ /* -- Performing dead node elimination inlines the graph -- */
+ /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
+ entities. */
+ /* @@@ endless loops are not copied!! -- they should be, I think... */
+ irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
+ get_irg_frame_type(called_graph));
+
+ /* Repair called_graph */
+ set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
+ set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
+ set_Block_block_visited(get_irg_start_block(called_graph), 0);
+
+ /* -- Merge the end of the inlined procedure with the call site -- */
+ /* We will turn the old Call node into a Tuple with the following
+ predecessors:
+ -1: Block of Tuple.
+ 0: Phi of all Memories of Return statements.
+ 1: Jmp from new Block that merges the control flow from all exception
+ predecessors of the old end block.
+ 2: Tuple of all arguments.
+ 3: Phi of Exception memories.
+ In case the old Call directly branches to End on an exception we don't
+ need the block merging all exceptions nor the Phi of the exception
+ memories.
+ */
+
+ /* -- Precompute some values -- */
+ end_bl = get_new_node(get_irg_end_block(called_graph));
+ end = get_new_node(get_irg_end(called_graph));
+ arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ n_res = get_method_n_ress(get_Call_type(call));
+
+ res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
+ cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+
+ set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
+
+ /* -- archive keepalives -- */
+ irn_arity = get_irn_arity(end);
+ for (i = 0; i < irn_arity; i++)
+ add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+
+ /* The new end node will die. We need not free as the in array is on the obstack:
+ copy_node only generated 'D' arrays. */
+
+ /* -- Replace Return nodes by Jump nodes. -- */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
+ n_ret++;
+ }
+ }
+ set_irn_in(post_bl, n_ret, cf_pred);
+
+ /* -- Build a Tuple for all results of the method.
+ Add Phi node if there was more than one Return. -- */
+ turn_into_tuple(post_call, 4);
+ /* First the Memory-Phi */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_mem(ret);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, mode_M);
+ set_Tuple_pred(call, 0, phi);
+ /* Conserve Phi-list for further inlinings -- but might be optimized */
+ if (get_nodes_Block(phi) == post_bl) {
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
+ }
+ /* Now the real results */
+ if (n_res > 0) {
+ for (j = 0; j < n_res; j++) {
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;