+ }
+}
+
+/* Copies the graph reachable from current_ir_graph->end to the obstack
+ in current_ir_graph and fixes the environment.
+ Then fixes the fields in current_ir_graph containing nodes of the
+ graph. */
+void
+copy_graph_env () {
+ /* Not all nodes remembered in current_ir_graph might be reachable
+ from the end node. Assure their link is set to NULL, so that
+ we can test whether new nodes have been computed. */
+ set_irn_link(get_irg_frame (current_ir_graph), NULL);
+ set_irn_link(get_irg_globals(current_ir_graph), NULL);
+ set_irn_link(get_irg_args (current_ir_graph), NULL);
+
+ /* we use the block walk flag for removing Bads from Blocks ins. */
+ inc_irg_block_visited(current_ir_graph);
+
+ /* copy the graph */
+ copy_graph();
+
+ /* fix the fields in current_ir_graph */
+ /* NOTE(review): the old End node is freed before get_new_node() is
+ called on it -- this assumes free_End() releases only the in-array
+ and leaves the node's link field (holding the copy) intact; confirm. */
+ free_End(get_irg_end(current_ir_graph));
+ set_irg_end (current_ir_graph, get_new_node(get_irg_end(current_ir_graph)));
+ set_irg_end_block (current_ir_graph, get_new_node(get_irg_end_block(current_ir_graph)));
+ /* Frame, globals and args may be unreachable from End; a still-NULL
+ link (set above) means the walk in copy_graph() did not copy them,
+ so copy them explicitly before looking up the new nodes below. */
+ if (get_irn_link(get_irg_frame(current_ir_graph)) == NULL) {
+ copy_node (get_irg_frame(current_ir_graph), NULL);
+ copy_preds(get_irg_frame(current_ir_graph), NULL);
+ }
+ if (get_irn_link(get_irg_globals(current_ir_graph)) == NULL) {
+ copy_node (get_irg_globals(current_ir_graph), NULL);
+ copy_preds(get_irg_globals(current_ir_graph), NULL);
+ }
+ if (get_irn_link(get_irg_args(current_ir_graph)) == NULL) {
+ copy_node (get_irg_args(current_ir_graph), NULL);
+ copy_preds(get_irg_args(current_ir_graph), NULL);
+ }
+ /* Redirect all anchor fields of the graph to the copies. */
+ set_irg_start (current_ir_graph, get_new_node(get_irg_start(current_ir_graph)));
+
+ set_irg_start_block(current_ir_graph,
+ get_new_node(get_irg_start_block(current_ir_graph)));
+ set_irg_frame (current_ir_graph, get_new_node(get_irg_frame(current_ir_graph)));
+ set_irg_globals(current_ir_graph, get_new_node(get_irg_globals(current_ir_graph)));
+ set_irg_args (current_ir_graph, get_new_node(get_irg_args(current_ir_graph)));
+ /* Bad and Unknown are not reachable from End either; same NULL-link
+ test as for frame/globals/args above. */
+ if (get_irn_link(get_irg_bad(current_ir_graph)) == NULL) {
+ copy_node(get_irg_bad(current_ir_graph), NULL);
+ copy_preds(get_irg_bad(current_ir_graph), NULL);
+ }
+ set_irg_bad(current_ir_graph, get_new_node(get_irg_bad(current_ir_graph)));
+ if (get_irn_link(get_irg_unknown(current_ir_graph)) == NULL) {
+ copy_node(get_irg_unknown(current_ir_graph), NULL);
+ copy_preds(get_irg_unknown(current_ir_graph), NULL);
+ }
+ set_irg_unknown(current_ir_graph, get_new_node(get_irg_unknown(current_ir_graph)));
+}
+
+/* Copies all reachable nodes to a new obstack. Removes bad inputs
+ from block nodes and the corresponding inputs from Phi nodes.
+ Merges single exit blocks with single entry blocks and removes
+ 1-input Phis.
+ Adds all new nodes to a new hash table for cse. Does not
+ perform cse, so the hash table might contain common subexpressions. */
+/* Amroq calls this emigrate(). */
+void
+dead_node_elimination(ir_graph *irg) {
+ ir_graph *rem;
+ struct obstack *graveyard_obst = NULL;
+ struct obstack *rebirth_obst = NULL;
+
+ /* Remember external state of current_ir_graph. */
+ rem = current_ir_graph;
+ current_ir_graph = irg;
+
+ /* Handle graph state */
+ /* The graph must be finished; copying while it is still being
+ constructed would invalidate the builder's state. */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ /* Out edges point into the old node set and would dangle after the
+ copy, so discard them up front. */
+ free_outs(current_ir_graph);
+
+ if (get_optimize() && get_opt_dead_node_elimination()) {
+
+ /* A quiet place, where the old obstack can rest in peace,
+ until it will be cremated. */
+ graveyard_obst = irg->obst;
+
+ /* A new obstack, where the reachable nodes will be copied to. */
+ rebirth_obst = (struct obstack *) xmalloc (sizeof (struct obstack));
+ current_ir_graph->obst = rebirth_obst;
+ obstack_init (current_ir_graph->obst);
+
+ /* We also need a new hash table for cse */
+ /* The old table holds pointers into the graveyard obstack. */
+ del_identities (irg->value_table);
+ irg->value_table = new_identities ();
+
+ /* Copy the graph from the old to the new obstack */
+ copy_graph_env();
+
+ /* Free memory from old unoptimized obstack */
+ obstack_free(graveyard_obst, 0); /* First empty the obstack ... */
+ xfree (graveyard_obst); /* ... then free it. */
+ }
+
+ /* Restore the caller's current_ir_graph. */
+ current_ir_graph = rem;
+}
+
+/**********************************************************************/
+/* Functionality for inlining */
+/**********************************************************************/
+
+/* Copy node for inlining. Copies the node by calling copy_node and
+ then updates the entity if it's a local one. env must be a pointer
+ to the frame type of the procedure. The new entities must be in
+ the link field of the entities. */
+void
+copy_node_inline (ir_node *n, void *env) {
+ ir_node *new;
+ type *frame_tp = (type *)env;
+
+ copy_node(n, NULL);
+ /* Sel nodes that address an entity of the callee's frame type must be
+ redirected to the replicated entity, which the caller stored in the
+ old entity's link field (see inline_method). */
+ if (get_irn_op(n) == op_Sel) {
+ new = get_new_node (n);
+ assert(get_irn_op(new) == op_Sel);
+ if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
+ set_Sel_entity(new, get_entity_link(get_Sel_entity(n)));
+ }
+ }
+}
+
+void inline_method(ir_node *call, ir_graph *called_graph) {
+ ir_node *pre_call;
+ ir_node *post_call, *post_bl;
+ ir_node *in[5];
+ ir_node *end, *end_bl;
+ ir_node **res_pred;
+ ir_node **cf_pred;
+ ir_node *ret, *phi;
+ ir_node *cf_op = NULL, *bl;
+ int arity, n_ret, n_exc, n_res, i, j, rem_opt;
+ type *called_frame;
+
+ if (!get_optimize() || !get_opt_inline()) return;
+ /** Turn off optimizations, this can cause problems when allocating new nodes. **/
+ rem_opt = get_optimize();
+ set_optimize(0);
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_pinned(current_ir_graph) == pinned);
+ assert(get_irg_pinned(called_graph) == pinned);
+ if (get_irg_outs_state(current_ir_graph) == outs_consistent)
+ set_irg_outs_inconsistent(current_ir_graph);
+
+ /** Check preconditions **/
+ assert(get_irn_op(call) == op_Call);
+ /* assert(get_Call_type(call) == get_entity_type(get_irg_ent(called_graph))); */
+ assert(smaller_type(get_entity_type(get_irg_ent(called_graph)),
+ get_Call_type(call)));
+ assert(get_type_tpop(get_Call_type(call)) == type_method);
+ if (called_graph == current_ir_graph) return;
+
+
+ /** Part the Call node into two nodes. Pre_call collects the parameters of
+ the procedure and later replaces the Start node of the called graph.
+ Post_call is the old Call node and collects the results of the called
+ graph. Both will end up being a tuple. **/
+ post_bl = get_nodes_Block(call);
+ set_irg_current_block(current_ir_graph, post_bl);
+ /* XxMxPxP of Start + parameter of Call */
+ in[0] = new_Jmp();
+ in[1] = get_Call_mem(call);
+ in[2] = get_irg_frame(current_ir_graph);
+ in[3] = get_irg_globals(current_ir_graph);
+ in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
+ pre_call = new_Tuple(5, in);
+ post_call = call;
+
+ /** Part the block of the Call node into two blocks.
+ The new block gets the ins of the old block, pre_call and all its
+ predecessors and all Phi nodes. **/
+ part_block(pre_call);
+
+ /** Prepare state for dead node elimination **/
+ /* Visited flags in calling irg must be >= flag in called irg.
+ Else walker and arity computation will not work. */
+ if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
+ set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1); /***/
+ if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
+ set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
+ /* Set pre_call as new Start node in link field of the start node of
+ calling graph and pre_calls block as new block for the start block
+ of calling graph.
+ Further mark these nodes so that they are not visited by the
+ copying. */
+ set_irn_link(get_irg_start(called_graph), pre_call);
+ set_irn_visited(get_irg_start(called_graph),
+ get_irg_visited(current_ir_graph));/***/
+ set_irn_link(get_irg_start_block(called_graph),
+ get_nodes_Block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph),
+ get_irg_visited(current_ir_graph)); /***/
+
+ /* Initialize for compaction of in arrays */
+ inc_irg_block_visited(current_ir_graph);
+
+ /*** Replicate local entities of the called_graph ***/
+ /* copy the entities. */
+ called_frame = get_irg_frame_type(called_graph);
+ for (i = 0; i < get_class_n_members(called_frame); i++) {
+ entity *new_ent, *old_ent;
+ old_ent = get_class_member(called_frame, i);
+ new_ent = copy_entity_own(old_ent, get_cur_frame_type());
+ set_entity_link(old_ent, new_ent);
+ }
+
+ /* visited is > than that of called graph. With this trick visited will
+ remain unchanged so that an outer walker, e.g., searching the call nodes
+ to inline, calling this inline will not visit the inlined nodes. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+
+ /** Performing dead node elimination inlines the graph **/
+ /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
+ entities. */
+ /* @@@ endless loops are not copied!! */
+ irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
+ get_irg_frame_type(called_graph));
+
+ /* Repair called_graph */
+ set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
+ set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
+ set_Block_block_visited(get_irg_start_block(called_graph), 0);
+
+ /*** Merge the end of the inlined procedure with the call site ***/
+ /* We will turn the old Call node into a Tuple with the following
+ predecessors:
+ -1: Block of Tuple.
+ 0: Phi of all Memories of Return statements.
+ 1: Jmp from new Block that merges the control flow from all exception
+ predecessors of the old end block.
+ 2: Tuple of all arguments.
+ 3: Phi of Exception memories.
+ */
+
+ /** Precompute some values **/
+ end_bl = get_new_node(get_irg_end_block(called_graph));
+ end = get_new_node(get_irg_end(called_graph));
+ arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ n_res = get_method_n_ress(get_Call_type(call));
+
+ res_pred = (ir_node **) malloc (n_res * sizeof (ir_node *));
+ cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
+
+ set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
+
+ /** archive keepalives **/
+ for (i = 0; i < get_irn_arity(end); i++)
+ add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+ /* The new end node will die, but the in array is not on the obstack ... */
+ free_End(end);
+
+ /** Collect control flow from Return blocks to post_calls block. Replace
+ Return nodes by Jump nodes. **/
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
+ n_ret++;
+ }
+ }
+ set_irn_in(post_bl, n_ret, cf_pred);
+
+ /** Collect results from Return nodes to post_call. Post_call is
+ turned into a tuple. **/
+ turn_into_tuple(post_call, 4);
+ /* First the Memory-Phi */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_mem(ret);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, mode_M);
+ set_Tuple_pred(call, 0, phi);
+ set_irn_link(phi, get_irn_link(post_bl)); /* Conserve Phi-list for further inlinings */
+ set_irn_link(post_bl, phi);
+ /* Now the real results */
+ if (n_res > 0) {
+ for (j = 0; j < n_res; j++) {
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;