- ir_node *nn, *block;
- int i, j, irn_arity;
-
- nn = get_new_node(n);
-
- /* printf("\n old node: "); DDMSG2(n);
- printf(" new node: "); DDMSG2(nn);
- printf(" arities: old: %d, new: %d\n", get_irn_arity(n), get_irn_arity(nn)); */
-
- if (is_Block(n)) {
- /* Don't copy Bad nodes. */
- j = 0;
- irn_arity = get_irn_arity(n);
- for (i = 0; i < irn_arity; i++)
- if (! is_Bad(get_irn_n(n, i))) {
- set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
- }
- /* repair the block visited flag from above misuse. Repair it in both
- graphs so that the old one can still be used. */
- set_Block_block_visited(nn, 0);
- set_Block_block_visited(n, 0);
- /* Local optimization could not merge two subsequent blocks if
- in array contained Bads. Now it's possible.
- We don't call optimize_in_place as it requires
- that the fields in ir_graph are set properly. */
- if ((get_opt_control_flow_straightening()) &&
- (get_Block_n_cfgpreds(nn) == 1) &&
- (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
- ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
- if (nn == old) {
- /* Jmp jumps into the block it is in -- deal self cycle. */
- assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
- exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
- } else {
- exchange(nn, old);
- }
- }
- } else if (get_irn_op(n) == op_Phi) {
- /* Don't copy node if corresponding predecessor in block is Bad.
- The Block itself should not be Bad. */
- block = get_nodes_block(n);
- set_irn_n(nn, -1, get_new_node(block));
- j = 0;
- irn_arity = get_irn_arity(n);
- for (i = 0; i < irn_arity; i++)
- if (! is_Bad(get_irn_n(block, i))) {
- set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
- /*if (is_backedge(n, i)) set_backedge(nn, j);*/
- j++;
- }
- /* If the pre walker reached this Phi after the post walker visited the
- block block_visited is > 0. */
- set_Block_block_visited(get_nodes_block(n), 0);
- /* Compacting the Phi's ins might generate Phis with only one
- predecessor. */
- if (get_irn_arity(nn) == 1)
- exchange(nn, get_irn_n(nn, 0));
- } else {
- irn_arity = get_irn_arity(n);
- for (i = -1; i < irn_arity; i++)
- set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
- }
- /* Now the new node is complete. We can add it to the hash table for CSE.
- @@@ inlining aborts if we identify End. Why? */
- if (get_irn_op(nn) != op_End)
- add_identities(current_ir_graph->value_table, nn);
+ ir_node *nn, *block;
+ int i, j, irn_arity;
+
+ nn = get_new_node(n);
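+ /* nn is the copy of n that copy_node() created; here we wire up its predecessors. */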
+
+ if (is_Block(n)) {
+ /* Don't copy Bad nodes. */
+ j = 0;
+ irn_arity = get_irn_arity(n);
+ for (i = 0; i < irn_arity; i++) {
+ if (! is_Bad(get_irn_n(n, i))) {
+ set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
+ }
+ }
+ /* Repair the block visited flag misused above. Repair it in both
+ graphs so that the old one can still be used. */
+ set_Block_block_visited(nn, 0);
+ set_Block_block_visited(n, 0);
+ /* Local optimization could not merge two subsequent blocks if the
+ in array contained Bads. Now it's possible.
+ We don't call optimize_in_place as it requires
+ that the fields in ir_graph are set properly. */
+ if ((get_opt_control_flow_straightening()) &&
+ (get_Block_n_cfgpreds(nn) == 1) &&
+ (get_irn_op(get_Block_cfgpred(nn, 0)) == op_Jmp)) {
+ ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
+ if (nn == old) {
+ /* Jmp jumps into the block it is in -- deal with this self cycle. */
+ assert(is_Bad(get_new_node(get_irg_bad(current_ir_graph))));
+ exchange(nn, get_new_node(get_irg_bad(current_ir_graph)));
+ } else {
+ exchange(nn, old);
+ }
+ }
+ } else if (get_irn_op(n) == op_Phi) {
+ /* Don't copy an input if the corresponding predecessor in the block is Bad.
+ The Block itself should not be Bad. */
+ block = get_nodes_block(n);
+ set_irn_n(nn, -1, get_new_node(block));
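+ /* Input position -1 holds the block a node belongs to. */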
+ j = 0;
+ irn_arity = get_irn_arity(n);
+ for (i = 0; i < irn_arity; i++) {
+ if (! is_Bad(get_irn_n(block, i))) {
+ set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
+ /*if (is_backedge(n, i)) set_backedge(nn, j);*/
+ j++;
+ }
+ }
+ /* If the pre walker reached this Phi after the post walker visited the
+ block, block_visited is > 0. */
+ set_Block_block_visited(get_nodes_block(n), 0);
+ /* Compacting the Phi's ins might generate Phis with only one
+ predecessor. */
+ if (get_irn_arity(nn) == 1)
+ exchange(nn, get_irn_n(nn, 0));
+ } else {
+ irn_arity = get_irn_arity(n);
+ for (i = -1; i < irn_arity; i++)
+ set_irn_n (nn, i, get_new_node(get_irn_n(n, i)));
+ }
+ /* Now the new node is complete. We can add it to the hash table for CSE.
+ @@@ inlining aborts if we identify End. Why? */
+ if (get_irn_op(nn) != op_End)
+ add_identities(current_ir_graph->value_table, nn);
- ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
- ir_node *ka; /* keep alive */
- int i, irn_arity;
- unsigned long vfl;
-
- /* Some nodes must be copied by hand, sigh */
- vfl = get_irg_visited(irg);
- set_irg_visited(irg, vfl + 1);
-
- oe = get_irg_end(irg);
- mark_irn_visited(oe);
- /* copy the end node by hand, allocate dynamic in array! */
- ne = new_ir_node(get_irn_dbg_info(oe),
- irg,
- NULL,
- op_End,
- mode_X,
- -1,
- NULL);
- /* Copy the attributes. Well, there might be some in the future... */
- copy_node_attr(oe, ne);
- set_new_node(oe, ne);
-
- /* copy the Bad node */
- ob = get_irg_bad(irg);
- mark_irn_visited(ob);
- nb = new_ir_node(get_irn_dbg_info(ob),
- irg,
- NULL,
- op_Bad,
- mode_T,
- 0,
- NULL);
- copy_node_attr(ob, nb);
- set_new_node(ob, nb);
-
- /* copy the NoMem node */
- om = get_irg_no_mem(irg);
- mark_irn_visited(om);
- nm = new_ir_node(get_irn_dbg_info(om),
- irg,
- NULL,
- op_NoMem,
- mode_M,
- 0,
- NULL);
- copy_node_attr(om, nm);
- set_new_node(om, nm);
-
- /* copy the live nodes */
- set_irg_visited(irg, vfl);
- irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
-
- /* Note: from yet, the visited flag of the graph is equal to vfl + 1 */
-
- /* visit the anchors as well */
- for (i = anchor_max - 1; i >= 0; --i) {
- ir_node *n = irg->anchors[i];
-
- if (n && (get_irn_visited(n) <= vfl)) {
- set_irg_visited(irg, vfl);
- irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
- }
- }
-
- /* copy_preds for the end node ... */
- set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
-
- /*- ... and now the keep alives. -*/
- /* First pick the not marked block nodes and walk them. We must pick these
- first as else we will oversee blocks reachable from Phis. */
- irn_arity = get_End_n_keepalives(oe);
- for (i = 0; i < irn_arity; i++) {
- ka = get_End_keepalive(oe, i);
- if (is_Block(ka)) {
- if (get_irn_visited(ka) <= vfl) {
- /* We must keep the block alive and copy everything reachable */
- set_irg_visited(irg, vfl);
- irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
- }
- add_End_keepalive(ne, get_new_node(ka));
- }
- }
-
- /* Now pick other nodes. Here we will keep all! */
- irn_arity = get_End_n_keepalives(oe);
- for (i = 0; i < irn_arity; i++) {
- ka = get_End_keepalive(oe, i);
- if (!is_Block(ka)) {
- if (get_irn_visited(ka) <= vfl) {
- /* We didn't copy the node yet. */
- set_irg_visited(irg, vfl);
- irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
- }
- add_End_keepalive(ne, get_new_node(ka));
- }
- }
-
- /* start block sometimes only reached after keep alives */
- set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
- set_nodes_block(nm, get_new_node(get_nodes_block(om)));
+ ir_node *oe, *ne, *ob, *nb, *om, *nm; /* old end, new end, old bad, new bad, old NoMem, new NoMem */
+ ir_node *ka; /* keep alive */
+ int i, irn_arity;
+ unsigned long vfl;
+
+ /* Some nodes must be copied by hand, sigh */
+ vfl = get_irg_visited(irg);
+ set_irg_visited(irg, vfl + 1);
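+ /* Nodes marked now carry visited = vfl + 1 and will be skipped by the walk below. */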
+
+ oe = get_irg_end(irg);
+ mark_irn_visited(oe);
+ /* copy the End node by hand; arity -1 allocates a dynamic in array! */
+ ne = new_ir_node(get_irn_dbg_info(oe),
+ irg,
+ NULL,
+ op_End,
+ mode_X,
+ -1,
+ NULL);
+ /* Copy the attributes. Well, there might be some in the future... */
+ copy_node_attr(oe, ne);
+ set_new_node(oe, ne);
+
+ /* copy the Bad node */
+ ob = get_irg_bad(irg);
+ mark_irn_visited(ob);
+ nb = new_ir_node(get_irn_dbg_info(ob),
+ irg,
+ NULL,
+ op_Bad,
+ mode_T,
+ 0,
+ NULL);
+ copy_node_attr(ob, nb);
+ set_new_node(ob, nb);
+
+ /* copy the NoMem node */
+ om = get_irg_no_mem(irg);
+ mark_irn_visited(om);
+ nm = new_ir_node(get_irn_dbg_info(om),
+ irg,
+ NULL,
+ op_NoMem,
+ mode_M,
+ 0,
+ NULL);
+ copy_node_attr(om, nm);
+ set_new_node(om, nm);
+
+ /* copy the live nodes */
+ set_irg_visited(irg, vfl);
+ irg_walk(get_nodes_block(oe), copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
+
+ /* Note: from now on, the visited flag of the graph is equal to vfl + 1 */
+
+ /* visit the anchors as well */
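+ /* They need not be reachable from the End node, so walk any that the main pass missed. */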
+ for (i = anchor_max - 1; i >= 0; --i) {
+ ir_node *n = irg->anchors[i];
+
+ if (n && (get_irn_visited(n) <= vfl)) {
+ set_irg_visited(irg, vfl);
+ irg_walk(n, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
+ }
+ }
+
+ /* copy_preds for the end node ... */
+ set_nodes_block(ne, get_new_node(get_nodes_block(oe)));
+
+ /*- ... and now the keep alives. -*/
+ /* First pick the unmarked block nodes and walk them. We must pick these
+ first, else we will overlook blocks reachable only from Phis. */
+ irn_arity = get_End_n_keepalives(oe);
+ for (i = 0; i < irn_arity; i++) {
+ ka = get_End_keepalive(oe, i);
+ if (is_Block(ka)) {
+ if (get_irn_visited(ka) <= vfl) {
+ /* We must keep the block alive and copy everything reachable */
+ set_irg_visited(irg, vfl);
+ irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
+ }
+ add_End_keepalive(ne, get_new_node(ka));
+ }
+ }
+
+ /* Now pick other nodes. Here we will keep all! */
+ irn_arity = get_End_n_keepalives(oe);
+ for (i = 0; i < irn_arity; i++) {
+ ka = get_End_keepalive(oe, i);
+ if (!is_Block(ka)) {
+ if (get_irn_visited(ka) <= vfl) {
+ /* We didn't copy the node yet. */
+ set_irg_visited(irg, vfl);
+ irg_walk(ka, copy_node, copy_preds, INT_TO_PTR(copy_node_nr));
+ }
+ add_End_keepalive(ne, get_new_node(ka));
+ }
+ }
+
+ /* the start block is sometimes reached only after processing the keep alives */
+ set_nodes_block(nb, get_new_node(get_nodes_block(ob)));
+ set_nodes_block(nm, get_new_node(get_nodes_block(om)));
- ir_node *pre_call;
- ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max];
- ir_node *end, *end_bl;
- ir_node **res_pred;
- ir_node **cf_pred;
- ir_node *ret, *phi;
- int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
- int exc_handling;
- ir_type *called_frame;
- irg_inline_property prop = get_irg_inline_property(called_graph);
-
- if ( (prop < irg_inline_forced) &&
- (!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden))) return 0;
-
- /* Do not inline variadic functions. */
- if (get_method_variadicity(get_entity_type(get_irg_entity(called_graph))) == variadicity_variadic)
- return 0;
-
- assert(get_method_n_params(get_entity_type(get_irg_entity(called_graph))) ==
- get_method_n_params(get_Call_type(call)));
-
- /*
- * currently, we cannot inline two cases:
- * - call with compound arguments
- * - graphs that take the address of a parameter
- */
- if (! can_inline(call, called_graph))
- return 0;
-
- /* -- Turn off optimizations, this can cause problems when allocating new nodes. -- */
- rem_opt = get_opt_optimize();
- set_optimize(0);
-
- /* Handle graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- assert(get_irg_pinned(current_ir_graph) == op_pin_state_pinned);
- assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
- set_irg_outs_inconsistent(current_ir_graph);
- set_irg_extblk_inconsistent(current_ir_graph);
- set_irg_doms_inconsistent(current_ir_graph);
- set_irg_loopinfo_inconsistent(current_ir_graph);
- set_irg_callee_info_state(current_ir_graph, irg_callee_info_inconsistent);
-
- /* -- Check preconditions -- */
- assert(is_Call(call));
- /* @@@ does not work for InterfaceIII.java after cgana
- assert(get_Call_type(call) == get_entity_type(get_irg_entity(called_graph)));
- assert(smaller_type(get_entity_type(get_irg_entity(called_graph)),
- get_Call_type(call)));
- */
- if (called_graph == current_ir_graph) {
- set_optimize(rem_opt);
- return 0;
- }
-
- /* here we know we WILL inline, so inform the statistics */
- hook_inline(call, called_graph);
-
- /* -- Decide how to handle exception control flow: Is there a handler
- for the Call node, or do we branch directly to End on an exception?
- exc_handling:
- 0 There is a handler.
- 1 Branches to End.
- 2 Exception handling not represented in Firm. -- */
- {
- ir_node *proj, *Mproj = NULL, *Xproj = NULL;
- for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
- assert(is_Proj(proj));
- if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
- if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
- }
- if (Mproj) { assert(Xproj); exc_handling = 0; } /* Mproj */
- else if (Xproj) { exc_handling = 1; } /* !Mproj && Xproj */
- else { exc_handling = 2; } /* !Mproj && !Xproj */
- }
-
-
- /* --
- the procedure and later replaces the Start node of the called graph.
- Post_call is the old Call node and collects the results of the called
- graph. Both will end up being a tuple. -- */
- post_bl = get_nodes_block(call);
- set_irg_current_block(current_ir_graph, post_bl);
- /* XxMxPxPxPxT of Start + parameter of Call */
- in[pn_Start_X_initial_exec] = new_Jmp();
- in[pn_Start_M] = get_Call_mem(call);
- in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph);
- in[pn_Start_P_globals] = get_irg_globals(current_ir_graph);
- in[pn_Start_P_tls] = get_irg_tls(current_ir_graph);
- in[pn_Start_T_args] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
- /* in[pn_Start_P_value_arg_base] = ??? */
- assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
- pre_call = new_Tuple(pn_Start_max - 1, in);
- post_call = call;
-
- /* --
- The new block gets the ins of the old block, pre_call and all its
- predecessors and all Phi nodes. -- */
- part_block(pre_call);
-
- /* -- Prepare state for dead node elimination -- */
- /* Visited flags in calling irg must be >= flag in called irg.
- Else walker and arity computation will not work. */
- if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
- set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
- if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
- set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
- /* Set pre_call as new Start node in link field of the start node of
- calling graph and pre_calls block as new block for the start block
- of calling graph.
- Further mark these nodes so that they are not visited by the
- copying. */
- set_irn_link(get_irg_start(called_graph), pre_call);
- set_irn_visited(get_irg_start(called_graph), get_irg_visited(current_ir_graph));
- set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
- set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(current_ir_graph));
- set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
- set_irn_visited(get_irg_bad(called_graph), get_irg_visited(current_ir_graph));
-
- /* Initialize for compaction of in arrays */
- inc_irg_block_visited(current_ir_graph);
-
- /* -- Replicate local entities of the called_graph -- */
- /* copy the entities. */
- called_frame = get_irg_frame_type(called_graph);
- for (i = 0; i < get_class_n_members(called_frame); i++) {
- ir_entity *new_ent, *old_ent;
- old_ent = get_class_member(called_frame, i);
- new_ent = copy_entity_own(old_ent, get_cur_frame_type());
- set_entity_link(old_ent, new_ent);
- }
-
- /* visited is > than that of called graph. With this trick visited will
- remain unchanged so that an outer walker, e.g., searching the call nodes
- to inline, calling this inline will not visit the inlined nodes. */
- set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
-
- /* -- Performing dead node elimination inlines the graph -- */
- /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
- entities. */
- /* @@@ endless loops are not copied!! -- they should be, I think... */
- irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
- get_irg_frame_type(called_graph));
-
- /* Repair called_graph */
- set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
- set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
- set_Block_block_visited(get_irg_start_block(called_graph), 0);
-
- /* -- Merge the end of the inlined procedure with the call site -- */
- /* We will turn the old Call node into a Tuple with the following
- predecessors:
- -1: Block of Tuple.
- 0: Phi of all Memories of Return statements.
- 1: Jmp from new Block that merges the control flow from all exception
- predecessors of the old end block.
- 2: Tuple of all arguments.
- 3: Phi of Exception memories.
- In case the old Call directly branches to End on an exception we don't
- need the block merging all exceptions nor the Phi of the exception
- memories.
- */
-
- /* -- Precompute some values -- */
- end_bl = get_new_node(get_irg_end_block(called_graph));
- end = get_new_node(get_irg_end(called_graph));
- arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
- n_res = get_method_n_ress(get_Call_type(call));
-
- res_pred = xmalloc (n_res * sizeof(*res_pred));
- cf_pred = xmalloc (arity * sizeof(*res_pred));
-
- set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
-
- /* -- archive keepalives -- */
- irn_arity = get_irn_arity(end);
- for (i = 0; i < irn_arity; i++)
- add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
-
- /* The new end node will die. We need not free as the in array is on the obstack:
- copy_node() only generated 'D' arrays. */
-
- /* -- Replace Return nodes by Jump nodes. -- */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_block(ret));
- n_ret++;
- }
- }
- set_irn_in(post_bl, n_ret, cf_pred);
-
- /* -- Build a Tuple for all results of the method.
- Add Phi node if there was more than one Return. -- */
- turn_into_tuple(post_call, 4);
- /* First the Memory-Phi */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_mem(ret);
- n_ret++;
- }
- }
- phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M_regular, phi);
- /* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- /* Now the real results */
- if (n_res > 0) {
- for (j = 0; j < n_res; j++) {
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
- }
- if (n_ret > 0)
- phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
- else
- phi = new_Bad();
- res_pred[j] = phi;
- /* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- }
- set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
- } else {
- set_Tuple_pred(call, pn_Call_T_result, new_Bad());
- }
- /* Finally the exception control flow.
- We have two (three) possible situations:
- First if the Call branches to an exception handler: We need to add a Phi node to
- collect the memory containing the exception objects. Further we need
- to add another block to get a correct representation of this Phi. To
- this block we add a Jmp that resolves into the X output of the Call
- when the Call is turned into a tuple.
- Second the Call branches to End, the exception is not handled. Just
- add all inlined exception branches to the End node.
- Third: there is no Exception edge at all. Handle as case two. */
- if (exc_handling == 0) {
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
- set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
- /* The Phi for the memories with the exception objects */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (is_Call(ret)) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
- n_exc++;
- }
- }
- set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
- } else {
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- }
- } else {
- ir_node *main_end_bl;
- int main_end_bl_arity;
- ir_node **end_preds;
-
- /* assert(exc_handling == 1 || no exceptions. ) */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
-
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- main_end_bl = get_irg_end_block(current_ir_graph);
- main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
-
- for (i = 0; i < main_end_bl_arity; ++i)
- end_preds[i] = get_irn_n(main_end_bl, i);
- for (i = 0; i < n_exc; ++i)
- end_preds[main_end_bl_arity + i] = cf_pred[i];
- set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- free(end_preds);
- }
- free(res_pred);
- free(cf_pred);
-
- /* -- Turn CSE back on. -- */
- set_optimize(rem_opt);
-
- return 1;
+ ir_node *pre_call;
+ ir_node *post_call, *post_bl;
+ ir_node *in[pn_Start_max];
+ ir_node *end, *end_bl;
+ ir_node **res_pred;
+ ir_node **cf_pred;
+ ir_node *ret, *phi;
+ int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
+ int exc_handling;
+ ir_type *called_frame;
+ irg_inline_property prop = get_irg_inline_property(called_graph);
+
+ if ( (prop < irg_inline_forced) &&
+ (!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden))) return 0;
+
+ /* Do not inline variadic functions. */
+ if (get_method_variadicity(get_entity_type(get_irg_entity(called_graph))) == variadicity_variadic)
+ return 0;
+
+ assert(get_method_n_params(get_entity_type(get_irg_entity(called_graph))) ==
+ get_method_n_params(get_Call_type(call)));
+
+ /*
+ * currently, we cannot inline two cases:
+ * - call with compound arguments
+ * - graphs that take the address of a parameter
+ */
+ if (! can_inline(call, called_graph))
+ return 0;
+
+ /* -- Turn off optimizations: they can cause problems when allocating new nodes. -- */
+ rem_opt = get_opt_optimize();
+ set_optimize(0);
+
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ assert(get_irg_pinned(current_ir_graph) == op_pin_state_pinned);
+ assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
+ set_irg_outs_inconsistent(current_ir_graph);
+ set_irg_extblk_inconsistent(current_ir_graph);
+ set_irg_doms_inconsistent(current_ir_graph);
+ set_irg_loopinfo_inconsistent(current_ir_graph);
+ set_irg_callee_info_state(current_ir_graph, irg_callee_info_inconsistent);
+
+ /* -- Check preconditions -- */
+ assert(is_Call(call));
+ /* @@@ does not work for InterfaceIII.java after cgana
+ assert(get_Call_type(call) == get_entity_type(get_irg_entity(called_graph)));
+ assert(smaller_type(get_entity_type(get_irg_entity(called_graph)),
+ get_Call_type(call)));
+ */
+ if (called_graph == current_ir_graph) {
+ set_optimize(rem_opt);
+ return 0;
+ }
+
+ /* here we know we WILL inline, so inform the statistics */
+ hook_inline(call, called_graph);
+
+ /* -- Decide how to handle exception control flow: Is there a handler
+ for the Call node, or do we branch directly to End on an exception?
+ exc_handling:
+ 0 There is a handler.
+ 1 Branches to End.
+ 2 Exception handling not represented in Firm. -- */
+ {
+ ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+ for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
+ assert(is_Proj(proj));
+ if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
+ if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
+ }
+ if (Mproj) { assert(Xproj); exc_handling = 0; } /* Mproj */
+ else if (Xproj) { exc_handling = 1; } /* !Mproj && Xproj */
+ else { exc_handling = 2; } /* !Mproj && !Xproj */
+ }
+
+ /* --
+ Pre_call is a Tuple that collects the parameters of the procedure and
+ later replaces the Start node of the called graph. Post_call is the
+ old Call node and collects the results of the called graph. Both will
+ end up being a tuple. -- */
+ post_bl = get_nodes_block(call);
+ set_irg_current_block(current_ir_graph, post_bl);
+ /* XxMxPxPxPxT of Start + parameter of Call */
+ in[pn_Start_X_initial_exec] = new_Jmp();
+ in[pn_Start_M] = get_Call_mem(call);
+ in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph);
+ in[pn_Start_P_globals] = get_irg_globals(current_ir_graph);
+ in[pn_Start_P_tls] = get_irg_tls(current_ir_graph);
+ in[pn_Start_T_args] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
+ /* in[pn_Start_P_value_arg_base] = ??? */
+ assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
+ pre_call = new_Tuple(pn_Start_max - 1, in);
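+ /* This Tuple stands in for the called graph's Start node; Projs that hung off Start will pick their values out of it. */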
+ post_call = call;
+
+ /* --
+ The new block gets the ins of the old block; pre_call with all its
+ predecessors, and all Phi nodes, move into it. -- */
+ part_block(pre_call);
+
+ /* -- Prepare state for dead node elimination -- */
+ /* Visited flags in calling irg must be >= flag in called irg.
+ Else walker and arity computation will not work. */
+ if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
+ set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
+ if (get_irg_block_visited(current_ir_graph)< get_irg_block_visited(called_graph))
+ set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
+ /* Set pre_call as the new Start node in the link field of the start node
+ of the called graph, and pre_call's block as the new block for the start
+ block of the called graph.
+ Further mark these nodes so that they are not visited by the
+ copying. */
+ set_irn_link(get_irg_start(called_graph), pre_call);
+ set_irn_visited(get_irg_start(called_graph), get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
+ set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(current_ir_graph));
+ set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
+ set_irn_visited(get_irg_bad(called_graph), get_irg_visited(current_ir_graph));
+
+ /* Initialize for compaction of in arrays */
+ inc_irg_block_visited(current_ir_graph);
+
+ /* -- Replicate local entities of the called_graph -- */
+ /* copy the entities. */
+ called_frame = get_irg_frame_type(called_graph);
+ for (i = 0; i < get_class_n_members(called_frame); i++) {
+ ir_entity *new_ent, *old_ent;
+ old_ent = get_class_member(called_frame, i);
+ new_ent = copy_entity_own(old_ent, get_cur_frame_type());
+ set_entity_link(old_ent, new_ent);
+ }
+
+ /* visited is now greater than that of the called graph. With this trick
+ visited will remain unchanged, so an outer walker, e.g. one searching for
+ call nodes to inline, that invokes this inlining will not visit the
+ inlined nodes. */
+ set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
+
+ /* -- Performing dead node elimination inlines the graph -- */
+ /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
+ entities. */
+ /* @@@ endless loops are not copied!! -- they should be, I think... */
+ irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
+ get_irg_frame_type(called_graph));
+
+ /* Repair called_graph */
+ set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
+ set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
+ set_Block_block_visited(get_irg_start_block(called_graph), 0);
+
+ /* -- Merge the end of the inlined procedure with the call site -- */
+ /* We will turn the old Call node into a Tuple with the following
+ predecessors:
+ -1: Block of Tuple.
+ 0: Phi of all Memories of Return statements.
+ 1: Jmp from new Block that merges the control flow from all exception
+ predecessors of the old end block.
+ 2: Tuple of all arguments.
+ 3: Phi of Exception memories.
+ In case the old Call directly branches to End on an exception we don't
+ need the block merging all exceptions nor the Phi of the exception
+ memories.
+ */
+
+ /* -- Precompute some values -- */
+ end_bl = get_new_node(get_irg_end_block(called_graph));
+ end = get_new_node(get_irg_end(called_graph));
+ arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ n_res = get_method_n_ress(get_Call_type(call));
+
+ res_pred = xmalloc (n_res * sizeof(*res_pred));
+ cf_pred = xmalloc (arity * sizeof(*cf_pred));
+
+ set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
+
+ /* -- archive keepalives: carry them over to the caller's End node -- */
+ irn_arity = get_irn_arity(end);
+ for (i = 0; i < irn_arity; i++)
+ add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+
+ /* The new end node will die. We need not free as the in array is on the obstack:
+ copy_node() only generated 'D' arrays. */
+
+ /* -- Replace Return nodes by Jump nodes. -- */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (is_Return(ret)) {
+ cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_block(ret));
+ n_ret++;
+ }
+ }
+ set_irn_in(post_bl, n_ret, cf_pred);
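+ /* post_bl now has one Jmp predecessor per Return of the inlined graph. */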
+
+ /* -- Build a Tuple for all results of the method.
+ Add Phi node if there was more than one Return. -- */
+ turn_into_tuple(post_call, 4);
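+ /* Four predecessors: one for each pn_Call_* projection filled in below. */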
+ /* First the Memory-Phi */
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (is_Return(ret)) {
+ cf_pred[n_ret] = get_Return_mem(ret);
+ n_ret++;
+ }
+ }
+ phi = new_Phi(n_ret, cf_pred, mode_M);
+ set_Tuple_pred(call, pn_Call_M_regular, phi);
+ /* Conserve Phi-list for further inlinings -- but might be optimized */
+ if (get_nodes_block(phi) == post_bl) {
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
+ }
+ /* Now the real results */
+ if (n_res > 0) {
+ for (j = 0; j < n_res; j++) {
+ n_ret = 0;
+ for (i = 0; i < arity; i++) {
+ ret = get_irn_n(end_bl, i);
+ if (get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
+ }
+ if (n_ret > 0)
+ phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
+ else
+ phi = new_Bad();
+ res_pred[j] = phi;
+ /* Conserve Phi-list for further inlinings -- but might be optimized */
+ if (get_nodes_block(phi) == post_bl) {
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
+ }
+ }
+ set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
+ } else {
+ set_Tuple_pred(call, pn_Call_T_result, new_Bad());
+ }
+ /* Finally the exception control flow.
+ We have two (three) possible situations:
+ First, the Call branches to an exception handler: we need to add a Phi node to
+ collect the memory containing the exception objects. Further we need
+ to add another block to get a correct representation of this Phi. To
+ this block we add a Jmp that resolves into the X output of the Call
+ when the Call is turned into a tuple.
+ Second, the Call branches to End; the exception is not handled. Just
+ add all inlined exception branches to the End node.
+ Third, there is no Exception edge at all. Handle as case two. */
+ if (exc_handling == 0) {
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = get_irn_n(end_bl, i);
+ if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ cf_pred[n_exc] = ret;
+ n_exc++;
+ }
+ }
+ if (n_exc > 0) {
+ new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
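+ /* The Jmp below is created inside this new block and becomes the X output of the Call. */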
+ set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
+ /* The Phi for the memories with the exception objects */
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret;
+ ret = skip_Proj(get_irn_n(end_bl, i));
+ if (is_Call(ret)) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 3);
+ n_exc++;
+ } else if (is_fragile_op(ret)) {
+ /* We rely on all fragile ops having the memory output at the same position. */
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
+ n_exc++;
+ } else if (get_irn_op(ret) == op_Raise) {
+ cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
+ n_exc++;
+ }
+ }
+ set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
+ } else {
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
+ }
+ } else {
+ ir_node *main_end_bl;
+ int main_end_bl_arity;
+ ir_node **end_preds;
+
+ /* assert(exc_handling == 1 || no exceptions. ) */
+ n_exc = 0;
+ for (i = 0; i < arity; i++) {
+ ir_node *ret = get_irn_n(end_bl, i);
+
+ if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+ cf_pred[n_exc] = ret;
+ n_exc++;
+ }
+ }
+ main_end_bl = get_irg_end_block(current_ir_graph);
+ main_end_bl_arity = get_irn_arity(main_end_bl);
+ end_preds = xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+
+ for (i = 0; i < main_end_bl_arity; ++i)
+ end_preds[i] = get_irn_n(main_end_bl, i);
+ for (i = 0; i < n_exc; ++i)
+ end_preds[main_end_bl_arity + i] = cf_pred[i];
+ set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
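+ /* All inlined exception control flow now ends directly in the caller's end block. */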
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
+ free(end_preds);
+ }
+ free(res_pred);
+ free(cf_pred);
+
+ /* -- Turn the optimizations back on. -- */
+ set_optimize(rem_opt);
+
+ return 1;
- inline_irg_env *env;
- ir_graph *irg;
- int i, n_irgs;
- ir_graph *rem;
- int did_inline;
- wenv_t wenv;
- call_entry *entry, *tail;
- const call_entry *centry;
- struct obstack obst;
- DEBUG_ONLY(firm_dbg_module_t *dbg;)
-
- if (!(get_opt_optimize() && get_opt_inline())) return;
-
- FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
- rem = current_ir_graph;
- obstack_init(&obst);
-
- /* extend all irgs by a temporary data structure for inlining. */
- n_irgs = get_irp_n_irgs();
- for (i = 0; i < n_irgs; ++i)
- set_irg_link(get_irp_irg(i), alloc_inline_irg_env(&obst));
-
- /* Precompute information in temporary data structure. */
- wenv.obst = &obst;
- wenv.ignore_runtime = ignore_runtime;
- for (i = 0; i < n_irgs; ++i) {
- ir_graph *irg = get_irp_irg(i);
-
- assert(get_irg_phase_state(irg) != phase_building);
- free_callee_info(irg);
-
- wenv.x = get_irg_link(irg);
- irg_walk_graph(irg, NULL, collect_calls2, &wenv);
- }
-
- /* -- and now inline. -- */
-
- /* Inline leaves recursively -- we might construct new leaves. */
- do {
- did_inline = 0;
-
- for (i = 0; i < n_irgs; ++i) {
- ir_node *call;
- int phiproj_computed = 0;
-
- current_ir_graph = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
-
- tail = NULL;
- for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
-
- if (env->n_nodes > maxsize) break;
-
- call = entry->call;
- callee = entry->callee;
-
- if (is_leave(callee) && is_smaller(callee, leavesize)) {
- if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
- }
- did_inline = inline_method(call, callee);
-
- if (did_inline) {
- /* Do some statistics */
- inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
-
- env->got_inline = 1;
- --env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- --callee_env->n_callers;
-
- /* remove this call from the list */
- if (tail != NULL)
- tail->next = entry->next;
- else
- env->call_head = entry->next;
- continue;
- }
- }
- tail = entry;
- }
- env->call_tail = tail;
- }
- } while (did_inline);
-
- /* inline other small functions. */
- for (i = 0; i < n_irgs; ++i) {
- ir_node *call;
- int phiproj_computed = 0;
-
- current_ir_graph = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
-
- /* note that the list of possible calls is updated during the process */
- tail = NULL;
- for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
-
- call = entry->call;
- callee = entry->callee;
-
- if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
- (get_irg_inline_property(callee) >= irg_inline_forced))) {
- if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
- }
- if (inline_method(call, callee)) {
- inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
-
- /* callee was inline. Append it's call list. */
- env->got_inline = 1;
- --env->n_call_nodes;
- append_call_list(&obst, env, callee_env->call_head);
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- --callee_env->n_callers;
-
- /* after we have inlined callee, all called methods inside callee
- are now called once more */
- for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
- inline_irg_env *penv = get_irg_link(centry->callee);
- ++penv->n_callers;
- }
-
- /* remove this call from the list */
- if (tail != NULL)
- tail->next = entry->next;
- else
- env->call_head = entry->next;
- continue;
- }
- }
- tail = entry;
- }
- env->call_tail = tail;
- }
-
- for (i = 0; i < n_irgs; ++i) {
- irg = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(irg);
-
- if (env->got_inline) {
- /* this irg got calls inlined */
- set_irg_outs_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
-
- optimize_graph_df(irg);
- optimize_cf(irg);
- }
- if (env->got_inline || (env->n_callers_orig != env->n_callers))
- DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
- env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
- env->n_callers_orig, env->n_callers,
- get_entity_name(get_irg_entity(irg))));
- }
-
- obstack_free(&obst, NULL);
- current_ir_graph = rem;
+ inline_irg_env *env;
+ ir_graph *irg;
+ int i, n_irgs;
+ ir_graph *rem;
+ int did_inline;
+ wenv_t wenv;
+ call_entry *entry, *tail;
+ const call_entry *centry;
+ struct obstack obst;
+ DEBUG_ONLY(firm_dbg_module_t *dbg;)
+
+ if (!(get_opt_optimize() && get_opt_inline())) return;
+
+ FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
+ rem = current_ir_graph;
+ obstack_init(&obst);
+
+ /* extend all irgs by a temporary data structure for inlining. */
+ n_irgs = get_irp_n_irgs();
+ for (i = 0; i < n_irgs; ++i)
+ set_irg_link(get_irp_irg(i), alloc_inline_irg_env(&obst));
+
+ /* Precompute information in temporary data structure. */
+ wenv.obst = &obst;
+ wenv.ignore_runtime = ignore_runtime;
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+
+ assert(get_irg_phase_state(irg) != phase_building);
+ free_callee_info(irg);
+
+ wenv.x = get_irg_link(irg);
+ irg_walk_graph(irg, NULL, collect_calls2, &wenv);
+ }
+
+ /* -- and now inline. -- */
+
+ /* Inline leaves recursively -- we might construct new leaves. */
+ do {
+ did_inline = 0;
+
+ for (i = 0; i < n_irgs; ++i) {
+ ir_node *call;
+ int phiproj_computed = 0;
+
+ current_ir_graph = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+
+ tail = NULL;
+ for (entry = env->call_head; entry != NULL; entry = entry->next) {
+ ir_graph *callee;
+
+ if (env->n_nodes > maxsize) break;
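+ /* Stop inlining into this graph once it has grown beyond maxsize nodes. */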
+
+ call = entry->call;
+ callee = entry->callee;
+
+ if (is_leave(callee) && is_smaller(callee, leavesize)) {
+ if (!phiproj_computed) {
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
+ }
+ did_inline = inline_method(call, callee);
+
+ if (did_inline) {
+ /* Do some statistics */
+ inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+
+ env->got_inline = 1;
+ --env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ --callee_env->n_callers;
+
+ /* remove this call from the list */
+ if (tail != NULL)
+ tail->next = entry->next;
+ else
+ env->call_head = entry->next;
+ continue;
+ }
+ }
+ tail = entry;
+ }
+ env->call_tail = tail;
+ }
+ } while (did_inline);
+
+ /* inline other small functions. */
+ for (i = 0; i < n_irgs; ++i) {
+ ir_node *call;
+ int phiproj_computed = 0;
+
+ current_ir_graph = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+
+ /* note that the list of possible calls is updated during the process */
+ tail = NULL;
+ for (entry = env->call_head; entry != NULL; entry = entry->next) {
+ ir_graph *callee;
+
+ call = entry->call;
+ callee = entry->callee;
+
+ if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
+ (get_irg_inline_property(callee) >= irg_inline_forced))) {
+ if (!phiproj_computed) {
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
+ }
+ if (inline_method(call, callee)) {
+ inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+
+ /* callee was inlined. Append its call list. */
+ env->got_inline = 1;
+ --env->n_call_nodes;
+ append_call_list(&obst, env, callee_env->call_head);
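+ /* The callee's call entries become inlining candidates in this graph as well. */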
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ --callee_env->n_callers;
+
+ /* after we have inlined callee, all called methods inside callee
+ are now called once more */
+ for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
+ inline_irg_env *penv = get_irg_link(centry->callee);
+ ++penv->n_callers;
+ }
+
+ /* remove this call from the list */
+ if (tail != NULL)
+ tail->next = entry->next;
+ else
+ env->call_head = entry->next;
+ continue;
+ }
+ }
+ tail = entry;
+ }
+ env->call_tail = tail;
+ }
+
+ for (i = 0; i < n_irgs; ++i) {
+ irg = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(irg);
+
+ if (env->got_inline) {
+ /* this irg got calls inlined */
+ set_irg_outs_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
+
+ optimize_graph_df(irg);
+ optimize_cf(irg);
+ }
+ if (env->got_inline || (env->n_callers_orig != env->n_callers))
+ DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
+ env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+ env->n_callers_orig, env->n_callers,
+ get_entity_name(get_irg_entity(irg))));
+ }
+
+ obstack_free(&obst, NULL);
+ current_ir_graph = rem;
-place_floats_early(ir_node *n, waitq *worklist)
-{
- int i, irn_arity;
-
- /* we must not run into an infinite loop */
- assert(irn_not_visited(n));
- mark_irn_visited(n);
-
- /* Place floating nodes. */
- if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
- int depth = 0;
- ir_node *b = NULL; /* The block to place this node in */
-
- assert(is_no_Block(n));
-
- if (is_irn_start_block_placed(n)) {
- /* These nodes will not be placed by the loop below. */
- b = get_irg_start_block(current_ir_graph);
- depth = 1;
- }
-
- /* find the block for this node. */
- irn_arity = get_irn_arity(n);
- for (i = 0; i < irn_arity; i++) {
- ir_node *pred = get_irn_n(n, i);
- ir_node *pred_block;
-
- if ((irn_not_visited(pred))
- && (get_irn_pinned(pred) == op_pin_state_floats)) {
-
- /*
- * If the current node is NOT in a dead block, but one of its
- * predecessors is, we must move the predecessor to a live block.
- * Such thing can happen, if global CSE chose a node from a dead block.
- * We move it simply to our block.
- * Note that neither Phi nor End nodes are floating, so we don't
- * need to handle them here.
- */
- if (! in_dead_block) {
- if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1)))
- set_nodes_block(pred, curr_block);
- }
- place_floats_early(pred, worklist);
- }
-
- /*
- * A node in the Bad block must stay in the bad block,
- * so don't compute a new block for it.
- */
- if (in_dead_block)
- continue;
-
- /* Because all loops contain at least one op_pin_state_pinned node, now all
- our inputs are either op_pin_state_pinned or place_early() has already
- been finished on them. We do not have any unfinished inputs! */
- pred_block = get_irn_n(pred, -1);
- if ((!is_Block_dead(pred_block)) &&
- (get_Block_dom_depth(pred_block) > depth)) {
- b = pred_block;
- depth = get_Block_dom_depth(pred_block);
- }
- /* Avoid that the node is placed in the Start block */
- if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)) {
- b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
- assert(b != get_irg_start_block(current_ir_graph));
- depth = 2;
- }
- }
- if (b)
- set_nodes_block(n, b);
- }
-
- /*
- * Add predecessors of non floating nodes and non-floating predecessors
- * of floating nodes to worklist and fix their blocks if the are in dead block.
- */
- irn_arity = get_irn_arity(n);
-
- if (get_irn_op(n) == op_End) {
- /*
- * Simplest case: End node. Predecessors are keep-alives,
- * no need to move out of dead block.
- */
- for (i = -1; i < irn_arity; ++i) {
- ir_node *pred = get_irn_n(n, i);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
- }
- }
- else if (is_Block(n)) {
- /*
- * Blocks: Predecessors are control flow, no need to move
- * them out of dead block.
- */
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
- }
- }
- else if (is_Phi(n)) {
- ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
-
- /*
- * Phi nodes: move nodes from dead blocks into the effective use
- * of the Phi-input if the Phi is not in a bad block.
- */
- pred = get_irn_n(n, -1);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
-
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
-
- if (irn_not_visited(pred)) {
- if (! in_dead_block &&
- get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
- set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
- }
- waitq_put(worklist, pred);
- }
- }
- }
- else {
- ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
-
- /*
- * All other nodes: move nodes from dead blocks into the same block.
- */
- pred = get_irn_n(n, -1);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
-
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
-
- if (irn_not_visited(pred)) {
- if (! in_dead_block &&
- get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
- set_nodes_block(pred, curr_block);
- }
- waitq_put(worklist, pred);
- }
- }
- }
+place_floats_early(ir_node *n, waitq *worklist) {
+ int i, irn_arity;
+
+ /* we must not run into an infinite loop */
+ assert(irn_not_visited(n));
+ mark_irn_visited(n);
+
+ /* Place floating nodes. */
+ if (get_irn_pinned(n) == op_pin_state_floats) {
+ ir_node *curr_block = get_irn_n(n, -1);
+ int in_dead_block = is_Block_unreachable(curr_block);
+ int depth = 0;
+ ir_node *b = NULL; /* The block to place this node in */
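+ /* Schedule early: b becomes the deepest block, by dominator-tree depth, among the blocks of all predecessors. */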
+
+ assert(is_no_Block(n));
+
+ if (is_irn_start_block_placed(n)) {
+ /* These nodes will not be placed by the loop below. */
+ b = get_irg_start_block(current_ir_graph);
+ depth = 1;
+ }
+
+ /* find the block for this node. */
+ irn_arity = get_irn_arity(n);
+ for (i = 0; i < irn_arity; i++) {
+ ir_node *pred = get_irn_n(n, i);
+ ir_node *pred_block;
+
+ if ((irn_not_visited(pred))
+ && (get_irn_pinned(pred) == op_pin_state_floats)) {
+
+ /*
+ * If the current node is NOT in a dead block, but one of its
+ * predecessors is, we must move the predecessor to a live block.
+ * Such a thing can happen if global CSE chose a node from a dead block.
+ * We move it simply to our block.
+ * Note that neither Phi nor End nodes are floating, so we don't
+ * need to handle them here.
+ */
+ if (! in_dead_block) {
+ if (get_irn_pinned(pred) == op_pin_state_floats &&
+ is_Block_unreachable(get_irn_n(pred, -1)))
+ set_nodes_block(pred, curr_block);
+ }
+ place_floats_early(pred, worklist);
+ }
+
+ /*
+ * A node in the Bad block must stay in the Bad block,
+ * so don't compute a new block for it.
+ */
+ if (in_dead_block)
+ continue;
+
+ /* Because all loops contain at least one op_pin_state_pinned node, now all
+ our inputs are either op_pin_state_pinned or place_early() has already
+ been finished on them. We do not have any unfinished inputs! */
+ pred_block = get_irn_n(pred, -1);
+ if ((!is_Block_dead(pred_block)) &&
+ (get_Block_dom_depth(pred_block) > depth)) {
+ b = pred_block;
+ depth = get_Block_dom_depth(pred_block);
+ }
+ /* Avoid placing the node in the Start block unless we are in the backend phase */
+ if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
+ && get_irg_phase_state(current_ir_graph) != phase_backend) {
+ b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
+ assert(b != get_irg_start_block(current_ir_graph));
+ depth = 2;
+ }
+ }
+ if (b)
+ set_nodes_block(n, b);
+ }
+
+ /*
+ * Add predecessors of non-floating nodes and non-floating predecessors
+ * of floating nodes to the worklist, and fix their blocks if they are in a dead block.
+ */
+ irn_arity = get_irn_arity(n);
+
+ if (get_irn_op(n) == op_End) {
+ /*
+ * Simplest case: End node. Predecessors are keep-alives,
+ * no need to move out of dead block.
+ */
+ for (i = -1; i < irn_arity; ++i) {
+ ir_node *pred = get_irn_n(n, i);
+ if (irn_not_visited(pred))
+ waitq_put(worklist, pred);
+ }
+ } else if (is_Block(n)) {
+ /*
+ * Blocks: Predecessors are control flow, no need to move
+ * them out of dead block.
+ */
+ for (i = irn_arity - 1; i >= 0; --i) {
+ ir_node *pred = get_irn_n(n, i);
+ if (irn_not_visited(pred))
+ waitq_put(worklist, pred);
+ }
+ } else if (is_Phi(n)) {
+ ir_node *pred;
+ ir_node *curr_block = get_irn_n(n, -1);
+ int in_dead_block = is_Block_unreachable(curr_block);
+
+ /*
+ * Phi nodes: move predecessors in dead blocks into the block of the
+ * Phi input's effective use, if the Phi is not in a dead block.
+ */
+ pred = get_irn_n(n, -1);
+ if (irn_not_visited(pred))
+ waitq_put(worklist, pred);
+
+ for (i = irn_arity - 1; i >= 0; --i) {
+ ir_node *pred = get_irn_n(n, i);
+
+ if (irn_not_visited(pred)) {
+ if (! in_dead_block &&
+ get_irn_pinned(pred) == op_pin_state_floats &&
+ is_Block_unreachable(get_irn_n(pred, -1))) {
+ set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
+ }
+ waitq_put(worklist, pred);
+ }
+ }
+ } else {
+ ir_node *pred;
+ ir_node *curr_block = get_irn_n(n, -1);
+ int in_dead_block = is_Block_unreachable(curr_block);
+
+ /*
+ * All other nodes: move nodes from dead blocks into the same block.
+ */
+ pred = get_irn_n(n, -1);
+ if (irn_not_visited(pred))
+ waitq_put(worklist, pred);
+
+ for (i = irn_arity - 1; i >= 0; --i) {
+ ir_node *pred = get_irn_n(n, i);
+
+ if (irn_not_visited(pred)) {
+ if (! in_dead_block &&
+ get_irn_pinned(pred) == op_pin_state_floats &&
+ is_Block_unreachable(get_irn_n(pred, -1))) {
+ set_nodes_block(pred, curr_block);
+ }
+ waitq_put(worklist, pred);
+ }
+ }
+ }
- ir_node *early_blk;
-
- assert(irn_not_visited(n)); /* no multiple placement */
-
- mark_irn_visited(n);
-
- /* no need to place block nodes, control nodes are already placed. */
- if ((get_irn_op(n) != op_Block) &&
- (!is_cfop(n)) &&
- (get_irn_mode(n) != mode_X)) {
- /* Remember the early_blk placement of this block to move it
- out of loop no further than the early_blk placement. */
- early_blk = get_irn_n(n, -1);
-
- /*
- * BEWARE: Here we also get code, that is live, but
- * was in a dead block. If the node is life, but because
- * of CSE in a dead block, we still might need it.
- */
-
- /* Assure that our users are all placed, except the Phi-nodes.
- --- Each data flow cycle contains at least one Phi-node. We
- have to break the `user has to be placed before the
- producer' dependence cycle and the Phi-nodes are the
- place to do so, because we need to base our placement on the
- final region of our users, which is OK with Phi-nodes, as they
- are op_pin_state_pinned, and they never have to be placed after a
- producer of one of their inputs in the same block anyway. */
- for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
- ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
- place_floats_late(succ, worklist);
- }
-
- if (! is_Block_dead(early_blk)) {
- /* do only move things that where not dead */
- ir_op *op = get_irn_op(n);
-
- /* We have to determine the final block of this node... except for
- constants and Projs */
- if ((get_irn_pinned(n) == op_pin_state_floats) &&
- (op != op_Const) &&
- (op != op_SymConst) &&
- (op != op_Proj))
- {
- ir_node *dca = NULL; /* deepest common ancestor in the
- dominator tree of all nodes'
- blocks depending on us; our final
- placement has to dominate DCA. */
- for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
- ir_node *succ = get_irn_out(n, i);
- ir_node *succ_blk;
-
- if (get_irn_op(succ) == op_End) {
- /*
- * This consumer is the End node, a keep alive edge.
- * This is not a real consumer, so we ignore it
- */
- continue;
- }
-
- /* ignore if succ is in dead code */
- succ_blk = get_irn_n(succ, -1);
- if (is_Block_unreachable(succ_blk))
- continue;
- dca = consumer_dom_dca(dca, succ, n);
- }
- if (dca) {
- set_nodes_block(n, dca);
- move_out_of_loops(n, early_blk);
- }
- }
- }
- }
-
- /* Add predecessors of all non-floating nodes on list. (Those of floating
- nodes are placed already and therefore are marked.) */
- for (i = 0; i < get_irn_n_outs(n); i++) {
- ir_node *succ = get_irn_out(n, i);
- if (irn_not_visited(get_irn_out(n, i))) {
- pdeq_putr(worklist, succ);
- }
- }
+ ir_node *early_blk;
+
+ assert(irn_not_visited(n)); /* no multiple placement */
+
+ mark_irn_visited(n);
+
+ /* no need to place block nodes, control nodes are already placed. */
+ if ((get_irn_op(n) != op_Block) &&
+ (!is_cfop(n)) &&
+ (get_irn_mode(n) != mode_X)) {
+ /* Remember the early_blk placement of this node to move it
+ out of loops no further than the early_blk placement. */
+ early_blk = get_irn_n(n, -1);
+
+ /*
+ * BEWARE: Here we also get code that is live but
+ * was in a dead block. If the node is live, but lies in
+ * a dead block because of CSE, we still might need it.
+ */
+
+ /* Assure that our users are all placed, except the Phi-nodes.
+ --- Each data flow cycle contains at least one Phi-node. We
+ have to break the `user has to be placed before the
+ producer' dependence cycle and the Phi-nodes are the
+ place to do so, because we need to base our placement on the
+ final region of our users, which is OK with Phi-nodes, as they
+ are op_pin_state_pinned, and they never have to be placed after a
+ producer of one of their inputs in the same block anyway. */
+ for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(n, i);
+ if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+ place_floats_late(succ, worklist);
+ }
+
+ if (! is_Block_dead(early_blk)) {
+ /* only move things that were not dead */
+ ir_op *op = get_irn_op(n);
+
+ /* We have to determine the final block of this node... except for
+ constants and Projs */
+ if ((get_irn_pinned(n) == op_pin_state_floats) &&
+ (op != op_Const) &&
+ (op != op_SymConst) &&
+ (op != op_Proj))
+ {
+ ir_node *dca = NULL; /* deepest common ancestor in the
+ dominator tree of all nodes'
+ blocks depending on us; our final
+ placement has to dominate DCA. */
+ for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
+ ir_node *succ = get_irn_out(n, i);
+ ir_node *succ_blk;
+
+ if (get_irn_op(succ) == op_End) {
+ /*
+ * This consumer is the End node, a keep alive edge.
+ * This is not a real consumer, so we ignore it
+ */
+ continue;
+ }
+
+ /* ignore if succ is in dead code */
+ succ_blk = get_irn_n(succ, -1);
+ if (is_Block_unreachable(succ_blk))
+ continue;
+ dca = consumer_dom_dca(dca, succ, n);
+ }
+ if (dca) {
+ set_nodes_block(n, dca);
+ move_out_of_loops(n, early_blk);
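+ /* Any block on the dominator chain between early_blk and dca is legal; move_out_of_loops presumably picks the one with the smallest loop depth. */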
+ }
+ }
+ }
+ }
+
+ /* Add successors of all non-floating nodes to the list. (Those of floating
+ nodes are placed already and therefore are marked.) */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ ir_node *succ = get_irn_out(n, i);
+ if (irn_not_visited(succ)) {
+ pdeq_putr(worklist, succ);
+ }
+ }