- /* The new end node will die. We need not free as the in array is on the obstack:
- copy_node only generated 'D' arrays. */
-
- /* -- Replace Return nodes by Jump nodes. -- */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_Block(ret));
- n_ret++;
- }
- }
- set_irn_in(post_bl, n_ret, cf_pred);
-
- /* -- Build a Tuple for all results of the method.
- Add Phi node if there was more than one Return. -- */
- turn_into_tuple(post_call, 4);
- /* First the Memory-Phi */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_mem(ret);
- n_ret++;
- }
- }
- phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M_regular, phi);
- /* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_Block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- /* Now the real results */
- if (n_res > 0) {
- for (j = 0; j < n_res; j++) {
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
- }
- if (n_ret > 0)
- phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
- else
- phi = new_Bad();
- res_pred[j] = phi;
- /* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_Block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- }
- set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
- } else {
- set_Tuple_pred(call, pn_Call_T_result, new_Bad());
- }
- /* Finally the exception control flow.
- We have two (three) possible situations:
- First if the Call branches to an exception handler: We need to add a Phi node to
- collect the memory containing the exception objects. Further we need
- to add another block to get a correct representation of this Phi. To
- this block we add a Jmp that resolves into the X output of the Call
- when the Call is turned into a tuple.
- Second the Call branches to End, the exception is not handled. Just
- add all inlined exception branches to the End node.
- Third: there is no Exception edge at all. Handle as case two. */
- if (exc_handling == 0) {
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
- set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
- /* The Phi for the memories with the exception objects */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (get_irn_op(ret) == op_Call) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_Block(ret), ret, mode_M, 1);
- n_exc++;
- }
- }
- set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
- } else {
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- }
- } else {
- ir_node *main_end_bl;
- int main_end_bl_arity;
- ir_node **end_preds;
-
- /* assert(exc_handling == 1 || no exceptions. ) */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
-
- if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- main_end_bl = get_irg_end_block(current_ir_graph);
- main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = (ir_node **) malloc ((n_exc + main_end_bl_arity) * sizeof (ir_node *));
-
- for (i = 0; i < main_end_bl_arity; ++i)
- end_preds[i] = get_irn_n(main_end_bl, i);
- for (i = 0; i < n_exc; ++i)
- end_preds[main_end_bl_arity + i] = cf_pred[i];
- set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- free(end_preds);
- }
- free(res_pred);
- free(cf_pred);
-
-#if 0 /* old. now better, correcter, faster implementation. */
- if (n_exc > 0) {
- /* -- If the exception control flow from the inlined Call directly
- branched to the end block we now have the following control
- flow predecessor pattern: ProjX -> Tuple -> Jmp. We must
- remove the Jmp along with it's empty block and add Jmp's
- predecessors as predecessors of this end block. No problem if
- there is no exception, because then branches Bad to End which
- is fine. --
- @@@ can't we know this beforehand: by getting the Proj(1) from
- the Call link list and checking whether it goes to Proj. */
- /* find the problematic predecessor of the end block. */
- end_bl = get_irg_end_block(current_ir_graph);
- for (i = 0; i < get_Block_n_cfgpreds(end_bl); i++) {
- cf_op = get_Block_cfgpred(end_bl, i);
- if (get_irn_op(cf_op) == op_Proj) {
- cf_op = get_Proj_pred(cf_op);
- if ((get_irn_op(cf_op) == op_Tuple) && (cf_op == call)) {
/* There may be unoptimized Tuples left over from earlier inlinings when there was no exception. */
- assert(get_Proj_proj(get_Block_cfgpred(end_bl, i)) == pn_Call_X_except);
- cf_op = get_Tuple_pred(cf_op, pn_Call_X_except);
- assert(get_irn_op(cf_op) == op_Jmp);
- break;
- }
- }
- }
- /* repair */
- if (i < get_Block_n_cfgpreds(end_bl)) {
- bl = get_nodes_Block(cf_op);
- arity = get_Block_n_cfgpreds(end_bl) + get_Block_n_cfgpreds(bl) - 1;
- cf_pred = (ir_node **) malloc (arity * sizeof (ir_node *));
- for (j = 0; j < i; j++)
- cf_pred[j] = get_Block_cfgpred(end_bl, j);
- for (j = j; j < i + get_Block_n_cfgpreds(bl); j++)
- cf_pred[j] = get_Block_cfgpred(bl, j-i);
- for (j = j; j < arity; j++)
- cf_pred[j] = get_Block_cfgpred(end_bl, j-get_Block_n_cfgpreds(bl) +1);
- set_irn_in(end_bl, arity, cf_pred);
- free(cf_pred);
- /* Remove the exception pred from post-call Tuple. */
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- }
- }
-#endif
-
- /* -- Turn cse back on. -- */
- set_optimize(rem_opt);
-
- return 1;
-}
-
-/********************************************************************/
-/* Apply inlineing to small methods. */
-/********************************************************************/
-
/* It makes no sense to inline too many calls in one procedure.  In any
   case, I did not get a version with NEW_ARR_F to run. */
-#define MAX_INLINE 1024
-
-/**
- * environment for inlining small irgs
- */
-typedef struct _inline_env_t {
- int pos;
- ir_node *calls[MAX_INLINE];
-} inline_env_t;
-
-/**
- * Returns the irg called from a Call node. If the irg is not
- * known, NULL is returned.
- */
-static ir_graph *get_call_called_irg(ir_node *call) {
- ir_node *addr;
- tarval *tv;
- ir_graph *called_irg = NULL;
-
- assert(get_irn_op(call) == op_Call);
-
- addr = get_Call_ptr(call);
- if (get_irn_op(addr) == op_Const) {
- /* Check whether the constant is the pointer to a compiled entity. */
- tv = get_Const_tarval(addr);
- if (tarval_to_entity(tv))
- called_irg = get_entity_irg(tarval_to_entity(tv));
- }
- return called_irg;
-}
-
-static void collect_calls(ir_node *call, void *env) {
- inline_env_t *ienv = env;
- ir_node *addr;
- tarval *tv;
- ir_graph *called_irg;
-
- if (get_irn_op(call) != op_Call) return;
-
- addr = get_Call_ptr(call);
- if (get_irn_op(addr) == op_Const) {
- /* Check whether the constant is the pointer to a compiled entity. */
- tv = get_Const_tarval(addr);
- if (tarval_to_entity(tv)) {
- called_irg = get_entity_irg(tarval_to_entity(tv));
- if (called_irg && ienv->pos < MAX_INLINE) {
- /* The Call node calls a locally defined method. Remember to inline. */
- ienv->calls[ienv->pos++] = call;
- }
- }
- }
-}
-
-/**
- * Inlines all small methods at call sites where the called address comes
- * from a Const node that references the entity representing the called
- * method.
- * The size argument is a rough measure for the code size of the method:
- * Methods where the obstack containing the firm graph is smaller than
- * size are inlined.
- */
-void inline_small_irgs(ir_graph *irg, int size) {
- int i;
- ir_graph *rem = current_ir_graph;
- inline_env_t env;
-
- if (!(get_opt_optimize() && get_opt_inline())) return;
-
- current_ir_graph = irg;
- /* Handle graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- free_callee_info(current_ir_graph);
-
- /* Find Call nodes to inline.
- (We can not inline during a walk of the graph, as inlineing the same
- method several times changes the visited flag of the walked graph:
- after the first inlineing visited of the callee equals visited of
- the caller. With the next inlineing both are increased.) */
- env.pos = 0;
- irg_walk(get_irg_end(irg), NULL, collect_calls, &env);
-
- if ((env.pos > 0) && (env.pos < MAX_INLINE)) {
- /* There are calls to inline */
- collect_phiprojs(irg);
- for (i = 0; i < env.pos; i++) {
- tarval *tv;
- ir_graph *callee;
- tv = get_Const_tarval(get_Call_ptr(env.calls[i]));
- callee = get_entity_irg(tarval_to_entity(tv));
- if (((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) ||
- (get_irg_inline_property(callee) == irg_inline_forced)) {
- inline_method(env.calls[i], callee);
- }
- }
- }
-
- current_ir_graph = rem;
-}
-
-/**
- * Environment for inlining irgs.
- */
-typedef struct {
- int n_nodes; /**< Nodes in graph except Id, Tuple, Proj, Start, End */
- int n_nodes_orig; /**< for statistics */
- eset *call_nodes; /**< All call nodes in this graph */
- int n_call_nodes;
- int n_call_nodes_orig; /**< for statistics */
- int n_callers; /**< Number of known graphs that call this graphs. */
- int n_callers_orig; /**< for statistics */
-} inline_irg_env;
-
-static inline_irg_env *new_inline_irg_env(void) {
- inline_irg_env *env = malloc(sizeof(inline_irg_env));
- env->n_nodes = -2; /* uncount Start, End */
- env->n_nodes_orig = -2; /* uncount Start, End */
- env->call_nodes = eset_create();
- env->n_call_nodes = 0;
- env->n_call_nodes_orig = 0;
- env->n_callers = 0;
- env->n_callers_orig = 0;
- return env;
-}
-
-static void free_inline_irg_env(inline_irg_env *env) {
- eset_destroy(env->call_nodes);
- free(env);
-}
-
-static void collect_calls2(ir_node *call, void *env) {
- inline_irg_env *x = (inline_irg_env *)env;
- ir_op *op = get_irn_op(call);
- ir_graph *callee;
-
- /* count nodes in irg */
- if (op != op_Proj && op != op_Tuple && op != op_Sync) {
- x->n_nodes++;
- x->n_nodes_orig++;
- }
-
- if (op != op_Call) return;
-
- /* collect all call nodes */
- eset_insert(x->call_nodes, (void *)call);
- x->n_call_nodes++;
- x->n_call_nodes_orig++;
-
- /* count all static callers */
- callee = get_call_called_irg(call);
- if (callee) {
- ((inline_irg_env *)get_irg_link(callee))->n_callers++;
- ((inline_irg_env *)get_irg_link(callee))->n_callers_orig++;
- }