-enum exc_mode {
- exc_handler = 0, /**< There is a handler. */
- exc_to_end = 1, /**< Branches to End. */
- exc_no_handler = 2 /**< Exception handling not represented. */
-};
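-
-/* The mode is derived from the Projs present on the Call (see
-   inline_method() below): both an M_except and an X_except Proj mean
-   there is a handler (exc_handler); only an X_except Proj means the
-   Call branches directly to End (exc_to_end); neither means exception
-   handling is not represented at all (exc_no_handler). */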
-
-/* Inlines a method at the given call site. */
-int inline_method(ir_node *call, ir_graph *called_graph) {
- ir_node *pre_call;
- ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max];
- ir_node *end, *end_bl;
- ir_node **res_pred;
- ir_node **cf_pred;
- ir_node *ret, *phi;
- int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
- enum exc_mode exc_handling;
- ir_type *called_frame;
- irg_inline_property prop = get_irg_inline_property(called_graph);
-
- if ( (prop < irg_inline_forced) &&
- (!get_opt_optimize() || !get_opt_inline() || (prop == irg_inline_forbidden))) return 0;
-
- /* Do not inline variadic functions. */
- if (get_method_variadicity(get_entity_type(get_irg_entity(called_graph))) == variadicity_variadic)
- return 0;
-
- assert(get_method_n_params(get_entity_type(get_irg_entity(called_graph))) ==
- get_method_n_params(get_Call_type(call)));
-
- /*
-  * There are currently two cases we cannot inline:
-  * - calls with compound arguments
-  * - graphs that take the address of a parameter
-  */
- if (! can_inline(call, called_graph))
- return 0;
-
- /* -- Turn off optimizations: they can cause problems when allocating new nodes. -- */
- rem_opt = get_opt_optimize();
- set_optimize(0);
-
- /* Handle graph state */
- assert(get_irg_phase_state(current_ir_graph) != phase_building);
- assert(get_irg_pinned(current_ir_graph) == op_pin_state_pinned);
- assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
- set_irg_outs_inconsistent(current_ir_graph);
- set_irg_extblk_inconsistent(current_ir_graph);
- set_irg_doms_inconsistent(current_ir_graph);
- set_irg_loopinfo_inconsistent(current_ir_graph);
- set_irg_callee_info_state(current_ir_graph, irg_callee_info_inconsistent);
-
- /* -- Check preconditions -- */
- assert(is_Call(call));
- /* @@@ does not work for InterfaceIII.java after cgana
- assert(get_Call_type(call) == get_entity_type(get_irg_entity(called_graph)));
- assert(smaller_type(get_entity_type(get_irg_entity(called_graph)),
- get_Call_type(call)));
- */
- if (called_graph == current_ir_graph) {
- set_optimize(rem_opt);
- return 0;
- }
-
- /* here we know we WILL inline, so inform the statistics */
- hook_inline(call, called_graph);
-
- /* -- Decide how to handle exception control flow: Is there a handler
- for the Call node, or do we branch directly to End on an exception?
- exc_handling:
- 0 There is a handler.
- 1 Branches to End.
- 2 Exception handling not represented in Firm. -- */
- {
- ir_node *proj, *Mproj = NULL, *Xproj = NULL;
- for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
- long proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_Call_X_except) Xproj = proj;
- if (proj_nr == pn_Call_M_except) Mproj = proj;
- }
- if (Mproj) { assert(Xproj); exc_handling = exc_handler; } /* Mproj */
- else if (Xproj) { exc_handling = exc_to_end; } /* !Mproj && Xproj */
- else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
- }
-
- /* --
-    Pre_call is a Tuple that simulates the Start node of the called
-    procedure and later replaces the Start node of the called graph.
-    Post_call is the old Call node and collects the results of the called
-    graph. Both will end up being a tuple. -- */
- post_bl = get_nodes_block(call);
- set_irg_current_block(current_ir_graph, post_bl);
- /* XxMxPxPxPxT of Start + parameter of Call */
- in[pn_Start_X_initial_exec] = new_Jmp();
- in[pn_Start_M] = get_Call_mem(call);
- in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph);
- in[pn_Start_P_globals] = get_irg_globals(current_ir_graph);
- in[pn_Start_P_tls] = get_irg_tls(current_ir_graph);
- in[pn_Start_T_args] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
- /* in[pn_Start_P_value_arg_base] = ??? */
- assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
- pre_call = new_Tuple(pn_Start_max - 1, in);
- post_call = call;
-
- /* --
- The new block gets the ins of the old block, pre_call and all its
- predecessors and all Phi nodes. -- */
- part_block(pre_call);
-
- /* -- Prepare state for dead node elimination -- */
- /* Visited flags in the calling irg must be >= those in the called irg.
-    Else walker and arity computation will not work. */
- if (get_irg_visited(current_ir_graph) <= get_irg_visited(called_graph))
-   set_irg_visited(current_ir_graph, get_irg_visited(called_graph)+1);
- if (get_irg_block_visited(current_ir_graph) < get_irg_block_visited(called_graph))
-   set_irg_block_visited(current_ir_graph, get_irg_block_visited(called_graph));
- /* Set pre_call as the new Start node in the link field of the called
-    graph's start node, and pre_call's block as the new block for the
-    called graph's start block.
-    Further mark these nodes so that they are not visited by the
-    copying. */
- set_irn_link(get_irg_start(called_graph), pre_call);
- set_irn_visited(get_irg_start(called_graph), get_irg_visited(current_ir_graph));
- set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
- set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(current_ir_graph));
- set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
- set_irn_visited(get_irg_bad(called_graph), get_irg_visited(current_ir_graph));
-
- /* Initialize for compaction of in arrays */
- inc_irg_block_visited(current_ir_graph);
-
- /* -- Replicate local entities of the called_graph -- */
- /* copy the entities. */
- called_frame = get_irg_frame_type(called_graph);
- for (i = 0; i < get_class_n_members(called_frame); i++) {
- ir_entity *new_ent, *old_ent;
- old_ent = get_class_member(called_frame, i);
- new_ent = copy_entity_own(old_ent, get_cur_frame_type());
- set_entity_link(old_ent, new_ent);
- }
-
- /* visited is now greater than that of the called graph. With this trick,
-    visited will remain unchanged, so that an outer walker (e.g., one
-    searching for the call nodes to inline) that invokes this inliner will
-    not visit the inlined nodes. */
- set_irg_visited(current_ir_graph, get_irg_visited(current_ir_graph)-1);
-
- /* -- Performing dead node elimination inlines the graph -- */
- /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
- entities. */
- irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
- get_irg_frame_type(called_graph));
-
- /* Repair called_graph */
- set_irg_visited(called_graph, get_irg_visited(current_ir_graph));
- set_irg_block_visited(called_graph, get_irg_block_visited(current_ir_graph));
- set_Block_block_visited(get_irg_start_block(called_graph), 0);
-
- /* -- Merge the end of the inlined procedure with the call site -- */
- /* We will turn the old Call node into a Tuple with the following
- predecessors:
- -1: Block of Tuple.
- 0: Phi of all Memories of Return statements.
- 1: Jmp from new Block that merges the control flow from all exception
- predecessors of the old end block.
- 2: Tuple of all arguments.
- 3: Phi of Exception memories.
- In case the old Call directly branches to End on an exception we don't
- need the block merging all exceptions nor the Phi of the exception
- memories.
- */
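- /* In terms of the pn_Call_* numbers used below, that is:
-    pn_Call_M_regular <- Phi of the Return memories,
-    pn_Call_X_except  <- Jmp of the merged exception block,
-    pn_Call_T_result  <- Tuple of the result Phis,
-    pn_Call_M_except  <- Phi of the exception memories. */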
-
- /* -- Precompute some values -- */
- end_bl = get_new_node(get_irg_end_block(called_graph));
- end = get_new_node(get_irg_end(called_graph));
- arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
- n_res = get_method_n_ress(get_Call_type(call));
-
- res_pred = xmalloc(n_res * sizeof(*res_pred));
- cf_pred = xmalloc(arity * sizeof(*cf_pred));
-
- set_irg_current_block(current_ir_graph, post_bl); /* just to make sure */
-
- /* -- archive keepalives -- */
- irn_arity = get_irn_arity(end);
- for (i = 0; i < irn_arity; i++) {
- ir_node *ka = get_End_keepalive(end, i);
- if (! is_Bad(ka))
- add_End_keepalive(get_irg_end(current_ir_graph), ka);
- }
-
- /* The new end node will die. We need not free it, as the in array is on the
-    obstack: copy_node() only generated 'D' arrays. */
-
- /* -- Replace Return nodes by Jump nodes. -- */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = new_r_Jmp(current_ir_graph, get_nodes_block(ret));
- n_ret++;
- }
- }
- set_irn_in(post_bl, n_ret, cf_pred);
-
- /* -- Build a Tuple for all results of the method.
- Add Phi node if there was more than one Return. -- */
- turn_into_tuple(post_call, pn_Call_max);
- /* First the Memory-Phi */
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_mem(ret);
- n_ret++;
- }
- }
- phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M_regular, phi);
- /* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- /* Now the real results */
- if (n_res > 0) {
- for (j = 0; j < n_res; j++) {
- n_ret = 0;
- for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
- if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
- }
- if (n_ret > 0)
- phi = new_Phi(n_ret, cf_pred, get_irn_mode(cf_pred[0]));
- else
- phi = new_Bad();
- res_pred[j] = phi;
- /* Conserve Phi-list for further inlinings -- but might be optimized */
- if (get_nodes_block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
- }
- }
- set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
- } else {
- set_Tuple_pred(call, pn_Call_T_result, new_Bad());
- }
-
- /* For now, we cannot inline calls with value_base */
- set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
-
- /* Finally the exception control flow.
-    We have two (three) possible situations:
-    First, the Call branches to an exception handler: we need to add a Phi node
-    that collects the memory containing the exception objects. Further we need
-    to add another block to get a correct representation of this Phi. To
-    this block we add a Jmp that resolves into the X output of the Call
-    when the Call is turned into a tuple.
-    Second, the Call branches to End and the exception is not handled: just
-    add all inlined exception branches to the End node.
-    Third, there is no exception edge at all: handle as case two. */
- if (exc_handling == exc_handler) {
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret, *irn;
- ret = get_irn_n(end_bl, i);
- irn = skip_Proj(ret);
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
- cf_pred[n_exc] = ret;
- ++n_exc;
- }
- }
- if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
- set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
- /* The Phi for the memories with the exception objects */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
- if (is_Call(ret)) {
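- /* Proj number 3 of a Call is its exception memory (presumably
-    pn_Call_M_except in this Firm version). */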
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely on all cfops having the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (get_irn_op(ret) == op_Raise) {
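- /* Proj number 1 of a Raise is its memory result (presumably
-    pn_Raise_M in this Firm version). */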
- cf_pred[n_exc] = new_r_Proj(current_ir_graph, get_nodes_block(ret), ret, mode_M, 1);
- n_exc++;
- }
- }
- set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
- } else {
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- }
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
- } else {
- ir_node *main_end_bl;
- int main_end_bl_arity;
- ir_node **end_preds;
-
- /* assert(exc_handling == 1 || no exceptions. ) */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
- ir_node *irn = skip_Proj(ret);
-
- if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
- cf_pred[n_exc] = ret;
- n_exc++;
- }
- }
- main_end_bl = get_irg_end_block(current_ir_graph);
- main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
-
- for (i = 0; i < main_end_bl_arity; ++i)
- end_preds[i] = get_irn_n(main_end_bl, i);
- for (i = 0; i < n_exc; ++i)
- end_preds[main_end_bl_arity + i] = cf_pred[i];
- set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
- free(end_preds);
- }
- free(res_pred);
- free(cf_pred);
-
- /* -- Turn the optimizations (CSE etc.) back on. -- */
- set_optimize(rem_opt);
-
- return 1;
-}
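-
-/* Minimal usage sketch (hypothetical caller): inline_method() walks the
-   Projs of the Call through their link fields, so the Phi/Proj links must
-   have been computed first, as the drivers below do:
-
-     collect_phiprojs(current_ir_graph);
-     if (inline_method(call, callee))
-       ... the Call site has been replaced by a Tuple ...
-*/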
-
-/********************************************************************/
-/* Apply inlining to small methods. */
-/********************************************************************/
-
-/** Represents a possible inlinable call in a graph. */
-typedef struct _call_entry call_entry;
-struct _call_entry {
- ir_node *call; /**< the Call */
- ir_graph *callee; /**< the callee called here */
- call_entry *next; /**< for linking the next one */
-};
-
-/**
- * environment for inlining small irgs
- */
-typedef struct _inline_env_t {
- struct obstack obst; /**< the obstack on which call_entries are allocated. */
- call_entry *head; /**< the head of the call entry list */
- call_entry *tail; /**< the tail of the call entry list */
-} inline_env_t;
-
-/**
- * Returns the irg called from a Call node. If the irg is not
- * known, NULL is returned.
- */
-static ir_graph *get_call_called_irg(ir_node *call) {
- ir_node *addr;
- ir_graph *called_irg = NULL;
-
- addr = get_Call_ptr(call);
- if (is_SymConst(addr) && get_SymConst_kind(addr) == symconst_addr_ent) {
- called_irg = get_entity_irg(get_SymConst_entity(addr));
- }
-
- return called_irg;
-}
-
-/**
- * Walker: Collect all calls to known graphs inside a graph.
- */
-static void collect_calls(ir_node *call, void *env) {
- if (is_Call(call)) {
- ir_graph *called_irg = get_call_called_irg(call);
- if (called_irg) {
- /* The Call node calls a locally defined method. Remember to inline. */
- inline_env_t *ienv = env;
- call_entry *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
- entry->call = call;
- entry->callee = called_irg;
- entry->next = NULL;
-
- if (ienv->tail == NULL)
- ienv->head = entry;
- else
- ienv->tail->next = entry;
- ienv->tail = entry;
- }
- }
-}
-
-/**
- * Inlines all small methods at call sites where the called address comes
- * from a Const node that references the entity representing the called
- * method.
- * The size argument is a rough measure for the code size of the method:
- * Methods where the obstack containing the firm graph is smaller than
- * size are inlined.
- */
-void inline_small_irgs(ir_graph *irg, int size) {
- ir_graph *rem = current_ir_graph;
- inline_env_t env;
- call_entry *entry;
- DEBUG_ONLY(firm_dbg_module_t *dbg;)
-
- if (!(get_opt_optimize() && get_opt_inline())) return;
-
- FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
-
- current_ir_graph = irg;
- /* Handle graph state */
- assert(get_irg_phase_state(irg) != phase_building);
- free_callee_info(irg);
-
- /* Find Call nodes to inline.
-    (We cannot inline during a walk of the graph, as inlining the same
-    method several times changes the visited flag of the walked graph:
-    after the first inlining, visited of the callee equals visited of
-    the caller. With the next inlining both are increased.) */
- obstack_init(&env.obst);
- env.head = env.tail = NULL;
- irg_walk_graph(irg, NULL, collect_calls, &env);
-
- if (env.head != NULL) {
- /* There are calls to inline */
- collect_phiprojs(irg);
- for (entry = env.head; entry != NULL; entry = entry->next) {
- ir_graph *callee = entry->callee;
- if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
- (get_irg_inline_property(callee) >= irg_inline_forced)) {
- inline_method(entry->call, callee);
- }
- }
- }
- obstack_free(&env.obst, NULL);
- current_ir_graph = rem;
-}
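-
-/* Example (hypothetical driver): apply inline_small_irgs() to every graph
-   in the program with an illustrative size threshold:
-
-     int i;
-     for (i = 0; i < get_irp_n_irgs(); ++i)
-       inline_small_irgs(get_irp_irg(i), 2000);
-*/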
-
-/**
- * Environment for inlining irgs.
- */
-typedef struct {
- int n_nodes; /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
- int n_nodes_orig; /**< for statistics */
- call_entry *call_head; /**< The head of the list of all call nodes in this graph. */
- call_entry *call_tail; /**< The tail of the list of all call nodes in this graph .*/
- int n_call_nodes; /**< Number of Call nodes in the graph. */
- int n_call_nodes_orig; /**< for statistics */
- int n_callers; /**< Number of known graphs that call this graph. */
- int n_callers_orig; /**< for statistics */
- int got_inline; /**< Set if at least one call inside this graph was inlined. */
-} inline_irg_env;
-
-/**
- * Allocate a new environment for inlining.
- */
-static inline_irg_env *alloc_inline_irg_env(struct obstack *obst) {
- inline_irg_env *env = obstack_alloc(obst, sizeof(*env));
- env->n_nodes = -2; /* do not count Start, End */
- env->n_nodes_orig = -2; /* do not count Start, End */
- env->call_head = NULL;
- env->call_tail = NULL;
- env->n_call_nodes = 0;
- env->n_call_nodes_orig = 0;
- env->n_callers = 0;
- env->n_callers_orig = 0;
- env->got_inline = 0;
- return env;
-}
-
-typedef struct walker_env {
- struct obstack *obst; /**< the obstack for allocations. */
- inline_irg_env *x; /**< the inline environment */
- int ignore_runtime; /**< the ignore runtime flag */
-} wenv_t;
-
-/**
- * post-walker: collect all calls in the inline-environment
- * of a graph and sum some statistics.
- */
-static void collect_calls2(ir_node *call, void *ctx) {
- wenv_t *env = ctx;
- inline_irg_env *x = env->x;
- ir_op *op = get_irn_op(call);
- ir_graph *callee;
- call_entry *entry;
-
- /* count meaningful nodes in irg */
- if (op != op_Proj && op != op_Tuple && op != op_Sync) {
- ++x->n_nodes;
- ++x->n_nodes_orig;
- }
-
- if (op != op_Call) return;
-
- /* check whether it's a runtime call */
- if (env->ignore_runtime) {
- ir_node *symc = get_Call_ptr(call);
-
- if (is_SymConst(symc) && get_SymConst_kind(symc) == symconst_addr_ent) {
- ir_entity *ent = get_SymConst_entity(symc);
-
- if (get_entity_additional_properties(ent) & mtp_property_runtime)
- return;
- }
- }
-
- /* collect all call nodes */
- ++x->n_call_nodes;
- ++x->n_call_nodes_orig;
-
- callee = get_call_called_irg(call);
- if (callee) {
- inline_irg_env *callee_env = get_irg_link(callee);
- /* count all static callers */
- ++callee_env->n_callers;
- ++callee_env->n_callers_orig;
-
- /* link it in the list of possible inlinable entries */
- entry = obstack_alloc(env->obst, sizeof(*entry));
- entry->call = call;
- entry->callee = callee;
- entry->next = NULL;
- if (x->call_tail == NULL)
- x->call_head = entry;
- else
- x->call_tail->next = entry;
- x->call_tail = entry;
- }
-}
-
-/**
- * Returns TRUE if the number of call nodes in the irg's environment is 0,
- * hence this irg is a leaf: it calls no other graphs.
- */
-INLINE static int is_leave(ir_graph *irg) {
- inline_irg_env *env = get_irg_link(irg);
- return env->n_call_nodes == 0;
-}
-
-/**
- * Returns TRUE if the number of nodes in the callee's environment is smaller than size.
- */
-INLINE static int is_smaller(ir_graph *callee, int size) {
- inline_irg_env *env = get_irg_link(callee);
- return env->n_nodes < size;
-}
-
-/**
- * Append the nodes of the list src to the nodes of the list in environment dst.
- */
-static void append_call_list(struct obstack *obst, inline_irg_env *dst, call_entry *src) {
- call_entry *entry, *nentry;
-
- /* Note that the src list points to Call nodes in the inlined graph, but
- we need Call nodes in our graph. Luckily the inliner leaves this information
- in the link field. */
- for (entry = src; entry != NULL; entry = entry->next) {
- nentry = obstack_alloc(obst, sizeof(*nentry));
- nentry->call = get_irn_link(entry->call);
- nentry->callee = entry->callee;
- nentry->next = NULL;
- dst->call_tail->next = nentry;
- dst->call_tail = nentry;
- }
-}
-
-/*
- * Inlines small leaf methods at call sites where the called address comes
- * from a Const node that references the entity representing the called
- * method.
- * The size argument is a rough measure for the code size of the method:
- * Methods where the obstack containing the firm graph is smaller than
- * size are inlined.
- */
-void inline_leave_functions(int maxsize, int leavesize, int size, int ignore_runtime) {
- inline_irg_env *env;
- ir_graph *irg;
- int i, n_irgs;
- ir_graph *rem;
- int did_inline;
- wenv_t wenv;
- call_entry *entry, *tail;
- const call_entry *centry;
- struct obstack obst;
- DEBUG_ONLY(firm_dbg_module_t *dbg;)
-
- if (!(get_opt_optimize() && get_opt_inline())) return;
-
- FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
- rem = current_ir_graph;
- obstack_init(&obst);
-
- /* extend all irgs by a temporary data structure for inlining. */
- n_irgs = get_irp_n_irgs();
- for (i = 0; i < n_irgs; ++i)
- set_irg_link(get_irp_irg(i), alloc_inline_irg_env(&obst));
-
- /* Precompute information in temporary data structure. */
- wenv.obst = &obst;
- wenv.ignore_runtime = ignore_runtime;
- for (i = 0; i < n_irgs; ++i) {
- ir_graph *irg = get_irp_irg(i);
-
- assert(get_irg_phase_state(irg) != phase_building);
- free_callee_info(irg);
-
- wenv.x = get_irg_link(irg);
- irg_walk_graph(irg, NULL, collect_calls2, &wenv);
- }
-
- /* -- and now inline. -- */
-
- /* Inline leaves recursively -- we might construct new leaves. */
- do {
- did_inline = 0;
-
- for (i = 0; i < n_irgs; ++i) {
- ir_node *call;
- int phiproj_computed = 0;
-
- current_ir_graph = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
-
- tail = NULL;
- for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
-
- if (env->n_nodes > maxsize) break;
-
- call = entry->call;
- callee = entry->callee;
-
- if (is_leave(callee) && is_smaller(callee, leavesize)) {
- if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
- }
- did_inline = inline_method(call, callee);
-
- if (did_inline) {
- /* Do some statistics */
- inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
-
- env->got_inline = 1;
- --env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- --callee_env->n_callers;
-
- /* remove this call from the list */
- if (tail != NULL)
- tail->next = entry->next;
- else
- env->call_head = entry->next;
- continue;
- }
- }
- tail = entry;
- }
- env->call_tail = tail;
- }
- } while (did_inline);
-
- /* inline other small functions. */
- for (i = 0; i < n_irgs; ++i) {
- ir_node *call;
- int phiproj_computed = 0;
-
- current_ir_graph = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(current_ir_graph);
-
- /* note that the list of possible calls is updated during the process */
- tail = NULL;
- for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
-
- call = entry->call;
- callee = entry->callee;
-
- if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
- (get_irg_inline_property(callee) >= irg_inline_forced))) {
- if (!phiproj_computed) {
- phiproj_computed = 1;
- collect_phiprojs(current_ir_graph);
- }
- if (inline_method(call, callee)) {
- inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
-
- /* callee was inlined. Append its call list. */
- env->got_inline = 1;
- --env->n_call_nodes;
- append_call_list(&obst, env, callee_env->call_head);
- env->n_call_nodes += callee_env->n_call_nodes;
- env->n_nodes += callee_env->n_nodes;
- --callee_env->n_callers;
-
- /* after we have inlined callee, all called methods inside callee
- are now called once more */
- for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
- inline_irg_env *penv = get_irg_link(centry->callee);
- ++penv->n_callers;
- }
-
- /* remove this call from the list */
- if (tail != NULL)
- tail->next = entry->next;
- else
- env->call_head = entry->next;
- continue;
- }
- }
- tail = entry;
- }
- env->call_tail = tail;
- }
-
- for (i = 0; i < n_irgs; ++i) {
- irg = get_irp_irg(i);
- env = (inline_irg_env *)get_irg_link(irg);
-
- if (env->got_inline) {
- /* this irg got calls inlined */
- set_irg_outs_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
-
- optimize_graph_df(irg);
- optimize_cf(irg);
- }
- if (env->got_inline || (env->n_callers_orig != env->n_callers))
- DB((dbg, SET_LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
- env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
- env->n_callers_orig, env->n_callers,
- get_entity_name(get_irg_entity(irg))));
- }
-
- obstack_free(&obst, NULL);
- current_ir_graph = rem;
-}
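-
-/* Example (hypothetical driver, illustrative thresholds): inline leaf
-   methods of up to 16 nodes and other methods of up to 10 nodes, let no
-   graph grow beyond 750 nodes, and do not skip runtime calls:
-
-     inline_leave_functions(750, 16, 10, 0);
-*/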
-
-/*******************************************************************/
-/* Code Placement. Pins all floating nodes to a block where they */
-/* will be executed only if needed. */
-/*******************************************************************/
-
-/**
- * Returns non-zero if a block is not reachable from Start.
- *
- * @param block the block to test
- */
-static int
-is_Block_unreachable(ir_node *block) {
- return is_Block_dead(block) || get_Block_dom_depth(block) < 0;
-}
-
-/**
- * Find the earliest correct block for node n. --- Place n into the
- * same Block as its dominance-deepest Input.
- *
- * We have to avoid calls to get_nodes_block() here
- * because the graph is floating.
- *
- * move_out_of_loops() expects that place_floats_early() has placed
- * all "living" nodes into a living block. That's why we must
- * move nodes in dead blocks with "live" successors into a valid
- * block.
- * We move them just into the same block as their successor (or,
- * in the case of a Phi, into the effective use block). For Phi successors,
- * this may still be a dead block, but then there is no real use, as
- * the control flow will be dead later.
- *
- * @param n the node to be placed
- * @param worklist a worklist, predecessors of non-floating nodes are placed here
- */
-static void
-place_floats_early(ir_node *n, waitq *worklist) {
- int i, irn_arity;
-
- /* we must not run into an infinite loop */
- assert(irn_not_visited(n));
- mark_irn_visited(n);
-
- /* Place floating nodes. */
- if (get_irn_pinned(n) == op_pin_state_floats) {
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
- int depth = 0;
- ir_node *b = NULL; /* The block to place this node in */
-
- assert(is_no_Block(n));
-
- if (is_irn_start_block_placed(n)) {
- /* These nodes will not be placed by the loop below. */
- b = get_irg_start_block(current_ir_graph);
- depth = 1;
- }
-
- /* find the block for this node. */
- irn_arity = get_irn_arity(n);
- for (i = 0; i < irn_arity; i++) {
- ir_node *pred = get_irn_n(n, i);
- ir_node *pred_block;
-
- if ((irn_not_visited(pred))
- && (get_irn_pinned(pred) == op_pin_state_floats)) {
-
- /*
- * If the current node is NOT in a dead block, but one of its
- * predecessors is, we must move the predecessor to a live block.
- * Such a thing can happen if global CSE chose a node from a dead block.
- * We move it simply to our block.
- * Note that neither Phi nor End nodes are floating, so we don't
- * need to handle them here.
- */
- if (! in_dead_block) {
- if (get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1)))
- set_nodes_block(pred, curr_block);
- }
- place_floats_early(pred, worklist);
- }
-
- /*
- * A node in the Bad block must stay in the bad block,
- * so don't compute a new block for it.
- */
- if (in_dead_block)
- continue;
-
- /* Because all loops contain at least one op_pin_state_pinned node, now all
- our inputs are either op_pin_state_pinned or place_early() has already
- been finished on them. We do not have any unfinished inputs! */
- pred_block = get_irn_n(pred, -1);
- if ((!is_Block_dead(pred_block)) &&
- (get_Block_dom_depth(pred_block) > depth)) {
- b = pred_block;
- depth = get_Block_dom_depth(pred_block);
- }
- /* Avoid that the node is placed in the Start block */
- if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
- && get_irg_phase_state(current_ir_graph) != phase_backend) {
- b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
- assert(b != get_irg_start_block(current_ir_graph));
- depth = 2;
- }
- }
- if (b)
- set_nodes_block(n, b);
- }
-
- /*
- * Add predecessors of non-floating nodes and non-floating predecessors
- * of floating nodes to the worklist, and fix their blocks if they are in a dead block.
- */
- irn_arity = get_irn_arity(n);
-
- if (get_irn_op(n) == op_End) {
- /*
- * Simplest case: End node. Predecessors are keep-alives,
- * no need to move out of dead block.
- */
- for (i = -1; i < irn_arity; ++i) {
- ir_node *pred = get_irn_n(n, i);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
- }
- } else if (is_Block(n)) {
- /*
- * Blocks: Predecessors are control flow, no need to move
- * them out of dead block.
- */
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
- }
- } else if (is_Phi(n)) {
- ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
-
- /*
- * Phi nodes: move predecessors from dead blocks into the effective-use
- * block of the Phi input, if the Phi itself is not in a dead block.
- */
- pred = get_irn_n(n, -1);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
-
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
-
- if (irn_not_visited(pred)) {
- if (! in_dead_block &&
- get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
- set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
- }
- waitq_put(worklist, pred);
- }
- }
- } else {
- ir_node *pred;
- ir_node *curr_block = get_irn_n(n, -1);
- int in_dead_block = is_Block_unreachable(curr_block);
-
- /*
- * All other nodes: move nodes from dead blocks into the same block.
- */
- pred = get_irn_n(n, -1);
- if (irn_not_visited(pred))
- waitq_put(worklist, pred);
-
- for (i = irn_arity - 1; i >= 0; --i) {
- ir_node *pred = get_irn_n(n, i);
-
- if (irn_not_visited(pred)) {
- if (! in_dead_block &&
- get_irn_pinned(pred) == op_pin_state_floats &&
- is_Block_unreachable(get_irn_n(pred, -1))) {
- set_nodes_block(pred, curr_block);
- }
- waitq_put(worklist, pred);
- }
- }
- }
-}
-
-/**
- * Floating nodes form subgraphs that begin at nodes such as Const, Load,
- * Start, Call and that end at op_pin_state_pinned nodes such as Store, Call.
- * place_early() places all floating nodes reachable from its argument through
- * floating nodes and adds all beginnings at op_pin_state_pinned nodes to the
- * worklist.
- *
- * @param worklist a worklist, used for the algorithm, empty on input and output
- */
-static void place_early(waitq *worklist) {
- assert(worklist);
- inc_irg_visited(current_ir_graph);
-
- /* this inits the worklist */
- place_floats_early(get_irg_end(current_ir_graph), worklist);
-
- /* Work the content of the worklist. */
- while (!waitq_empty(worklist)) {
- ir_node *n = waitq_get(worklist);
- if (irn_not_visited(n))
- place_floats_early(n, worklist);
- }
-
- set_irg_outs_inconsistent(current_ir_graph);
- set_irg_pinned(current_ir_graph, op_pin_state_pinned);
-}
-
-/**
- * Compute the deepest common ancestor of block and dca.
- */
-static ir_node *calc_dca(ir_node *dca, ir_node *block) {
- assert(block);
-
- /* we do not want to place nodes in dead blocks */
- if (is_Block_dead(block))
- return dca;
-
- /* We found a first legal placement. */
- if (!dca) return block;
-
- /* Find a placement that dominates both dca and block. */
- while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
- block = get_Block_idom(block);
-
- while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) {
- dca = get_Block_idom(dca);
- }
-
- while (block != dca) {
- block = get_Block_idom(block); dca = get_Block_idom(dca);
- }
-
- return dca;
-}
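-
-/* Illustration: with the idom chains  S -> A -> B  and  S -> A -> C,
-   calc_dca(B, C) first equalizes the dominator depths (B and C are both
-   at depth 3 here), then lifts both in lockstep (B -> A, C -> A) until
-   they meet; the result is A. */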
-
-/** Compute the deepest common dominance ancestor of dca and the block in
- * which consumer effectively uses producer; i.e., the latest block where
- * we might place producer.
- * A data flow edge points from producer to consumer.
- */
-static ir_node *
-consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer) {
- ir_node *block = NULL;
-
- /* Compute the latest block into which we can place a node so that it is
- before consumer. */
- if (get_irn_op(consumer) == op_Phi) {
- /* our consumer is a Phi-node, the effective use is in all those
- blocks through which the Phi-node reaches producer */
- int i, irn_arity;
- ir_node *phi_block = get_nodes_block(consumer);
- irn_arity = get_irn_arity(consumer);
-
- for (i = 0; i < irn_arity; i++) {
- if (get_irn_n(consumer, i) == producer) {
- ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
-
- if (! is_Block_unreachable(new_block))
- block = calc_dca(block, new_block);
- }
- }
-
- if (! block)
- block = get_irn_n(producer, -1);
- } else {
- assert(is_no_Block(consumer));
- block = get_nodes_block(consumer);
- }
-
- /* Compute the deepest common ancestor of block and dca. */
- return calc_dca(dca, block);
-}
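-
-/* E.g., for a Phi consumer  Phi(x, producer)  in block B with control
-   predecessors (P0, P1), the effective use of producer is in P1, the
-   cfg-predecessor block matching input 1, not in B itself. */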
-
-/* FIXME: the name clashes here with the function from ana/field_temperature.c
- * please rename. */
-static INLINE int get_irn_loop_depth(ir_node *n) {
- return get_loop_depth(get_irn_loop(n));
-}
-
-/**
- * Move n to a block with less loop depth than its current block. The
- * new block must be dominated by early.
- *
- * @param n the node that should be moved
- * @param early the earliest block we can move n to
- */
-static void move_out_of_loops(ir_node *n, ir_node *early) {
- ir_node *best, *dca;
- assert(n && early);
-
- /* Find the region deepest in the dominator tree dominating
- dca with the least loop nesting depth, but still dominated
- by our early placement. */
- dca = get_nodes_block(n);
-
- best = dca;
- while (dca != early) {
- dca = get_Block_idom(dca);
- if (!dca || is_Bad(dca)) break; /* may be Bad if not reachable from Start */
- if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
- best = dca;
- }
- }
- if (best != get_nodes_block(n)) {
- /* debug output
- printf("Moving out of loop: "); DDMN(n);
- printf(" Outermost block: "); DDMN(early);
- printf(" Best block: "); DDMN(best);
- printf(" Innermost block: "); DDMN(get_nodes_block(n));
- */
- set_nodes_block(n, best);
- }
-}
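-
-/* E.g., if n currently sits in a loop body (loop depth 2) while its early
-   block lies outside the loop (depth 0), the idom walk from n's block up
-   to early picks the shallowest block on that chain and hoists n out of
-   the loop. */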
-
-/* deepest common ancestor in the dominator tree of all nodes'
- blocks depending on us; our final placement has to dominate DCA. */
-static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
-{
- int i;
-
- for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
- ir_node *succ = get_irn_out(node, i);
- ir_node *succ_blk;
-
- if (is_End(succ)) {
- /*
- * This consumer is the End node, a keep alive edge.
- * This is not a real consumer, so we ignore it
- */
- continue;
- }
-
- if (is_Proj(succ)) {
- dca = get_deepest_common_ancestor(succ, dca);
- } else {
- /* ignore if succ is in dead code */
- succ_blk = get_irn_n(succ, -1);
- if (is_Block_unreachable(succ_blk))
- continue;
- dca = consumer_dom_dca(dca, succ, node);
- }
- }
-
- return dca;
-}
-
-static void set_projs_block(ir_node *node, ir_node *block)