+ if (!(get_opt_optimize() && get_opt_inline())) return;
+
+ current_ir_graph = irg;
+ /* Handle graph state */
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ free_callee_info(current_ir_graph);
+
+ /* Find Call nodes to inline.
+ (We cannot inline during a walk of the graph, as inlining the same
+ method several times changes the visited flag of the walked graph:
+ after the first inlining the visited flag of the callee equals that
+ of the caller. The next inlining increases both.) */
+ env.pos = 0;
+ irg_walk(get_irg_end(irg), NULL, collect_calls, &env);
+
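+ /* Note: below, _obstack_memory_used(obst) - obstack_room(obst)
+ approximates the memory actually occupied by the callee's IR, since a
+ graph lives entirely on its obstack; this serves as a cheap
+ code-size measure. */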
+ if ((env.pos > 0) && (env.pos < MAX_INLINE)) {
+ /* There are calls to inline */
+ collect_phiprojs(irg);
+ for (i = 0; i < env.pos; i++) {
+ ir_graph *callee;
+ callee = get_entity_irg(get_SymConst_entity(get_Call_ptr(env.calls[i])));
+ if (((_obstack_memory_used(callee->obst) - obstack_room(callee->obst)) < size) ||
+ (get_irg_inline_property(callee) == irg_inline_forced)) {
+ inline_method(env.calls[i], callee);
+ }
+ }
+ }
+
+ current_ir_graph = rem;
+}
+
+/**
+ * Environment for inlining irgs.
+ */
+typedef struct {
+ int n_nodes; /**< Number of nodes in the graph, not counting Proj, Tuple, Sync, Start and End */
+ int n_nodes_orig; /**< for statistics */
+ eset *call_nodes; /**< All call nodes in this graph */
+ int n_call_nodes;
+ int n_call_nodes_orig; /**< for statistics */
+ int n_callers; /**< Number of known graphs that call this graph. */
+ int n_callers_orig; /**< for statistics */
+} inline_irg_env;
+
+static inline_irg_env *new_inline_irg_env(void) {
+ inline_irg_env *env = xmalloc(sizeof(inline_irg_env));
+ env->n_nodes = -2; /* do not count Start and End */
+ env->n_nodes_orig = -2; /* do not count Start and End */
+ env->call_nodes = eset_create();
+ env->n_call_nodes = 0;
+ env->n_call_nodes_orig = 0;
+ env->n_callers = 0;
+ env->n_callers_orig = 0;
+ return env;
+}
+
+static void free_inline_irg_env(inline_irg_env *env) {
+ eset_destroy(env->call_nodes);
+ free(env);
+}
+
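+/**
+ * Walker: counts the nodes of the current graph, collects all Call nodes
+ * in the graph's inline_irg_env, and increments the caller counters of
+ * all statically known callees.
+ */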
+static void collect_calls2(ir_node *node, void *env) {
+ inline_irg_env *x = (inline_irg_env *)env;
+ ir_op *op = get_irn_op(node);
+ ir_graph *callee;
+
+ /* count nodes in irg */
+ if (op != op_Proj && op != op_Tuple && op != op_Sync) {
+ x->n_nodes++;
+ x->n_nodes_orig++;
+ }
+
+ if (op != op_Call) return;
+
+ /* collect all call nodes */
+ eset_insert(x->call_nodes, (void *)node);
+ x->n_call_nodes++;
+ x->n_call_nodes_orig++;
+
+ /* count all static callers */
+ callee = get_call_called_irg(node);
+ if (callee) {
+ ((inline_irg_env *)get_irg_link(callee))->n_callers++;
+ ((inline_irg_env *)get_irg_link(callee))->n_callers_orig++;
+ }
+}
+
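+/** Returns non-zero if the given graph contains no Call nodes, i.e. is a leaf. */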
+INLINE static int is_leave(ir_graph *irg) {
+ return (((inline_irg_env *)get_irg_link(irg))->n_call_nodes == 0);
+}
+
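+/** Returns non-zero if the given graph has fewer than size nodes. */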
+INLINE static int is_smaller(ir_graph *callee, int size) {
+ return (((inline_irg_env *)get_irg_link(callee))->n_nodes < size);
+}
+
+
+/*
+ * Inlines small leaf methods at call sites where the called address comes
+ * from a SymConst node that references the entity representing the called
+ * method.
+ * All size arguments are rough measures for the code size of a method,
+ * counted in IR nodes: leaf methods (methods without Call nodes) smaller
+ * than leavesize and other methods smaller than size are inlined, as long
+ * as the caller does not grow beyond maxsize nodes. Methods marked
+ * irg_inline_forced are inlined regardless of size.
+ */
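+/*
+ * Illustrative parameters (hypothetical values, not tuned defaults): a
+ * call such as inline_leave_functions(500, 80, 30) inlines leaf callees
+ * with fewer than 80 nodes and other callees with fewer than 30 nodes,
+ * and stops growing a caller once it exceeds 500 nodes.
+ */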
+void inline_leave_functions(int maxsize, int leavesize, int size) {
+ inline_irg_env *env;
+ int i, n_irgs = get_irp_n_irgs();
+ ir_graph *rem = current_ir_graph;
+ int did_inline = 1;
+
+ if (!(get_opt_optimize() && get_opt_inline())) return;
+
+ /* Extend all irgs by a temporary data structure for inlining. */
+ for (i = 0; i < n_irgs; ++i)
+ set_irg_link(get_irp_irg(i), new_inline_irg_env());
+
+ /* Precompute information in temporary data structure. */
+ for (i = 0; i < n_irgs; ++i) {
+ current_ir_graph = get_irp_irg(i);
+ assert(get_irg_phase_state(current_ir_graph) != phase_building);
+ free_callee_info(current_ir_graph);
+
+ irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2,
+ get_irg_link(current_ir_graph));
+ }
+
+ /* -- and now inline. -- */
+
+ /* Inline leaf methods repeatedly -- inlining may create new leaves. */
+ while (did_inline) {
+ did_inline = 0;
+
+ for (i = 0; i < n_irgs; ++i) {
+ ir_node *call;
+ int phiproj_computed = 0;
+
+ current_ir_graph = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+
+ for (call = eset_first(env->call_nodes); call; call = eset_next(env->call_nodes)) {
+ ir_graph *callee;
+
+ if (get_irn_op(call) == op_Tuple) continue; /* We already inlined this call. */
+ callee = get_call_called_irg(call);
+
+ if (env->n_nodes > maxsize) continue;
+
+ if (callee && (is_leave(callee) && is_smaller(callee, leavesize))) {
+ if (!phiproj_computed) {
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
+ }
+ did_inline = inline_method(call, callee);
+
+ if (did_inline) {
+ /* Do some statistics */
+ inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+ env->n_call_nodes --;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ }
+ }
+ }
+ }
+ }
+
+ /* inline other small functions. */
+ for (i = 0; i < n_irgs; ++i) {
+ ir_node *call;
+ eset *walkset;
+ int phiproj_computed = 0;
+
+ current_ir_graph = get_irp_irg(i);
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+
+ /* We can neither walk a set while changing it nor remove elements
+ from it, so we rebuild the set of remaining calls. */
+ walkset = env->call_nodes;
+ env->call_nodes = eset_create();
+ for (call = eset_first(walkset); call; call = eset_next(walkset)) {
+ ir_graph *callee;
+
+ if (get_irn_op(call) == op_Tuple) continue; /* We already inlined this call. */
+ callee = get_call_called_irg(call);
+
+ if (callee &&
+ ((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
+ (get_irg_inline_property(callee) == irg_inline_forced))) {
+ if (!phiproj_computed) {
+ phiproj_computed = 1;
+ collect_phiprojs(current_ir_graph);
+ }
+ if (inline_method(call, callee)) {
+ inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+ env->n_call_nodes--;
+ eset_insert_all(env->call_nodes, callee_env->call_nodes); /* @@@ These might be the wrong nodes: the callee's originals, not the inlined copies. */
+ env->n_call_nodes += callee_env->n_call_nodes;
+ env->n_nodes += callee_env->n_nodes;
+ callee_env->n_callers--;
+ }
+ } else {
+ eset_insert(env->call_nodes, call);
+ }
+ }
+ eset_destroy(walkset);
+ }
+
+ for (i = 0; i < n_irgs; ++i) {
+ current_ir_graph = get_irp_irg(i);
+#if 0
+ env = (inline_irg_env *)get_irg_link(current_ir_graph);
+ if ((env->n_call_nodes_orig != env->n_call_nodes) ||
+ (env->n_callers_orig != env->n_callers))
+ printf("Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
+ env->n_nodes_orig, env->n_nodes, env->n_call_nodes_orig, env->n_call_nodes,
+ env->n_callers_orig, env->n_callers,
+ get_entity_name(get_irg_entity(current_ir_graph)));
+#endif
+ free_inline_irg_env((inline_irg_env *)get_irg_link(current_ir_graph));
+ }
+
+ current_ir_graph = rem;
+}
+
+/*******************************************************************/
+/* Code Placement. Pins all floating nodes to a block where they */
+/* will be executed only if needed. */
+/*******************************************************************/
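+
+/*
+ * The placement works in two phases, similar to global code motion:
+ * place_early pins every floating node to its earliest legal block (the
+ * dominance-deepest block among its inputs), and place_late then moves it
+ * down to the latest block that still dominates all of its users,
+ * preferring along the way the block with the smallest loop nesting depth.
+ */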
+
+/**
+ * Find the earliest correct block for node n. --- Place n into the
+ * same Block as its dominance-deepest input.
+ */
+static void
+place_floats_early(ir_node *n, pdeq *worklist)
+{
+ int i, start, irn_arity;
+
+ /* we must not run into an infinite loop */
+ assert (irn_not_visited(n));
+ mark_irn_visited(n);
+
+ /* Place floating nodes. */
+ if (get_irn_pinned(n) == op_pin_state_floats) {
+ int depth = 0;
+ ir_node *b = new_Bad(); /* The block to place this node in */
+ int bad_recursion = is_Bad(get_nodes_block(n));
+
+ assert(get_irn_op(n) != op_Block);
+
+ if ((get_irn_op(n) == op_Const) ||
+ (get_irn_op(n) == op_SymConst) ||
+ (is_Bad(n)) ||
+ (get_irn_op(n) == op_Unknown)) {
+ /* These nodes will not be placed by the loop below. */
+ b = get_irg_start_block(current_ir_graph);
+ depth = 1;
+ }
+
+ /* find the block for this node. */
+ irn_arity = get_irn_arity(n);
+ for (i = 0; i < irn_arity; i++) {
+ ir_node *dep = get_irn_n(n, i);
+ ir_node *dep_block;
+
+ if ((irn_not_visited(dep))
+ && (get_irn_pinned(dep) == op_pin_state_floats)) {
+ place_floats_early(dep, worklist);
+ }
+
+ /*
+ * A node in the Bad block must stay in the bad block,
+ * so don't compute a new block for it.
+ */
+ if (bad_recursion)
+ continue;
+
+ /* Because every loop contains at least one op_pin_state_pinned
+ node, all our inputs are by now either op_pin_state_pinned or
+ already finished by place_floats_early. We have no unfinished
+ inputs! */
+ dep_block = get_nodes_block(dep);
+ if ((!is_Bad(dep_block)) &&
+ (get_Block_dom_depth(dep_block) > depth)) {
+ b = dep_block;
+ depth = get_Block_dom_depth(dep_block);
+ }
+ /* Avoid placing the node in the Start block. */
+ if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)) {
+ b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
+ assert(b != get_irg_start_block(current_ir_graph));
+ depth = 2;
+ }
+ }
+ set_nodes_block(n, b);
+ }
+
+ /* Add the predecessors of non-floating nodes to the worklist. */
+ start = (get_irn_op(n) == op_Block) ? 0 : -1;
+ irn_arity = get_irn_arity(n);
+ for (i = start; i < irn_arity; i++) {
+ ir_node *pred = get_irn_n(n, i);
+ if (irn_not_visited(pred)) {
+ pdeq_putr (worklist, pred);
+ }
+ }
+}
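+
+/*
+ * Sketch of the effect (illustrative): for an Add with one operand defined
+ * in a block at dominator depth 3 and the other in a block at depth 5 that
+ * is dominated by the first, the Add floats into the depth-5 block, the
+ * dominance-deepest input block.
+ */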
+
+/**
+ * Floating nodes form subgraphs that begin at nodes such as Const, Load,
+ * Start and Call and that end at op_pin_state_pinned nodes such as Store
+ * and Call. place_early places all floating nodes reachable from its
+ * argument through floating nodes and adds all beginnings at
+ * op_pin_state_pinned nodes to the worklist.
+ */
+static INLINE void place_early(pdeq *worklist) {
+ assert(worklist);
+ inc_irg_visited(current_ir_graph);
+
+ /* this inits the worklist */
+ place_floats_early(get_irg_end(current_ir_graph), worklist);
+
+ /* Work the content of the worklist. */
+ while (!pdeq_empty (worklist)) {
+ ir_node *n = pdeq_getl (worklist);
+ if (irn_not_visited(n)) place_floats_early(n, worklist);
+ }
+
+ set_irg_outs_inconsistent(current_ir_graph);
+ current_ir_graph->op_pin_state_pinned = op_pin_state_pinned;
+}
+
+/** Compute the deepest common ancestor of block and dca in the dominator tree. */
+static ir_node *calc_dca(ir_node *dca, ir_node *block)
+{
+ assert(block);
+ if (!dca) return block;
+ while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
+ block = get_Block_idom(block);
+ while (get_Block_dom_depth(dca) > get_Block_dom_depth(block)) {
+ dca = get_Block_idom(dca);
+ }
+ while (block != dca)
+ { block = get_Block_idom(block); dca = get_Block_idom(dca); }
+
+ return dca;
+}
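+
+/*
+ * Example: if block lies at dominator depth 5 and dca at depth 3, block is
+ * first lifted along get_Block_idom until both sit at depth 3; then both
+ * are lifted in lock-step until they meet in their common dominator.
+ */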
+
+/** Compute the deepest common dominance ancestor of DCA and the block of
+ * CONSUMER, a user of PRODUCER.
+ * I.e., the result is the latest block where we might place PRODUCER so
+ * that it still dominates this use. A data flow edge points from
+ * producer to consumer.
+ */
+static ir_node *
+consumer_dom_dca (ir_node *dca, ir_node *consumer, ir_node *producer)
+{
+ ir_node *block = NULL;
+
+ /* Compute the latest block into which we can place a node so that it is
+ before consumer. */
+ if (get_irn_op(consumer) == op_Phi) {
+ /* our consumer is a Phi node; the effective uses are at the ends
+ of those predecessor blocks whose operands are the producer */
+ int i, irn_arity;
+ ir_node *phi_block = get_nodes_block(consumer);
+ irn_arity = get_irn_arity(consumer);
+
+ for (i = 0; i < irn_arity; i++) {
+ if (get_irn_n(consumer, i) == producer) {
+ ir_node *new_block = get_nodes_block(get_Block_cfgpred(phi_block, i));
+
+ block = calc_dca(block, new_block);
+ }
+ }
+ } else {
+ assert(is_no_Block(consumer));
+ block = get_nodes_block(consumer);
+ }
+
+ /* Compute the deepest common ancestor of block and dca. */
+ return calc_dca(dca, block);
+}
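+
+/*
+ * Example for the Phi case above: if producer feeds the i-th operand of a
+ * Phi, its effective use is at the end of the i-th control flow
+ * predecessor of the Phi's block, not in the Phi's block itself; the
+ * producer only has to dominate that predecessor.
+ */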
+
+static INLINE int get_irn_loop_depth(ir_node *n) {
+ return get_loop_depth(get_irn_loop(n));
+}
+
+/**
+ * Move n to a block with smaller loop depth than its current block. The
+ * new block must be dominated by early.
+ */
+static void
+move_out_of_loops (ir_node *n, ir_node *early)
+{
+ ir_node *best, *dca;
+ assert(n && early);
+
+ /* Find the block deepest in the dominator tree with the least loop
+ nesting depth, walking the idom chain from n's current block up to
+ our early placement. */
+ dca = get_nodes_block(n);
+ best = dca;
+ while (dca != early) {
+ dca = get_Block_idom(dca);
+ if (!dca || is_Bad(dca)) break; /* may be Bad if not reachable from Start */
+ if (get_irn_loop_depth(dca) < get_irn_loop_depth(best)) {
+ best = dca;
+ }
+ }
+ if (best != get_nodes_block(n)) {
+ /* debug output
+ printf("Moving out of loop: "); DDMN(n);
+ printf(" Outermost block: "); DDMN(early);
+ printf(" Best block: "); DDMN(best);
+ printf(" Innermost block: "); DDMN(get_nodes_block(n));
+ */
+ set_nodes_block(n, best);
+ }
+}
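+
+/*
+ * Example: if n's late block sits in a loop at nesting depth 2 while a
+ * block on the idom path up to early lies outside all loops at depth 0,
+ * n is hoisted to that block. Because best is only replaced on a
+ * strictly smaller depth, ties keep the dominance-deepest candidate.
+ */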
+
+/**
+ * Find the latest legal block for n and place n into the
+ * `optimal' block between the latest and earliest legal block.
+ * The `optimal' block is the dominance-deepest block of those
+ * with the least loop nesting depth. This places n out of as many
+ * loops as possible and then makes it as control dependent as
+ * possible.
+ */
+static void
+place_floats_late(ir_node *n, pdeq *worklist)
+{
+ int i;
+ ir_node *early;
+
+ assert (irn_not_visited(n)); /* no multiple placement */
+
+ mark_irn_visited(n);
+
+ /* no need to place block nodes, control nodes are already placed. */
+ if ((get_irn_op(n) != op_Block) &&
+ (!is_cfop(n)) &&
+ (get_irn_mode(n) != mode_X)) {
+ /* Remember the early placement of this node so that it is moved
+ out of loops no further up than its early block. */
+ early = get_nodes_block(n);
+
+ /* Do not move code not reachable from Start. For
+ * these we could not compute dominator information. */
+ if (is_Bad(early) || get_Block_dom_depth(early) == -1)
+ return;
+
+ /* Ensure that all our users are placed, except the Phi nodes.
+ --- Each data flow cycle contains at least one Phi node. We
+ have to break the `user has to be placed before the producer'
+ dependence cycle there, and the Phi nodes are the place to do
+ so, because we need to base our placement on the final region
+ of our users, which is fine for Phi nodes: they are
+ op_pin_state_pinned and never have to be placed after a producer
+ of one of their inputs in the same block anyway. */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ ir_node *succ = get_irn_out(n, i);
+ if (irn_not_visited(succ) && (get_irn_op(succ) != op_Phi))
+ place_floats_late(succ, worklist);
+ }
+
+ /* We have to determine the final block of this node... except for
+ constants. */
+ if ((get_irn_pinned(n) == op_pin_state_floats) &&
+ (get_irn_op(n) != op_Const) &&
+ (get_irn_op(n) != op_SymConst)) {
+ ir_node *dca = NULL; /* deepest common ancestor in the
+ dominator tree of all nodes'
+ blocks depending on us; our final
+ placement has to dominate DCA. */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ ir_node *out = get_irn_out(n, i);
+ /* ignore if out is in dead code */
+ ir_node *outbl = get_nodes_block(out);
+ if (is_Bad(outbl) || get_Block_dom_depth(outbl) == -1)
+ continue;
+ dca = consumer_dom_dca (dca, out, n);
+ }
+ if (dca) {
+ set_nodes_block(n, dca);
+
+ move_out_of_loops (n, early);
+ }
+ /* else all outs are in dead code */
+ }
+ }
+
+ /* Add all unvisited users of n to the worklist. (Users of floating
+ nodes were placed by the recursion above and are therefore already
+ marked.) */
+ for (i = 0; i < get_irn_n_outs(n); i++) {
+ ir_node *succ = get_irn_out(n, i);
+ if (irn_not_visited(succ)) {
+ pdeq_putr (worklist, succ);
+ }
+ }
+}
+
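+/**
+ * Places all floating nodes as late as possible, seeding the worklist
+ * at the start block and proceeding analogously to place_early.
+ */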
+static INLINE void place_late(pdeq *worklist) {
+ assert(worklist);
+ inc_irg_visited(current_ir_graph);
+
+ /* This fills the worklist initially. */
+ place_floats_late(get_irg_start_block(current_ir_graph), worklist);
+
+ /* And now empty the worklist again... */
+ while (!pdeq_empty (worklist)) {
+ ir_node *n = pdeq_getl (worklist);
+ if (irn_not_visited(n)) place_floats_late(n, worklist);
+ }
+}
+
+void place_code(ir_graph *irg) {
+ pdeq *worklist;