+static void free_inline_irg_env(inline_irg_env *env) {
+ eset_destroy(env->call_nodes);
+ free(env);
+}
+
+static void collect_calls2(ir_node *call, void *env) {
+ inline_irg_env *x = (inline_irg_env *)env;
+ ir_op *op = get_irn_op(call);
+ ir_graph *callee;
+
+ /* count nodes in irg */
+ if (op != op_Proj && op != op_Tuple && op != op_Sync) {
+ x->n_nodes++;
+ x->n_nodes_orig++;
+ }
+
+ if (op != op_Call) return;
+
+ /* collect all call nodes */
+ eset_insert(x->call_nodes, (void *)call);
+ x->n_call_nodes++;
+ x->n_call_nodes_orig++;
+
+ /* count all static callers */
+ callee = get_call_called_irg(call);
+ if (callee) {
+ ((inline_irg_env *)get_irg_link(callee))->n_callers++;
+ ((inline_irg_env *)get_irg_link(callee))->n_callers_orig++;
+ }
+}
+
+INLINE static int is_leave(ir_graph *irg) {
+ return (((inline_irg_env *)get_irg_link(irg))->n_call_nodes == 0);
+}
+
+INLINE static int is_smaller(ir_graph *callee, int size) {
+ return (((inline_irg_env *)get_irg_link(callee))->n_nodes < size);
+}
+
+
/*
 * Inlines small "leave" methods (methods containing no further calls) at
 * call sites where the called address comes from a Const node that
 * references the entity representing the called method.
 * The size arguments are rough measures for the code size of a method:
 * methods whose Firm graph is smaller than the given size are inlined.
 */
void inline_leave_functions(int maxsize, int leavesize, int size) {
  inline_irg_env *env;
  int i, n_irgs = get_irp_n_irgs();
  ir_graph *rem = current_ir_graph;   /* remember caller's current_ir_graph */
  int did_inline = 1;

  /* Nothing to do unless both optimization and inlining are enabled. */
  if (!(get_opt_optimize() && get_opt_inline())) return;

  /* extend all irgs by a temporary data structure for inlining. */
  for (i = 0; i < n_irgs; ++i)
    set_irg_link(get_irp_irg(i), new_inline_irg_env());

  /* Precompute information in temporary data structure:
     walk every graph, counting nodes and collecting its Call nodes. */
  for (i = 0; i < n_irgs; ++i) {
    current_ir_graph = get_irp_irg(i);
    assert(get_irg_phase_state(current_ir_graph) != phase_building);
    free_callee_info(current_ir_graph);

    irg_walk(get_irg_end(current_ir_graph), NULL, collect_calls2,
             get_irg_link(current_ir_graph));
  }

  /* -- and now inline. -- */

  /* Inline leaves recursively -- inlining may turn a caller into a new
     leave, so iterate to a fixpoint (did_inline tracks progress). */
  while (did_inline) {
    did_inline = 0;

    for (i = 0; i < n_irgs; ++i) {
      ir_node *call;
      int phiproj_computed = 0;   /* compute Phi/Proj lists lazily, once per graph */

      current_ir_graph = get_irp_irg(i);
      env = (inline_irg_env *)get_irg_link(current_ir_graph);

      for (call = eset_first(env->call_nodes); call; call = eset_next(env->call_nodes)) {
        if (get_irn_op(call) == op_Tuple) continue; /* We already inlined. */
        /* NOTE(review): declaration after a statement -- needs C99 or a
           compiler extension; hoist above the 'continue' for strict C89. */
        ir_graph *callee = get_call_called_irg(call);

        /* Stop growing this graph past maxsize; 'continue' (rather than
           'break') keeps scanning the remaining calls of this graph. */
        if (env->n_nodes > maxsize) continue; // break;

        if (callee && (is_leave(callee) && is_smaller(callee, leavesize))) {
          if (!phiproj_computed) {
            phiproj_computed = 1;
            collect_phiprojs(current_ir_graph);
          }
          did_inline = inline_method(call, callee);

          if (did_inline) {
            /* Do some statistics: account the inlined body's nodes to the
               caller and drop one caller from the callee. */
            inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
            env->n_call_nodes --;
            env->n_nodes += callee_env->n_nodes;
            callee_env->n_callers--;
          }
        }
      }
    }
  }