* @author Michael Beck, Goetz Lindenmaier
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <limits.h>
#include <assert.h>
#include "irouts.h"
#include "irloop_t.h"
#include "irbackedge_t.h"
-#include "opt_inline_t.h"
+#include "opt_init.h"
#include "cgana.h"
#include "trouts.h"
#include "error.h"
#include "irhooks.h"
#include "irtools.h"
#include "iropt_dbg.h"
+#include "irpass_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
* accesses. This function is called for all Phi and Block nodes
* in a Block.
*/
-static INLINE int
+static inline int
compute_new_arity(ir_node *b) {
int i, res, irn_arity;
int irg_v, block_v;
}
copy_node_attr(n, nn);
-#ifdef DEBUG_libfirm
- {
- int copy_node_nr = env != NULL;
- if (copy_node_nr) {
- /* for easier debugging, we want to copy the node numbers too */
- nn->node_nr = n->node_nr;
- }
+ if (env != NULL) {
+ /* for easier debugging, we want to copy the node numbers too */
+ nn->node_nr = n->node_nr;
}
-#endif
set_new_node(n, nn);
hook_dead_node_elim_subst(current_ir_graph, n, nn);
in array contained Bads. Now it's possible.
We don't call optimize_in_place as it requires
that the fields in ir_graph are set properly. */
- if ((get_opt_control_flow_straightening()) &&
- (get_Block_n_cfgpreds(nn) == 1) &&
- is_Jmp(get_Block_cfgpred(nn, 0))) {
+ if (!has_Block_entity(nn) &&
+ get_opt_control_flow_straightening() &&
+ get_Block_n_cfgpreds(nn) == 1 &&
+ is_Jmp(get_Block_cfgpred(nn, 0))) {
ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
if (nn == old) {
- /* Jmp jumps into the block it is in -- deal self cycle. */
+ /* Jmp jumps into the block it is in -- handle this self cycle. */
#endif
}
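+/* create a pass for dead_node_elimination() */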
+ir_graph_pass_t *dead_node_elimination_pass(const char *name) {
+ return def_graph_pass(name ? name : "dce", dead_node_elimination);
+}
+
/**
* Relink bad predecessors of a block and store the old in array to the
* link field. This function is called by relink_bad_predecessors().
res->dead_node_elim_subst.context = res;
res->dead_node_elim_subst.next = NULL;
-#ifndef FIRM_ENABLE_HOOKS
- assert(0 && "need hooks enabled");
-#endif
-
register_hook(hook_dead_node_elim, &res->dead_node_elim);
register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
return res;
if (*place != NULL) {
ir_node *irn = *place;
survive_dce_list_t *curr = pmap_get(sd->places, irn);
- survive_dce_list_t *nw = obstack_alloc(&sd->obst, sizeof(nw[0]));
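+ /* OALLOC(obst, type) is the typed obstack_alloc() wrapper from xmalloc.h */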
+ survive_dce_list_t *nw = OALLOC(&sd->obst, survive_dce_list_t);
nw->next = curr;
nw->place = place;
copy_node(n, NULL);
if (is_Sel(n)) {
- nn = get_new_node (n);
+ nn = get_new_node(n);
assert(is_Sel(nn));
+ /* use copied entities from the new frame */
if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
}
} else if (is_Block(n)) {
- nn = get_new_node (n);
- nn->attr.block.irg = current_ir_graph;
+ nn = get_new_node(n);
+ nn->attr.block.irg.irg = current_ir_graph;
}
}
*/
static void find_addr(ir_node *node, void *env) {
int *allow_inline = env;
- if (is_Proj(node) &&
- is_Start(get_Proj_pred(node)) &&
- get_Proj_proj(node) == pn_Start_P_value_arg_base) {
- *allow_inline = 0;
+ if (is_Sel(node)) {
+ ir_graph *irg = current_ir_graph;
+ if (get_Sel_ptr(node) == get_irg_frame(irg)) {
+ /* access to frame */
+ ir_entity *ent = get_Sel_entity(node);
+ if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
+ /* access to value_type */
+ *allow_inline = 0;
+ }
+ }
} else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
/* From GCC:
* Refuse to inline alloca call unless user explicitly forced so as this
}
enum exc_mode {
- exc_handler = 0, /**< There is a handler. */
- exc_to_end = 1, /**< Branches to End. */
- exc_no_handler = 2 /**< Exception handling not represented. */
+ exc_handler, /**< There is a handler. */
+ exc_no_handler /**< Exception handling not represented. */
};
/* Inlines a method at the given call site. */
ir_node **args_in;
ir_node *ret, *phi;
int arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+ int n_mem_phi;
enum exc_mode exc_handling;
ir_type *called_frame, *curr_frame, *mtp, *ctp;
ir_entity *ent;
mtp = get_entity_type(ent);
ctp = get_Call_type(call);
- if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
- /* this is a bad feature of C: without a prototype, we can can call a function with less
- parameters than needed. Currently we don't support this, although it would be
- to use Unknown than. */
+ n_params = get_method_n_params(mtp);
+ n_res = get_method_n_ress(mtp);
+ if (n_params > get_method_n_params(ctp)) {
+ /* this is a bad feature of C: without a prototype, we can
+  * call a function with fewer parameters than declared. Currently
+  * we don't support this, although we could use Unknown then. */
+ return 0;
+ }
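+ /* Hypothetical example of the rejected case: one translation unit only
+  * sees the K&R declaration `int f();` and emits the call `f(1)` (call
+  * type: one parameter), while f is actually defined as
+  * `int f(int a, int b)` (method type: two parameters). */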
+ if (n_res != get_method_n_ress(ctp)) {
return 0;
}
/* Argh, compiling C has some bad consequences:
- the call type AND the method type might be different.
- It is implementation defendant what happens in that case.
- We support inlining, if the bitsize of the types matches AND
- the same arithmetic is used. */
- n_params = get_method_n_params(mtp);
+ * It is implementation dependent what happens in that case.
+ * We support inlining, if the bitsize of the types matches AND
+ * the same arithmetic is used. */
for (i = n_params - 1; i >= 0; --i) {
ir_type *param_tp = get_method_param_type(mtp, i);
ir_type *arg_tp = get_method_param_type(ctp, i);
/* otherwise we can simply "reinterpret" the bits */
}
}
+ for (i = n_res - 1; i >= 0; --i) {
+ ir_type *decl_res_tp = get_method_res_type(mtp, i);
+ ir_type *used_res_tp = get_method_res_type(ctp, i);
+
+ if (decl_res_tp != used_res_tp) {
+ ir_mode *decl_mode = get_type_mode(decl_res_tp);
+ ir_mode *used_mode = get_type_mode(used_res_tp);
+ if (decl_mode == NULL || used_mode == NULL)
+ return 0;
+ if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
+ return 0;
+ if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
+ return 0;
+ /* otherwise we can "reinterpret" the bits */
+ }
+ }
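+ /* Hypothetical example: a caller expecting a `long` result from a callee
+  * declared to return `int` passes the checks above on a target where both
+  * modes are 32-bit integer modes, so the result bits can simply be
+  * reinterpreted. */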
irg = get_irn_irg(call);
set_irg_doms_inconsistent(irg);
set_irg_loopinfo_inconsistent(irg);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
+ set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
/* -- Check preconditions -- */
assert(is_Call(call));
for the Call node, or do we branch directly to End on an exception?
exc_handling:
- 0 There is a handler.
- 1 Branches to End.
- 2 Exception handling not represented in Firm. -- */
+ 0 There is a handler.
+ 1 Exception handling not represented in Firm. -- */
{
- ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+ ir_node *Xproj = NULL;
+ ir_node *proj;
for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
long proj_nr = get_Proj_proj(proj);
if (proj_nr == pn_Call_X_except) Xproj = proj;
- if (proj_nr == pn_Call_M_except) Mproj = proj;
}
- if (Mproj) { assert(Xproj); exc_handling = exc_handler; } /* Mproj */
- else if (Xproj) { exc_handling = exc_to_end; } /* !Mproj && Xproj */
- else { exc_handling = exc_no_handler; } /* !Mproj && !Xproj */
+ exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
}
/* create the argument tuple */
ir_mode *mode = get_type_mode(param_tp);
if (mode != get_irn_mode(arg)) {
- arg = new_r_Conv(irg, block, arg, mode);
+ arg = new_r_Conv(block, arg, mode);
}
args_in[i] = arg;
}
in[pn_Start_P_frame_base] = get_irg_frame(irg);
in[pn_Start_P_tls] = get_irg_tls(irg);
in[pn_Start_T_args] = new_Tuple(n_params, args_in);
- /* in[pn_Start_P_value_arg_base] = ??? */
- assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
- pre_call = new_Tuple(pn_Start_max - 1, in);
+ pre_call = new_Tuple(pn_Start_max, in);
post_call = call;
/* --
/* -- Replicate local entities of the called_graph -- */
/* copy the entities. */
+ irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
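+ /* the entity link fields are used below to map each old frame entity to its copy */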
called_frame = get_irg_frame_type(called_graph);
curr_frame = get_irg_frame_type(irg);
for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
get_irg_frame_type(called_graph));
+ irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+
/* Repair called_graph */
set_irg_visited(called_graph, get_irg_visited(irg));
set_irg_block_visited(called_graph, get_irg_block_visited(irg));
ir_node *ret;
ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
- cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
+ cf_pred[n_ret] = new_r_Jmp(get_nodes_block(ret));
n_ret++;
}
}
Add Phi node if there was more than one Return. -- */
turn_into_tuple(post_call, pn_Call_max);
/* First the Memory-Phi */
- n_ret = 0;
+ n_mem_phi = 0;
for (i = 0; i < arity; i++) {
ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_mem(ret);
- n_ret++;
+ cf_pred[n_mem_phi++] = get_Return_mem(ret);
+ }
+ /* memory output for some exceptions is directly connected to End */
+ if (is_Call(ret)) {
+ cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+ } else if (is_fragile_op(ret)) {
+ /* We rely on the fact that all cfops have the memory output at the same position. */
+ cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+ } else if (is_Raise(ret)) {
+ cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
}
}
- phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, pn_Call_M_regular, phi);
+ phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+ set_Tuple_pred(call, pn_Call_M, phi);
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_block(phi) == post_bl) {
set_irn_link(phi, get_irn_link(post_bl));
/* Now the real results */
if (n_res > 0) {
for (j = 0; j < n_res; j++) {
+ ir_type *res_type = get_method_res_type(ctp, j);
+ ir_mode *res_mode = get_type_mode(res_type);
n_ret = 0;
for (i = 0; i < arity; i++) {
ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
- cf_pred[n_ret] = get_Return_res(ret, j);
+ ir_node *res = get_Return_res(ret, j);
+ if (get_irn_mode(res) != res_mode) {
+ ir_node *block = get_nodes_block(res);
+ res = new_r_Conv(block, res, res_mode);
+ }
+ cf_pred[n_ret] = res;
n_ret++;
}
}
set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
/* Finally the exception control flow.
- We have two (three) possible situations:
- First if the Call branches to an exception handler: We need to add a Phi node to
+ We have two possible situations:
+ First, the Call branches to an exception handler: we need to add a Phi node to
collect the memory containing the exception objects. Further we need
to add another block to get a correct representation of this Phi. To
this block we add a Jmp that resolves into the X output of the Call
when the Call is turned into a tuple.
- Second the Call branches to End, the exception is not handled. Just
- add all inlined exception branches to the End node.
- Third: there is no Exception edge at all. Handle as case two. */
+ Second, there is no exception edge: just add all inlined exception
+ branches to the End node.
+ */
if (exc_handling == exc_handler) {
n_exc = 0;
for (i = 0; i < arity; i++) {
}
}
if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
+ ir_node *block = new_Block(n_exc, cf_pred);
+ set_cur_block(block);
set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
- /* The Phi for the memories with the exception objects */
- n_exc = 0;
- for (i = 0; i < arity; i++) {
- ir_node *ret;
- ret = skip_Proj(get_Block_cfgpred(end_bl, i));
- if (is_Call(ret)) {
- cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
- n_exc++;
- } else if (is_fragile_op(ret)) {
- /* We rely that all cfops have the memory output at the same position. */
- cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
- n_exc++;
- } else if (is_Raise(ret)) {
- cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
- n_exc++;
- }
- }
- set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
} else {
set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
} else {
ir_node *main_end_bl;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_Bad());
- set_Tuple_pred(call, pn_Call_M_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
free(end_preds);
}
free(res_pred);
if (called_irg != NULL) {
/* The Call node calls a locally defined method. Remember to inline. */
inline_env_t *ienv = env;
- call_entry *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
+ call_entry *entry = OALLOC(&ienv->obst, call_entry);
entry->call = call;
entry->callee = called_irg;
entry->loop_depth = 0;
if (! list_empty(&env.calls)) {
/* There are calls to inline */
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
collect_phiprojs(irg);
list_for_each_entry(call_entry, entry, &env.calls, list) {
inline_method(entry->call, callee);
}
}
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
obstack_free(&env.obst, NULL);
current_ir_graph = rem;
}
+struct inline_small_irgs_pass_t {
+ ir_graph_pass_t pass;
+ int size;
+};
+
+/**
+ * Wrapper to run inline_small_irgs() as a pass.
+ */
+static int inline_small_irgs_wrapper(ir_graph *irg, void *context) {
+ struct inline_small_irgs_pass_t *pass = context;
+
+ inline_small_irgs(irg, pass->size);
+ return 0;
+}
+
+/* create a pass for inline_small_irgs() */
+ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size) {
+ struct inline_small_irgs_pass_t *pass =
+ XMALLOCZ(struct inline_small_irgs_pass_t);
+
+ pass->size = size;
+ return def_graph_pass_constructor(
+ &pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
+}
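+
+/* Possible usage sketch; assumes the pass-manager API from irpass.h
+ * (new_graph_pass_mgr(), ir_graph_pass_mgr_add(), ir_graph_pass_mgr_run()),
+ * which is not part of this change:
+ *
+ *     ir_graph_pass_manager_t *mgr = new_graph_pass_mgr("opts", 0, 0);
+ *     ir_graph_pass_mgr_add(mgr, inline_small_irgs_pass(NULL, 50));
+ *     ir_graph_pass_mgr_run(mgr);
+ */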
+
/**
* Environment for inlining irgs.
*/
- unsigned n_callers; /**< Number of known graphs that call this graphs. */
+ unsigned n_callers; /**< Number of known graphs that call this graph. */
unsigned n_callers_orig; /**< for statistics */
unsigned got_inline:1; /**< Set, if at least one call inside this graph was inlined. */
- unsigned local_vars:1; /**< Set, if an inlined function got the address of a local variable. */
unsigned recursive:1; /**< Set, if this function is self recursive. */
} inline_irg_env;
* Allocate a new environment for inlining.
*/
static inline_irg_env *alloc_inline_irg_env(void) {
- inline_irg_env *env = obstack_alloc(&temp_obst, sizeof(*env));
+ inline_irg_env *env = OALLOC(&temp_obst, inline_irg_env);
INIT_LIST_HEAD(&env->calls);
env->local_weights = NULL;
- env->n_nodes = -2; /* do not count count Start, End */
+ env->n_nodes = -2; /* do not count Start, End */
env->n_callers = 0;
env->n_callers_orig = 0;
env->got_inline = 0;
- env->local_vars = 0;
env->recursive = 0;
return env;
}
x->recursive = 1;
/* link it in the list of possible inlinable entries */
- entry = obstack_alloc(&temp_obst, sizeof(*entry));
+ entry = OALLOC(&temp_obst, call_entry);
entry->call = call;
entry->callee = callee;
entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
* Returns TRUE if the number of callers is 0 in the irg's environment,
* hence this irg is a leave.
*/
-INLINE static int is_leave(ir_graph *irg) {
+inline static int is_leave(ir_graph *irg) {
inline_irg_env *env = get_irg_link(irg);
return env->n_call_nodes == 0;
}
* Returns TRUE if the number of nodes in the callee is
- * smaller then size in the irg's environment.
+ * smaller than size in the irg's environment.
*/
-INLINE static int is_smaller(ir_graph *callee, unsigned size) {
+inline static int is_smaller(ir_graph *callee, unsigned size) {
inline_irg_env *env = get_irg_link(callee);
return env->n_nodes < size;
}
*/
static call_entry *duplicate_call_entry(const call_entry *entry,
ir_node *new_call, int loop_depth_delta) {
- call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
+ call_entry *nentry = OALLOC(&temp_obst, call_entry);
nentry->call = new_call;
nentry->callee = entry->callee;
nentry->benefice = entry->benefice;
current_ir_graph = get_irp_irg(i);
env = get_irg_link(current_ir_graph);
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
ir_graph *callee;
irg_inline_property prop;
}
}
}
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
} while (did_inline);
current_ir_graph = get_irp_irg(i);
env = get_irg_link(current_ir_graph);
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/* note that the list of possible calls is updated during the process */
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
irg_inline_property prop;
inline_irg_env *callee_env;
ir_graph *copy;
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/*
* No copy yet, create one.
* Note that recursive methods are never leaves, so it is sufficient
/* create_irg_copy() destroys the Proj links, recompute them */
phiproj_computed = 0;
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/* allocate new environment */
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
}
}
}
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
for (i = 0; i < n_irgs; ++i) {
current_ir_graph = rem;
}
+struct inline_leave_functions_pass_t {
+ ir_prog_pass_t pass;
+ unsigned maxsize;
+ unsigned leavesize;
+ unsigned size;
+ int ignore_runtime;
+};
+
+/**
+ * Wrapper to run inline_leave_functions() as an ir_prog pass.
+ */
+static int inline_leave_functions_wrapper(ir_prog *irp, void *context) {
+ struct inline_leave_functions_pass_t *pass = context;
+
+ (void)irp;
+ inline_leave_functions(
+ pass->maxsize, pass->leavesize,
+ pass->size, pass->ignore_runtime);
+ return 0;
+}
+
+/* create a pass for inline_leave_functions() */
+ir_prog_pass_t *inline_leave_functions_pass(
+ const char *name, unsigned maxsize, unsigned leavesize,
+ unsigned size, int ignore_runtime) {
+ struct inline_leave_functions_pass_t *pass =
+ XMALLOCZ(struct inline_leave_functions_pass_t);
+
+ pass->maxsize = maxsize;
+ pass->leavesize = leavesize;
+ pass->size = size;
+ pass->ignore_runtime = ignore_runtime;
+
+ return def_prog_pass_constructor(
+ &pass->pass,
+ name ? name : "inline_leave_functions",
+ inline_leave_functions_wrapper);
+}
+
/**
* Calculate the parameter weights for transmitting the address of a local variable.
*/
}
current_ir_graph = irg;
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
/* put irgs into the pqueue */
pqueue = new_pqueue();
if (benefice < inline_threshold)
continue;
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/*
* No copy yet, create one.
* Note that recursive methods are never leaves, so it is
/* create_irg_copy() destroys the Proj links, recompute them */
phiproj_computed = 0;
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/* allocate a new environment */
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- /* callee was inline. Append it's call list. */
+ /* callee was inlined. Append its call list. */
env->got_inline = 1;
- if (curr_call->local_adr)
- env->local_vars = 1;
--env->n_call_nodes;
/* we just generate a bunch of new calls */
env->n_nodes += callee_env->n_nodes;
--callee_env->n_callers;
}
-
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
del_pqueue(pqueue);
}
* Heuristic inliner. Calculates a benefice value for every call and inlines
* those calls with a value higher than the threshold.
*/
-void inline_functions(unsigned maxsize, int inline_threshold) {
+void inline_functions(unsigned maxsize, int inline_threshold,
+ opt_ptr after_inline_opt)
+{
inline_irg_env *env;
int i, n_irgs;
ir_graph *rem;
ir_graph *irg = irgs[i];
env = get_irg_link(irg);
- if (env->got_inline) {
+ if (env->got_inline && after_inline_opt != NULL) {
/* this irg got calls inlined: optimize it */
- if (get_opt_combo()) {
- if (env->local_vars) {
- scalar_replacement_opt(irg);
- }
- combo(irg);
- } else {
- if (env->local_vars) {
- if (scalar_replacement_opt(irg)) {
- optimize_graph_df(irg);
- }
- }
- optimize_cf(irg);
- }
+ after_inline_opt(irg);
}
if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
current_ir_graph = rem;
}
+struct inline_functions_pass_t {
+ ir_prog_pass_t pass;
+ unsigned maxsize;
+ int inline_threshold;
+ opt_ptr after_inline_opt;
+};
+
+/**
+ * Wrapper to run inline_functions() as an ir_prog pass.
+ */
+static int inline_functions_wrapper(ir_prog *irp, void *context) {
+ struct inline_functions_pass_t *pass = context;
+
+ (void)irp;
+ inline_functions(pass->maxsize, pass->inline_threshold,
+ pass->after_inline_opt);
+ return 0;
+}
+
+/* create an ir_prog pass for inline_functions() */
+ir_prog_pass_t *inline_functions_pass(
+ const char *name, unsigned maxsize, int inline_threshold,
+ opt_ptr after_inline_opt) {
+ struct inline_functions_pass_t *pass =
+ XMALLOCZ(struct inline_functions_pass_t);
+
+ pass->maxsize = maxsize;
+ pass->inline_threshold = inline_threshold;
+ pass->after_inline_opt = after_inline_opt;
+
+ return def_prog_pass_constructor(
+ &pass->pass, name ? name : "inline_functions",
+ inline_functions_wrapper);
+}
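+
+/* Possible usage sketch for the ir_prog variants (same assumption about the
+ * irpass.h pass-manager API as above):
+ *
+ *     ir_prog_pass_manager_t *mgr = new_prog_pass_mgr("opt", 0, 0);
+ *     ir_prog_pass_mgr_add(mgr, inline_functions_pass(NULL, 750, 0, NULL));
+ *     ir_prog_pass_mgr_run(mgr);
+ */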
+
void firm_init_inline(void) {
FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
}