/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* @file
* @brief Dead node elimination and Procedure Inlining.
* @author Michael Beck, Goetz Lindenmaier
- * @version $Id$
*/
#include "config.h"
#include "irtools.h"
#include "iropt_dbg.h"
#include "irpass_t.h"
-#include "irphase_t.h"
+#include "irnodemap.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
ir_node *new_node = irn_copy_into_irg(node, new_irg);
set_new_node(node, new_node);
-
if (is_Sel(node)) {
ir_graph *old_irg = get_irn_irg(node);
ir_type *old_frame_type = get_irg_frame_type(old_irg);
assert(is_Sel(new_node));
/* use copied entities from the new frame */
if (get_entity_owner(old_entity) == old_frame_type) {
- ir_entity *new_entity = get_entity_link(old_entity);
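+ /* get_entity_link() returns a void*, hence the explicit cast */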
+ ir_entity *new_entity = (ir_entity*)get_entity_link(old_entity);
assert(new_entity != NULL);
set_Sel_entity(new_node, new_entity);
}
*/
static void find_addr(ir_node *node, void *env)
{
- bool *allow_inline = env;
+ bool *allow_inline = (bool*)env;
- if (is_Sel(node)) {
+ if (is_Block(node) && get_Block_entity(node)) {
+ /* Currently we cannot correctly inline blocks whose address
+  * was taken */
+ *allow_inline = false;
+ } else if (is_Sel(node)) {
ir_graph *irg = current_ir_graph;
if (get_Sel_ptr(node) == get_irg_frame(irg)) {
/* access to frame */
/* access to value_type */
*allow_inline = false;
}
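+ /* the inliner cannot cope with parameter entities yet */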
+ if (is_parameter_entity(ent)) {
+ *allow_inline = false;
+ }
}
} else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
/* From GCC:
ir_entity *called = get_irg_entity(called_graph);
ir_type *called_type = get_entity_type(called);
ir_type *call_type = get_Call_type(call);
- int n_params = get_method_n_params(called_type);
- int n_arguments = get_method_n_params(call_type);
- int n_res = get_method_n_ress(called_type);
+ size_t n_params = get_method_n_params(called_type);
+ size_t n_arguments = get_method_n_params(call_type);
+ size_t n_res = get_method_n_ress(called_type);
irg_inline_property prop = get_irg_inline_property(called_graph);
- int i;
+ size_t i;
bool res;
if (prop == irg_inline_forbidden)
* It is implementation dependent what happens in that case.
* We support inlining, if the bitsize of the types matches AND
* the same arithmetic is used. */
- for (i = n_params - 1; i >= 0; --i) {
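+ /* iterate upwards: i is a size_t now, the old downward loop's
+  * i >= 0 condition would always hold */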
+ for (i = 0; i < n_params; ++i) {
ir_type *param_tp = get_method_param_type(called_type, i);
ir_type *arg_tp = get_method_param_type(call_type, i);
/* otherwise we can simply "reinterpret" the bits */
}
}
- for (i = n_res - 1; i >= 0; --i) {
+ for (i = 0; i < n_res; ++i) {
ir_type *decl_res_tp = get_method_res_type(called_type, i);
ir_type *used_res_tp = get_method_res_type(call_type, i);
{
ir_type *from_frame = get_irg_frame_type(from);
ir_type *to_frame = get_irg_frame_type(to);
- int n_members = get_class_n_members(from_frame);
- int i;
+ size_t n_members = get_class_n_members(from_frame);
+ size_t i;
assert(from_frame != to_frame);
for (i = 0; i < n_members; ++i) {
ir_entity *old_ent = get_class_member(from_frame, i);
ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
set_entity_link(old_ent, new_ent);
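+ /* parameter entities must already have been rejected by the
+  * inlinability checks */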
+ assert(!is_parameter_entity(old_ent));
}
}
{
ir_node *pre_call;
ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max];
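+ /* pn_Start_max is now the largest valid projection number, so the
+  * array needs pn_Start_max+1 entries */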
+ ir_node *in[pn_Start_max+1];
ir_node *end, *end_bl, *block;
ir_node **res_pred;
ir_node **cf_pred;
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_pinned(irg) == op_pin_state_pinned);
assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
- set_irg_outs_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
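+ /* inlining invalidates the dominance and entity usage information */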
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+ | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
- set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+ clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
edges_deactivate(irg);
/* here we know we WILL inline, so inform the statistics */
{
ir_node *Xproj = NULL;
ir_node *proj;
- for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
+ for (proj = (ir_node*)get_irn_link(call); proj != NULL;
+ proj = (ir_node*)get_irn_link(proj)) {
long proj_nr = get_Proj_proj(proj);
if (proj_nr == pn_Call_X_except) Xproj = proj;
}
in[pn_Start_M] = get_Call_mem(call);
in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
in[pn_Start_P_frame_base] = get_irg_frame(irg);
- in[pn_Start_P_tls] = get_irg_tls(irg);
in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
- pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+ pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
post_call = call;
/* --
{
ir_node *start_block;
ir_node *start;
- ir_node *bad;
ir_node *nomem;
start_block = get_irg_start_block(called_graph);
set_new_node(start, pre_call);
mark_irn_visited(start);
- bad = get_irg_bad(called_graph);
- set_new_node(bad, get_irg_bad(irg));
- mark_irn_visited(bad);
-
nomem = get_irg_no_mem(called_graph);
set_new_node(nomem, get_irg_no_mem(irg));
mark_irn_visited(nomem);
/* entity link is used to link entities on the old stack frame to the
 * new stack frame */
- irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
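+ /* entity links are reserved program-wide (irp), hence the IRP_ prefix */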
+ irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
/* copy entities and nodes */
assert(!irn_visited(get_irg_end(called_graph)));
irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
irg);
- irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+ irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
/* -- Merge the end of the inlined procedure with the call site -- */
/* We will turn the old Call node into a Tuple with the following
/* build a Tuple for all results of the method.
* add Phi node if there was more than one Return. */
- turn_into_tuple(post_call, pn_Call_max);
+ turn_into_tuple(post_call, pn_Call_max+1);
/* First the Memory-Phi */
n_mem_phi = 0;
for (i = 0; i < arity; i++) {
}
}
if (n_ret > 0) {
- ir_mode *mode = get_irn_mode(cf_pred[0]);
- phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+ phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
} else {
- phi = new_r_Bad(irg);
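+ /* no Return yields this result, use a Bad with the proper mode */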
+ phi = new_r_Bad(irg, res_mode);
}
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
set_Tuple_pred(call, pn_Call_T_result, result_tuple);
} else {
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
}
/* handle the regular call */
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
- /* For now, we cannot inline calls with value_base */
- set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-
/* Finally the exception control flow.
We have two possible situations:
First if the Call branches to an exception handler:
set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
}
} else {
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
}
} else {
ir_node *main_end_bl;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
free(end_preds);
}
free(res_pred);
ir_node *addr;
addr = get_Call_ptr(call);
- if (is_Global(addr)) {
- ir_entity *ent = get_Global_entity(addr);
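+ /* direct calls have a SymConst address that carries the callee entity */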
+ if (is_SymConst_addr_ent(addr)) {
+ ir_entity *ent = get_SymConst_entity(addr);
/* we don't know which function gets finally bound to a weak symbol */
if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
return NULL;
if (called_irg != NULL) {
/* The Call node calls a locally defined method. Remember to inline. */
- inline_env_t *ienv = env;
+ inline_env_t *ienv = (inline_env_t*)env;
call_entry *entry = OALLOC(&ienv->obst, call_entry);
entry->call = call;
entry->callee = called_irg;
{
ir_graph *rem = current_ir_graph;
inline_env_t env;
- call_entry *entry;
current_ir_graph = irg;
/* Handle graph state */
current_ir_graph = rem;
}
-struct inline_small_irgs_pass_t {
+typedef struct inline_small_irgs_pass_t {
ir_graph_pass_t pass;
int size;
-};
+} inline_small_irgs_pass_t;
/**
* Wrapper to run inline_small_irgs() as a pass.
*/
static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
{
- struct inline_small_irgs_pass_t *pass = context;
+ inline_small_irgs_pass_t *pass = (inline_small_irgs_pass_t*)context;
inline_small_irgs(irg, pass->size);
return 0;
/* create a pass for inline_small_irgs() */
ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size)
{
- struct inline_small_irgs_pass_t *pass =
- XMALLOCZ(struct inline_small_irgs_pass_t);
+ inline_small_irgs_pass_t *pass = XMALLOCZ(inline_small_irgs_pass_t);
pass->size = size;
return def_graph_pass_constructor(
*/
static void collect_calls2(ir_node *call, void *ctx)
{
- wenv_t *env = ctx;
+ wenv_t *env = (wenv_t*)ctx;
inline_irg_env *x = env->x;
- ir_opcode code = get_irn_opcode(call);
+ unsigned code = get_irn_opcode(call);
ir_graph *callee;
call_entry *entry;
if (env->ignore_runtime) {
ir_node *symc = get_Call_ptr(call);
- if (is_Global(symc)) {
- ir_entity *ent = get_Global_entity(symc);
+ if (is_SymConst_addr_ent(symc)) {
+ ir_entity *ent = get_SymConst_entity(symc);
if (get_entity_additional_properties(ent) & mtp_property_runtime)
return;
callee = get_call_called_irg(call);
if (callee != NULL) {
if (! env->ignore_callers) {
- inline_irg_env *callee_env = get_irg_link(callee);
+ inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
/* count all static callers */
++callee_env->n_callers;
++callee_env->n_callers_orig;
/**
* Returns TRUE if the number of callers is 0 in the irg's environment,
- * hence this irg is a leave.
+ * hence this irg is a leaf.
*/
-inline static int is_leave(ir_graph *irg)
+inline static int is_leaf(ir_graph *irg)
{
- inline_irg_env *env = get_irg_link(irg);
+ inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
return env->n_call_nodes == 0;
}
*/
inline static int is_smaller(ir_graph *callee, unsigned size)
{
- inline_irg_env *env = get_irg_link(callee);
+ inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
return env->n_nodes < size;
}
*/
static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
{
- call_entry *entry, *nentry;
+ call_entry *nentry;
/* Note that the src list points to Call nodes in the inlined graph, but
we need Call nodes in our graph. Luckily the inliner leaves this information
in the link field. */
list_for_each_entry(call_entry, entry, &src->calls, list) {
- nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
+ nentry = duplicate_call_entry(entry, (ir_node*)get_irn_link(entry->call), loop_depth);
list_add_tail(&nentry->list, &dst->calls);
}
dst->n_call_nodes += src->n_call_nodes;
}
/*
- * Inlines small leave methods at call sites where the called address comes
+ * Inlines small leaf methods at call sites where the called address comes
* from a Const node that references the entity representing the called
* method.
* The size argument is a rough measure for the code size of the method:
* Methods where the obstack containing the firm graph is smaller than
* size are inlined.
*/
-void inline_leave_functions(unsigned maxsize, unsigned leavesize,
- unsigned size, int ignore_runtime)
+void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
+ unsigned size, int ignore_runtime)
{
inline_irg_env *env;
ir_graph *irg;
- int i, n_irgs;
+ size_t i, n_irgs;
ir_graph *rem;
int did_inline;
wenv_t wenv;
- call_entry *entry, *next;
- const call_entry *centry;
pmap *copied_graphs;
pmap_entry *pm_entry;
assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
- assure_cf_loop(irg);
- wenv.x = get_irg_link(irg);
+ assure_irg_properties(irg,
+ IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+ wenv.x = (inline_irg_env*)get_irg_link(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
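+ /* the walk above only collects calls, it does not modify the graph */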
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
}
/* -- and now inline. -- */
- /* Inline leaves recursively -- we might construct new leaves. */
+ /* Inline leafs recursively -- we might construct new leafs. */
do {
did_inline = 0;
int phiproj_computed = 0;
current_ir_graph = get_irp_irg(i);
- env = get_irg_link(current_ir_graph);
+ env = (inline_irg_env*)get_irg_link(current_ir_graph);
ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
continue;
}
- if (is_leave(callee) && (
- is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
+ if (is_leaf(callee) && (
+ is_smaller(callee, leafsize) || prop >= irg_inline_forced)) {
if (!phiproj_computed) {
phiproj_computed = 1;
collect_phiprojs(current_ir_graph);
did_inline = inline_method(call, callee);
if (did_inline) {
- inline_irg_env *callee_env = get_irg_link(callee);
+ inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
/* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
int phiproj_computed = 0;
current_ir_graph = get_irp_irg(i);
- env = get_irg_link(current_ir_graph);
+ env = (inline_irg_env*)get_irg_link(current_ir_graph);
ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
irg_inline_property prop;
ir_graph *callee;
- pmap_entry *e;
+ ir_graph *callee_copy;
call = entry->call;
callee = entry->callee;
continue;
}
- e = pmap_find(copied_graphs, callee);
- if (e != NULL) {
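+ /* typed pmap lookup: returns the copy or NULL */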
+ callee_copy = pmap_get(ir_graph, copied_graphs, callee);
+ if (callee_copy != NULL) {
/*
* Remap callee if we have a copy.
* FIXME: Should we do this only for recursive Calls ?
*/
- callee = e->value;
+ callee = callee_copy;
}
if (prop >= irg_inline_forced ||
/*
* No copy yet, create one.
- * Note that recursive methods are never leaves, so it is sufficient
+ * Note that recursive methods are never leafs, so it is sufficient
* to test this condition here.
*/
copy = create_irg_copy(callee);
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- assure_cf_loop(copy);
+ assure_irg_properties(copy,
+ IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
/* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
- /* callee was inline. Append it's call list. */
+ /* callee was inlined. Append its call list. */
env->got_inline = 1;
--env->n_call_nodes;
append_call_list(env, callee_env, entry->loop_depth);
/* after we have inlined callee, all called methods inside callee
are now called once more */
list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
- inline_irg_env *penv = get_irg_link(centry->callee);
+ inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
++penv->n_callers;
}
for (i = 0; i < n_irgs; ++i) {
irg = get_irp_irg(i);
- env = get_irg_link(irg);
+ env = (inline_irg_env*)get_irg_link(irg);
if (env->got_inline) {
optimize_graph_df(irg);
/* kill the copied graphs: we don't need them anymore */
foreach_pmap(copied_graphs, pm_entry) {
- ir_graph *copy = pm_entry->value;
+ ir_graph *copy = (ir_graph*)pm_entry->value;
/* reset the entity, otherwise it will be deleted in the next step ... */
set_irg_entity(copy, NULL);
current_ir_graph = rem;
}
-struct inline_leave_functions_pass_t {
+typedef struct inline_leaf_functions_pass_t {
ir_prog_pass_t pass;
unsigned maxsize;
- unsigned leavesize;
+ unsigned leafsize;
unsigned size;
int ignore_runtime;
-};
+} inline_leaf_functions_pass_t;
/**
- * Wrapper to run inline_leave_functions() as a ir_prog pass.
+ * Wrapper to run inline_leaf_functions() as an ir_prog pass.
*/
-static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
+static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
{
- struct inline_leave_functions_pass_t *pass = context;
+ inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
(void)irp;
- inline_leave_functions(
- pass->maxsize, pass->leavesize,
+ inline_leaf_functions(
+ pass->maxsize, pass->leafsize,
pass->size, pass->ignore_runtime);
return 0;
}
-/* create a pass for inline_leave_functions() */
-ir_prog_pass_t *inline_leave_functions_pass(
- const char *name, unsigned maxsize, unsigned leavesize,
+/* create a pass for inline_leaf_functions() */
+ir_prog_pass_t *inline_leaf_functions_pass(
+ const char *name, unsigned maxsize, unsigned leafsize,
unsigned size, int ignore_runtime)
{
- struct inline_leave_functions_pass_t *pass =
- XMALLOCZ(struct inline_leave_functions_pass_t);
+ inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
pass->maxsize = maxsize;
- pass->leavesize = leavesize;
+ pass->leafsize = leafsize;
pass->size = size;
pass->ignore_runtime = ignore_runtime;
return def_prog_pass_constructor(
&pass->pass,
- name ? name : "inline_leave_functions",
- inline_leave_functions_wrapper);
+ name ? name : "inline_leaf_functions",
+ inline_leaf_functions_wrapper);
}
/**
{
ir_entity *ent = get_irg_entity(irg);
ir_type *mtp;
- int nparams, i, proj_nr;
+ size_t nparams;
+ int i;
+ long proj_nr;
ir_node *irg_args, *arg;
mtp = get_entity_type(ent);
* After inlining, the local variable might be transformed into a
* SSA variable by scalar_replacement().
*/
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
{
- inline_irg_env *env = get_irg_link(callee);
+ inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
- if (env->local_weights != NULL) {
- if (pos < ARR_LEN(env->local_weights))
- return env->local_weights[pos];
- return 0;
- }
-
- analyze_irg_local_weights(env, callee);
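+ /* compute the local weights lazily on first use */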
+ if (env->local_weights == NULL)
+ analyze_irg_local_weights(env, callee);
if (pos < ARR_LEN(env->local_weights))
return env->local_weights[pos];
{
ir_node *call = entry->call;
ir_entity *ent = get_irg_entity(callee);
+ ir_type *callee_frame;
+ size_t i, n_members, n_params;
ir_node *frame_ptr;
ir_type *mtp;
int weight = 0;
- int i, n_params, all_const;
+ int all_const;
unsigned cc, v;
irg_inline_property prop;
return entry->benefice = INT_MIN;
}
+ callee_frame = get_irg_frame_type(callee);
+ n_members = get_class_n_members(callee_frame);
+ for (i = 0; i < n_members; ++i) {
+ ir_entity *frame_ent = get_class_member(callee_frame, i);
+ if (is_parameter_entity(frame_ent)) {
+ /* TODO: the inliner should handle parameter entities by
+  * inserting Store operations */
+ DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden due to parameter entity\n", call, callee));
+ set_irg_inline_property(callee, irg_inline_forbidden);
+ return entry->benefice = INT_MIN;
+ }
+ }
+
if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
call, callee));
cc = get_method_calling_convention(mtp);
if (cc & cc_reg_param) {
/* register parameter, smaller costs for register parameters */
- int max_regs = cc & ~cc_bits;
+ size_t max_regs = cc & ~cc_bits;
if (max_regs < n_params)
weight += max_regs * 2 + (n_params - max_regs) * 5;
}
entry->all_const = all_const;
- callee_env = get_irg_link(callee);
+ callee_env = (inline_irg_env*)get_irg_link(callee);
if (callee_env->n_callers == 1 &&
callee != current_ir_graph &&
!entity_is_externally_visible(ent)) {
if (callee_env->n_nodes < 30 && !callee_env->recursive)
weight += 2000;
- /* and finally for leaves: they do not increase the register pressure
+ /* and finally for leafs: they do not increase the register pressure
because of callee-save registers */
if (callee_env->n_call_nodes == 0)
weight += 400;
return entry->benefice = weight;
}
-static ir_graph **irgs;
-static int last_irg;
+typedef struct walk_env_t {
+ ir_graph **irgs;
+ size_t last_irg;
+} walk_env_t;
/**
* Callgraph walker, collect all visited graphs.
*/
static void callgraph_walker(ir_graph *irg, void *data)
{
- (void) data;
- irgs[last_irg++] = irg;
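+ /* record the visited graph in the explicit walker environment */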
+ walk_env_t *env = (walk_env_t *)data;
+ env->irgs[env->last_irg++] = irg;
}
/**
*/
static ir_graph **create_irg_list(void)
{
- ir_entity **free_methods;
- int arr_len;
- int n_irgs = get_irp_n_irgs();
+ ir_entity **free_methods;
+ size_t n_irgs = get_irp_n_irgs();
+ walk_env_t env;
- cgana(&arr_len, &free_methods);
+ cgana(&free_methods);
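+ /* the returned free-methods list is not needed here */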
xfree(free_methods);
compute_callgraph();
- last_irg = 0;
- irgs = XMALLOCNZ(ir_graph*, n_irgs);
+ env.irgs = XMALLOCNZ(ir_graph*, n_irgs);
+ env.last_irg = 0;
+
+ callgraph_walk(NULL, callgraph_walker, &env);
+ assert(n_irgs == env.last_irg);
- callgraph_walk(NULL, callgraph_walker, NULL);
- assert(n_irgs == last_irg);
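+ /* release the callgraph again, the walk above was its only user here */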
+ free_callgraph();
- return irgs;
+ return env.irgs;
}
/**
int inline_threshold, pmap *copied_graphs)
{
int phiproj_computed = 0;
- inline_irg_env *env = get_irg_link(irg);
- call_entry *curr_call;
+ inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
wenv_t wenv;
pqueue_t *pqueue;
/* note that the list of possible calls is updated during the process */
while (!pqueue_empty(pqueue)) {
int did_inline;
- call_entry *curr_call = pqueue_pop_front(pqueue);
+ call_entry *curr_call = (call_entry*)pqueue_pop_front(pqueue);
ir_graph *callee = curr_call->callee;
ir_node *call_node = curr_call->call;
- inline_irg_env *callee_env = get_irg_link(callee);
+ inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
irg_inline_property prop = get_irg_inline_property(callee);
+ ir_graph *callee_copy;
int loop_depth;
- const call_entry *centry;
- pmap_entry *e;
if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
continue;
}
- e = pmap_find(copied_graphs, callee);
- if (e != NULL) {
+ callee_copy = pmap_get(ir_graph, copied_graphs, callee);
+ if (callee_copy != NULL) {
int benefice = curr_call->benefice;
/*
* Reduce the weight for recursive function IFF not all arguments are const.
/*
* Remap callee if we have a copy.
*/
- callee = e->value;
- callee_env = get_irg_link(callee);
+ callee = callee_copy;
+ callee_env = (inline_irg_env*)get_irg_link(callee);
}
if (current_ir_graph == callee) {
/*
* No copy yet, create one.
- * Note that recursive methods are never leaves, so it is
+ * Note that recursive methods are never leafs, so it is
* sufficient to test this condition here.
*/
copy = create_irg_copy(callee);
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- assure_cf_loop(copy);
+ assure_irg_properties(copy, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
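+ /* zero all walker fields, only x and ignore_callers are set below */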
+ memset(&wenv, 0, sizeof(wenv));
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
/* remove it from the caller list */
list_del(&curr_call->list);
- /* callee was inline. Append it's call list. */
+ /* callee was inlined. Append its call list. */
env->got_inline = 1;
--env->n_call_nodes;
/* we just generate a bunch of new calls */
loop_depth = curr_call->loop_depth;
list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
- inline_irg_env *penv = get_irg_link(centry->callee);
+ inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
ir_node *new_call;
call_entry *new_entry;
/* Note that the src list points to Call nodes in the inlined graph,
* but we need Call nodes in our graph. Luckily the inliner leaves
* this information in the link field. */
- new_call = get_irn_link(centry->call);
+ new_call = (ir_node*)get_irn_link(centry->call);
+ if (get_irn_irg(new_call) != irg) {
+ /* centry->call has not been copied, which means it is dead.
+  * This might happen during inlining, if a const function that
+  * cannot be inlined is only used as an unused argument of
+  * another function that is inlined. */
+ continue;
+ }
assert(is_Call(new_call));
new_entry = duplicate_call_entry(centry, new_call, loop_depth);
opt_ptr after_inline_opt)
{
inline_irg_env *env;
- int i, n_irgs;
+ size_t i, n_irgs;
ir_graph *rem;
wenv_t wenv;
pmap *copied_graphs;
free_callee_info(irg);
- wenv.x = get_irg_link(irg);
- assure_cf_loop(irg);
+ wenv.x = (inline_irg_env*)get_irg_link(irg);
+ assure_loopinfo(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}
for (i = 0; i < n_irgs; ++i) {
ir_graph *irg = irgs[i];
- env = get_irg_link(irg);
+ env = (inline_irg_env*)get_irg_link(irg);
if (env->got_inline && after_inline_opt != NULL) {
/* this irg got calls inlined: optimize it */
after_inline_opt(irg);
/* kill the copied graphs: we don't need them anymore */
foreach_pmap(copied_graphs, pm_entry) {
- ir_graph *copy = pm_entry->value;
+ ir_graph *copy = (ir_graph*)pm_entry->value;
/* reset the entity, otherwise it will be deleted in the next step ... */
set_irg_entity(copy, NULL);
current_ir_graph = rem;
}
-struct inline_functions_pass_t {
+typedef struct inline_functions_pass_t {
ir_prog_pass_t pass;
unsigned maxsize;
int inline_threshold;
opt_ptr after_inline_opt;
-};
+} inline_functions_pass_t;
/**
* Wrapper to run inline_functions() as an ir_prog pass.
*/
static int inline_functions_wrapper(ir_prog *irp, void *context)
{
- struct inline_functions_pass_t *pass = context;
+ inline_functions_pass_t *pass = (inline_functions_pass_t*)context;
(void)irp;
inline_functions(pass->maxsize, pass->inline_threshold,
const char *name, unsigned maxsize, int inline_threshold,
opt_ptr after_inline_opt)
{
- struct inline_functions_pass_t *pass =
- XMALLOCZ(struct inline_functions_pass_t);
+ inline_functions_pass_t *pass = XMALLOCZ(inline_functions_pass_t);
pass->maxsize = maxsize;
pass->inline_threshold = inline_threshold;