#include "irflag_t.h"
#include "irhooks.h"
#include "irtools.h"
+#include "iropt_dbg.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
n = identify_remember(current_ir_graph->value_table, nn);
if (nn != n) {
+ DBG_OPT_CSE(nn, n);
exchange(nn, n);
}
}
/* -- Precompute some values -- */
end_bl = get_new_node(get_irg_end_block(called_graph));
end = get_new_node(get_irg_end(called_graph));
- arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
res_pred = xmalloc(n_res * sizeof(*res_pred));
n_ret = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
n_ret++;
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_mem(ret);
n_ret++;
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret, *irn;
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
+ ret = skip_Proj(get_Block_cfgpred(end_bl, i));
if (is_Call(ret)) {
cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
n_exc++;
/* assert(exc_handling == 1 || no exceptions. ) */
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
ir_node *irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
if (env.head != NULL) {
/* There are calls to inline */
collect_phiprojs(irg);
+
for (entry = env.head; entry != NULL; entry = entry->next) {
- ir_graph *callee = entry->callee;
- if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
- (get_irg_inline_property(callee) >= irg_inline_forced)) {
+ ir_graph *callee = entry->callee;
+ irg_inline_property prop = get_irg_inline_property(callee);
+
+ if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+ /* do not inline forbidden / weak graphs */
+ continue;
+ }
+
+ if (prop >= irg_inline_forced ||
+ _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
inline_method(entry->call, callee);
}
}
tail = NULL;
for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
+ ir_graph *callee;
+ irg_inline_property prop;
- if (env->n_nodes > maxsize) break;
+ if (env->n_nodes > maxsize)
+ break;
call = entry->call;
callee = entry->callee;
+ prop = get_irg_inline_property(callee);
+ if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+ /* do not inline forbidden / weak graphs */
+ continue;
+ }
+
if (is_leave(callee) && (
- is_smaller(callee, leavesize) || (get_irg_inline_property(callee) >= irg_inline_forced))) {
+ is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
if (!phiproj_computed) {
phiproj_computed = 1;
collect_phiprojs(current_ir_graph);
/* note that the list of possible calls is updated during the process */
tail = NULL;
for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
- pmap_entry *e;
+ irg_inline_property prop;
+ ir_graph *callee;
+ pmap_entry *e;
call = entry->call;
callee = entry->callee;
+ prop = get_irg_inline_property(callee);
+ if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+ /* do not inline forbidden / weak graphs */
+ continue;
+ }
+
e = pmap_find(copied_graphs, callee);
if (e != NULL) {
/*
callee = e->value;
}
- if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
- (get_irg_inline_property(callee) >= irg_inline_forced))) {
+ if (prop >= irg_inline_forced ||
+ (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
if (current_ir_graph == callee) {
/*
* Recursive call: we cannot directly inline because we cannot walk
inline_irg_env *curr_env, *callee_env;
- if (get_entity_additional_properties(ent) & mtp_property_noreturn) {
- /* do NOT inline noreturn calls */
+ if (get_entity_additional_properties(ent) & mtp_property_noreturn|mtp_property_weak) {
+ /* do NOT inline noreturn or weak calls */
return INT_MIN;
}
}
callee_env = get_irg_link(callee);
- if (get_entity_visibility(ent) == visibility_local &&
- callee_env->n_callers_orig == 1 &&
- callee != current_ir_graph) {
+ if (callee_env->n_callers == 1 && callee != current_ir_graph) {
/* we are the only caller, give big bonus */
- weight += 5000;
+ if (get_entity_visibility(ent) == visibility_local) {
+ weight += 5000;
+ } else {
+ weight += 200;
+ }
}
/* do not inline big functions */
return weight;
}
+static ir_graph **irgs;
+static int last_irg;
+
+static void callgraph_walker(ir_graph *irg, void *data)
+{
+ (void) data;
+ irgs[last_irg++] = irg;
+}
+
+static ir_graph **create_irg_list(void)
+{
+ ir_entity **free_methods;
+ int arr_len;
+ int n_irgs = get_irp_n_irgs();
+
+ cgana(&arr_len, &free_methods);
+ xfree(free_methods);
+
+ compute_callgraph();
+
+ last_irg = 0;
+ irgs = xmalloc(n_irgs * sizeof(*irgs));
+ memset(irgs, 0, sizeof(n_irgs * sizeof(*irgs)));
+
+ callgraph_walk(NULL, callgraph_walker, NULL);
+ assert(n_irgs == last_irg);
+
+ return irgs;
+}
+
/**
* Heuristic inliner. Calculates a benefice value for every call and inlines
* those calls with a value higher than the threshold.
const call_entry *centry;
pmap *copied_graphs;
pmap_entry *pm_entry;
+ ir_graph **irgs;
rem = current_ir_graph;
obstack_init(&temp_obst);
+ irgs = create_irg_list();
+
/* a map for the copied graphs, used to inline recursive calls */
copied_graphs = pmap_create();
/* extend all irgs by a temporary data structure for inlining. */
n_irgs = get_irp_n_irgs();
for (i = 0; i < n_irgs; ++i)
- set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
+ set_irg_link(irgs[i], alloc_inline_irg_env());
/* Precompute information in temporary data structure. */
wenv.ignore_runtime = 0;
wenv.ignore_callers = 0;
for (i = 0; i < n_irgs; ++i) {
- ir_graph *irg = get_irp_irg(i);
+ ir_graph *irg = irgs[i];
- assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
wenv.x = get_irg_link(irg);
for (i = 0; i < n_irgs; ++i) {
int phiproj_computed = 0;
ir_node *call;
- ir_graph *irg = get_irp_irg(i);
+ ir_graph *irg = irgs[i];
current_ir_graph = irg;
env = get_irg_link(irg);
/* note that the list of possible calls is updated during the process */
last_call = &env->call_head;
for (curr_call = env->call_head; curr_call != NULL;) {
- ir_graph *callee;
- pmap_entry *e;
- int benefice;
- unsigned local_adr;
+ irg_inline_property prop;
+ ir_graph *callee;
+ pmap_entry *e;
+ int benefice;
+ unsigned local_adr;
if (env->n_nodes > maxsize) break;
call = curr_call->call;
callee = curr_call->callee;
			prop = get_irg_inline_property(callee);
			if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
				/* do not inline forbidden / weak graphs */
				/* NOTE(review): the enclosing for-loop over curr_call has no
				 * increment clause (curr_call is presumably advanced later in
				 * the body); verify that this `continue` cannot skip the
				 * advance and loop forever on a forbidden/weak callee. */
				continue;
			}
+
e = pmap_find(copied_graphs, callee);
if (e != NULL) {
/*
benefice = calc_inline_benefice(call, callee, &local_adr);
DB((dbg, LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
- if (benefice > -inline_threshold ||
- (get_irg_inline_property(callee) >= irg_inline_forced)) {
+ if (benefice > -inline_threshold || prop >= irg_inline_forced) {
if (current_ir_graph == callee) {
/*
* Recursive call: we cannot directly inline because we cannot walk
}
for (i = 0; i < n_irgs; ++i) {
- ir_graph *irg = get_irp_irg(i);
+ ir_graph *irg = irgs[i];
env = get_irg_link(irg);
if (env->got_inline) {
}
pmap_destroy(copied_graphs);
+ xfree(irgs);
+
obstack_free(&temp_obst, NULL);
current_ir_graph = rem;
}