#include "irflag_t.h"
#include "irhooks.h"
#include "irtools.h"
+#include "iropt_dbg.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
#endif
struct obstack *graveyard_obst = NULL;
struct obstack *rebirth_obst = NULL;
- assert(! edges_activated(irg) && "dead node elimination requires disabled edges");
+
+ edges_deactivate(irg);
/* inform statistics that we started a dead-node elimination run */
hook_dead_node_elim(irg, 1);
* inlined procedure. The new entities must be in the link field of
* the entities.
*/
-static INLINE void
-copy_node_inline(ir_node *n, void *env) {
+static void copy_node_inline(ir_node *n, void *env) {
ir_node *nn;
ir_type *frame_tp = (ir_type *)env;
}
}
+/**
+ * Copies new predecessors of old node and move constants to
+ * the Start Block.
+ */
+static void copy_preds_inline(ir_node *n, void *env) {
+ ir_node *nn;
+
+ copy_preds(n, env);
+ nn = skip_Id(get_new_node(n));
+ if (is_irn_constlike(nn)) {
+ /* move Constants into the start block */
+ set_nodes_block(nn, get_irg_start_block(current_ir_graph));
+
+ n = identify_remember(current_ir_graph->value_table, nn);
+ if (nn != n) {
+ DBG_OPT_CSE(nn, n);
+ exchange(nn, n);
+ }
+ }
+}
+
/**
* Walker: checks if P_value_arg_base is used.
*/
ir_entity *ent;
ir_graph *rem, *irg;
irg_inline_property prop = get_irg_inline_property(called_graph);
+ unsigned long visited;
if (prop == irg_inline_forbidden)
return 0;
set_irg_visited(irg, get_irg_visited(called_graph) + 1);
if (get_irg_block_visited(irg) < get_irg_block_visited(called_graph))
set_irg_block_visited(irg, get_irg_block_visited(called_graph));
+ visited = get_irg_visited(irg);
+
/* Set pre_call as new Start node in link field of the start node of
calling graph and pre_calls block as new block for the start block
of calling graph.
Further mark these nodes so that they are not visited by the
copying. */
set_irn_link(get_irg_start(called_graph), pre_call);
- set_irn_visited(get_irg_start(called_graph), get_irg_visited(irg));
+ set_irn_visited(get_irg_start(called_graph), visited);
set_irn_link(get_irg_start_block(called_graph), get_nodes_block(pre_call));
- set_irn_visited(get_irg_start_block(called_graph), get_irg_visited(irg));
- set_irn_link(get_irg_bad(called_graph), get_irg_bad(irg));
- set_irn_visited(get_irg_bad(called_graph), get_irg_visited(irg));
+ set_irn_visited(get_irg_start_block(called_graph), visited);
+
+ set_irn_link(get_irg_bad(called_graph), get_irg_bad(current_ir_graph));
+ set_irn_visited(get_irg_bad(called_graph), visited);
+
+ set_irn_link(get_irg_no_mem(called_graph), get_irg_no_mem(current_ir_graph));
+ set_irn_visited(get_irg_no_mem(called_graph), visited);
/* Initialize for compaction of in arrays */
inc_irg_block_visited(irg);
/* -- Performing dead node elimination inlines the graph -- */
/* Copies the nodes to the obstack of current_ir_graph. Updates links to new
entities. */
- irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
+ irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
get_irg_frame_type(called_graph));
/* Repair called_graph */
/* -- Precompute some values -- */
end_bl = get_new_node(get_irg_end_block(called_graph));
end = get_new_node(get_irg_end(called_graph));
- arity = get_irn_arity(end_bl); /* arity = n_exc + n_ret */
+ arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
res_pred = xmalloc(n_res * sizeof(*res_pred));
n_ret = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
n_ret++;
/* First the Memory-Phi */
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_mem(ret);
n_ret++;
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
if (is_Return(ret)) {
cf_pred[n_ret] = get_Return_res(ret, j);
n_ret++;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret, *irn;
- ret = get_irn_n(end_bl, i);
+ ret = get_Block_cfgpred(end_bl, i);
irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
cf_pred[n_exc] = ret;
n_exc = 0;
for (i = 0; i < arity; i++) {
ir_node *ret;
- ret = skip_Proj(get_irn_n(end_bl, i));
+ ret = skip_Proj(get_Block_cfgpred(end_bl, i));
if (is_Call(ret)) {
cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
n_exc++;
/* assert(exc_handling == 1 || no exceptions. ) */
n_exc = 0;
for (i = 0; i < arity; i++) {
- ir_node *ret = get_irn_n(end_bl, i);
+ ir_node *ret = get_Block_cfgpred(end_bl, i);
ir_node *irn = skip_Proj(ret);
if (is_fragile_op(irn) || is_Raise(irn)) {
if (env.head != NULL) {
/* There are calls to inline */
collect_phiprojs(irg);
+
for (entry = env.head; entry != NULL; entry = entry->next) {
- ir_graph *callee = entry->callee;
- if (((_obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst)) < size) ||
- (get_irg_inline_property(callee) >= irg_inline_forced)) {
+ ir_graph *callee = entry->callee;
+ irg_inline_property prop = get_irg_inline_property(callee);
+
+ if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+ /* do not inline forbidden / weak graphs */
+ continue;
+ }
+
+ if (prop >= irg_inline_forced ||
+ _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
inline_method(entry->call, callee);
}
}
tail = NULL;
for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
+ ir_graph *callee;
+ irg_inline_property prop;
- if (env->n_nodes > maxsize) break;
+ if (env->n_nodes > maxsize)
+ break;
call = entry->call;
callee = entry->callee;
+ prop = get_irg_inline_property(callee);
+ if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+ /* do not inline forbidden / weak graphs */
+ continue;
+ }
+
if (is_leave(callee) && (
- is_smaller(callee, leavesize) || (get_irg_inline_property(callee) >= irg_inline_forced))) {
+ is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
if (!phiproj_computed) {
phiproj_computed = 1;
collect_phiprojs(current_ir_graph);
/* note that the list of possible calls is updated during the process */
tail = NULL;
for (entry = env->call_head; entry != NULL; entry = entry->next) {
- ir_graph *callee;
- pmap_entry *e;
+ irg_inline_property prop;
+ ir_graph *callee;
+ pmap_entry *e;
call = entry->call;
callee = entry->callee;
+ prop = get_irg_inline_property(callee);
+ if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+ /* do not inline forbidden / weak graphs */
+ continue;
+ }
+
e = pmap_find(copied_graphs, callee);
if (e != NULL) {
/*
callee = e->value;
}
- if (((is_smaller(callee, size) && (env->n_nodes < maxsize)) || /* small function */
- (get_irg_inline_property(callee) >= irg_inline_forced))) {
+ if (prop >= irg_inline_forced ||
+ (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
if (current_ir_graph == callee) {
/*
* Recursive call: we cannot directly inline because we cannot walk
}
/**
- * calculate a benefice value for inlining the given call.
+ * Calculate a benefice value for inlining the given call.
+ *
+ * @param call the call node we have to inspect
+ * @param callee the called graph
+ * @param local_adr set after return if an address of a local variable is
+ * transmitted as a parameter
*/
static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local_adr) {
ir_entity *ent = get_irg_entity(callee);
inline_irg_env *curr_env, *callee_env;
- if (get_entity_additional_properties(ent) & mtp_property_noreturn) {
- /* do NOT inline noreturn calls */
+	if (get_entity_additional_properties(ent) & (mtp_property_noreturn | mtp_property_weak)) {
+		/* do NOT inline noreturn or weak calls */
return INT_MIN;
}
for (i = 0; i < n_params; ++i) {
ir_node *param = get_Call_param(call, i);
- if (is_Const(param))
+ if (is_Const(param)) {
weight += get_method_param_weight(ent, i);
- else {
+ } else {
all_const = 0;
if (is_SymConst(param))
weight += get_method_param_weight(ent, i);
}
callee_env = get_irg_link(callee);
- if (get_entity_visibility(ent) == visibility_local &&
- callee_env->n_callers_orig == 1 &&
- callee != current_ir_graph) {
+ if (callee_env->n_callers == 1 && callee != current_ir_graph) {
/* we are the only caller, give big bonus */
- weight += 5000;
+ if (get_entity_visibility(ent) == visibility_local) {
+ weight += 5000;
+ } else {
+ weight += 200;
+ }
}
/* do not inline big functions */
return weight;
}
+static ir_graph **irgs;
+static int last_irg;
+
+static void callgraph_walker(ir_graph *irg, void *data)
+{
+ (void) data;
+ irgs[last_irg++] = irg;
+}
+
+static ir_graph **create_irg_list(void)
+{
+ ir_entity **free_methods;
+ int arr_len;
+ int n_irgs = get_irp_n_irgs();
+
+ cgana(&arr_len, &free_methods);
+ xfree(free_methods);
+
+ compute_callgraph();
+
+ last_irg = 0;
+ irgs = xmalloc(n_irgs * sizeof(*irgs));
+ memset(irgs, 0, sizeof(n_irgs * sizeof(*irgs)));
+
+ callgraph_walk(NULL, callgraph_walker, NULL);
+ assert(n_irgs == last_irg);
+
+ return irgs;
+}
+
/**
* Heuristic inliner. Calculates a benefice value for every call and inlines
* those calls with a value higher than the threshold.
const call_entry *centry;
pmap *copied_graphs;
pmap_entry *pm_entry;
+ ir_graph **irgs;
rem = current_ir_graph;
obstack_init(&temp_obst);
+ irgs = create_irg_list();
+
/* a map for the copied graphs, used to inline recursive calls */
copied_graphs = pmap_create();
/* extend all irgs by a temporary data structure for inlining. */
n_irgs = get_irp_n_irgs();
for (i = 0; i < n_irgs; ++i)
- set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
+ set_irg_link(irgs[i], alloc_inline_irg_env());
/* Precompute information in temporary data structure. */
wenv.ignore_runtime = 0;
wenv.ignore_callers = 0;
for (i = 0; i < n_irgs; ++i) {
- ir_graph *irg = get_irp_irg(i);
+ ir_graph *irg = irgs[i];
- assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
wenv.x = get_irg_link(irg);
for (i = 0; i < n_irgs; ++i) {
int phiproj_computed = 0;
ir_node *call;
- ir_graph *irg = get_irp_irg(i);
+ ir_graph *irg = irgs[i];
current_ir_graph = irg;
env = get_irg_link(irg);
/* note that the list of possible calls is updated during the process */
last_call = &env->call_head;
for (curr_call = env->call_head; curr_call != NULL;) {
- ir_graph *callee;
- pmap_entry *e;
- int benefice;
- unsigned local_adr;
+ irg_inline_property prop;
+ ir_graph *callee;
+ pmap_entry *e;
+ int benefice;
+ unsigned local_adr;
if (env->n_nodes > maxsize) break;
call = curr_call->call;
callee = curr_call->callee;
+	prop = get_irg_inline_property(callee);
+	if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
+		/* do not inline forbidden / weak graphs; advance first, the
+		   loop increment is at the bottom and continue would skip it */
+		curr_call = curr_call->next;
+		continue;
+	}
+
e = pmap_find(copied_graphs, callee);
if (e != NULL) {
/*
benefice = calc_inline_benefice(call, callee, &local_adr);
DB((dbg, LEVEL_2, "In %+F Call %+F has benefice %d\n", irg, callee, benefice));
- if (benefice > -inline_threshold ||
- (get_irg_inline_property(callee) >= irg_inline_forced)) {
+ if (benefice > -inline_threshold || prop >= irg_inline_forced) {
if (current_ir_graph == callee) {
/*
* Recursive call: we cannot directly inline because we cannot walk
curr_call = curr_call->next;
}
+ }
+
+ for (i = 0; i < n_irgs; ++i) {
+ ir_graph *irg = irgs[i];
+
+ env = get_irg_link(irg);
if (env->got_inline) {
/* this irg got calls inlined: optimize it */
}
pmap_destroy(copied_graphs);
+ xfree(irgs);
+
obstack_free(&temp_obst, NULL);
current_ir_graph = rem;
}