* @author Michael Beck, Goetz Lindenmaier
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <limits.h>
#include <assert.h>
#include "irgmod.h"
#include "irgwalk.h"
-#include "adt/array.h"
-#include "adt/list.h"
-#include "adt/pset.h"
-#include "adt/pmap.h"
-#include "adt/pdeq.h"
-#include "adt/xmalloc.h"
-#include "adt/pqueue.h"
+#include "array_t.h"
+#include "list.h"
+#include "pset.h"
+#include "pmap.h"
+#include "pdeq.h"
+#include "xmalloc.h"
+#include "pqueue.h"
#include "irouts.h"
#include "irloop_t.h"
* accesses. This function is called for all Phi and Block nodes
* in a Block.
*/
-static INLINE int
+static inline int
compute_new_arity(ir_node *b) {
int i, res, irn_arity;
int irg_v, block_v;
graveyard_obst = irg->obst;
/* A new obstack, where the reachable nodes will be copied to. */
- rebirth_obst = xmalloc(sizeof(*rebirth_obst));
+ rebirth_obst = XMALLOC(struct obstack);
irg->obst = rebirth_obst;
obstack_init(irg->obst);
irg->last_node_idx = 0;
* Make a new Survive DCE environment.
*/
survive_dce_t *new_survive_dce(void) {
- survive_dce_t *res = xmalloc(sizeof(res[0]));
+ survive_dce_t *res = XMALLOC(survive_dce_t);
obstack_init(&res->obst);
res->places = pmap_create();
res->new_places = NULL;
res->dead_node_elim_subst.context = res;
res->dead_node_elim_subst.next = NULL;
-#ifndef FIRM_ENABLE_HOOKS
- assert(0 && "need hooks enabled");
-#endif
-
register_hook(hook_dead_node_elim, &res->dead_node_elim);
register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
return res;
* using alloca is called in loop. In GCC present in SPEC2000 inlining
* into schedule_block cause it to require 2GB of ram instead of 256MB.
*
- * Sorryly this is true with our implementation also.
+ * Sadly, this is true with our implementation also.
* Moreover, we cannot differentiate between alloca() and VLA yet, so this
* disables inlining of functions using VLA (with are completely save).
*
arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
- res_pred = xmalloc(n_res * sizeof(*res_pred));
- cf_pred = xmalloc(arity * sizeof(*res_pred));
+ res_pred = XMALLOCN(ir_node*, n_res);
+ cf_pred = XMALLOCN(ir_node*, arity);
set_irg_current_block(irg, post_bl); /* just to make sure */
n_exc++;
}
}
- main_end_bl = get_irg_end_block(irg);
+ main_end_bl = get_irg_end_block(irg);
main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+ end_preds = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
for (i = 0; i < main_end_bl_arity; ++i)
end_preds[i] = get_irn_n(main_end_bl, i);
}
/********************************************************************/
-/* Apply inlineing to small methods. */
+/* Apply inlining to small methods. */
/********************************************************************/
static struct obstack temp_obst;
/** Represents a possible inlinable call in a graph. */
typedef struct _call_entry {
- ir_node *call; /**< the Call node */
- ir_graph *callee; /**< the callee IR-graph called here */
- list_head list; /**< for linking the next one */
- int loop_depth; /**< the loop depth of this call */
- int benefice; /**< calculated benefice of this call */
- unsigned local_adr:1; /**< Set if this calls get an address of a local variable. */
- unsigned all_const:1; /**< Set if this calls has only constant parameters. */
+ ir_node *call; /**< The Call node. */
+ ir_graph *callee; /**< The callee IR-graph. */
+ list_head list; /**< List head for linking the next one. */
+ int loop_depth; /**< The loop depth of this call. */
+ int benefice; /**< The calculated benefice of this call. */
+ unsigned local_adr:1; /**< Set if this call gets an address of a local variable. */
+ unsigned all_const:1; /**< Set if this call has only constant parameters. */
} call_entry;
/**
* environment for inlining small irgs
*/
typedef struct _inline_env_t {
- struct obstack obst; /**< an obstack where call_entries are allocated on. */
- list_head calls; /**< the call entry list */
+ struct obstack obst; /**< An obstack where call_entries are allocated on. */
+ list_head calls; /**< The call entry list. */
} inline_env_t;
/**
free_callee_info(irg);
/* Find Call nodes to inline.
- (We can not inline during a walk of the graph, as inlineing the same
+ (We cannot inline during a walk of the graph, as inlining the same
method several times changes the visited flag of the walked graph:
- after the first inlineing visited of the callee equals visited of
- the caller. With the next inlineing both are increased.) */
+ after the first inlining the visited flag of the callee equals that of
+ the caller. With the next inlining both are increased.) */
obstack_init(&env.obst);
INIT_LIST_HEAD(&env.calls);
irg_walk_graph(irg, NULL, collect_calls, &env);
entry->call = call;
entry->callee = callee;
entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
+ entry->benefice = 0;
+ entry->local_adr = 0;
+ entry->all_const = 0;
list_add_tail(&entry->list, &x->calls);
}
* Returns TRUE if the number of callers is 0 in the irg's environment,
* hence this irg is a leave.
*/
-INLINE static int is_leave(ir_graph *irg) {
+inline static int is_leave(ir_graph *irg) {
inline_irg_env *env = get_irg_link(irg);
return env->n_call_nodes == 0;
}
* Returns TRUE if the number of nodes in the callee is
* smaller then size in the irg's environment.
*/
-INLINE static int is_smaller(ir_graph *callee, unsigned size) {
+inline static int is_smaller(ir_graph *callee, unsigned size) {
inline_irg_env *env = get_irg_link(callee);
return env->n_nodes < size;
}
for (i = 0; i < n_irgs; ++i)
set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
- /* Precompute information in temporary data structure. */
+ /* Pre-compute information in temporary data structure. */
wenv.ignore_runtime = ignore_runtime;
wenv.ignore_callers = 0;
for (i = 0; i < n_irgs; ++i) {
if (did_inline) {
inline_irg_env *callee_env = get_irg_link(callee);
- /* was inlined, must be recomputed */
+ /* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
/* Do some statistics */
if (did_inline) {
inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
- /* was inlined, must be recomputed */
+ /* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
/* callee was inline. Append it's call list. */
compute_callgraph();
last_irg = 0;
- irgs = xmalloc(n_irgs * sizeof(*irgs));
- memset(irgs, 0, sizeof(n_irgs * sizeof(*irgs)));
+ irgs = XMALLOCNZ(ir_graph*, n_irgs);
callgraph_walk(NULL, callgraph_walker, NULL);
assert(n_irgs == last_irg);
}
/**
- * Push a call onto the priority list if its
- * benefice is big enough.
+ * Push a call onto the priority list if its benefice is big enough.
*
* @param pqueue the priority queue of calls
* @param call the call entry
static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
int inline_threshold)
{
- int benefice;
ir_graph *callee = call->callee;
irg_inline_property prop = get_irg_inline_property(callee);
+ int benefice = calc_inline_benefice(call, callee);
- if (prop & irg_inline_forced) {
- /* give them a big benefice, so forced are inline first */
- benefice = 100000 + call->loop_depth;
- call->benefice = benefice;
- DB((dbg, LEVEL_2, "In %+F Call %+F to %+F is forced\n",
- get_irn_irg(call->call), call->call, callee));
- } else {
- benefice = calc_inline_benefice(call, callee);
- DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
- get_irn_irg(call->call), call->call, callee, benefice));
- }
+ DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
+ get_irn_irg(call->call), call->call, callee, benefice));
- if (benefice < inline_threshold && !(prop & irg_inline_forced))
+ if (prop < irg_inline_forced && benefice < inline_threshold) {
return;
+ }
pqueue_put(pqueue, call, benefice);
}
ir_graph *callee = curr_call->callee;
ir_node *call_node = curr_call->call;
inline_irg_env *callee_env = get_irg_link(callee);
+ irg_inline_property prop = get_irg_inline_property(callee);
int loop_depth;
const call_entry *centry;
pmap_entry *e;
- /* we need a hard limit here, else it would be possible to inline
- * recursive functions forever. */
- if (env->n_nodes + callee_env->n_nodes > maxsize) {
+ if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
env->n_nodes, callee, callee_env->n_nodes));
continue;
if (!did_inline)
continue;
- /* got inlined, must be recomputed */
+ /* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
/* remove it from the caller list */
del_pqueue(pqueue);
}
-/**
+/*
* Heuristic inliner. Calculates a benefice value for every call and inlines
* those calls with a value higher than the threshold.
*/
for (i = 0; i < n_irgs; ++i)
set_irg_link(irgs[i], alloc_inline_irg_env());
- /* Precompute information in temporary data structure. */
+ /* Pre-compute information in temporary data structure. */
wenv.ignore_runtime = 0;
wenv.ignore_callers = 0;
for (i = 0; i < n_irgs; ++i) {
env = get_irg_link(irg);
if (env->got_inline) {
/* this irg got calls inlined: optimize it */
-
- if (0) {
- /* scalar replacement does not work well with Tuple nodes, so optimize them away */
- optimize_graph_df(irg);
-
+ if (get_opt_combo()) {
+ if (env->local_vars) {
+ scalar_replacement_opt(irg);
+ }
+ combo(irg);
+ } else {
if (env->local_vars) {
if (scalar_replacement_opt(irg)) {
optimize_graph_df(irg);
}
}
optimize_cf(irg);
- } else {
- if (env->local_vars) {
- scalar_replacement_opt(irg);
- }
- combo(irg);
}
}
if (env->got_inline || (env->n_callers_orig != env->n_callers)) {