* @author Michael Beck, Goetz Lindenmaier
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <limits.h>
#include <assert.h>
#include "irgmod.h"
#include "irgwalk.h"
-#include "adt/array.h"
-#include "adt/list.h"
-#include "adt/pset.h"
-#include "adt/pmap.h"
-#include "adt/pdeq.h"
-#include "adt/xmalloc.h"
-#include "adt/pqueue.h"
+#include "array_t.h"
+#include "list.h"
+#include "pset.h"
+#include "pmap.h"
+#include "pdeq.h"
+#include "xmalloc.h"
+#include "pqueue.h"
#include "irouts.h"
#include "irloop_t.h"
* accesses. This function is called for all Phi and Block nodes
* in a Block.
*/
-static INLINE int
+static inline int
compute_new_arity(ir_node *b) {
int i, res, irn_arity;
int irg_v, block_v;
}
copy_node_attr(n, nn);
-#ifdef DEBUG_libfirm
- {
- int copy_node_nr = env != NULL;
- if (copy_node_nr) {
- /* for easier debugging, we want to copy the node numbers too */
- nn->node_nr = n->node_nr;
- }
+ if (env != NULL) {
+ /* for easier debugging, we want to copy the node numbers too */
+ nn->node_nr = n->node_nr;
}
-#endif
set_new_node(n, nn);
hook_dead_node_elim_subst(current_ir_graph, n, nn);
in array contained Bads. Now it's possible.
We don't call optimize_in_place as it requires
that the fields in ir_graph are set properly. */
- if ((get_opt_control_flow_straightening()) &&
- (get_Block_n_cfgpreds(nn) == 1) &&
- is_Jmp(get_Block_cfgpred(nn, 0))) {
+ if (!has_Block_label(nn) &&
+ get_opt_control_flow_straightening() &&
+ get_Block_n_cfgpreds(nn) == 1 &&
+ is_Jmp(get_Block_cfgpred(nn, 0))) {
ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
if (nn == old) {
/* Jmp jumps into the block it is in -- deal self cycle. */
graveyard_obst = irg->obst;
/* A new obstack, where the reachable nodes will be copied to. */
- rebirth_obst = xmalloc(sizeof(*rebirth_obst));
+ rebirth_obst = XMALLOC(struct obstack);
irg->obst = rebirth_obst;
obstack_init(irg->obst);
irg->last_node_idx = 0;
* Make a new Survive DCE environment.
*/
survive_dce_t *new_survive_dce(void) {
- survive_dce_t *res = xmalloc(sizeof(res[0]));
+ survive_dce_t *res = XMALLOC(survive_dce_t);
obstack_init(&res->obst);
res->places = pmap_create();
res->new_places = NULL;
res->dead_node_elim_subst.context = res;
res->dead_node_elim_subst.next = NULL;
-#ifndef FIRM_ENABLE_HOOKS
- assert(0 && "need hooks enabled");
-#endif
-
register_hook(hook_dead_node_elim, &res->dead_node_elim);
register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
return res;
*/
static void find_addr(ir_node *node, void *env) {
int *allow_inline = env;
- if (is_Proj(node) &&
- is_Start(get_Proj_pred(node)) &&
- get_Proj_proj(node) == pn_Start_P_value_arg_base) {
- *allow_inline = 0;
+ if (is_Sel(node)) {
+ ir_graph *irg = current_ir_graph;
+ if (get_Sel_ptr(node) == get_irg_frame(irg)) {
+ /* access to frame */
+ ir_entity *ent = get_Sel_entity(node);
+ if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
+ /* access to value_type */
+ *allow_inline = 0;
+ }
+ }
} else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
/* From GCC:
* Refuse to inline alloca call unless user explicitly forced so as this
mtp = get_entity_type(ent);
ctp = get_Call_type(call);
if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
- /* this is a bad feature of C: without a prototype, we can can call a function with less
- parameters than needed. Currently we don't support this, although it would be
- to use Unknown than. */
+		/* this is a bad feature of C: without a prototype, we can
+		 * call a function with fewer parameters than needed. Currently
+		 * we don't support this, although we could use Unknown then. */
return 0;
}
/* Argh, compiling C has some bad consequences:
- the call type AND the method type might be different.
- It is implementation defendant what happens in that case.
- We support inlining, if the bitsize of the types matches AND
- the same arithmetic is used. */
+ * It is implementation dependent what happens in that case.
+ * We support inlining, if the bitsize of the types matches AND
+ * the same arithmetic is used. */
n_params = get_method_n_params(mtp);
for (i = n_params - 1; i >= 0; --i) {
ir_type *param_tp = get_method_param_type(mtp, i);
set_irg_doms_inconsistent(irg);
set_irg_loopinfo_inconsistent(irg);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
+ set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
/* -- Check preconditions -- */
assert(is_Call(call));
in[pn_Start_P_frame_base] = get_irg_frame(irg);
in[pn_Start_P_tls] = get_irg_tls(irg);
in[pn_Start_T_args] = new_Tuple(n_params, args_in);
- /* in[pn_Start_P_value_arg_base] = ??? */
- assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
- pre_call = new_Tuple(pn_Start_max - 1, in);
+ pre_call = new_Tuple(pn_Start_max, in);
post_call = call;
/* --
/* -- Replicate local entities of the called_graph -- */
/* copy the entities. */
+ irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
called_frame = get_irg_frame_type(called_graph);
curr_frame = get_irg_frame_type(irg);
for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
get_irg_frame_type(called_graph));
+ irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+
/* Repair called_graph */
set_irg_visited(called_graph, get_irg_visited(irg));
set_irg_block_visited(called_graph, get_irg_block_visited(irg));
arity = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret */
n_res = get_method_n_ress(get_Call_type(call));
- res_pred = xmalloc(n_res * sizeof(*res_pred));
- cf_pred = xmalloc(arity * sizeof(*res_pred));
+ res_pred = XMALLOCN(ir_node*, n_res);
+ cf_pred = XMALLOCN(ir_node*, arity);
set_irg_current_block(irg, post_bl); /* just to make sure */
}
}
if (n_exc > 0) {
- new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
+ ir_node *block = new_Block(n_exc, cf_pred);
+ set_cur_block(block);
+
set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
/* The Phi for the memories with the exception objects */
n_exc = 0;
n_exc++;
}
}
- main_end_bl = get_irg_end_block(irg);
+ main_end_bl = get_irg_end_block(irg);
main_end_bl_arity = get_irn_arity(main_end_bl);
- end_preds = xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+ end_preds = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
for (i = 0; i < main_end_bl_arity; ++i)
end_preds[i] = get_irn_n(main_end_bl, i);
if (! list_empty(&env.calls)) {
/* There are calls to inline */
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
collect_phiprojs(irg);
list_for_each_entry(call_entry, entry, &env.calls, list) {
inline_method(entry->call, callee);
}
}
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
obstack_free(&env.obst, NULL);
current_ir_graph = rem;
* Returns TRUE if the number of callers is 0 in the irg's environment,
* hence this irg is a leave.
*/
-INLINE static int is_leave(ir_graph *irg) {
+inline static int is_leave(ir_graph *irg) {
inline_irg_env *env = get_irg_link(irg);
return env->n_call_nodes == 0;
}
* Returns TRUE if the number of nodes in the callee is
* smaller then size in the irg's environment.
*/
-INLINE static int is_smaller(ir_graph *callee, unsigned size) {
+inline static int is_smaller(ir_graph *callee, unsigned size) {
inline_irg_env *env = get_irg_link(callee);
return env->n_nodes < size;
}
current_ir_graph = get_irp_irg(i);
env = get_irg_link(current_ir_graph);
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
ir_graph *callee;
irg_inline_property prop;
}
}
}
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
} while (did_inline);
current_ir_graph = get_irp_irg(i);
env = get_irg_link(current_ir_graph);
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/* note that the list of possible calls is updated during the process */
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
irg_inline_property prop;
inline_irg_env *callee_env;
ir_graph *copy;
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/*
* No copy yet, create one.
* Note that recursive methods are never leaves, so it is sufficient
/* create_irg_copy() destroys the Proj links, recompute them */
phiproj_computed = 0;
+ ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/* allocate new environment */
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
}
}
}
+ ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
}
for (i = 0; i < n_irgs; ++i) {
compute_callgraph();
last_irg = 0;
- irgs = xmalloc(n_irgs * sizeof(*irgs));
- memset(irgs, 0, sizeof(n_irgs * sizeof(*irgs)));
+ irgs = XMALLOCNZ(ir_graph*, n_irgs);
callgraph_walk(NULL, callgraph_walker, NULL);
assert(n_irgs == last_irg);
}
/**
- * Push a call onto the priority list if its
- * benefice is big enough.
+ * Push a call onto the priority list if its benefice is big enough.
*
* @param pqueue the priority queue of calls
* @param call the call entry
static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
int inline_threshold)
{
- int benefice;
ir_graph *callee = call->callee;
irg_inline_property prop = get_irg_inline_property(callee);
+ int benefice = calc_inline_benefice(call, callee);
- if (prop >= irg_inline_forced) {
- /* give them a big benefice, so forced are inline first */
- benefice = 100000 + call->loop_depth;
- call->benefice = benefice;
- DB((dbg, LEVEL_2, "In %+F Call %+F to %+F is forced\n",
- get_irn_irg(call->call), call->call, callee));
- } else {
- benefice = calc_inline_benefice(call, callee);
- DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
- get_irn_irg(call->call), call->call, callee, benefice));
- }
+ DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
+ get_irn_irg(call->call), call->call, callee, benefice));
- if (benefice < inline_threshold && prop < irg_inline_forced)
+ if (prop < irg_inline_forced && benefice < inline_threshold) {
return;
+ }
pqueue_put(pqueue, call, benefice);
}
}
current_ir_graph = irg;
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
/* put irgs into the pqueue */
pqueue = new_pqueue();
if (benefice < inline_threshold)
continue;
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/*
* No copy yet, create one.
* Note that recursive methods are never leaves, so it is
/* create_irg_copy() destroys the Proj links, recompute them */
phiproj_computed = 0;
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
/* allocate a new environment */
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
env->n_nodes += callee_env->n_nodes;
--callee_env->n_callers;
}
-
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
del_pqueue(pqueue);
}
env = get_irg_link(irg);
if (env->got_inline) {
/* this irg got calls inlined: optimize it */
-
- if (0) {
- /* scalar replacement does not work well with Tuple nodes, so optimize them away */
- optimize_graph_df(irg);
-
+ if (get_opt_combo()) {
+ if (env->local_vars) {
+ scalar_replacement_opt(irg);
+ }
+ combo(irg);
+ } else {
if (env->local_vars) {
if (scalar_replacement_opt(irg)) {
optimize_graph_df(irg);
}
}
optimize_cf(irg);
- } else {
- if (env->local_vars) {
- scalar_replacement_opt(irg);
- }
- combo(irg);
}
}
if (env->got_inline || (env->n_callers_orig != env->n_callers)) {