* @file
* @brief Dead node elimination and Procedure Inlining.
* @author Michael Beck, Goetz Lindenmaier
- * @version $Id$
*/
#include "config.h"
#include "irtools.h"
#include "iropt_dbg.h"
#include "irpass_t.h"
-#include "irphase_t.h"
+#include "irnodemap.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
{
bool *allow_inline = (bool*)env;
- if (is_Sel(node)) {
+ if (is_Block(node) && get_Block_entity(node)) {
+ /*
+ * Currently we can't handle blocks whose address was taken correctly
+ * when inlining
+ */
+ *allow_inline = false;
+ } else if (is_Sel(node)) {
ir_graph *irg = current_ir_graph;
if (get_Sel_ptr(node) == get_irg_frame(irg)) {
/* access to frame */
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_pinned(irg) == op_pin_state_pinned);
assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
- set_irg_extblk_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+ | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS
+ | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
- set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE);
edges_deactivate(irg);
/* here we know we WILL inline, so inform the statistics */
ir_node *addr;
addr = get_Call_ptr(call);
- if (is_Global(addr)) {
- ir_entity *ent = get_Global_entity(addr);
+ if (is_SymConst_addr_ent(addr)) {
+ ir_entity *ent = get_SymConst_entity(addr);
/* we don't know which function gets finally bound to a weak symbol */
if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
return NULL;
if (env->ignore_runtime) {
ir_node *symc = get_Call_ptr(call);
- if (is_Global(symc)) {
- ir_entity *ent = get_Global_entity(symc);
+ if (is_SymConst_addr_ent(symc)) {
+ ir_entity *ent = get_SymConst_entity(symc);
if (get_entity_additional_properties(ent) & mtp_property_runtime)
return;
/**
* Returns TRUE if the number of callers is 0 in the irg's environment,
- * hence this irg is a leave.
+ * hence this irg is a leaf.
*/
-inline static int is_leave(ir_graph *irg)
+inline static int is_leaf(ir_graph *irg)
{
inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
return env->n_call_nodes == 0;
}
/*
- * Inlines small leave methods at call sites where the called address comes
+ * Inlines small leaf methods at call sites where the called address comes
* from a Const node that references the entity representing the called
* method.
* The size argument is a rough measure for the code size of the method:
* Methods where the obstack containing the firm graph is smaller than
* size are inlined.
*/
-void inline_leave_functions(unsigned maxsize, unsigned leavesize,
+void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
unsigned size, int ignore_runtime)
{
inline_irg_env *env;
assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
- assure_cf_loop(irg);
+ assure_loopinfo(irg);
wenv.x = (inline_irg_env*)get_irg_link(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}
/* -- and now inline. -- */
- /* Inline leaves recursively -- we might construct new leaves. */
+ /* Inline leaf functions recursively -- we might construct new leaf functions. */
do {
did_inline = 0;
continue;
}
- if (is_leave(callee) && (
- is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
+ if (is_leaf(callee) && (
+ is_smaller(callee, leafsize) || prop >= irg_inline_forced)) {
if (!phiproj_computed) {
phiproj_computed = 1;
collect_phiprojs(current_ir_graph);
list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
irg_inline_property prop;
ir_graph *callee;
- pmap_entry *e;
+ ir_graph *calleee;
call = entry->call;
callee = entry->callee;
continue;
}
- e = pmap_find(copied_graphs, callee);
- if (e != NULL) {
+ calleee = (ir_graph*)pmap_get(copied_graphs, callee);
+ if (calleee != NULL) {
/*
* Remap callee if we have a copy.
* FIXME: Should we do this only for recursive Calls ?
*/
- callee = (ir_graph*)e->value;
+ callee = calleee;
}
if (prop >= irg_inline_forced ||
/*
* No copy yet, create one.
- * Note that recursive methods are never leaves, so it is sufficient
+ * Note that recursive methods are never leaf functions, so it is sufficient
* to test this condition here.
*/
copy = create_irg_copy(callee);
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- assure_cf_loop(copy);
+ assure_loopinfo(copy);
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
current_ir_graph = rem;
}
-typedef struct inline_leave_functions_pass_t {
+typedef struct inline_leaf_functions_pass_t {
ir_prog_pass_t pass;
unsigned maxsize;
- unsigned leavesize;
+ unsigned leafsize;
unsigned size;
int ignore_runtime;
-} inline_leave_functions_pass_t;
+} inline_leaf_functions_pass_t;
/**
- * Wrapper to run inline_leave_functions() as a ir_prog pass.
+ * Wrapper to run inline_leaf_functions() as a ir_prog pass.
*/
-static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
+static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
{
- inline_leave_functions_pass_t *pass = (inline_leave_functions_pass_t*)context;
+ inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
(void)irp;
- inline_leave_functions(
- pass->maxsize, pass->leavesize,
+ inline_leaf_functions(
+ pass->maxsize, pass->leafsize,
pass->size, pass->ignore_runtime);
return 0;
}
-/* create a pass for inline_leave_functions() */
-ir_prog_pass_t *inline_leave_functions_pass(
- const char *name, unsigned maxsize, unsigned leavesize,
+/* create a pass for inline_leaf_functions() */
+ir_prog_pass_t *inline_leaf_functions_pass(
+ const char *name, unsigned maxsize, unsigned leafsize,
unsigned size, int ignore_runtime)
{
- inline_leave_functions_pass_t *pass = XMALLOCZ(inline_leave_functions_pass_t);
+ inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
pass->maxsize = maxsize;
- pass->leavesize = leavesize;
+ pass->leafsize = leafsize;
pass->size = size;
pass->ignore_runtime = ignore_runtime;
return def_prog_pass_constructor(
&pass->pass,
- name ? name : "inline_leave_functions",
- inline_leave_functions_wrapper);
+ name ? name : "inline_leaf_functions",
+ inline_leaf_functions_wrapper);
}
/**
if (callee_env->n_nodes < 30 && !callee_env->recursive)
weight += 2000;
- /* and finally for leaves: they do not increase the register pressure
+ /* and finally for leaf functions: they do not increase the register pressure
because of callee safe registers */
if (callee_env->n_call_nodes == 0)
weight += 400;
callgraph_walk(NULL, callgraph_walker, &env);
assert(n_irgs == env.last_irg);
+ free_callgraph();
+
return env.irgs;
}
ir_node *call_node = curr_call->call;
inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
irg_inline_property prop = get_irg_inline_property(callee);
+ ir_graph *calleee;
int loop_depth;
const call_entry *centry;
- pmap_entry *e;
if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
continue;
}
- e = pmap_find(copied_graphs, callee);
- if (e != NULL) {
+ calleee = (ir_graph*)pmap_get(copied_graphs, callee);
+ if (calleee != NULL) {
int benefice = curr_call->benefice;
/*
* Reduce the weight for recursive function IFF not all arguments are const.
/*
* Remap callee if we have a copy.
*/
- callee = (ir_graph*)e->value;
+ callee = calleee;
callee_env = (inline_irg_env*)get_irg_link(callee);
}
/*
* No copy yet, create one.
- * Note that recursive methods are never leaves, so it is
+ * Note that recursive methods are never leaf functions, so it is
* sufficient to test this condition here.
*/
copy = create_irg_copy(callee);
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- assure_cf_loop(copy);
+ assure_loopinfo(copy);
memset(&wenv, 0, sizeof(wenv));
wenv.x = callee_env;
wenv.ignore_callers = 1;
free_callee_info(irg);
wenv.x = (inline_irg_env*)get_irg_link(irg);
- assure_cf_loop(irg);
+ assure_loopinfo(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}