* @author Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include <adt/raw_bitset.h>
-#include "funccall_t.h"
+#include "opt_init.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irloop_t.h"
#include "ircons.h"
#include "iredges_t.h"
+#include "irpass_t.h"
+#include "iroptimize.h"
#include "analyze_irg_args.h"
#include "irhooks.h"
#include "debug.h"
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
- case pn_Call_M_regular:
+ case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
- case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
assert(get_irn_mode(mem) == mode_M);
switch (get_Proj_proj(proj)) {
- case pn_Call_M_regular: {
+ case pn_Call_M: {
/* in dead code there might be cycles where proj == mem */
if (proj != mem)
exchange(proj, mem);
break;
}
case pn_Call_X_except:
- case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
exc_changed = 1;
- exchange(proj, new_r_Jmp(irg, block));
+ exchange(proj, new_r_Jmp(block));
break;
}
default:
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
- case pn_Call_M_regular:
+ case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
- case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
/* kill any exception flow */
switch (get_Proj_proj(proj)) {
case pn_Call_X_except:
- case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
exc_changed = 1;
- exchange(proj, new_r_Jmp(irg, block));
+ exchange(proj, new_r_Jmp(block));
break;
}
default:
* the mtp_property.
*
* @return mtp_property_const if only calls of const functions are detected
- * mtp_property_pure if only Loads and const/pure
- * calls detected
- * mtp_no_property else
+ * mtp_property_pure if only Loads and const/pure calls detected
+ * mtp_no_property else
*/
static unsigned _follow_mem(ir_node *node) {
unsigned m, mode = mtp_property_const;
if (mode == mtp_no_property)
return mtp_no_property;
- if (irn_visited(node))
+ if (irn_visited_else_mark(node))
return mode;
- mark_irn_visited(node);
-
switch (get_irn_opcode(node)) {
case iro_Proj:
node = get_Proj_pred(node);
case iro_Call:
/* A call is only tolerable if its either constant or pure. */
ptr = get_Call_ptr(node);
- if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
+ if (is_SymConst_addr_ent(ptr)) {
ir_entity *ent = get_SymConst_entity(ptr);
ir_graph *irg = get_entity_irg(ent);
current_ir_graph = irg;
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
inc_irg_visited(irg);
- /* mark the initial mem: recursion of follow_mem stops here */
+ /* mark the initial mem: recursion of follow_mem() stops here */
mark_irn_visited(get_irg_initial_mem(irg));
/* visit every Return */
if (top)
SET_IRG_READY(irg);
CLEAR_IRG_BUSY(irg);
+ ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
current_ir_graph = rem;
return prop;
} /* check_const_or_pure_function */
ctx->nonfloat_const_call_list = NULL;
ctx->pure_call_list = NULL;
ctx->proj_list = NULL;
+
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
- if (ctx->float_const_call_list != NULL) {
+ if (ctx->float_const_call_list != NULL)
fix_const_call_lists(irg, ctx);
-
- /* this graph was changed, invalidate analysis info */
- set_irg_outs_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
- }
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
}
} /* handle_const_Calls */
ctx->nothrow_call_list = NULL;
ctx->proj_list = NULL;
+
+ ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
- if (ctx->nothrow_call_list) {
+ if (ctx->nothrow_call_list)
fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
-
- /* this graph was changed, invalidate analysis info */
- set_irg_outs_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
- }
+ ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
}
}
/**
* When a function was detected as "const", it might be moved out of loops.
- * This might be dangerous if the graph might contain endless loops.
+ * This might be dangerous if the graph can contain endless loops.
*/
static void check_for_possible_endless_loops(ir_graph *irg) {
ir_loop *root_loop;
is_alloc_entity = callback;
/* prepare: mark all graphs as not analyzed */
- last_idx = get_irp_last_idx();
+ last_idx = get_irp_last_idx();
ready_set = rbitset_malloc(last_idx);
busy_set = rbitset_malloc(last_idx);
/* initialize the funccall optimization */
void firm_init_funccalls(void) {
	FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
+	/* debug output stays off by default; re-enable locally with
+	 * firm_dbg_set_mask(dbg, -1) when debugging this optimization */
-//	firm_dbg_set_mask(dbg, -1);
} /* firm_init_funccalls */
+
+/**
+ * Context for running optimize_funccalls() through the ir_prog pass
+ * manager: the generic pass descriptor plus the two arguments of
+ * optimize_funccalls() captured at pass-construction time.
+ */
+struct pass_t {
+	/* generic pass descriptor; presumably handed back as the context
+	 * pointer of pass_wrapper(), so it must stay the first member —
+	 * TODO confirm against def_prog_pass_constructor() */
+	ir_prog_pass_t pass;
+	/* forwarded as the force_run argument of optimize_funccalls() */
+	int force_run;
+	/* forwarded as the callback argument of optimize_funccalls() */
+	check_alloc_entity_func callback;
+};
+
+/**
+ * Wrapper for running optimize_funccalls() as an ir_prog pass.
+ *
+ * @param irp      the ir_prog (unused here)
+ * @param context  the struct pass_t built by optimize_funccalls_pass();
+ *                 NOTE(review): assumes the framework passes &pass->pass
+ *                 (the first member) back as context — confirm
+ *
+ * @return always 0
+ */
+static int pass_wrapper(ir_prog *irp, void *context)
+{
+	struct pass_t *pass = context;
+
+	(void)irp;
+	optimize_funccalls(pass->force_run, pass->callback);
+	return 0;
+}  /* pass_wrapper */
+
+/**
+ * Creates an ir_prog pass for optimize_funccalls().
+ *
+ * @param name       the pass name, or NULL to use the default "funccall"
+ * @param force_run  stored and forwarded to optimize_funccalls() when
+ *                   the pass is executed
+ * @param callback   stored and forwarded to optimize_funccalls() when
+ *                   the pass is executed
+ *
+ * @return the newly constructed pass descriptor; the zero-initialized
+ *         struct pass_t is heap-allocated here — NOTE(review): who frees
+ *         it is not visible in this hunk, presumably the pass manager
+ */
+ir_prog_pass_t *optimize_funccalls_pass(
+	const char *name,
+	int force_run, check_alloc_entity_func callback)
+{
+	struct pass_t *pass = XMALLOCZ(struct pass_t);
+
+	pass->force_run = force_run;
+	pass->callback  = callback;
+
+	return def_prog_pass_constructor(
+		&pass->pass, name ? name : "funccall", pass_wrapper);
+}  /* optimize_funccalls_pass */