*/
#include "config.h"
-#include <adt/raw_bitset.h>
-
-#include "funccall_t.h"
+#include "opt_init.h"
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irloop_t.h"
#include "ircons.h"
#include "iredges_t.h"
+#include "irpass_t.h"
+#include "iroptimize.h"
#include "analyze_irg_args.h"
#include "irhooks.h"
+#include "raw_bitset.h"
#include "debug.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
* Walker: Collect all calls to const and pure functions
* to lists. Collect all Proj(Call) nodes into a Proj list.
*/
-static void collect_const_and_pure_calls(ir_node *node, void *env) {
+static void collect_const_and_pure_calls(ir_node *node, void *env)
+{
env_t *ctx = env;
ir_node *call, *ptr;
ir_entity *ent;
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
- case pn_Call_M_regular:
+ case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
- case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
* @param irg the graph that contained calls to pure functions
* @param ctx context
*/
-static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
+static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
+{
ir_node *call, *next, *mem, *proj;
int exc_changed = 0;
ir_graph *rem = current_ir_graph;
assert(get_irn_mode(mem) == mode_M);
switch (get_Proj_proj(proj)) {
- case pn_Call_M_regular: {
+ case pn_Call_M: {
/* in dead code there might be cycles where proj == mem */
if (proj != mem)
exchange(proj, mem);
break;
}
case pn_Call_X_except:
- case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
exc_changed = 1;
- exchange(proj, new_r_Jmp(irg, block));
+ exchange(proj, new_r_Jmp(block));
break;
}
default:
- ;
+ break;
}
}
* Walker: Collect all calls to nothrow functions
* to lists. Collect all Proj(Call) nodes into a Proj list.
*/
-static void collect_nothrow_calls(ir_node *node, void *env) {
+static void collect_nothrow_calls(ir_node *node, void *env)
+{
env_t *ctx = env;
ir_node *call, *ptr;
ir_entity *ent;
/* collect the Proj's in the Proj list */
switch (get_Proj_proj(node)) {
- case pn_Call_M_regular:
+ case pn_Call_M:
case pn_Call_X_except:
case pn_Call_X_regular:
- case pn_Call_M_except:
set_irn_link(node, ctx->proj_list);
ctx->proj_list = node;
break;
* @param call_list the list of all call sites of const functions
* @param proj_list the list of all memory/exception Proj's of this call sites
*/
-static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
+{
ir_node *call, *next, *proj;
int exc_changed = 0;
ir_graph *rem = current_ir_graph;
/* kill any exception flow */
switch (get_Proj_proj(proj)) {
case pn_Call_X_except:
- case pn_Call_M_except:
exc_changed = 1;
exchange(proj, get_irg_bad(irg));
break;
case pn_Call_X_regular: {
ir_node *block = get_nodes_block(call);
exc_changed = 1;
- exchange(proj, new_r_Jmp(irg, block));
+ exchange(proj, new_r_Jmp(block));
break;
}
default:
- ;
+ break;
}
}
/* forward */
static unsigned check_const_or_pure_function(ir_graph *irg, int top);
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
/**
* Calculate the bigger property of two. Handle the temporary flag right.
*/
-static unsigned max_property(unsigned a, unsigned b) {
+static unsigned max_property(unsigned a, unsigned b)
+{
unsigned r, t = (a | b) & mtp_temporary;
a &= ~mtp_temporary;
b &= ~mtp_temporary;
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned _follow_mem(ir_node *node) {
+static unsigned _follow_mem(ir_node *node)
+{
unsigned m, mode = mtp_property_const;
ir_node *ptr;
int i;
* mtp_property_pure if only Loads and const/pure calls detected
* mtp_no_property else
*/
-static unsigned follow_mem(ir_node *node, unsigned mode) {
+static unsigned follow_mem(ir_node *node, unsigned mode)
+{
unsigned m;
m = _follow_mem(node);
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
+static unsigned check_const_or_pure_function(ir_graph *irg, int top)
+{
ir_node *end, *endbl;
int j;
unsigned prop = get_irg_additional_properties(irg);
*
* @param ctx context
*/
-static void handle_const_Calls(env_t *ctx) {
+static void handle_const_Calls(env_t *ctx)
+{
int i;
ctx->n_calls_SymConst = 0;
*
* @param ctx context
*/
-static void handle_nothrow_Calls(env_t *ctx) {
+static void handle_nothrow_Calls(env_t *ctx)
+{
int i;
ctx->n_calls_SymConst = 0;
*
* @param node the node to check
*/
-static int is_malloc_call_result(const ir_node *node) {
+static int is_malloc_call_result(const ir_node *node)
+{
if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
/* Firm style high-level allocation */
return 1;
/**
* Update a property depending on a call property.
*/
-static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
+static unsigned update_property(unsigned orig_prop, unsigned call_prop)
+{
unsigned t = (orig_prop | call_prop) & mtp_temporary;
unsigned r = orig_prop & call_prop;
return r | t;
/**
* Check if a node is stored.
*/
-static int is_stored(const ir_node *n) {
+static int is_stored(const ir_node *n)
+{
const ir_edge_t *edge;
const ir_node *ptr;
*
* return ~mtp_property_malloc if return values are stored, ~0 else
*/
-static unsigned check_stored_result(ir_graph *irg) {
+static unsigned check_stored_result(ir_graph *irg)
+{
ir_node *end_blk = get_irg_end_block(irg);
int i, j;
unsigned res = ~0;
* @param irg the graph to check
* @param top if set, this is the top call
*/
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
+static unsigned check_nothrow_or_malloc(ir_graph *irg, int top)
+{
ir_node *end_blk = get_irg_end_block(irg);
ir_entity *ent;
ir_type *mtp;
* When a function was detected as "const", it might be moved out of loops.
* This might be dangerous if the graph can contain endless loops.
*/
-static void check_for_possible_endless_loops(ir_graph *irg) {
+static void check_for_possible_endless_loops(ir_graph *irg)
+{
ir_loop *root_loop;
assure_cf_loop(irg);
} /* optimize_funccalls */
/* initialize the funccall optimization */
-void firm_init_funccalls(void) {
+void firm_init_funccalls(void)
+{
FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
} /* firm_init_funccalls */
+
+/**
+ * Context carried by the optimize_funccalls ir_prog pass:
+ * stores the arguments of optimize_funccalls() until the pass runs.
+ */
+struct pass_t {
+	ir_prog_pass_t pass; /**< the generic pass data; must be the first member */
+	int force_run; /**< non-zero: treat detected properties as proven, not temporary */
+	check_alloc_entity_func callback; /**< user callback to classify malloc-like entities; may be NULL */
+};
+
+/**
+ * Wrapper for running optimize_funccalls() as an ir_prog pass.
+ *
+ * @param irp      the ir_prog (unused; the optimization visits all graphs itself)
+ * @param context  the enclosing struct pass_t holding the stored arguments
+ *
+ * @return always 0 (the optimization reports no pass-level failure)
+ */
+static int pass_wrapper(ir_prog *irp, void *context)
+{
+	struct pass_t *pass = context;
+
+	(void)irp; /* deliberately unused, see above */
+	optimize_funccalls(pass->force_run, pass->callback);
+	return 0;
+} /* pass_wrapper */
+
+/* Creates an ir_prog pass for optimize_funccalls.
+ *
+ * name       pass name, or NULL to use the default name "funccall"
+ * force_run  forwarded to optimize_funccalls() when the pass runs
+ * callback   forwarded to optimize_funccalls() when the pass runs
+ *
+ * Returns a heap-allocated pass descriptor; ownership follows the usual
+ * pass-manager convention for def_prog_pass_constructor()-built passes. */
+ir_prog_pass_t *optimize_funccalls_pass(
+	const char *name,
+	int force_run, check_alloc_entity_func callback)
+{
+	/* XMALLOCZ zero-initializes, so all pass fields not set below start as 0 */
+	struct pass_t *pass = XMALLOCZ(struct pass_t);
+
+	pass->force_run = force_run;
+	pass->callback = callback;
+
+	return def_prog_pass_constructor(
+		&pass->pass, name ? name : "funccall", pass_wrapper);
+} /* optimize_funccalls_pass */