X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Ffunccall.c;h=8b0554f1efb5b0e8c5d5ace1c0ed38bdbfd1d40b;hb=bf9be89cbbf5d6ca1b80fd5736b80f47c1a14e90;hp=5ce0d7413afb7051ec42e55ce01c613042922a30;hpb=f8b8a445d2c65da173ad640978a5687761a3a620;p=libfirm

diff --git a/ir/opt/funccall.c b/ir/opt/funccall.c
index 5ce0d7413..8b0554f1e 100644
--- a/ir/opt/funccall.c
+++ b/ir/opt/funccall.c
@@ -27,7 +27,7 @@
 
 #include
 
-#include "funccall_t.h"
+#include "opt_init.h"
 
 #include "irnode_t.h"
 #include "irgraph_t.h"
@@ -40,6 +40,7 @@
 #include "ircons.h"
 #include "iredges_t.h"
 #include "irpass_t.h"
+#include "iroptimize.h"
 #include "analyze_irg_args.h"
 #include "irhooks.h"
 #include "debug.h"
@@ -79,7 +80,8 @@ static unsigned *busy_set;
  * Walker: Collect all calls to const and pure functions
  * to lists. Collect all Proj(Call) nodes into a Proj list.
  */
-static void collect_const_and_pure_calls(ir_node *node, void *env) {
+static void collect_const_and_pure_calls(ir_node *node, void *env)
+{
 	env_t *ctx = env;
 	ir_node *call, *ptr;
 	ir_entity *ent;
@@ -154,10 +156,9 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
 
 	/* collect the Proj's in the Proj list */
 	switch (get_Proj_proj(node)) {
-	case pn_Call_M_regular:
+	case pn_Call_M:
 	case pn_Call_X_except:
 	case pn_Call_X_regular:
-	case pn_Call_M_except:
 		set_irn_link(node, ctx->proj_list);
 		ctx->proj_list = node;
 		break;
@@ -173,7 +174,8 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
  * @param irg the graph that contained calls to pure functions
  * @param ctx context
  */
-static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
+static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
+{
 	ir_node *call, *next, *mem, *proj;
 	int exc_changed = 0;
 	ir_graph *rem = current_ir_graph;
@@ -222,14 +224,13 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
 
 		assert(get_irn_mode(mem) == mode_M);
 		switch (get_Proj_proj(proj)) {
-		case pn_Call_M_regular: {
+		case pn_Call_M: {
 			/* in dead code there might be cycles where proj == mem */
 			if (proj != mem)
 				exchange(proj, mem);
 			break;
 		}
 		case pn_Call_X_except:
-		case pn_Call_M_except:
 			exc_changed = 1;
 			exchange(proj, get_irg_bad(irg));
 			break;
@@ -259,7 +260,8 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
  * Walker: Collect all calls to nothrow functions
  * to lists. Collect all Proj(Call) nodes into a Proj list.
  */
-static void collect_nothrow_calls(ir_node *node, void *env) {
+static void collect_nothrow_calls(ir_node *node, void *env)
+{
 	env_t *ctx = env;
 	ir_node *call, *ptr;
 	ir_entity *ent;
@@ -320,10 +322,9 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
 
 	/* collect the Proj's in the Proj list */
 	switch (get_Proj_proj(node)) {
-	case pn_Call_M_regular:
+	case pn_Call_M:
 	case pn_Call_X_except:
 	case pn_Call_X_regular:
-	case pn_Call_M_except:
 		set_irn_link(node, ctx->proj_list);
 		ctx->proj_list = node;
 		break;
@@ -340,7 +341,8 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
 * @param call_list the list of all call sites of const functions
 * @param proj_list the list of all memory/exception Proj's of this call sites
 */
-static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
+{
	ir_node *call, *next, *proj;
	int exc_changed = 0;
	ir_graph *rem = current_ir_graph;
@@ -368,7 +370,6 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
 		/* kill any exception flow */
 		switch (get_Proj_proj(proj)) {
 		case pn_Call_X_except:
-		case pn_Call_M_except:
 			exc_changed = 1;
 			exchange(proj, get_irg_bad(irg));
 			break;
@@ -408,7 +409,8 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
 /**
  * Calculate the bigger property of two. Handle the temporary flag right.
  */
-static unsigned max_property(unsigned a, unsigned b) {
+static unsigned max_property(unsigned a, unsigned b)
+{
 	unsigned r, t = (a | b) & mtp_temporary;
 	a &= ~mtp_temporary;
 	b &= ~mtp_temporary;
@@ -427,7 +429,8 @@ static unsigned max_property(unsigned a, unsigned b) {
 * mtp_property_pure if only Loads and const/pure calls detected
 * mtp_no_property else
 */
-static unsigned _follow_mem(ir_node *node) {
+static unsigned _follow_mem(ir_node *node)
+{
 	unsigned m, mode = mtp_property_const;
 	ir_node *ptr;
 	int i;
@@ -503,7 +506,8 @@ static unsigned _follow_mem(ir_node *node) {
 * mtp_property_pure if only Loads and const/pure calls detected
 * mtp_no_property else
 */
-static unsigned follow_mem(ir_node *node, unsigned mode) {
+static unsigned follow_mem(ir_node *node, unsigned mode)
+{
 	unsigned m;
 
 	m = _follow_mem(node);
@@ -516,7 +520,8 @@ static unsigned follow_mem(ir_node *node, unsigned mode) {
 * @param irg the graph to check
 * @param top if set, this is the top call
 */
-static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
+static unsigned check_const_or_pure_function(ir_graph *irg, int top)
+{
 	ir_node *end, *endbl;
 	int j;
 	unsigned prop = get_irg_additional_properties(irg);
@@ -622,7 +627,8 @@ static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
 *
 * @param ctx context
 */
-static void handle_const_Calls(env_t *ctx) {
+static void handle_const_Calls(env_t *ctx)
+{
 	int i;
 
 	ctx->n_calls_SymConst = 0;
@@ -651,7 +657,8 @@ static void handle_const_Calls(env_t *ctx) {
 *
 * @param ctx context
 */
-static void handle_nothrow_Calls(env_t *ctx) {
+static void handle_nothrow_Calls(env_t *ctx)
+{
 	int i;
 
 	ctx->n_calls_SymConst = 0;
@@ -679,7 +686,8 @@ static void handle_nothrow_Calls(env_t *ctx) {
 *
 * @param node the node to check
 */
-static int is_malloc_call_result(const ir_node *node) {
+static int is_malloc_call_result(const ir_node *node)
+{
 	if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
 		/* Firm style high-level allocation */
 		return 1;
@@ -698,7 +706,8 @@ static int is_malloc_call_result(const ir_node *node) {
 /**
  * Update a property depending on a call property.
 */
-static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
+static unsigned update_property(unsigned orig_prop, unsigned call_prop)
+{
 	unsigned t = (orig_prop | call_prop) & mtp_temporary;
 	unsigned r = orig_prop & call_prop;
 	return r | t;
@@ -707,7 +716,8 @@ static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
 /**
  * Check if a node is stored.
 */
-static int is_stored(const ir_node *n) {
+static int is_stored(const ir_node *n)
+{
 	const ir_edge_t *edge;
 	const ir_node *ptr;
 
@@ -765,7 +775,8 @@ static int is_stored(const ir_node *n) {
 *
 * return ~mtp_property_malloc if return values are stored, ~0 else
 */
-static unsigned check_stored_result(ir_graph *irg) {
+static unsigned check_stored_result(ir_graph *irg)
+{
 	ir_node *end_blk = get_irg_end_block(irg);
 	int i, j;
 	unsigned res = ~0;
@@ -798,7 +809,8 @@ finish:
 * @param irg the graph to check
 * @param top if set, this is the top call
 */
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
+static unsigned check_nothrow_or_malloc(ir_graph *irg, int top)
+{
 	ir_node *end_blk = get_irg_end_block(irg);
 	ir_entity *ent;
 	ir_type *mtp;
@@ -982,7 +994,8 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
 * When a function was detected as "const", it might be moved out of loops.
 * This might be dangerous if the graph can contain endless loops.
 */
-static void check_for_possible_endless_loops(ir_graph *irg) {
+static void check_for_possible_endless_loops(ir_graph *irg)
+{
 	ir_loop *root_loop;
 
 	assure_cf_loop(irg);
@@ -1071,7 +1084,8 @@ void optimize_funccalls(int force_run, check_alloc_entity_func callback)
 } /* optimize_funccalls */
 
 /* initialize the funccall optimization */
-void firm_init_funccalls(void) {
+void firm_init_funccalls(void)
+{
 	FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
 } /* firm_init_funccalls */
 
@@ -1084,30 +1098,25 @@ struct pass_t {
 /**
  * Wrapper for running optimize_funccalls() as an ir_prog pass.
 */
-static int pass_wrapper(ir_graph *irg, void *context) {
+static int pass_wrapper(ir_prog *irp, void *context)
+{
 	struct pass_t *pass = context;
+
+	(void)irp;
 	optimize_funccalls(pass->force_run, pass->callback);
 	return 0;
 } /* pass_wrapper */
 
 /* Creates an ir_prog pass for optimize_funccalls. */
 ir_prog_pass_t *optimize_funccalls_pass(
-	const char *name, int verify, int dump,
+	const char *name,
 	int force_run, check_alloc_entity_func callback)
 {
-	struct pass_t *pass = xmalloc(sizeof(*pass));
-
-	pass->pass.kind = k_ir_prog_pass;
-	pass->pass.run_on_irprog = pass_wrapper;
-	pass->pass.context = pass;
-	pass->pass.name = name ? name : "funccalls";
-	pass->pass.verify = verify != 0;
-	pass->pass.dump = dump != 0;
-
-	INIT_LIST_HEAD(&pass->pass.list);
+	struct pass_t *pass = XMALLOCZ(struct pass_t);
 
 	pass->force_run = force_run;
 	pass->callback = callback;
 
-	return &pass->pass;
+	return def_prog_pass_constructor(
+		&pass->pass, name ? name : "funccall", pass_wrapper);
 } /* optimize_funccalls_pass */
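
Migration note for callers of the pass constructor: optimize_funccalls_pass()
loses its verify and dump parameters in this change, and the hand-rolled field
setup is replaced by def_prog_pass_constructor(), which fills in the generic
ir_prog_pass_t fields. A minimal before/after sketch of a driver call site
(hypothetical code, not part of the patch; it assumes that passing a NULL
check_alloc_entity_func callback is acceptable):

	/* before this change: name, verify, dump, force_run, callback */
	ir_prog_pass_t *p = optimize_funccalls_pass("funccall", 1, 1, 0, NULL);

	/* after this change: only name, force_run and callback remain;
	 * verify/dump are presumably handled by the generic pass machinery */
	ir_prog_pass_t *p = optimize_funccalls_pass("funccall", 0, NULL);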