X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Ffunccall.c;h=256e32c0752a8ae06c3efb7842853c943985ba03;hb=b27ae245166bb695bc4e418ff416d91bc37d0f28;hp=65ef2f890644d0fc0de866d1f4801606cb3a1c7d;hpb=ce6161a7e42a48f7422b7babcc64d8ace18e2687;p=libfirm diff --git a/ir/opt/funccall.c b/ir/opt/funccall.c index 65ef2f890..256e32c07 100644 --- a/ir/opt/funccall.c +++ b/ir/opt/funccall.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -49,8 +49,8 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg;) * The walker environment for updating function calls. */ typedef struct env_t { - unsigned n_calls_SymConst; - unsigned n_calls_Sel; + size_t n_calls_SymConst; + size_t n_calls_Sel; ir_node *float_const_call_list; /**< The list of all floating const function calls that will be changed. */ ir_node *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */ ir_node *pure_call_list; /**< The list of all pure function calls that will be changed. */ @@ -58,9 +58,6 @@ typedef struct env_t { ir_node *proj_list; /**< The list of all potential Proj nodes that must be fixed. */ } env_t; -/** If non-null, evaluates entities for being a heap alloc. */ -static check_alloc_entity_func is_alloc_entity = NULL; - /** Ready IRG's are marked in the ready set. */ static unsigned *ready_set; @@ -103,7 +100,7 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) is_Sel(ptr) && get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) { /* If all possible callees are const functions, we can remove the memory edge. */ - int i, n_callees = get_Call_n_callees(call); + size_t i, n_callees = get_Call_n_callees(call); if (n_callees == 0) { /* This is kind of strange: dying code or a Call that will raise an exception when executed as there is no implementation to call. So better not @@ -228,7 +225,7 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx) } case pn_Call_X_except: exc_changed = 1; - exchange(proj, get_irg_bad(irg)); + exchange(proj, new_r_Bad(irg, mode_X)); break; case pn_Call_X_regular: { ir_node *block = get_nodes_block(call); @@ -242,7 +239,6 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx) } /* changes were done ... */ - set_irg_outs_inconsistent(irg); set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent); if (exc_changed) { @@ -279,7 +275,7 @@ static void collect_nothrow_calls(ir_node *node, void *env) is_Sel(ptr) && get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) { /* If all possible callees are nothrow functions, we can remove the exception edge. */ - int i, n_callees = get_Call_n_callees(call); + size_t i, n_callees = get_Call_n_callees(call); if (n_callees == 0) { /* This is kind of strange: dying code or a Call that will raise an exception when executed as there is no implementation to call. So better not @@ -363,7 +359,7 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr switch (get_Proj_proj(proj)) { case pn_Call_X_except: exc_changed = 1; - exchange(proj, get_irg_bad(irg)); + exchange(proj, new_r_Bad(irg, mode_X)); break; case pn_Call_X_regular: { ir_node *block = get_nodes_block(call); @@ -377,7 +373,6 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr } /* changes were done ... 
*/ - set_irg_outs_inconsistent(irg); set_irg_loopinfo_state(irg, loopinfo_cf_inconsistent); if (exc_changed) { @@ -470,12 +465,10 @@ static mtp_additional_properties follow_mem_(ir_node *node) ir_entity *ent = get_SymConst_entity(ptr); ir_graph *irg = get_entity_irg(ent); - if (irg == get_irn_irg(node)) { - /* A self-recursive call. The property did not depend on this call. */ - } else if (irg == NULL) { + if (irg == NULL) { m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure); mode = max_property(mode, m); - } else if (irg != NULL) { + } else { /* we have a graph, analyze it. */ m = check_const_or_pure_function(irg, /*top=*/0); mode = max_property(mode, m); @@ -531,9 +524,10 @@ static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int return mtp_no_property; } if (IS_IRG_BUSY(irg)) { - /* we are still evaluate this method. Be optimistic, - return the best possible so far but mark the result as temporary. */ - return mtp_temporary | mtp_property_const; + /* We are still evaluate this method. + * The function (indirectly) calls itself and thus may not terminate. + */ + return mtp_no_property; } SET_IRG_BUSY(irg); @@ -593,17 +587,13 @@ static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int } } - if (prop != mtp_no_property) { - if (top || (prop & mtp_temporary) == 0) { - /* We use the temporary flag here to mark optimistic result. - Set the property only if we are sure that it does NOT base on - temporary results OR if we are at top-level. */ - add_irg_additional_properties(irg, prop & ~mtp_temporary); - SET_IRG_READY(irg); + if (top) { + /* Set the property only if we are at top-level. */ + if (prop != mtp_no_property) { + add_irg_additional_properties(irg, prop); } - } - if (top) SET_IRG_READY(irg); + } CLEAR_IRG_BUSY(irg); ir_free_resources(irg, IR_RESOURCE_IRN_VISITED); return prop; @@ -616,13 +606,13 @@ static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int */ static void handle_const_Calls(env_t *ctx) { - int i; + size_t i, n; ctx->n_calls_SymConst = 0; ctx->n_calls_Sel = 0; /* all calls of const functions can be transformed */ - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { + for (i = 0, n = get_irp_n_irgs(); i < n; ++i) { ir_graph *irg = get_irp_irg(i); ctx->float_const_call_list = NULL; @@ -646,13 +636,13 @@ static void handle_const_Calls(env_t *ctx) */ static void handle_nothrow_Calls(env_t *ctx) { - int i; + size_t i, n; ctx->n_calls_SymConst = 0; ctx->n_calls_Sel = 0; /* all calls of const functions can be transformed */ - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { + for (i = 0, n = get_irp_n_irgs(); i < n; ++i) { ir_graph *irg = get_irp_irg(i); ctx->nothrow_call_list = NULL; @@ -679,16 +669,9 @@ static int is_malloc_call_result(const ir_node *node) /* Firm style high-level allocation */ return 1; } - if (is_alloc_entity != NULL && is_Call(node)) { - ir_node *ptr = get_Call_ptr(node); - - if (is_Global(ptr)) { - ir_entity *ent = get_Global_entity(ptr); - return is_alloc_entity(ent); - } - } + /* TODO: check mtp_malloc */ return 0; -} /* is_malloc_call_result */ +} /** * Update a property depending on a call property. 
@@ -732,11 +715,11 @@ static int is_stored(const ir_node *n) ptr = get_Call_ptr(succ); if (is_Global(ptr)) { ir_entity *ent = get_Global_entity(ptr); - int i; + size_t i; /* we know the called entity */ - for (i = get_Call_n_params(succ) - 1; i >= 0; --i) { - if (get_Call_param(succ, i) == n) { + for (i = get_Call_n_params(succ); i > 0;) { + if (get_Call_param(succ, --i) == n) { /* n is the i'th param of the call */ if (get_method_param_access(ent, i) & ptr_access_store) { /* n is store in ent */ @@ -765,17 +748,18 @@ static int is_stored(const ir_node *n) static mtp_additional_properties check_stored_result(ir_graph *irg) { ir_node *end_blk = get_irg_end_block(irg); - int i, j; + int i; mtp_additional_properties res = ~mtp_no_property; int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL); for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) { ir_node *pred = get_Block_cfgpred(end_blk, i); + size_t j; if (! is_Return(pred)) continue; - for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) { - const ir_node *irn = get_Return_res(pred, j); + for (j = get_Return_n_ress(pred); j > 0;) { + const ir_node *irn = get_Return_res(pred, --j); if (is_stored(irn)) { /* bad, might create an alias */ @@ -802,7 +786,7 @@ static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top) ir_node *end_blk = get_irg_end_block(irg); ir_entity *ent; ir_type *mtp; - int i, j; + int i; if (IS_IRG_READY(irg)) { /* already checked */ @@ -826,9 +810,11 @@ static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top) if (is_Return(pred)) { if (curr_prop & mtp_property_malloc) { + size_t j; + /* check, if malloc is called here */ - for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) { - ir_node *res = get_Return_res(pred, j); + for (j = get_Return_n_ress(pred); j > 0;) { + ir_node *res = get_Return_res(pred, --j); /* skip Confirms and Casts */ res = skip_HighLevel_ops(res); @@ -857,7 +843,7 @@ static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top) is_Sel(ptr) && get_irg_callee_info_state(irg) == irg_callee_info_consistent) { /* check if all possible callees are malloc functions. */ - int i, n_callees = get_Call_n_callees(res); + size_t i, n_callees = get_Call_n_callees(res); if (n_callees == 0) { /* This is kind of strange: dying code or a Call that will raise an exception when executed as there is no implementation to call. So better not @@ -915,7 +901,7 @@ static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top) is_Sel(ptr) && get_irg_callee_info_state(irg) == irg_callee_info_consistent) { /* check if all possible callees are nothrow functions. */ - int i, n_callees = get_Call_n_callees(pred); + size_t i, n_callees = get_Call_n_callees(pred); if (n_callees == 0) { /* This is kind of strange: dying code or a Call that will raise an exception when executed as there is no implementation to call. 
So better not @@ -994,15 +980,15 @@ static void check_for_possible_endless_loops(ir_graph *irg) /* * optimize function calls by handling const functions */ -void optimize_funccalls(int force_run, check_alloc_entity_func callback) +void optimize_funccalls(void) { - int i, last_idx; - unsigned num_const = 0; - unsigned num_pure = 0; - unsigned num_nothrow = 0; - unsigned num_malloc = 0; - - is_alloc_entity = callback; + size_t i, n; + size_t last_idx; + env_t ctx; + size_t num_const = 0; + size_t num_pure = 0; + size_t num_nothrow = 0; + size_t num_malloc = 0; /* prepare: mark all graphs as not analyzed */ last_idx = get_irp_last_idx(); @@ -1011,7 +997,7 @@ void optimize_funccalls(int force_run, check_alloc_entity_func callback) /* first step: detect, which functions are nothrow or malloc */ DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n")); - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { + for (i = 0, n = get_irp_n_irgs(); i < n; ++i) { ir_graph *irg = get_irp_irg(i); unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1); @@ -1026,23 +1012,17 @@ void optimize_funccalls(int force_run, check_alloc_entity_func callback) /* second step: remove exception edges: this must be done before the detection of const and pure functions take place. */ - if (force_run || num_nothrow > 0) { - env_t ctx; - - handle_nothrow_Calls(&ctx); - DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc)); - DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n", - ctx.n_calls_SymConst, ctx.n_calls_Sel)); - } else { - DB((dbg, LEVEL_1, "No graphs without side effects detected\n")); - } + handle_nothrow_Calls(&ctx); + DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc)); + DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n", + ctx.n_calls_SymConst, ctx.n_calls_Sel)); rbitset_clear_all(ready_set, last_idx); rbitset_clear_all(busy_set, last_idx); /* third step: detect, which functions are const or pure */ DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n")); - for (i = get_irp_n_irgs() - 1; i >= 0; --i) { + for (i = 0, n = get_irp_n_irgs(); i < n; ++i) { ir_graph *irg = get_irp_irg(i); unsigned prop = check_const_or_pure_function(irg, /*top=*/1); @@ -1056,54 +1036,23 @@ void optimize_funccalls(int force_run, check_alloc_entity_func callback) } } - if (force_run || num_const > 0) { - env_t ctx; + handle_const_Calls(&ctx); + DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n", num_const, num_pure)); + DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n", + ctx.n_calls_SymConst, ctx.n_calls_Sel)); - handle_const_Calls(&ctx); - DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure)); - DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n", - ctx.n_calls_SymConst, ctx.n_calls_Sel)); - } else { - DB((dbg, LEVEL_1, "No graphs without side effects detected\n")); - } xfree(busy_set); xfree(ready_set); -} /* optimize_funccalls */ +} /* initialize the funccall optimization */ void firm_init_funccalls(void) { FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls"); -} /* firm_init_funccalls */ - -typedef struct pass_t { - ir_prog_pass_t pass; - int force_run; - check_alloc_entity_func callback; -} pass_t; - -/** - * Wrapper for running optimize_funccalls() as an ir_prog pass. 
- */ -static int pass_wrapper(ir_prog *irp, void *context) -{ - pass_t *pass = (pass_t*)context; - - (void)irp; - optimize_funccalls(pass->force_run, pass->callback); - return 0; -} /* pass_wrapper */ +} /* Creates an ir_prog pass for optimize_funccalls. */ -ir_prog_pass_t *optimize_funccalls_pass( - const char *name, - int force_run, check_alloc_entity_func callback) +ir_prog_pass_t *optimize_funccalls_pass(const char *name) { - struct pass_t *pass = XMALLOCZ(struct pass_t); - - pass->force_run = force_run; - pass->callback = callback; - - return def_prog_pass_constructor( - &pass->pass, name ? name : "funccall", pass_wrapper); -} /* optimize_funccalls_pass */ + return def_prog_pass(name ? name : "funccall", optimize_funccalls); +}
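
Note on the index-type change in the hunks above: the loop counters switched from int to size_t, so the old descending form "for (i = n - 1; i >= 0; --i)" can no longer be used (an unsigned i is always >= 0 and would wrap around at 0). Below is a minimal standalone C sketch of the idiom the patch uses instead; the array and helper names are made up for illustration only.

#include <stddef.h>

/* Reverse iteration with an unsigned index: test the count first, then
 * decrement inside the body, as the patch does for Call parameters,
 * Return results and callees. */
static void visit_reverse(const int *arr, size_t n)
{
    size_t i;

    for (i = n; i > 0;) {
        int elem = arr[--i];   /* i is decremented before the access */
        (void)elem;            /* ... process elem here ... */
    }
}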
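The busy-graph case in check_const_or_pure_function also changed its meaning: a graph that is still under analysis (i.e. one that directly or indirectly calls itself) used to be answered optimistically with mtp_temporary | mtp_property_const and now yields mtp_no_property. An illustrative example of the problem with the optimistic answer follows; it is not taken from the patch.

/* With the optimistic result, a function like this could end up marked
 * const; a const call whose result is unused may then be removed, and
 * with it the endless loop it hides, silently changing behaviour. */
static int spin(int x)
{
    return spin(x);   /* no memory effects, but never terminates */
}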
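Finally, the public entry points lose the force_run flag and the check_alloc_entity_func callback. A hedged sketch of how a caller might look after this patch; the header name and the surrounding driver functions are assumptions, only optimize_funccalls() and optimize_funccalls_pass() come from the patch itself.

#include "iroptimize.h"   /* assumed to declare the entry points used below */

static void run_funccall_optimization(void)
{
    /* Old interface (removed): optimize_funccalls(force_run, callback);
     * the optimization now always runs and takes no allocator callback. */
    optimize_funccalls();
}

static ir_prog_pass_t *make_funccall_pass(void)
{
    /* Passing NULL selects the default pass name "funccall". */
    return optimize_funccalls_pass(NULL);
}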