+/**
+ * Check if the result of a graph is stored somewhere: a stored
+ * result would create an alias to the returned memory.
+ *
+ * @param irg the graph to check
+ */
+static mtp_additional_properties check_stored_result(ir_graph *irg)
+{
+ ir_node *end_blk = get_irg_end_block(irg);
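+ /* start with all property bits set: the caller ANDs this result into its own properties */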
+ mtp_additional_properties res = ~mtp_no_property;
+
+ assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
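+ /* Note: is_stored() walks the users of the returned values, which is why consistent out edges are assured above. */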
+
+ for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(end_blk, i);
+
+ if (! is_Return(pred))
+ continue;
+ for (size_t j = get_Return_n_ress(pred); j > 0;) {
+ const ir_node *irn = get_Return_res(pred, --j);
+
+ if (is_stored(irn)) {
+ /* bad, might create an alias */
+ res = ~mtp_property_malloc;
+ goto finish;
+ }
+ }
+ }
+finish:
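+ /* the analysis did not modify the graph, so all properties still hold */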
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
+ return res;
+}
+
+/**
+ * Check if a graph represents a nothrow or a malloc function.
+ *
+ * @param irg the graph to check
+ * @param top if set, this is the top call
+ */
+static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, bool top)
+{
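+ /* be optimistic: assume both candidate properties and let the analysis remove what cannot be proven */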
+ mtp_additional_properties curr_prop
+ = mtp_property_malloc | mtp_property_nothrow;
+
+ ir_entity *ent = get_irg_entity(irg);
+ if (IS_IRG_READY(irg)) {
+ /* already checked */
+ return get_entity_additional_properties(ent);
+ }
+ if (IS_IRG_BUSY(irg)) {
+ /* We are still evaluating this method. Be optimistic:
+ return the best possible result so far, but mark it as temporary. */
+ return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
+ }
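+ /* mark this graph busy so recursive calls into it are detected by the check above */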
+ SET_IRG_BUSY(irg);
+
+ ir_type *mtp = get_entity_type(ent);
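+ /* a function that returns no values can never be malloc-like */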
+ if (get_method_n_ress(mtp) == 0)
+ curr_prop &= ~mtp_property_malloc;
+
+ ir_node *end_blk = get_irg_end_block(irg);
+ for (int i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(end_blk, i);
+
+ if (is_Return(pred)) {
+ if (curr_prop & mtp_property_malloc) {
+ /* check, if malloc is called here */
+ for (size_t j = get_Return_n_ress(pred); j > 0;) {
+ ir_node *res = get_Return_res(pred, --j);
+
+ /* skip Confirms and Casts */
+ res = skip_HighLevel_ops(res);
+ /* skip Proj's */
+ while (is_Proj(res))
+ res = get_Proj_pred(res);
+ if (is_malloc_call_result(res)) {
+ /* ok, this is a malloc */
+ } else if (is_Call(res)) {
+ ir_node *ptr = get_Call_ptr(res);
+
+ if (is_SymConst_addr_ent(ptr)) {
+ /* a direct call */
+ ir_entity *ent = get_SymConst_entity(ptr);
+ ir_graph *callee = get_entity_irg(ent);
+
+ if (callee == irg) {
+ /* A self-recursive call. The property does not depend on this call. */
+ } else if (callee != NULL) {
+ mtp_additional_properties prop = check_nothrow_or_malloc(callee, false);
+ curr_prop = update_property(curr_prop, prop);
+ } else {
+ curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
+ }
+ } else {
+ /* unknown call */
+ curr_prop &= ~mtp_property_malloc;
+ }
+ } else {
+ /* unknown return value */
+ curr_prop &= ~mtp_property_malloc;
+ }
+ }
+ }
+ } else if (curr_prop & mtp_property_nothrow) {
+ /* This predecessor is no Return, so it stems from exception
+ control flow: skip Projs to reach the node that may throw. */
+ pred = skip_Proj(pred);
+
+ if (is_Call(pred)) {
+ ir_node *ptr = get_Call_ptr(pred);
+
+ if (is_SymConst_addr_ent(ptr)) {
+ /* a direct call */
+ ir_entity *ent = get_SymConst_entity(ptr);
+ ir_graph *callee = get_entity_irg(ent);
+
+ if (callee == irg) {
+ /* A self-recursive call. The property does not depend on this call. */
+ } else if (callee != NULL) {
+ /* Note: we check for nothrow only here, so keep the malloc bit
+ set in the callee result to avoid resetting our malloc property. */
+ mtp_additional_properties prop = check_nothrow_or_malloc(callee, false) | mtp_property_malloc;
+ curr_prop = update_property(curr_prop, prop);
+ } else {
+ if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
+ curr_prop &= ~mtp_property_nothrow;
+ }
+ } else {
+ /* unknown call */
+ curr_prop &= ~mtp_property_nothrow;
+ }
+ } else {
+ /* real exception flow possible. */
+ curr_prop &= ~mtp_property_nothrow;
+ }
+ }
+ if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
+ /* no need to search further */
+ break;
+ }
+ }
+
+ if (curr_prop & mtp_property_malloc) {
+ /* Note that the malloc property means not only return newly allocated
+ * memory, but also that this memory is ALIAS FREE.
+ * To ensure that, we do NOT allow that the returned memory is somewhere
+ * stored. */
+ curr_prop &= check_stored_result(irg);
+ }
+
+ if (curr_prop != mtp_no_property
+ && (top || (curr_prop & mtp_temporary) == 0)) {
+ /* We use the temporary flag here to mark an optimistic result.
+ * Set the property only if we are sure that it is NOT based on
+ * temporary results OR if we are at the top level. */
+ add_entity_additional_properties(ent, curr_prop & ~mtp_temporary);
+ SET_IRG_READY(irg);
+ }
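+ /* At the top-level call the evaluation is complete: a result that
+ was only based on temporary information cannot improve anymore. */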
+ if (top)
+ SET_IRG_READY(irg);
+ CLEAR_IRG_BUSY(irg);
+ return curr_prop;
+}
+
+/**
+ * When a function is detected as "const", calls to it may be moved
+ * out of loops. This is dangerous if the graph may contain endless loops.
+ */
+static void check_for_possible_endless_loops(ir_graph *irg)
+{
+ assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+
+ ir_loop *root_loop = get_irg_loop(irg);
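+ /* the loop_outer_loop flag on the root loop indicates that the graph contains real loops */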
+ if (root_loop->flags & loop_outer_loop) {
+ ir_entity *ent = get_irg_entity(irg);
+ add_entity_additional_properties(ent, mtp_property_has_loop);
+ }
+
+ confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
+}
+
+void optimize_funccalls(void)
+{
+ /* prepare: mark all graphs as not analyzed */
+ size_t last_idx = get_irp_last_idx();
+ ready_set = rbitset_malloc(last_idx);
+ busy_set = rbitset_malloc(last_idx);
+
+ /* first step: detect which functions are nothrow or malloc */
+ DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
+ for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+ mtp_additional_properties prop = check_nothrow_or_malloc(irg, true);
+
+ if (prop & mtp_property_nothrow) {
+ DB((dbg, LEVEL_2, "%+F has the nothrow property\n", irg));
+ } else if (prop & mtp_property_malloc) {
+ DB((dbg, LEVEL_2, "%+F has the malloc property\n", irg));
+ }
+ }
+
+ /* second step: remove exception edges. This must be done before the
+ detection of const and pure functions takes place. */
+ env_t ctx;
+ handle_nothrow_Calls(&ctx);
+
+ rbitset_clear_all(ready_set, last_idx);
+ rbitset_clear_all(busy_set, last_idx);
+
+ /* third step: detect which functions are const or pure */
+ DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
+ for (size_t i = 0, n = get_irp_n_irgs(); i < n; ++i) {
+ ir_graph *irg = get_irp_irg(i);
+ mtp_additional_properties prop = check_const_or_pure_function(irg, true);
+
+ if (prop & mtp_property_const) {
+ DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
+ check_for_possible_endless_loops(irg);
+ } else if (prop & mtp_property_pure) {
+ DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
+ }
+ }
+
+ handle_const_Calls(&ctx);
+
+ xfree(busy_set);
+ xfree(ready_set);
+}
+
+void firm_init_funccalls(void)
+{
+ FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
+}
+
+ir_prog_pass_t *optimize_funccalls_pass(const char *name)