+/**
+ * Handle calls to nothrow functions.
+ *
+ * @param ctx context
+ */
+static void handle_nothrow_Calls(env_t *ctx) {
+ int i;
+
+ ctx->n_calls_SymConst = 0;
+ ctx->n_calls_Sel = 0;
+
+ /* all calls of const functions can be transformed */
+ for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+ ir_graph *irg = get_irp_irg(i);
+
+ ctx->nothrow_call_list = NULL;
+ ctx->proj_list = NULL;
+ irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
+
+ if (ctx->nothrow_call_list) {
+ fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
+
+ /* this graph was changed, invalidate analysis info */
+ set_irg_outs_inconsistent(irg);
+ set_irg_doms_inconsistent(irg);
+ }
+ }
+}
+
+/**
+ * Check, whether a given node represents a return value of
+ * a malloc like function (ie, new heap allocated memory).
+ *
+ * @param node the node to check
+ */
+static int is_malloc_call_result(const ir_node *node) {
+ if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
+ /* Firm style high-level allocation */
+ return 1;
+ }
+ if (is_alloc_entity != NULL && is_Call(node)) {
+ ir_node *ptr = get_Call_ptr(node);
+
+ if (is_Global(ptr)) {
+ ir_entity *ent = get_Global_entity(ptr);
+ return is_alloc_entity(ent);
+ }
+ }
+ return 0;
+} /* is_malloc_call_result */
+
+/**
+ * Update a property depending on a call property.
+ */
+static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
+ unsigned t = (orig_prop | call_prop) & mtp_temporary;
+ unsigned r = orig_prop & call_prop;
+ return r | t;
+} /** update_property */
+
/**
 * Check whether a node (an address/value) may be stored somewhere, i.e.
 * whether it can escape by being written to memory, directly or through
 * a Sel/Cast/Confirm chain, or by being passed to a call that stores it.
 *
 * @param n  the node to check
 *
 * @return 1 if n may be stored (potential alias), 0 otherwise
 */
static int is_stored(const ir_node *n) {
	const ir_edge_t *edge;
	const ir_node *ptr;

	/* inspect every user of n */
	foreach_out_edge(n, edge) {
		const ir_node *succ = get_edge_src_irn(edge);

		switch (get_irn_opcode(succ)) {
		case iro_Return:
		case iro_Load:
		case iro_Cmp:
			/* ok: these uses cannot write n to memory */
			break;
		case iro_Store:
			if (get_Store_value(succ) == n)
				return 1;
			/* ok if it's only the address input */
			break;
		case iro_Sel:
		case iro_Cast:
		case iro_Confirm:
			/* derived addresses/views of n: check their users recursively */
			if (is_stored(succ))
				return 1;
			break;
		case iro_Call:
			ptr = get_Call_ptr(succ);
			if (is_Global(ptr)) {
				ir_entity *ent = get_Global_entity(ptr);
				int i;

				/* we know the called entity: consult its parameter access info */
				for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
					if (get_Call_param(succ, i) == n) {
						/* n is the i'th param of the call */
						if (get_method_param_access(ent, i) & ptr_access_store) {
							/* n is stored inside ent */
							return 1;
						}
					}
				}
			} else {
				/* unknown call address: must assume the callee stores n */
				return 1;
			}
			break;
		default:
			/* any other use is bad, potential alias */
			return 1;
		}
	}
	return 0;
}  /* is_stored */
+
+/**
+ * Check that the return value of an irg is not stored anywhere.
+ *
+ * return ~mtp_property_malloc if return values are stored, ~0 else
+ */
+static unsigned check_stored_result(ir_graph *irg) {
+ ir_node *end_blk = get_irg_end_block(irg);
+ int i, j;
+ unsigned res = ~0;
+ int old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
+
+ for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
+ ir_node *pred = get_Block_cfgpred(end_blk, i);
+
+ if (! is_Return(pred))
+ continue;
+ for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
+ const ir_node *irn = get_Return_res(pred, j);
+
+ if (is_stored(irn)) {
+ /* bad, might create an alias */
+ res = ~mtp_property_malloc;
+ goto finish;
+ }
+ }
+ }
+finish:
+ if (! old_edges)
+ edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
+ return res;
+} /* check_stored_result */
+
/**
 * Check if a graph represents a nothrow and/or a malloc function and
 * compute its property set.
 *
 * The analysis starts optimistic (both properties assumed) and strikes
 * properties out as counter-evidence is found.  Recursive cycles are
 * handled via the BUSY flag: a busy callee returns an optimistic result
 * flagged mtp_temporary, and temporary results are only committed at the
 * top-level call.
 *
 * @param irg  the graph to check
 * @param top  if set, this is the top (non-recursive) call
 *
 * @return the computed property set, possibly flagged mtp_temporary
 */
static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
	ir_node   *end_blk = get_irg_end_block(irg);
	ir_entity *ent;
	ir_type   *mtp;
	int       i, j;
	/* optimistic start: assume both properties hold */
	unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;

	if (IS_IRG_READY(irg)) {
		/* already checked */
		return get_irg_additional_properties(irg);
	}
	if (IS_IRG_BUSY(irg)) {
		/* we are still evaluating this method (recursion cycle). Be optimistic,
		   return the best possible so far but mark the result as temporary. */
		return mtp_temporary | mtp_property_malloc | mtp_property_nothrow;
	}
	SET_IRG_BUSY(irg);

	ent = get_irg_entity(irg);
	mtp = get_entity_type(ent);

	/* a function without return values cannot be a malloc function */
	if (get_method_n_ress(mtp) <= 0)
		curr_prop &= ~mtp_property_malloc;

	/* walk all predecessors of the end block: Returns and exception flow */
	for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
		ir_node *pred = get_Block_cfgpred(end_blk, i);

		if (is_Return(pred)) {
			if (curr_prop & mtp_property_malloc) {
				/* check, if malloc is called here */
				for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
					ir_node *res = get_Return_res(pred, j);

					/* skip Confirms and Casts */
					res = skip_HighLevel_ops(res);
					/* skip Proj's */
					while (is_Proj(res))
						res = get_Proj_pred(res);
					if (is_malloc_call_result(res)) {
						/* ok, this is a malloc */
					} else if (is_Call(res)) {
						ir_node *ptr = get_Call_ptr(res);

						if (is_Global(ptr)) {
							/* a direct call */
							ir_entity *ent    = get_Global_entity(ptr);
							ir_graph  *callee = get_entity_irg(ent);

							if (callee == irg) {
								/* A self-recursive call. The property did not depend on this call. */
							} else if (callee != NULL) {
								/* recurse into the callee; may yield a temporary result */
								unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
								curr_prop = update_property(curr_prop, prop);
							} else {
								/* no graph available: use the entity's declared properties */
								curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
							}
						} else if (get_opt_closed_world() &&
						           is_Sel(ptr) &&
						           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
							/* check if all possible callees are malloc functions. */
							int i, n_callees = get_Call_n_callees(res);
							if (n_callees == 0) {
								/* This is kind of strange: dying code or a Call that will raise an exception
								   when executed as there is no implementation to call. So better not
								   optimize. */
								curr_prop &= ~mtp_property_malloc;
								continue;
							}

							for (i = 0; i < n_callees; ++i) {
								ir_entity *ent = get_Call_callee(res, i);
								if (ent == unknown_entity) {
									/* we don't know which entity is called here */
									curr_prop &= ~mtp_property_malloc;
									break;
								}
								if ((get_entity_additional_properties(ent) & mtp_property_malloc) == 0) {
									curr_prop &= ~mtp_property_malloc;
									break;
								}
							}
							/* if we pass the for cycle, malloc is still ok */
						} else {
							/* unknown call */
							curr_prop &= ~mtp_property_malloc;
						}
					} else {
						/* unknown return value */
						curr_prop &= ~mtp_property_malloc;
					}
				}
			}
		} else if (curr_prop & mtp_property_nothrow) {
			/* exception flow detected: a non-Return end block predecessor */
			pred = skip_Proj(pred);

			if (is_Call(pred)) {
				ir_node *ptr = get_Call_ptr(pred);

				if (is_Global(ptr)) {
					/* a direct call */
					ir_entity *ent    = get_Global_entity(ptr);
					ir_graph  *callee = get_entity_irg(ent);

					if (callee == irg) {
						/* A self-recursive call. The property did not depend on this call. */
					} else if (callee != NULL) {
						/* Note: we check here for nothrow only, so do NOT reset the malloc property */
						unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
						curr_prop = update_property(curr_prop, prop);
					} else {
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
							curr_prop &= ~mtp_property_nothrow;
					}
				} else if (get_opt_closed_world() &&
				           is_Sel(ptr) &&
				           get_irg_callee_info_state(irg) == irg_callee_info_consistent) {
					/* check if all possible callees are nothrow functions. */
					int i, n_callees = get_Call_n_callees(pred);
					if (n_callees == 0) {
						/* This is kind of strange: dying code or a Call that will raise an exception
						   when executed as there is no implementation to call. So better not
						   optimize. */
						curr_prop &= ~mtp_property_nothrow;
						continue;
					}

					for (i = 0; i < n_callees; ++i) {
						ir_entity *ent = get_Call_callee(pred, i);
						if (ent == unknown_entity) {
							/* we don't know which entity is called here */
							curr_prop &= ~mtp_property_nothrow;
							break;
						}
						if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0) {
							curr_prop &= ~mtp_property_nothrow;
							break;
						}
					}
					/* if we pass the for cycle, nothrow is still ok */
				} else {
					/* unknown call */
					curr_prop &= ~mtp_property_nothrow;
				}
			} else {
				/* real exception flow possible. */
				curr_prop &= ~mtp_property_nothrow;
			}
		}
		if ((curr_prop & ~mtp_temporary) == mtp_no_property) {
			/* no need to search further */
			break;
		}
	}

	if (curr_prop & mtp_property_malloc) {
		/*
		 * Note that the malloc property means not only return newly allocated
		 * memory, but also that this memory is ALIAS FREE.
		 * To ensure that, we do NOT allow that the returned memory is somewhere
		 * stored.
		 */
		curr_prop &= check_stored_result(irg);
	}

	if (curr_prop != mtp_no_property) {
		if (top || (curr_prop & mtp_temporary) == 0) {
			/* We use the temporary flag here to mark an optimistic result.
			   Set the property only if we are sure that it does NOT base on
			   temporary results OR if we are at top-level. */
			set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
			SET_IRG_READY(irg);
		}
	}
	if (top)
		SET_IRG_READY(irg);
	CLEAR_IRG_BUSY(irg);
	return curr_prop;
}  /* check_nothrow_or_malloc */
+