fix wrong usage of ircons functions
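
The block-based ircons constructors no longer take an explicit ir_graph
argument; the graph is derived from the block or via get_irn_irg() instead
of relying on current_ir_graph.  While adapting to this, const calls are
split into floating and non-floating lists so that const functions
containing loops are not moved out of their enclosing control flow, the
removed pn_Call_M_regular/pn_Call_M_except projections are replaced by the
single pn_Call_M, counters become size_t, and the link/visited fields are
reserved explicitly via ir_reserve_resources().

A minimal sketch of the adapted Jmp construction (illustrative only: the
helper fix_x_regular_proj is made up, the functions it calls are the ones
used in the patch below):

    #include "irnode.h"   /* ir_node, get_nodes_block() */
    #include "irgmod.h"   /* exchange() */
    #include "ircons.h"   /* new_r_Jmp() */

    /* Replace the X_regular Proj of a call whose exception flow was removed
     * by a plain Jmp.  With the reworked ircons API the constructor only
     * needs the block; the ir_graph is implied by it. */
    static void fix_x_regular_proj(ir_node *proj, ir_node *call)
    {
        ir_node *block = get_nodes_block(call);
        exchange(proj, new_r_Jmp(block));   /* was: new_r_Jmp(irg, block) */
    }
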
diff --git a/ir/opt/funccall.c b/ir/opt/funccall.c
index 656dd53..397ccd8 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
  *
  * This file is part of libFirm.
  *
  * @author  Michael Beck
  * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
-#include <adt/raw_bitset.h>
-
-#include "funccall_t.h"
+#include "opt_init.h"
 
 #include "irnode_t.h"
 #include "irgraph_t.h"
 #include "irgmod.h"
 #include "irgwalk.h"
-#include "irvrfy.h"
 #include "dbginfo_t.h"
 #include "irflag_t.h"
+#include "irloop_t.h"
 #include "ircons.h"
+#include "iredges_t.h"
+#include "irpass_t.h"
+#include "iroptimize.h"
+#include "analyze_irg_args.h"
 #include "irhooks.h"
+#include "raw_bitset.h"
 #include "debug.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
@@ -47,13 +48,14 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 /**
  * The walker environment for updating function calls.
  */
-typedef struct _env_t {
-       unsigned n_calls_SymConst;
-       unsigned n_calls_Sel;
-       ir_node  *const_call_list;       /**< The list of all const function calls that will be changed. */
-       ir_node  *pure_call_list;        /**< The list of all pure function calls that will be changed. */
-       ir_node  *nothrow_call_list;     /**< The list of all nothrow function calls that will be changed. */
-       ir_node  *proj_list;             /**< The list of all potential Proj nodes that must be fixed. */
+typedef struct env_t {
+       size_t   n_calls_SymConst;
+       size_t   n_calls_Sel;
+       ir_node  *float_const_call_list;    /**< The list of all floating const function calls that will be changed. */
+       ir_node  *nonfloat_const_call_list; /**< The list of all non-floating const function calls that will be changed. */
+       ir_node  *pure_call_list;           /**< The list of all pure function calls that will be changed. */
+       ir_node  *nothrow_call_list;        /**< The list of all nothrow function calls that will be changed. */
+       ir_node  *proj_list;                /**< The list of all potential Proj nodes that must be fixed. */
 } env_t;
 
 /** If non-null, evaluates entities for being a heap alloc. */
@@ -76,11 +78,13 @@ static unsigned *busy_set;
  * Walker: Collect all calls to const and pure functions
  * to lists. Collect all Proj(Call) nodes into a Proj list.
  */
-static void collect_const_and_pure_calls(ir_node *node, void *env) {
-       env_t *ctx = env;
-       ir_node *call, *ptr;
+static void collect_const_and_pure_calls(ir_node *node, void *env)
+{
+       env_t     *ctx = (env_t*)env;
+       ir_node   *call;
+       ir_node   *ptr;
        ir_entity *ent;
-       unsigned prop;
+       unsigned  and_prop, or_prop, prop;
 
        if (is_Call(node)) {
                call = node;
@@ -88,8 +92,8 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
                /* set the link to NULL for all non-const/pure calls */
                set_irn_link(call, NULL);
                ptr = get_Call_ptr(call);
-               if (is_SymConst_addr_ent(ptr)) {
-                       ent = get_SymConst_entity(ptr);
+               if (is_Global(ptr)) {
+                       ent = get_Global_entity(ptr);
 
                        prop = get_entity_additional_properties(ent);
                        if ((prop & (mtp_property_const|mtp_property_pure)) == 0)
@@ -97,7 +101,7 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
                        ++ctx->n_calls_SymConst;
                } else if (get_opt_closed_world() &&
                           is_Sel(ptr) &&
-                          get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+                          get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
                        /* If all possible callees are const functions, we can remove the memory edge. */
                        int i, n_callees = get_Call_n_callees(call);
                        if (n_callees == 0) {
@@ -108,17 +112,21 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
                        }
 
                        /* note that const functions are a subset of pure ones */
-                       prop = mtp_property_const | mtp_property_pure;
+                       and_prop = mtp_property_const | mtp_property_pure;
+                       or_prop  = 0;
                        for (i = 0; i < n_callees; ++i) {
                                ent = get_Call_callee(call, i);
                                if (ent == unknown_entity) {
                                        /* we don't know which entity is called here */
                                        return;
                                }
-                               prop &= get_entity_additional_properties(ent);
-                               if (prop == mtp_no_property)
+                               prop      = get_entity_additional_properties(ent);
+                               and_prop &= prop;
+                               or_prop  |= prop;
+                               if (and_prop == mtp_no_property)
                                        return;
                        }
+                       prop = and_prop | (or_prop & mtp_property_has_loop);
                        ++ctx->n_calls_Sel;
                } else
                        return;
@@ -128,8 +136,13 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
                        set_irn_link(call, ctx->pure_call_list);
                        ctx->pure_call_list = call;
                } else {
-                       set_irn_link(call, ctx->const_call_list);
-                       ctx->const_call_list = call;
+                       if (prop & mtp_property_has_loop) {
+                               set_irn_link(call, ctx->nonfloat_const_call_list);
+                               ctx->nonfloat_const_call_list = call;
+                       } else {
+                               set_irn_link(call, ctx->float_const_call_list);
+                               ctx->float_const_call_list = call;
+                       }
                }
        } else if (is_Proj(node)) {
                /*
@@ -142,10 +155,9 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
 
                /* collect the Proj's in the Proj list */
                switch (get_Proj_proj(node)) {
-               case pn_Call_M_regular:
+               case pn_Call_M:
                case pn_Call_X_except:
                case pn_Call_X_regular:
-               case pn_Call_M_except:
                        set_irn_link(node, ctx->proj_list);
                        ctx->proj_list = node;
                        break;
@@ -158,28 +170,26 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
 /**
  * Fix the list of collected Calls.
  *
- * @param irg        the graph that contained calls to pure functions
- * @param call_list  the list of all call sites of const functions
- * @param proj_list  the list of all memory/exception Proj's of this call sites
+ * @param irg  the graph that contained calls to pure functions
+ * @param ctx  context
  */
-static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+static void fix_const_call_lists(ir_graph *irg, env_t *ctx)
+{
        ir_node *call, *next, *mem, *proj;
        int exc_changed = 0;
-       ir_graph *rem = current_ir_graph;
-
-       current_ir_graph = irg;
 
-       /* First step: fix all calls by removing it's memory input.
-          It's original memory input is preserved in their link fields. */
-       for (call = call_list; call; call = next) {
-               next = get_irn_link(call);
+       /* First step: fix all calls by removing their memory input and let
+        * them floating.
+        * The original memory input is preserved in their link fields. */
+       for (call = ctx->float_const_call_list; call != NULL; call = next) {
+               next = (ir_node*)get_irn_link(call);
                mem  = get_Call_mem(call);
 
                set_irn_link(call, mem);
                set_Call_mem(call, get_irg_no_mem(irg));
 
                /*
-                * Sorrily we cannot simply set the node to 'float'.
+                * Unfortunately we cannot simply set the node to 'float'.
                 * There is a reason for that:
                 *
                 * - The call might be inside a loop/if that is NOT entered
@@ -193,42 +203,41 @@ static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj
                 * observable states...
                 */
 
-               /* finally, this call can float
-               set_irn_pinned(call, op_pin_state_floats); */
+               /* finally, this call can float */
+               set_irn_pinned(call, op_pin_state_floats);
                hook_func_call(irg, call);
        }
 
-       /* Second step: fix all Proj's */
-       for (proj = proj_list; proj; proj = next) {
-               next = get_irn_link(proj);
+       /* Last step: fix all Proj's */
+       for (proj = ctx->proj_list; proj != NULL; proj = next) {
+               next = (ir_node*)get_irn_link(proj);
                call = get_Proj_pred(proj);
-               mem  = get_irn_link(call);
+               mem  = (ir_node*)get_irn_link(call);
 
                /* beware of calls in the pure call list */
-               if (! mem || get_irn_op(mem) == op_Call)
+               if (!mem || is_Call(mem))
                        continue;
                assert(get_irn_mode(mem) == mode_M);
 
                switch (get_Proj_proj(proj)) {
-               case pn_Call_M_regular: {
+               case pn_Call_M: {
                        /* in dead code there might be cycles where proj == mem */
                        if (proj != mem)
                                exchange(proj, mem);
                         break;
                }
                case pn_Call_X_except:
-               case pn_Call_M_except:
                        exc_changed = 1;
                        exchange(proj, get_irg_bad(irg));
                        break;
                case pn_Call_X_regular: {
                        ir_node *block = get_nodes_block(call);
                        exc_changed = 1;
-                       exchange(proj, new_r_Jmp(irg, block));
+                       exchange(proj, new_r_Jmp(block));
                        break;
                }
                default:
-                       ;
+                       break;
                }
        }
 
@@ -240,15 +249,15 @@ static void fix_const_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj
                /* ... including exception edges */
                set_irg_doms_inconsistent(irg);
        }
-       current_ir_graph = rem;
 }  /* fix_const_call_list */
 
 /**
  * Walker: Collect all calls to nothrow functions
  * to lists. Collect all Proj(Call) nodes into a Proj list.
  */
-static void collect_nothrow_calls(ir_node *node, void *env) {
-       env_t *ctx = env;
+static void collect_nothrow_calls(ir_node *node, void *env)
+{
+       env_t *ctx = (env_t*)env;
        ir_node *call, *ptr;
        ir_entity *ent;
        unsigned prop;
@@ -259,8 +268,8 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
                /* set the link to NULL for all non-const/pure calls */
                set_irn_link(call, NULL);
                ptr = get_Call_ptr(call);
-               if (is_SymConst_addr_ent(ptr)) {
-                       ent = get_SymConst_entity(ptr);
+               if (is_Global(ptr)) {
+                       ent = get_Global_entity(ptr);
 
                        prop = get_entity_additional_properties(ent);
                        if ((prop & mtp_property_nothrow) == 0)
@@ -268,7 +277,7 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
                        ++ctx->n_calls_SymConst;
                } else if (get_opt_closed_world() &&
                           is_Sel(ptr) &&
-                          get_irg_callee_info_state(current_ir_graph) == irg_callee_info_consistent) {
+                          get_irg_callee_info_state(get_irn_irg(node)) == irg_callee_info_consistent) {
                        /* If all possible callees are nothrow functions, we can remove the exception edge. */
                        int i, n_callees = get_Call_n_callees(call);
                        if (n_callees == 0) {
@@ -308,10 +317,9 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
 
                /* collect the Proj's in the Proj list */
                switch (get_Proj_proj(node)) {
-               case pn_Call_M_regular:
+               case pn_Call_M:
                case pn_Call_X_except:
                case pn_Call_X_regular:
-               case pn_Call_M_except:
                        set_irn_link(node, ctx->proj_list);
                        ctx->proj_list = node;
                        break;
@@ -328,16 +336,14 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
  * @param call_list  the list of all call sites of nothrow functions
  * @param proj_list  the list of all memory/exception Proj's of these call sites
  */
-static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list) {
+static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *proj_list)
+{
        ir_node *call, *next, *proj;
        int exc_changed = 0;
-       ir_graph *rem = current_ir_graph;
-
-       current_ir_graph = irg;
 
        /* First step: go through the list of calls and mark them. */
        for (call = call_list; call; call = next) {
-               next = get_irn_link(call);
+               next = (ir_node*)get_irn_link(call);
 
                /* current_ir_graph is in memory anyway, so it's a good marker */
                set_irn_link(call, &current_ir_graph);
@@ -346,7 +352,7 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
 
        /* Second step: Remove all exception Proj's */
        for (proj = proj_list; proj; proj = next) {
-               next = get_irn_link(proj);
+               next = (ir_node*)get_irn_link(proj);
                call = get_Proj_pred(proj);
 
                /* handle only marked calls */
@@ -356,18 +362,17 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
                /* kill any exception flow */
                switch (get_Proj_proj(proj)) {
                case pn_Call_X_except:
-               case pn_Call_M_except:
                        exc_changed = 1;
                        exchange(proj, get_irg_bad(irg));
                        break;
                case pn_Call_X_regular: {
                        ir_node *block = get_nodes_block(call);
                        exc_changed = 1;
-                       exchange(proj, new_r_Jmp(irg, block));
+                       exchange(proj, new_r_Jmp(block));
                        break;
                }
                default:
-                       ;
+                       break;
                }
        }
 
@@ -379,25 +384,26 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
                /* ... including exception edges */
                set_irg_doms_inconsistent(irg);
        }
-       current_ir_graph = rem;
 }  /* fix_nothrow_call_list */
 
 /* marking */
-#define SET_IRG_READY(irg)     rbitset_set(ready_set, get_irg_idx(irg))
+#define SET_IRG_READY(irg)  rbitset_set(ready_set, get_irg_idx(irg))
 #define IS_IRG_READY(irg)   rbitset_is_set(ready_set, get_irg_idx(irg))
 #define SET_IRG_BUSY(irg)   rbitset_set(busy_set, get_irg_idx(irg))
 #define CLEAR_IRG_BUSY(irg) rbitset_clear(busy_set, get_irg_idx(irg))
 #define IS_IRG_BUSY(irg)    rbitset_is_set(busy_set, get_irg_idx(irg))
 
 /* forward */
-static unsigned check_const_or_pure_function(ir_graph *irg, int top);
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top);
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top);
 
 /**
  * Calculate the bigger property of two. Handle the temporary flag right.
  */
-static unsigned max_property(unsigned a, unsigned b) {
-       unsigned r, t = (a | b) & mtp_temporary;
+static mtp_additional_properties max_property(mtp_additional_properties a,
+                                              mtp_additional_properties b)
+{
+       mtp_additional_properties r;
+       mtp_additional_properties t = (a | b) & mtp_temporary;
        a &= ~mtp_temporary;
        b &= ~mtp_temporary;
 
@@ -412,12 +418,13 @@ static unsigned max_property(unsigned a, unsigned b) {
  * the mtp_property.
  *
  * @return mtp_property_const if only calls of const functions are detected
- *         mtp_property_pure if only Loads and const/pure
- *         calls detected
- *         mtp_no_property else
+ *         mtp_property_pure  if only Loads and const/pure calls detected
+ *         mtp_no_property    else
  */
-static unsigned _follow_mem(ir_node *node) {
-       unsigned m, mode = mtp_property_const;
+static mtp_additional_properties follow_mem_(ir_node *node)
+{
+       mtp_additional_properties mode = mtp_property_const;
+       mtp_additional_properties m;
        ir_node  *ptr;
        int i;
 
@@ -425,11 +432,9 @@ static unsigned _follow_mem(ir_node *node) {
                if (mode == mtp_no_property)
                        return mtp_no_property;
 
-               if (irn_visited(node))
+               if (irn_visited_else_mark(node))
                        return mode;
 
-               mark_irn_visited(node);
-
                switch (get_irn_opcode(node)) {
                case iro_Proj:
                        node = get_Proj_pred(node);
@@ -443,7 +448,7 @@ static unsigned _follow_mem(ir_node *node) {
                case iro_Sync:
                        /* do a dfs search */
                        for (i = get_irn_arity(node) - 1; i >= 0; --i) {
-                               m    = _follow_mem(get_irn_n(node, i));
+                               m    = follow_mem_(get_irn_n(node, i));
                                mode = max_property(mode, m);
                                if (mode == mtp_no_property)
                                        return mtp_no_property;
@@ -461,12 +466,11 @@ static unsigned _follow_mem(ir_node *node) {
                case iro_Call:
                        /* A call is only tolerable if its either constant or pure. */
                        ptr = get_Call_ptr(node);
-                       if (get_irn_op(ptr) == op_SymConst &&
-                               get_SymConst_kind(ptr) == symconst_addr_ent) {
+                       if (is_SymConst_addr_ent(ptr)) {
                                ir_entity *ent = get_SymConst_entity(ptr);
                                ir_graph  *irg = get_entity_irg(ent);
 
-                               if (irg == current_ir_graph) {
+                               if (irg == get_irn_irg(node)) {
                                        /* A self-recursive call. The property did not depend on this call. */
                                } else if (irg == NULL) {
                                        m = get_entity_additional_properties(ent) & (mtp_property_const|mtp_property_pure);
@@ -485,7 +489,7 @@ static unsigned _follow_mem(ir_node *node) {
                        return mtp_no_property;
                }
        }
-}  /* _follow_mem */
+}
 
 /**
  * Follow the memory chain starting at node and determine
@@ -495,12 +499,11 @@ static unsigned _follow_mem(ir_node *node) {
  *         mtp_property_pure  if only Loads and const/pure calls detected
  *         mtp_no_property else
  */
-static unsigned follow_mem(ir_node *node, unsigned mode) {
-       unsigned m;
-
-       m = _follow_mem(node);
+static mtp_additional_properties follow_mem(ir_node *node, mtp_additional_properties mode)
+{
+       mtp_additional_properties m = follow_mem_(node);
        return max_property(mode, m);
-}  /* follow_mem */
+}
 
 /**
  * Check if a graph represents a const or a pure function.
@@ -508,11 +511,11 @@ static unsigned follow_mem(ir_node *node, unsigned mode) {
  * @param irg  the graph to check
  * @param top  if set, this is the top call
  */
-static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
+static mtp_additional_properties check_const_or_pure_function(ir_graph *irg, int top)
+{
        ir_node *end, *endbl;
        int j;
-       unsigned prop = get_irg_additional_properties(irg);
-       ir_graph *rem = current_ir_graph;
+       mtp_additional_properties prop = get_irg_additional_properties(irg);
 
        if (prop & mtp_property_const) {
                /* already marked as a const function */
@@ -538,16 +541,15 @@ static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
        endbl = get_nodes_block(end);
        prop  = mtp_property_const;
 
-       current_ir_graph = irg;
-
+       ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
        inc_irg_visited(irg);
-       /* mark the initial mem: recursion of follow_mem stops here */
+       /* mark the initial mem: recursion of follow_mem() stops here */
        mark_irn_visited(get_irg_initial_mem(irg));
 
        /* visit every Return */
        for (j = get_Block_n_cfgpreds(endbl) - 1; j >= 0; --j) {
                ir_node   *node = get_Block_cfgpred(endbl, j);
-               ir_opcode code  = get_irn_opcode(node);
+               unsigned   code = get_irn_opcode(node);
                ir_node   *mem;
 
                /* Bad nodes usually do NOT produce anything, so it's ok */
@@ -575,12 +577,17 @@ static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
        if (prop != mtp_no_property) {
                /* check, if a keep-alive exists */
                for (j = get_End_n_keepalives(end) - 1; j >= 0; --j) {
-                       ir_node *mem = get_End_keepalive(end, j);
+                       ir_node *kept = get_End_keepalive(end, j);
+
+                       if (is_Block(kept)) {
+                               prop = mtp_no_property;
+                               break;
+                       }
 
-                       if (mode_M != get_irn_mode(mem))
+                       if (mode_M != get_irn_mode(kept))
                                continue;
 
-                       prop = max_property(prop, follow_mem(mem, prop));
+                       prop = max_property(prop, follow_mem(kept, prop));
                        if (prop == mtp_no_property)
                                break;
                }
@@ -591,14 +598,14 @@ static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
                        /* We use the temporary flag here to mark optimistic result.
                           Set the property only if we are sure that it does NOT base on
                           temporary results OR if we are at top-level. */
-                       set_irg_additional_property(irg, prop & ~mtp_temporary);
+                       add_irg_additional_properties(irg, prop & ~mtp_temporary);
                        SET_IRG_READY(irg);
                }
        }
        if (top)
                SET_IRG_READY(irg);
        CLEAR_IRG_BUSY(irg);
-       current_ir_graph = rem;
+       ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
        return prop;
 }  /* check_const_or_pure_function */
 
@@ -607,28 +614,28 @@ static unsigned check_const_or_pure_function(ir_graph *irg, int top) {
  *
  * @param ctx  context
  */
-static void handle_const_Calls(env_t *ctx) {
-       int i;
+static void handle_const_Calls(env_t *ctx)
+{
+       size_t i, n;
 
        ctx->n_calls_SymConst = 0;
        ctx->n_calls_Sel      = 0;
 
        /* all calls of const functions can be transformed */
-       for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+       for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
                ir_graph *irg  = get_irp_irg(i);
 
-               ctx->const_call_list = NULL;
-               ctx->pure_call_list  = NULL;
-               ctx->proj_list = NULL;
-               irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
+               ctx->float_const_call_list    = NULL;
+               ctx->nonfloat_const_call_list = NULL;
+               ctx->pure_call_list           = NULL;
+               ctx->proj_list                = NULL;
 
-               if (ctx->const_call_list) {
-                       fix_const_call_list(irg, ctx->const_call_list, ctx->proj_list);
+               ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
+               irg_walk_graph(irg, NULL, collect_const_and_pure_calls, ctx);
 
-                       /* this graph was changed, invalidate analysis info */
-                       set_irg_outs_inconsistent(irg);
-                       set_irg_doms_inconsistent(irg);
-               }
+               if (ctx->float_const_call_list != NULL)
+                       fix_const_call_lists(irg, ctx);
+               ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
        }
 }  /* handle_const_Calls */
 
@@ -637,27 +644,26 @@ static void handle_const_Calls(env_t *ctx) {
  *
  * @param ctx  context
  */
-static void handle_nothrow_Calls(env_t *ctx) {
-       int i;
+static void handle_nothrow_Calls(env_t *ctx)
+{
+       size_t i, n;
 
        ctx->n_calls_SymConst = 0;
        ctx->n_calls_Sel      = 0;
 
        /* all calls of const functions can be transformed */
-       for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+       for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
                ir_graph *irg  = get_irp_irg(i);
 
                ctx->nothrow_call_list = NULL;
                ctx->proj_list         = NULL;
+
+               ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
                irg_walk_graph(irg, NULL, collect_nothrow_calls, ctx);
 
-               if (ctx->nothrow_call_list) {
+               if (ctx->nothrow_call_list)
                        fix_nothrow_call_list(irg, ctx->nothrow_call_list, ctx->proj_list);
-
-                       /* this graph was changed, invalidate analysis info */
-                       set_irg_outs_inconsistent(irg);
-                       set_irg_doms_inconsistent(irg);
-               }
+               ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
        }
 }
 
@@ -667,7 +673,8 @@ static void handle_nothrow_Calls(env_t *ctx) {
  *
  * @param node  the node to check
  */
-static int is_malloc_call_result(const ir_node *node) {
+static int is_malloc_call_result(const ir_node *node)
+{
        if (is_Alloc(node) && get_Alloc_where(node) == heap_alloc) {
                /* Firm style high-level allocation */
                return 1;
@@ -675,8 +682,8 @@ static int is_malloc_call_result(const ir_node *node) {
        if (is_alloc_entity != NULL && is_Call(node)) {
                ir_node *ptr = get_Call_ptr(node);
 
-               if (is_SymConst_addr_ent(ptr)) {
-                       ir_entity *ent = get_SymConst_entity(ptr);
+               if (is_Global(ptr)) {
+                       ir_entity *ent = get_Global_entity(ptr);
                        return is_alloc_entity(ent);
                }
        }
@@ -686,11 +693,102 @@ static int is_malloc_call_result(const ir_node *node) {
 /**
  * Update a property depending on a call property.
  */
-static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
-       unsigned t = (orig_prop | call_prop) & mtp_temporary;
-       unsigned r = orig_prop & call_prop;
+static mtp_additional_properties update_property(mtp_additional_properties orig_prop, mtp_additional_properties call_prop)
+{
+       mtp_additional_properties t = (orig_prop | call_prop) & mtp_temporary;
+       mtp_additional_properties r = orig_prop & call_prop;
        return r | t;
-}  /** update_property */
+}
+
+/**
+ * Check if a node is stored.
+ */
+static int is_stored(const ir_node *n)
+{
+       const ir_edge_t *edge;
+       const ir_node   *ptr;
+
+       foreach_out_edge(n, edge) {
+               const ir_node *succ = get_edge_src_irn(edge);
+
+               switch (get_irn_opcode(succ)) {
+               case iro_Return:
+               case iro_Load:
+               case iro_Cmp:
+                       /* ok */
+                       break;
+               case iro_Store:
+                       if (get_Store_value(succ) == n)
+                               return 1;
+                       /* ok if it's only the address input */
+                       break;
+               case iro_Sel:
+               case iro_Cast:
+               case iro_Confirm:
+                       if (is_stored(succ))
+                               return 1;
+                       break;
+               case iro_Call:
+                       ptr = get_Call_ptr(succ);
+                       if (is_Global(ptr)) {
+                               ir_entity *ent = get_Global_entity(ptr);
+                               int       i;
+
+                               /* we know the called entity */
+                               for (i = get_Call_n_params(succ) - 1; i >= 0; --i) {
+                                       if (get_Call_param(succ, i) == n) {
+                                               /* n is the i'th param of the call */
+                                               if (get_method_param_access(ent, i) & ptr_access_store) {
+                                                       /* n is stored in ent */
+                                                       return 1;
+                                               }
+                                       }
+                               }
+                       } else {
+                               /* unknown call address */
+                               return 1;
+                       }
+                       break;
+               default:
+                       /* bad, potential alias */
+                       return 1;
+               }
+       }
+       return 0;
+}  /* is_stored */
+
+/**
+ * Check that the return value of an irg is not stored anywhere.
+ *
+ * @return ~mtp_property_malloc if return values are stored, ~0 else
+ */
+static mtp_additional_properties check_stored_result(ir_graph *irg)
+{
+       ir_node  *end_blk = get_irg_end_block(irg);
+       int      i, j;
+       mtp_additional_properties res = ~mtp_no_property;
+       int      old_edges = edges_assure_kind(irg, EDGE_KIND_NORMAL);
+
+       for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
+               ir_node *pred = get_Block_cfgpred(end_blk, i);
+
+               if (! is_Return(pred))
+                       continue;
+               for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
+                       const ir_node *irn = get_Return_res(pred, j);
+
+                       if (is_stored(irn)) {
+                               /* bad, might create an alias */
+                               res = ~mtp_property_malloc;
+                               goto finish;
+                       }
+               }
+       }
+finish:
+       if (! old_edges)
+               edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
+       return res;
+}
 
 /**
  * Check if a graph represents a nothrow or a malloc function.
@@ -698,10 +796,13 @@ static unsigned update_property(unsigned orig_prop, unsigned call_prop) {
  * @param irg  the graph to check
  * @param top  if set, this is the top call
  */
-static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
-       ir_node  *end_blk = get_irg_end_block(irg);
-       int      i, j;
-       unsigned curr_prop = mtp_property_malloc | mtp_property_nothrow;
+static mtp_additional_properties check_nothrow_or_malloc(ir_graph *irg, int top)
+{
+       mtp_additional_properties curr_prop = mtp_property_malloc | mtp_property_nothrow;
+       ir_node                  *end_blk   = get_irg_end_block(irg);
+       ir_entity *ent;
+       ir_type   *mtp;
+       int       i, j;
 
        if (IS_IRG_READY(irg)) {
                /* already checked */
@@ -714,6 +815,12 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
        }
        SET_IRG_BUSY(irg);
 
+       ent = get_irg_entity(irg);
+       mtp = get_entity_type(ent);
+
+       if (get_method_n_ress(mtp) <= 0)
+               curr_prop &= ~mtp_property_malloc;
+
        for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
                ir_node *pred = get_Block_cfgpred(end_blk, i);
 
@@ -721,22 +828,27 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
                        if (curr_prop & mtp_property_malloc) {
                                /* check, if malloc is called here */
                                for (j = get_Return_n_ress(pred) - 1; j >= 0; --j) {
-                                       const ir_node *res = get_Return_res(pred, j);
-                                       const ir_node *irn = skip_Proj_const(res);
+                                       ir_node *res = get_Return_res(pred, j);
+
+                                       /* skip Confirms and Casts */
+                                       res = skip_HighLevel_ops(res);
+                                       /* skip Proj's */
+                                       while (is_Proj(res))
+                                               res = get_Proj_pred(res);
                                        if (is_malloc_call_result(res)) {
                                                /* ok, this is a malloc */
                                        } else if (is_Call(res)) {
                                                ir_node *ptr = get_Call_ptr(res);
 
-                                               if (is_SymConst_addr_ent(ptr)) {
+                                               if (is_Global(ptr)) {
                                                        /* a direct call */
-                                                       ir_entity *ent    = get_SymConst_entity(ptr);
+                                                       ir_entity *ent    = get_Global_entity(ptr);
                                                        ir_graph  *callee = get_entity_irg(ent);
 
                                                        if (callee == irg) {
                                                                /* A self-recursive call. The property did not depend on this call. */
                                                        } else if (callee != NULL) {
-                                                               unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
+                                                               mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0);
                                                                curr_prop = update_property(curr_prop, prop);
                                                        } else {
                                                                curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
@@ -771,6 +883,9 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
                                                        /* unknown call */
                                                        curr_prop &= ~mtp_property_malloc;
                                                }
+                                       } else {
+                                               /* unknown return value */
+                                               curr_prop &= ~mtp_property_malloc;
                                        }
                                }
                        }
@@ -781,18 +896,20 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
                        if (is_Call(pred)) {
                                ir_node *ptr = get_Call_ptr(pred);
 
-                               if (is_SymConst_addr_ent(ptr)) {
+                               if (is_Global(ptr)) {
                                        /* a direct call */
-                                       ir_entity *ent    = get_SymConst_entity(ptr);
+                                       ir_entity *ent    = get_Global_entity(ptr);
                                        ir_graph  *callee = get_entity_irg(ent);
 
                                        if (callee == irg) {
                                                /* A self-recursive call. The property did not depend on this call. */
                                        } else if (callee != NULL) {
-                                               unsigned prop = check_nothrow_or_malloc(callee, /*top=*/0);
+                                               /* Note: we check here for nothrow only, so do NOT reset the malloc property */
+                                               mtp_additional_properties prop = check_nothrow_or_malloc(callee, /*top=*/0) | mtp_property_malloc;
                                                curr_prop = update_property(curr_prop, prop);
                                        } else {
-                                               curr_prop = update_property(curr_prop, get_entity_additional_properties(ent));
+                                               if ((get_entity_additional_properties(ent) & mtp_property_nothrow) == 0)
+                                                       curr_prop &= ~mtp_property_nothrow;
                                        }
                                } else if (get_opt_closed_world() &&
                                           is_Sel(ptr) &&
@@ -834,12 +951,23 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
                        break;
                }
        }
+
+       if (curr_prop & mtp_property_malloc) {
+               /*
+                * Note that the malloc property means not only returning newly
+                * allocated memory, but also that this memory is ALIAS FREE.
+                * To ensure that, we do NOT allow the returned memory to be
+                * stored anywhere.
+                */
+               curr_prop &= check_stored_result(irg);
+       }
+
        if (curr_prop != mtp_no_property) {
                if (top || (curr_prop & mtp_temporary) == 0) {
                        /* We use the temporary flag here to mark an optimistic result.
                           Set the property only if we are sure that it does NOT base on
                           temporary results OR if we are at top-level. */
-                       set_irg_additional_property(irg, curr_prop & ~mtp_temporary);
+                       add_irg_additional_properties(irg, curr_prop & ~mtp_temporary);
                        SET_IRG_READY(irg);
                }
        }
@@ -849,25 +977,42 @@ static unsigned check_nothrow_or_malloc(ir_graph *irg, int top) {
        return curr_prop;
 }  /* check_nothrow_or_malloc */
 
+/**
+ * When a function was detected as "const", it might be moved out of loops.
+ * This might be dangerous if the graph can contain endless loops.
+ */
+static void check_for_possible_endless_loops(ir_graph *irg)
+{
+       ir_loop *root_loop;
+       assure_cf_loop(irg);
+
+       root_loop = get_irg_loop(irg);
+       if (root_loop->flags & loop_outer_loop)
+               add_irg_additional_properties(irg, mtp_property_has_loop);
+}
+
 /*
  * optimize function calls by handling const functions
  */
-void optimize_funccalls(int force_run)
+void optimize_funccalls(int force_run, check_alloc_entity_func callback)
 {
-       int i, last_idx;
-       unsigned num_const   = 0;
-       unsigned num_pure    = 0;
-       unsigned num_nothrow = 0;
-       unsigned num_malloc  = 0;
+       size_t i, n;
+       int last_idx;
+       size_t num_const   = 0;
+       size_t num_pure    = 0;
+       size_t num_nothrow = 0;
+       size_t num_malloc  = 0;
+
+       is_alloc_entity = callback;
 
        /* prepare: mark all graphs as not analyzed */
-       last_idx = get_irp_last_idx();
+       last_idx  = get_irp_last_idx();
        ready_set = rbitset_malloc(last_idx);
        busy_set  = rbitset_malloc(last_idx);
 
        /* first step: detect, which functions are nothrow or malloc */
        DB((dbg, LEVEL_2, "Detecting nothrow and malloc properties ...\n"));
-       for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+       for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
                ir_graph *irg = get_irp_irg(i);
                unsigned prop = check_nothrow_or_malloc(irg, /*top=*/1);
 
@@ -886,8 +1031,8 @@ void optimize_funccalls(int force_run)
                env_t ctx;
 
                handle_nothrow_Calls(&ctx);
-               DB((dbg, LEVEL_1, "Detected %u nothrow graphs, %u malloc graphs.\n", num_nothrow, num_malloc));
-               DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to nothrow functions.\n",
+               DB((dbg, LEVEL_1, "Detected %zu nothrow graphs, %zu malloc graphs.\n", num_nothrow, num_malloc));
+               DB((dbg, LEVEL_1, "Optimizes %zu(SymConst) + %zu(Sel) calls to nothrow functions.\n",
                        ctx.n_calls_SymConst, ctx.n_calls_Sel));
        } else {
                DB((dbg, LEVEL_1, "No graphs without side effects detected\n"));
@@ -898,13 +1043,14 @@ void optimize_funccalls(int force_run)
 
        /* third step: detect, which functions are const or pure */
        DB((dbg, LEVEL_2, "Detecting const and pure properties ...\n"));
-       for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+       for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
                ir_graph *irg = get_irp_irg(i);
                unsigned prop = check_const_or_pure_function(irg, /*top=*/1);
 
                if (prop & mtp_property_const) {
                        ++num_const;
                        DB((dbg, LEVEL_2, "%+F has the const property\n", irg));
+                       check_for_possible_endless_loops(irg);
                } else if (prop & mtp_property_pure) {
                        ++num_pure;
                        DB((dbg, LEVEL_2, "%+F has the pure property\n", irg));
@@ -915,7 +1061,7 @@ void optimize_funccalls(int force_run)
                env_t ctx;
 
                handle_const_Calls(&ctx);
-               DB((dbg, LEVEL_1, "Detected %u const graphs, %u pure graphs.\n", num_const, num_pure));
+               DB((dbg, LEVEL_1, "Detected %zu const graphs, %zu pure graphs.\n", num_const, num_pure));
                DB((dbg, LEVEL_1, "Optimizes %u(SymConst) + %u(Sel) calls to const functions.\n",
                       ctx.n_calls_SymConst, ctx.n_calls_Sel));
        } else {
@@ -926,7 +1072,39 @@ void optimize_funccalls(int force_run)
 }  /* optimize_funccalls */
 
 /* initialize the funccall optimization */
-void firm_init_funccalls(void) {
+void firm_init_funccalls(void)
+{
        FIRM_DBG_REGISTER(dbg, "firm.opt.funccalls");
-//     firm_dbg_set_mask(dbg, -1);
 }  /* firm_init_funccalls */
+
+typedef struct pass_t {
+       ir_prog_pass_t          pass;
+       int                     force_run;
+       check_alloc_entity_func callback;
+} pass_t;
+
+/**
+ * Wrapper for running optimize_funccalls() as an ir_prog pass.
+ */
+static int pass_wrapper(ir_prog *irp, void *context)
+{
+       pass_t *pass = (pass_t*)context;
+
+       (void)irp;
+       optimize_funccalls(pass->force_run, pass->callback);
+       return 0;
+}  /* pass_wrapper */
+
+/* Creates an ir_prog pass for optimize_funccalls. */
+ir_prog_pass_t *optimize_funccalls_pass(
+       const char *name,
+       int force_run, check_alloc_entity_func callback)
+{
+       struct pass_t *pass = XMALLOCZ(struct pass_t);
+
+       pass->force_run = force_run;
+       pass->callback  = callback;
+
+       return def_prog_pass_constructor(
+               &pass->pass, name ? name : "funccall", pass_wrapper);
+}  /* optimize_funccalls_pass */