remove the concept of M_except; we always use the normal M Proj now
author     Matthias Braun <matze@braunis.de>
           Fri, 11 Dec 2009 16:19:45 +0000 (16:19 +0000)
committer  Matthias Braun <matze@braunis.de>
           Fri, 11 Dec 2009 16:19:45 +0000 (16:19 +0000)
[r26778]

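For API users the change means: pn_Call_M is now the enum value itself (pn_Call_M_regular is gone,
as is the old #define alias), and the extra exception-memory projections pn_Call_M_except,
pn_CopyB_M_except and pn_InstOf_M_except no longer exist; every memory-producing node keeps a
single memory Proj. A minimal sketch of an affected call site follows; the helper name
get_call_memory and its block/call parameters are hypothetical and not part of this patch:

    #include <libfirm/firm.h>

    /* Hypothetical helper: obtain the memory value produced by a Call node.
     * Before this commit one had to pick between pn_Call_M_regular and
     * pn_Call_M_except; now there is exactly one memory Proj per Call. */
    static ir_node *get_call_memory(ir_node *block, ir_node *call)
    {
        /* old: new_r_Proj(block, call, mode_M, pn_Call_M_regular)
         *  or: new_r_Proj(block, call, mode_M, pn_Call_M_except)  (removed) */
        return new_r_Proj(block, call, mode_M, pn_Call_M);
    }
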
22 files changed:
include/libfirm/irnode.h
ir/be/benode.h
ir/be/ia32/ia32_intrinsics.c
ir/be/ppc32/ppc32_transform_conv.c
ir/ir/instrument.c
ir/ir/ircons.c
ir/ir/irdump.c
ir/ir/irnode.c
ir/ir/iropt.c
ir/ir/irprofile.c
ir/ir/irvrfy.c
ir/lower/lower_calls.c
ir/lower/lower_copyb.c
ir/lower/lower_dw.c
ir/lower/lower_intrinsics.c
ir/opt/escape_ana.c
ir/opt/funccall.c
ir/opt/opt_inline.c
ir/opt/opt_ldst.c
ir/opt/tailrec.c
scripts/gen_ir.py
scripts/ir_spec.py

diff --git a/include/libfirm/irnode.h b/include/libfirm/irnode.h
index abbd13f..86e8c97 100644
@@ -70,7 +70,7 @@
  * Some projection numbers must be always equal to support automatic phi construction
  */
 enum pn_generic {
-       pn_Generic_M_regular = 0,  /**< The memory result. */
+       pn_Generic_M         = 0,  /**< The memory result. */
        pn_Generic_X_regular = 1,  /**< Execution result if no exception occurred. */
        pn_Generic_X_except  = 2,  /**< The control flow result branching to the exception handler */
        pn_Generic_other     = 3   /**< First free projection number */
@@ -596,17 +596,14 @@ void       set_Sel_entity (ir_node *node, ir_entity *ent);
  * Projection numbers for result of Call node: use for Proj nodes!
  */
 typedef enum {
-       pn_Call_M_regular = pn_Generic_M_regular, /**< The memory result. */
+       pn_Call_M         = pn_Generic_M,         /**< The memory result. */
        pn_Call_X_regular = pn_Generic_X_regular, /**< The control flow result when no exception occurs. */
        pn_Call_X_except  = pn_Generic_X_except,  /**< The control flow result branching to the exception handler. */
        pn_Call_T_result  = pn_Generic_other,     /**< The tuple containing all (0, 1, 2, ...) results. */
-       pn_Call_M_except,                         /**< The memory result in case the called method terminated with
-                                                      an exception. */
        pn_Call_P_value_res_base,                 /**< A pointer to the memory region containing copied results
                                                       passed by value (for compound result types). */
        pn_Call_max                               /**< number of projections from a Call */
 } pn_Call;   /* Projection numbers for Call. */
-#define pn_Call_M pn_Call_M_regular
 
 /** Retrieve the memory input of a Call. */
 ir_node *get_Call_mem(const ir_node *node);
@@ -669,9 +666,9 @@ void    remove_Call_callee_arr(ir_node *node);
  * Projection numbers for result of Builtin node: use for Proj nodes!
  */
 typedef enum {
-       pn_Builtin_M        = pn_Generic_M_regular, /**< The memory result. */
-       pn_Builtin_1_result = pn_Generic_other,     /**< first result. */
-       pn_Builtin_max                              /**< number of projections from a Builtin */
+       pn_Builtin_M        = pn_Generic_M,     /**< The memory result. */
+       pn_Builtin_1_result = pn_Generic_other, /**< first result. */
+       pn_Builtin_max                          /**< number of projections from a Builtin */
 } pn_Builtin;   /* Projection numbers for Builtin. */
 
 ir_node         *get_Builtin_mem(const ir_node *node);
@@ -762,7 +759,7 @@ void     set_Quot_resmode(ir_node *node, ir_mode *mode);
  * Projection numbers for Quot: use for Proj nodes!
  */
 typedef enum {
-       pn_Quot_M         = pn_Generic_M_regular, /**< Memory result. */
+       pn_Quot_M         = pn_Generic_M,         /**< Memory result. */
        pn_Quot_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Quot_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_Quot_res       = pn_Generic_other,     /**< Result of computation. */
@@ -782,7 +779,7 @@ void     set_DivMod_resmode(ir_node *node, ir_mode *mode);
  * Projection numbers for DivMod: use for Proj nodes!
  */
 typedef enum {
-       pn_DivMod_M         = pn_Generic_M_regular, /**< Memory result. */
+       pn_DivMod_M         = pn_Generic_M,         /**< Memory result. */
        pn_DivMod_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_DivMod_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_DivMod_res_div   = pn_Generic_other,     /**< Result of computation a / b. */
@@ -805,7 +802,7 @@ void     set_Div_no_remainder(ir_node *node, int no_remainder);
  * Projection numbers for Div: use for Proj nodes!
  */
 typedef enum {
-       pn_Div_M         = pn_Generic_M_regular, /**< Memory result. */
+       pn_Div_M         = pn_Generic_M,         /**< Memory result. */
        pn_Div_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Div_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_Div_res       = pn_Generic_other,     /**< Result of computation. */
@@ -825,7 +822,7 @@ void     set_Mod_resmode(ir_node *node, ir_mode *mode);
  * Projection numbers for Mod: use for Proj nodes!
  */
 typedef enum {
-       pn_Mod_M         = pn_Generic_M_regular, /**< Memory result.    */
+       pn_Mod_M         = pn_Generic_M,         /**< Memory result.    */
        pn_Mod_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Mod_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_Mod_res       = pn_Generic_other,     /**< Result of computation. */
@@ -966,7 +963,7 @@ void     set_memop_ptr(ir_node *node, ir_node *ptr);
  * Projection numbers for Load: use for Proj nodes!
  */
 typedef enum {
-       pn_Load_M         = pn_Generic_M_regular, /**< Memory result. */
+       pn_Load_M         = pn_Generic_M,         /**< Memory result. */
        pn_Load_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Load_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_Load_res       = pn_Generic_other,     /**< Result of load operation. */
@@ -988,7 +985,7 @@ void           set_Load_align(ir_node *node, ir_align align);
  * Projection numbers for Store: use for Proj nodes!
  */
 typedef enum {
-       pn_Store_M         = pn_Generic_M_regular, /**< Memory result. */
+       pn_Store_M         = pn_Generic_M,         /**< Memory result. */
        pn_Store_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Store_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_Store_max       = pn_Generic_other      /**< number of projections from a Store */
@@ -1009,7 +1006,7 @@ void           set_Store_align(ir_node *node, ir_align align);
  * Projection numbers for Alloc: use for Proj nodes!
  */
 typedef enum {
-       pn_Alloc_M         = pn_Generic_M_regular, /**< Memory result. */
+       pn_Alloc_M         = pn_Generic_M,         /**< Memory result. */
        pn_Alloc_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Alloc_X_except  = pn_Generic_X_except,  /**< Execution result if exception occurred. */
        pn_Alloc_res       = pn_Generic_other,     /**< Result of allocation. */
@@ -1095,12 +1092,10 @@ void     set_Mux_true(ir_node *node, ir_node *ir_true);
  * Projection numbers for result of CopyB node: use for Proj nodes!
  */
 typedef enum {
-       pn_CopyB_M_regular = pn_Generic_M_regular, /**< The memory result. */
+       pn_CopyB_M_regular = pn_Generic_M,         /**< The memory result. */
        pn_CopyB_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_CopyB_X_except  = pn_Generic_X_except,  /**< The control flow result branching to the exception handler */
-       pn_CopyB_M_except  = pn_Generic_other,     /**< The memory result in case the runtime function terminated with
-                                                       an exception */
-       pn_CopyB_max                               /**< number of projections from a CopyB */
+       pn_CopyB_max       = pn_Generic_other      /**< number of projections from a CopyB */
 } pn_CopyB;   /* Projection numbers for CopyB. */
 #define pn_CopyB_M pn_CopyB_M_regular
 
@@ -1117,12 +1112,10 @@ void     set_CopyB_type(ir_node *node, ir_type *data_type);
  * Projection numbers for result of InstOf node: use for Proj nodes!
  */
 typedef enum {
-       pn_InstOf_M_regular = pn_Generic_M_regular, /**< The memory result. */
+       pn_InstOf_M_regular = pn_Generic_M,         /**< The memory result. */
        pn_InstOf_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_InstOf_X_except  = pn_Generic_X_except,  /**< The control flow result branching to the exception handler */
        pn_InstOf_res       = pn_Generic_other,     /**< The checked object pointer. */
-       pn_InstOf_M_except,                         /**< The memory result in case the runtime function terminated with
-                                                        an exception */
        pn_InstOf_max                               /**< number of projections from an InstOf */
 } pn_InstOf;
 #define pn_InstOf_M pn_InstOf_M_regular
@@ -1139,7 +1132,7 @@ void    set_InstOf_obj(ir_node *node, ir_node *obj);
  * Projection numbers for Raise.
  */
 typedef enum {
-       pn_Raise_M = pn_Generic_M_regular,  /**< The Memory result. */
+       pn_Raise_M = pn_Generic_M,          /**< The Memory result. */
        pn_Raise_X = pn_Generic_X_regular,  /**< The control flow to the exception handler. */
        pn_Raise_max                        /**< number of projections from a Raise */
 } pn_Raise;  /* Projection numbers for Raise. */
@@ -1153,7 +1146,7 @@ void     set_Raise_exo_ptr(ir_node *node, ir_node *exoptr);
  * Projection numbers for result of Bound node: use for Proj nodes!
  */
 typedef enum {
-       pn_Bound_M         = pn_Generic_M_regular, /**< The memory result. */
+       pn_Bound_M         = pn_Generic_M,         /**< The memory result. */
        pn_Bound_X_regular = pn_Generic_X_regular, /**< Execution result if no exception occurred. */
        pn_Bound_X_except  = pn_Generic_X_except,  /**< The control flow result branching to the exception handler */
        pn_Bound_res       = pn_Generic_other,     /**< The checked index. */
diff --git a/ir/be/benode.h b/ir/be/benode.h
index ad34f4a..4c61c0d 100644
@@ -273,9 +273,9 @@ enum {
  * Projection numbers for result of be_Call node: use for Proj nodes!
  */
 typedef enum {
-       pn_be_Call_M_regular = pn_Call_M_regular,  /**< The memory result of a be_Call. */
+       pn_be_Call_M_regular = pn_Call_M,    /**< The memory result of a be_Call. */
        pn_be_Call_sp        = pn_Call_max,
-       pn_be_Call_first_res                      /**< The first result proj number of a be_Call. */
+       pn_be_Call_first_res                 /**< The first result proj number of a be_Call. */
 } pn_be_Call;
 
 /**
diff --git a/ir/be/ia32/ia32_intrinsics.c b/ir/be/ia32/ia32_intrinsics.c
index 572fd0c..8f0f495 100644
@@ -130,7 +130,7 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
                                /* should not happen here */
                                edges_reroute(proj, bad, irg);
                                break;
-                       case pn_Call_M_except:
+                       case pn_Call_M:
                                /* should not happen here */
                                edges_reroute(proj, nomem, irg);
                                break;
@@ -154,7 +154,6 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
                }
 
                turn_into_tuple(call, pn_Call_max);
-               set_Tuple_pred(call, pn_Call_M_regular,        nomem);
                /*
                 * Beware:
                 * We do not check here if this call really has exception and regular Proj's.
@@ -167,10 +166,10 @@ static void resolve_call(ir_node *call, ir_node *l_res, ir_node *h_res, ir_graph
                jmp = new_r_Jmp(block);
                set_opt_cse(old_cse);
 
+               set_Tuple_pred(call, pn_Call_M,                nomem);
                set_Tuple_pred(call, pn_Call_X_regular,        jmp);
                set_Tuple_pred(call, pn_Call_X_except,         bad);
                set_Tuple_pred(call, pn_Call_T_result,         res);
-               set_Tuple_pred(call, pn_Call_M_except,         nomem);
                set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
        }
 }
diff --git a/ir/be/ppc32/ppc32_transform_conv.c b/ir/be/ppc32/ppc32_transform_conv.c
index 3810f03..8c820be 100644
@@ -90,7 +90,7 @@ static ir_node *own_gen_convert_call(ppc32_transform_env_t *env, ir_node *op, co
        callee       = new_rd_SymConst_addr_ent(env->dbg, env->irg, mode_P_code, method_ent, method_type);
        call         = new_rd_Call(env->dbg, env->block, memory, callee, 1, in, method_type);
        call_results = new_rd_Proj(env->dbg, env->block, call, mode_T, pn_Call_T_result);
-       memory       = new_rd_Proj(env->dbg, env->block, call, mode_M, pn_Call_M_regular);
+       memory       = new_rd_Proj(env->dbg, env->block, call, mode_M, pn_Call_M);
 
        return new_rd_Proj(env->dbg, env->block, call_results, to_mode, 0);
 }
diff --git a/ir/ir/instrument.c b/ir/ir/instrument.c
index 58940f7..fe53f4d 100644
@@ -89,7 +89,7 @@ void instrument_initcall(ir_graph *irg, ir_entity *ent) {
        adr = new_r_SymConst(irg, mode_P_code, sym, symconst_addr_ent);
 
        call    = new_r_Call(first_block, get_irg_no_mem(irg), adr, 0, NULL, get_entity_type(ent));
-       new_mem = new_r_Proj(first_block, call, mode_M, pn_Call_M_regular);
+       new_mem = new_r_Proj(first_block, call, mode_M, pn_Call_M);
 
        initial_mem = get_irg_initial_mem(irg);
        edges_reroute(initial_mem, new_mem, irg);
diff --git a/ir/ir/ircons.c b/ir/ir/ircons.c
index bdae29f..bc0b254 100644
@@ -786,9 +786,9 @@ static inline ir_node **new_frag_arr(ir_node *n) {
        opt = get_opt_optimize(); set_optimize(0);
        /* Here we rely on the fact that all frag ops have Memory as first result! */
        if (is_Call(n)) {
-               arr[0] = new_Proj(n, mode_M, pn_Call_M_except);
+               arr[0] = new_Proj(n, mode_M, pn_Call_M);
        } else if (is_CopyB(n)) {
-               arr[0] = new_Proj(n, mode_M, pn_CopyB_M_except);
+               arr[0] = new_Proj(n, mode_M, pn_CopyB_M);
        } else {
                assert((pn_Quot_M == pn_DivMod_M) &&
                       (pn_Quot_M == pn_Div_M)    &&
diff --git a/ir/ir/irdump.c b/ir/ir/irdump.c
index cd64da2..e9f431e 100644
@@ -946,11 +946,10 @@ static const pns_lookup_t cond_lut[] = {
 /** the lookup table for Proj(Call) names */
 static const pns_lookup_t call_lut[] = {
 #define X(a)    { pn_Call_##a, #a }
-       X(M_regular),
+       X(M),
        X(X_regular),
        X(X_except),
        X(T_result),
-       X(M_except),
        X(P_value_res_base)
 #undef X
 };
@@ -1031,7 +1030,6 @@ static const pns_lookup_t copyb_lut[] = {
        X(M),
        X(X_regular),
        X(X_except),
-       X(M_except)
 #undef X
 };
 
@@ -1042,7 +1040,6 @@ static const pns_lookup_t instof_lut[] = {
        X(X_regular),
        X(X_except),
        X(res),
-       X(M_except),
 #undef X
 };
 
diff --git a/ir/ir/irnode.c b/ir/ir/irnode.c
index 05b5af6..b950c0c 100644
@@ -2752,7 +2752,7 @@ ir_node *get_fragile_op_mem(ir_node *node) {
        case iro_Alloc :
        case iro_Bound :
        case iro_CopyB :
-               return get_irn_n(node, pn_Generic_M_regular);
+               return get_irn_n(node, pn_Generic_M);
        case iro_Bad   :
        case iro_Unknown:
                return node;
diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 536eda6..89d4cc5 100644
@@ -1596,7 +1596,6 @@ static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) {
                        DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
                        break;
 
-               case pn_CopyB_M_except:
                case pn_CopyB_X_except:
                        DBG_OPT_EXC_REM(proj);
                        proj = get_irg_bad(current_ir_graph);
@@ -4654,7 +4653,6 @@ static ir_node *transform_node_Proj_CopyB(ir_node *proj) {
                        DBG_OPT_EXC_REM(proj);
                        proj = new_r_Jmp(get_nodes_block(copyb));
                        break;
-               case pn_CopyB_M_except:
                case pn_CopyB_X_except:
                        DBG_OPT_EXC_REM(proj);
                        proj = get_irg_bad(get_irn_irg(proj));
diff --git a/ir/ir/irprofile.c b/ir/ir/irprofile.c
index 521b488..7f211b2 100644
@@ -242,7 +242,7 @@ gen_initializer_irg(ir_entity *ent_filename, ir_entity *bblock_id, ir_entity *bb
        ins[3] = new_r_Const_long(irg, mode_Iu, n_blocks);
 
        call = new_r_Call(bb, get_irg_initial_mem(irg), symconst, 4, ins, init_type);
-       ret = new_r_Return(bb, new_r_Proj(bb, call, mode_M, pn_Call_M_regular), 0, NULL);
+       ret = new_r_Return(bb, new_r_Proj(bb, call, mode_M, pn_Call_M), 0, NULL);
        mature_immBlock(bb);
 
        add_immBlock_pred(get_irg_end_block(irg), ret);
diff --git a/ir/ir/irvrfy.c b/ir/ir/irvrfy.c
index 3ce4a24..61697d6 100644
@@ -359,11 +359,10 @@ static int verify_node_Proj_InstOf(ir_node *n, ir_node *p) {
 
        ASSERT_AND_RET_DBG(
                (
-                       (proj == pn_InstOf_M_regular && mode == mode_M) ||
+                       (proj == pn_InstOf_M         && mode == mode_M) ||
                        (proj == pn_InstOf_X_regular && mode == mode_X) ||
                        (proj == pn_InstOf_X_except  && mode == mode_X) ||
-                       (proj == pn_InstOf_res       && mode_is_reference(mode)) ||
-                       (proj == pn_InstOf_M_except  && mode == mode_M)
+                       (proj == pn_InstOf_res       && mode_is_reference(mode))
                ),
                "wrong Proj from InstOf", 0,
                show_proj_failure(p);
@@ -380,11 +379,10 @@ static int verify_node_Proj_Call(ir_node *n, ir_node *p) {
 
        ASSERT_AND_RET_DBG(
                (
-                       (proj == pn_Call_M_regular        && mode == mode_M) ||
+                       (proj == pn_Call_M                && mode == mode_M) ||
                        (proj == pn_Call_X_regular        && mode == mode_X) ||
                        (proj == pn_Call_X_except         && mode == mode_X) ||
                        (proj == pn_Call_T_result         && mode == mode_T) ||
-                       (proj == pn_Call_M_except         && mode == mode_M) ||
                        (proj == pn_Call_P_value_res_base && mode_is_reference(mode))
                ),
                "wrong Proj from Call", 0,
@@ -399,7 +397,7 @@ static int verify_node_Proj_Call(ir_node *n, ir_node *p) {
                ASSERT_AND_RET(
                        !is_NoMem(get_Call_mem(n)),
                        "Exception Proj from FunctionCall", 0);
-       else if (proj == pn_Call_M_regular || proj == pn_Call_M_except)
+       else if (proj == pn_Call_M)
                ASSERT_AND_RET(
                        (!is_NoMem(get_Call_mem(n)) || 1),
                        "Memory Proj from FunctionCall", 0);
@@ -781,10 +779,9 @@ static int verify_node_Proj_CopyB(ir_node *n, ir_node *p) {
 
        ASSERT_AND_RET_DBG(
                (
-                       (proj == pn_CopyB_M_regular && mode == mode_M) ||
+                       (proj == pn_CopyB_M         && mode == mode_M) ||
                        (proj == pn_CopyB_X_regular && mode == mode_X) ||
-                       (proj == pn_CopyB_X_except  && mode == mode_X) ||
-                       (proj == pn_CopyB_M_except  && mode == mode_M)
+                       (proj == pn_CopyB_X_except  && mode == mode_X)
                ),
                "wrong Proj from CopyB", 0,
                show_proj_failure(p);
diff --git a/ir/lower/lower_calls.c b/ir/lower/lower_calls.c
index 59ff27f..f89e4f7 100644
@@ -566,8 +566,7 @@ static void add_hidden_param(ir_graph *irg, int n_com, ir_node **ins, cl_entry *
 
                /* get rid of the CopyB */
                turn_into_tuple(p, pn_CopyB_max);
-               set_Tuple_pred(p, pn_CopyB_M_regular, mem);
-               set_Tuple_pred(p, pn_CopyB_M_except,  get_irg_bad(irg));
+               set_Tuple_pred(p, pn_CopyB_M,         mem);
                set_Tuple_pred(p, pn_CopyB_X_regular, new_r_Jmp(blk));
                set_Tuple_pred(p, pn_CopyB_X_except,  get_irg_bad(irg));
                ++n_args;
@@ -795,7 +794,7 @@ static void transform_irg(const lower_params_t *lp, ir_graph *irg)
                                                        pred,
                                                        tp
                                                        );
-                                               mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M_regular);
+                                               mem = new_r_Proj(bl, copy, mode_M, pn_CopyB_M);
                                        }
                                }
                                if (lp->flags & LF_RETURN_HIDDEN) {
diff --git a/ir/lower/lower_copyb.c b/ir/lower/lower_copyb.c
index d2ebc56..2d5400a 100644
@@ -118,10 +118,9 @@ static void lower_copyb_nodes(ir_node *irn, unsigned mode_bytes) {
        }
 
        turn_into_tuple(irn, pn_CopyB_max);
-       set_Tuple_pred(irn, pn_CopyB_M_regular, mem);
+       set_Tuple_pred(irn, pn_CopyB_M,         mem);
        set_Tuple_pred(irn, pn_CopyB_X_regular, get_irg_bad(irg));
        set_Tuple_pred(irn, pn_CopyB_X_except,  get_irg_bad(irg));
-       set_Tuple_pred(irn, pn_CopyB_M_except,  get_irg_bad(irg));
 }
 
 /**
diff --git a/ir/lower/lower_dw.c b/ir/lower/lower_dw.c
index 9ce41fb..483ad47 100644
@@ -575,7 +575,7 @@ static void lower_Div(ir_node *node, ir_mode *mode, lower_env_t *env) {
                case pn_Div_M:         /* Memory result. */
                        /* reroute to the call */
                        set_Proj_pred(proj, call);
-                       set_Proj_proj(proj, pn_Call_M_except);
+                       set_Proj_proj(proj, pn_Call_M);
                        break;
                case pn_Div_X_except:  /* Execution result if exception occurred. */
                        /* reroute to the call */
@@ -652,7 +652,7 @@ static void lower_Mod(ir_node *node, ir_mode *mode, lower_env_t *env) {
                case pn_Mod_M:         /* Memory result. */
                        /* reroute to the call */
                        set_Proj_pred(proj, call);
-                       set_Proj_proj(proj, pn_Call_M_except);
+                       set_Proj_proj(proj, pn_Call_M);
                        break;
                case pn_Mod_X_except:  /* Execution result if exception occurred. */
                        /* reroute to the call */
@@ -755,7 +755,7 @@ static void lower_DivMod(ir_node *node, ir_mode *mode, lower_env_t *env) {
                case pn_DivMod_M:         /* Memory result. */
                        /* reroute to the first call */
                        set_Proj_pred(proj, callDiv ? callDiv : (callMod ? callMod : mem));
-                       set_Proj_proj(proj, pn_Call_M_except);
+                       set_Proj_proj(proj, pn_Call_M);
                        break;
                case pn_DivMod_X_except:  /* Execution result if exception occurred. */
                        /* reroute to the first call */
diff --git a/ir/lower/lower_intrinsics.c b/ir/lower/lower_intrinsics.c
index 98ba0a8..ab5b09e 100644
@@ -225,11 +225,10 @@ static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg
        irn = new_r_Tuple(block, 1, &irn);
 
        turn_into_tuple(call, pn_Call_max);
-       set_Tuple_pred(call, pn_Call_M_regular, mem);
+       set_Tuple_pred(call, pn_Call_M, mem);
        set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
        set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
        set_Tuple_pred(call, pn_Call_T_result, irn);
-       set_Tuple_pred(call, pn_Call_M_except, mem);
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
 }  /* replace_call */
 
@@ -1101,7 +1100,7 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
                for (i = 0; i < n_proj; ++i)
                        set_Tuple_pred(node, i, new_r_Bad(irg));
                if (rt->mem_proj_nr >= 0)
-                       set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M_regular));
+                       set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M));
                if (!is_NoMem(mem)) {
                        /* Exceptions can only be handled with real memory */
                        if (rt->regular_proj_nr >= 0)
@@ -1109,7 +1108,7 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt) {
                        if (rt->exc_proj_nr >= 0)
                                set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(bl, call, mode_X, pn_Call_X_except));
                        if (rt->exc_mem_proj_nr >= 0)
-                               set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M_except));
+                               set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(bl, call, mode_M, pn_Call_M));
                }
 
                if (rt->res_proj_nr >= 0)
diff --git a/ir/opt/escape_ana.c b/ir/opt/escape_ana.c
index fae75f1..48f0b03 100644
@@ -485,11 +485,10 @@ static void transform_alloc_calls(ir_graph *irg, walk_env_t *env)
     mem = get_Call_mem(call);
        blk = get_nodes_block(call);
     turn_into_tuple(call, pn_Call_max);
-    set_Tuple_pred(call, pn_Call_M_regular, mem);
-       set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(blk));
-    set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
-    set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
-    set_Tuple_pred(call, pn_Call_M_except, mem);
+    set_Tuple_pred(call, pn_Call_M,                mem);
+       set_Tuple_pred(call, pn_Call_X_regular,        new_r_Jmp(blk));
+    set_Tuple_pred(call, pn_Call_X_except,         new_r_Bad(irg));
+    set_Tuple_pred(call, pn_Call_T_result,         new_r_Bad(irg));
     set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
 
     ++env->nr_deads;
diff --git a/ir/opt/funccall.c b/ir/opt/funccall.c
index 973b00e..25d3e92 100644
@@ -154,10 +154,9 @@ static void collect_const_and_pure_calls(ir_node *node, void *env) {
 
                /* collect the Proj's in the Proj list */
                switch (get_Proj_proj(node)) {
-               case pn_Call_M_regular:
+               case pn_Call_M:
                case pn_Call_X_except:
                case pn_Call_X_regular:
-               case pn_Call_M_except:
                        set_irn_link(node, ctx->proj_list);
                        ctx->proj_list = node;
                        break;
@@ -222,14 +221,13 @@ static void fix_const_call_lists(ir_graph *irg, env_t *ctx) {
                assert(get_irn_mode(mem) == mode_M);
 
                switch (get_Proj_proj(proj)) {
-               case pn_Call_M_regular: {
+               case pn_Call_M: {
                        /* in dead code there might be cycles where proj == mem */
                        if (proj != mem)
                                exchange(proj, mem);
                         break;
                }
                case pn_Call_X_except:
-               case pn_Call_M_except:
                        exc_changed = 1;
                        exchange(proj, get_irg_bad(irg));
                        break;
@@ -320,10 +318,9 @@ static void collect_nothrow_calls(ir_node *node, void *env) {
 
                /* collect the Proj's in the Proj list */
                switch (get_Proj_proj(node)) {
-               case pn_Call_M_regular:
+               case pn_Call_M:
                case pn_Call_X_except:
                case pn_Call_X_regular:
-               case pn_Call_M_except:
                        set_irn_link(node, ctx->proj_list);
                        ctx->proj_list = node;
                        break;
@@ -368,7 +365,6 @@ static void fix_nothrow_call_list(ir_graph *irg, ir_node *call_list, ir_node *pr
                /* kill any exception flow */
                switch (get_Proj_proj(proj)) {
                case pn_Call_X_except:
-               case pn_Call_M_except:
                        exc_changed = 1;
                        exchange(proj, get_irg_bad(irg));
                        break;
diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index a143165..5ec85e6 100644
@@ -837,9 +837,8 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
 }
 
 enum exc_mode {
-       exc_handler    = 0, /**< There is a handler. */
-       exc_to_end     = 1, /**< Branches to End. */
-       exc_no_handler = 2  /**< Exception handling not represented. */
+       exc_handler,    /**< There is a handler. */
+       exc_no_handler  /**< Exception handling not represented. */
 };
 
 /* Inlines a method at the given call site. */
@@ -853,6 +852,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node             **args_in;
        ir_node             *ret, *phi;
        int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+       int                 n_mem_phi;
        enum exc_mode       exc_handling;
        ir_type             *called_frame, *curr_frame, *mtp, *ctp;
        ir_entity           *ent;
@@ -964,18 +964,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           for the Call node, or do we branch directly to End on an exception?
           exc_handling:
           0 There is a handler.
-          1 Branches to End.
           2 Exception handling not represented in Firm. -- */
        {
-               ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+               ir_node *Xproj = NULL;
+               ir_node *proj;
                for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
                        long proj_nr = get_Proj_proj(proj);
                        if (proj_nr == pn_Call_X_except) Xproj = proj;
-                       if (proj_nr == pn_Call_M_except) Mproj = proj;
                }
-               if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
-               else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
-               else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
+               exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
        }
 
        /* create the argument tuple */
@@ -1123,16 +1120,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           Add Phi node if there was more than one Return.  -- */
        turn_into_tuple(post_call, pn_Call_max);
        /* First the Memory-Phi */
-       n_ret = 0;
+       n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
-                       cf_pred[n_ret] = get_Return_mem(ret);
-                       n_ret++;
+                       cf_pred[n_mem_phi++] = get_Return_mem(ret);
+               }
+               /* memory output for some exceptions is directly connected to End */
+               if (is_Call(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+               } else if (is_fragile_op(ret)) {
+                       /* We rely that all cfops have the memory output at the same position. */
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+               } else if (is_Raise(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
                }
        }
-       phi = new_Phi(n_ret, cf_pred, mode_M);
-       set_Tuple_pred(call, pn_Call_M_regular, phi);
+       phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+       set_Tuple_pred(call, pn_Call_M, phi);
        /* Conserve Phi-list for further inlinings -- but might be optimized */
        if (get_nodes_block(phi) == post_bl) {
                set_irn_link(phi, get_irn_link(post_bl));
@@ -1178,15 +1183,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
 
        /* Finally the exception control flow.
-          We have two (three) possible situations:
-          First if the Call branches to an exception handler: We need to add a Phi node to
+          We have two possible situations:
+          First if the Call branches to an exception handler:
+          We need to add a Phi node to
           collect the memory containing the exception objects.  Further we need
           to add another block to get a correct representation of this Phi.  To
           this block we add a Jmp that resolves into the X output of the Call
           when the Call is turned into a tuple.
-          Second the Call branches to End, the exception is not handled.  Just
-          add all inlined exception branches to the End node.
-          Third: there is no Exception edge at all. Handle as case two. */
+          Second: There is no exception edge. Just add all inlined exception
+          branches to the End node.
+        */
        if (exc_handling == exc_handler) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
@@ -1201,29 +1207,9 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                if (n_exc > 0) {
                        ir_node *block = new_Block(n_exc, cf_pred);
                        set_cur_block(block);
-
                        set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
-                       /* The Phi for the memories with the exception objects */
-                       n_exc = 0;
-                       for (i = 0; i < arity; i++) {
-                               ir_node *ret;
-                               ret = skip_Proj(get_Block_cfgpred(end_bl, i));
-                               if (is_Call(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
-                                       n_exc++;
-                               } else if (is_fragile_op(ret)) {
-                                       /* We rely that all cfops have the memory output at the same position. */
-                                       cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
-                                       n_exc++;
-                               } else if (is_Raise(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
-                                       n_exc++;
-                               }
-                       }
-                       set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
                } else {
                        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-                       set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                }
        } else {
                ir_node *main_end_bl;
@@ -1251,7 +1237,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
                set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-               set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                free(end_preds);
        }
        free(res_pred);
diff --git a/ir/opt/opt_ldst.c b/ir/opt/opt_ldst.c
index 11f5571..aba92ee 100644
@@ -1167,7 +1167,7 @@ static void update_Call_memop(memop_t *m) {
                case pn_Call_X_except:
                        m->flags |= FLAG_EXCEPTION;
                        break;
-               case pn_Call_M_regular:
+               case pn_Call_M:
                        m->mem = proj;
                        break;
                }
@@ -1194,7 +1194,7 @@ static void update_DivOp_memop(memop_t *m) {
                case pn_Generic_X_except:
                        m->flags |= FLAG_EXCEPTION;
                        break;
-               case pn_Generic_M_regular:
+               case pn_Generic_M:
                        m->mem = proj;
                        break;
                }
diff --git a/ir/opt/tailrec.c b/ir/opt/tailrec.c
index 26d4c1f..15932a5 100644
@@ -347,7 +347,6 @@ static void do_opt_tail_rec(ir_graph *irg, tr_env *env) {
                        set_Tuple_pred(call, pn_Call_X_regular,        jmp);
                        set_Tuple_pred(call, pn_Call_X_except,         bad);
                        set_Tuple_pred(call, pn_Call_T_result,         tuple);
-                       set_Tuple_pred(call, pn_Call_M_except,         mem);
                        set_Tuple_pred(call, pn_Call_P_value_res_base, bad);
 
                        for (i = 0; i < env->n_ress; ++i) {
diff --git a/scripts/gen_ir.py b/scripts/gen_ir.py
index 97ed83c..98219f0 100755
@@ -303,6 +303,19 @@ ir_node *new_{{node.constrname}}({{node|argdecls(True, True)}})
 }
 ''')
 
+# not used - as we have the pn_ declarations in libfirm/irnode.h where they
+# contain informative comments
+# {% for node in nodes %}
+# {% if node.outs %}
+# typedef enum {
+#      {%- for out in node.outs %}
+#      pn_{{node.name}}_{{out}},
+#      {%- endfor %}
+#      pn_{{node.name}}_max
+# } pn_{{node.name}};
+# {% endif %}
+# {% endfor %}
+
 irnode_h_template = env.from_string('''
 /* Warning: automatically generated code */
 
diff --git a/scripts/ir_spec.py b/scripts/ir_spec.py
index 2934c0e..38db834 100755
@@ -207,7 +207,7 @@ class Break(Op):
 class Builtin(Op):
        ins      = [ "mem" ]
        arity    = "variable"
-       outs     = [ "M_regular", "X_regular", "X_except", "T_result", "M_except", "P_value_res_base" ]
+       outs     = [ "M", "X_regular", "X_except", "T_result", "P_value_res_base" ]
        flags    = [ "uses_memory" ]
        attrs    = [
                dict(
@@ -229,7 +229,7 @@ class Builtin(Op):
 class Call(Op):
        ins      = [ "mem", "ptr" ]
        arity    = "variable"
-       outs     = [ "M_regular", "X_regular", "X_except", "T_result", "M_except", "P_value_res_base" ]
+       outs     = [ "M", "X_regular", "X_except", "T_result", "P_value_res_base" ]
        flags    = [ "fragile", "uses_memory" ]
        attrs    = [
                dict(
@@ -492,7 +492,7 @@ class IJmp(Op):
 
 class InstOf(Op):
        ins   = [ "store", "obj" ]
-       outs  = [ "M", "X_regular", "X_except", "res", "M_except" ]
+       outs  = [ "M", "X_regular", "X_except", "res" ]
        flags = [ "highlevel" ]
        attrs = [
                dict(