lowering: fix i_mapper for new exception attributes
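The i_mapper functions and replace_call() used to build X_regular/X_except control flow for every lowered Call unconditionally. With the per-node exception attributes, exception Projs are only created when the original Call is actually marked as throwing (ir_throws_exception()), and the node that replaces the Call has to inherit that property via ir_set_throws_exception(). A rough sketch of the pattern the mappers now follow (the helper and its name are illustrative only, not part of this patch; shown for a Load that replaces the Call):

    #include <libfirm/firm.h>

    /* Illustrative helper (not in the patch): build exception control flow
     * for a Load "res" that replaces "call", but only if the original call
     * was marked as throwing; propagate the flag to the replacement node. */
    static void build_exc_projs(ir_node *call, ir_node *res,
                                ir_node **reg_jmp, ir_node **exc_jmp)
    {
        if (ir_throws_exception(call)) {
            *reg_jmp = new_r_Proj(res, mode_X, pn_Load_X_regular);
            *exc_jmp = new_r_Proj(res, mode_X, pn_Load_X_except);
            ir_set_throws_exception(res, true);
        } else {
            /* no exception edges: replace_call() asserts both are NULL */
            *reg_jmp = NULL;
            *exc_jmp = NULL;
        }
    }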
libfirm: ir/lower/lower_intrinsics.c
index f78daa8..9051cb3 100644
@@ -139,13 +139,11 @@ size_t lower_intrinsics(i_record *list, size_t length, int part_block_used)
 
                if (wenv.nr_of_intrinsics > 0) {
                        /* Changes detected: we might have added/removed nodes. */
-                       set_irg_outs_inconsistent(irg);
                        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
 
-                       /* Exception control flow might have changed / new block might have added. */
+                       /* Exception control flow might have changed / new blocks might have been added. */
                        set_irg_doms_inconsistent(irg);
                        set_irg_extblk_inconsistent(irg);
-                       set_irg_loopinfo_inconsistent(irg);
 
                        /* Calls might be removed/added. */
                        set_trouts_inconsistent();
@@ -153,9 +151,6 @@ size_t lower_intrinsics(i_record *list, size_t length, int part_block_used)
                        /* verify here */
                        irg_verify(irg, VERIFY_NORMAL);
 
-                       /* Optimize it, tuple might be created. */
-                       optimize_graph_df(irg);
-
                        nr_of_intrinsics += wenv.nr_of_intrinsics;
                }
        }
@@ -196,8 +191,7 @@ ir_prog_pass_t *lower_intrinsics_pass(
        const char *name,
        i_record *list, size_t length, int part_block_used)
 {
-       pass_t *pass = (pass_t*)xmalloc(sizeof(*pass) + (length-1) * sizeof(pass->list[0]));
-
+       pass_t *const pass = XMALLOCF(pass_t, list, length);
        memcpy(pass->list, list, sizeof(list[0]) * length);
        pass->length          = length;
        pass->part_block_used = part_block_used;
@@ -215,29 +209,34 @@ ir_prog_pass_t *lower_intrinsics_pass(
  * @param reg_jmp  new regular control flow, if NULL, a Jmp will be used
- * @param exc_jmp  new exception control flow, if reg_jmp == NULL, a Bad will be used
+ * @param exc_jmp  new exception control flow, if NULL, a Bad will be used
  */
-static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp)
+static void replace_call(ir_node *irn, ir_node *call, ir_node *mem,
+                         ir_node *reg_jmp, ir_node *exc_jmp)
 {
        ir_node  *block = get_nodes_block(call);
        ir_graph *irg   = get_irn_irg(block);
+       ir_node  *rest  = new_r_Tuple(block, 1, &irn);
 
-       if (reg_jmp == NULL) {
-
-               /* Beware: do we need here a protection against CSE? Better we do it. */
-               int old_cse = get_opt_cse();
-               set_opt_cse(0);
-               reg_jmp = new_r_Jmp(block);
-               set_opt_cse(old_cse);
-               exc_jmp = new_r_Bad(irg);
+       if (ir_throws_exception(call)) {
+               turn_into_tuple(call, pn_Call_max+1);
+               if (reg_jmp == NULL) {
+                       reg_jmp = new_r_Jmp(block);
+               }
+               if (exc_jmp == NULL) {
+                       exc_jmp = new_r_Bad(irg, mode_X);
+               }
+               set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
+               set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
+       } else {
+               assert(reg_jmp == NULL);
+               assert(exc_jmp == NULL);
+               turn_into_tuple(call, pn_Call_T_result+1);
+               assert(pn_Call_M <= pn_Call_T_result);
+               assert(pn_Call_X_regular > pn_Call_T_result);
+               assert(pn_Call_X_except > pn_Call_T_result);
        }
-       irn = new_r_Tuple(block, 1, &irn);
-
-       turn_into_tuple(call, pn_Call_max);
        set_Tuple_pred(call, pn_Call_M, mem);
-       set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
-       set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
-       set_Tuple_pred(call, pn_Call_T_result, irn);
-       set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-}  /* replace_call */
+       set_Tuple_pred(call, pn_Call_T_result, rest);
+}
 
 /* A mapper for the integer abs. */
 int i_mapper_abs(ir_node *call, void *ctx)
@@ -249,19 +248,18 @@ int i_mapper_abs(ir_node *call, void *ctx)
        ir_mode  *mode     = get_irn_mode(op);
        dbg_info *dbg      = get_irn_dbg_info(call);
        ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
-       ir_node  *cmp      = new_rd_Cmp(dbg, block, op, zero);
-       ir_node  *cond     = new_r_Proj(cmp, mode_b, pn_Cmp_Lt);
+       ir_node  *cmp      = new_rd_Cmp(dbg, block, op, zero, ir_relation_less);
        ir_node  *minus_op = new_rd_Minus(dbg, block, op, mode);
        ir_node  *mux;
        arch_allow_ifconv_func allow_ifconv = be_get_backend_param()->allow_ifconv;
        (void) ctx;
 
        /* mux allowed by backend? */
-       if (!allow_ifconv(cond, op, minus_op))
+       if (!allow_ifconv(cmp, op, minus_op))
                return 0;
 
        /* construct Mux */
-       mux = new_rd_Mux(dbg, block, cond, op, minus_op, mode);
+       mux = new_rd_Mux(dbg, block, cmp, op, minus_op, mode);
        DBG_OPT_ALGSIM0(call, mux, FS_OPT_RTS_ABS);
        replace_call(mux, call, mem, NULL, NULL);
        return 1;
@@ -306,9 +304,15 @@ int i_mapper_alloca(ir_node *call, void *ctx)
 
        irn    = new_rd_Alloc(dbg, block, mem, op, firm_unknown_type, stack_alloc);
        mem    = new_rd_Proj(dbg, irn, mode_M, pn_Alloc_M);
-       no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
-       exc    = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
+       if (ir_throws_exception(call)) {
+               no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
+               exc    = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
+               ir_set_throws_exception(irn, true);
+       } else {
+               no_exc = NULL;
+               exc    = NULL;
+       }
        irn    = new_rd_Proj(dbg, irn, get_modeP_data(), pn_Alloc_res);
 
        DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_ALLOCA);
        replace_call(irn, call, mem, no_exc, exc);
@@ -364,13 +368,15 @@ int i_mapper_cbrt(ir_node *call, void *ctx)
 /* A mapper for the floating point pow. */
 int i_mapper_pow(ir_node *call, void *ctx)
 {
+       ir_node  *left    = get_Call_param(call, 0);
+       ir_node  *right   = get_Call_param(call, 1);
+       ir_node  *block   = get_nodes_block(call);
+       ir_graph *irg     = get_irn_irg(block);
+       ir_node  *reg_jmp = NULL;
+       ir_node  *exc_jmp = NULL;
+       ir_node  *irn;
        dbg_info *dbg;
        ir_node  *mem;
-       ir_node  *left  = get_Call_param(call, 0);
-       ir_node  *right = get_Call_param(call, 1);
-       ir_node  *block = get_nodes_block(call);
-       ir_graph *irg   = get_irn_irg(block);
-       ir_node  *irn, *reg_jmp = NULL, *exc_jmp = NULL;
        (void) ctx;
 
        if (is_Const(left) && is_Const_one(left)) {
@@ -399,14 +405,17 @@ int i_mapper_pow(ir_node *call, void *ctx)
 
        if (irn == NULL) {
                ir_mode *mode = get_irn_mode(left);
-               ir_node *quot;
+               ir_node *div;
 
                irn  = new_r_Const(irg, get_mode_one(mode));
-               quot = new_rd_Quot(dbg, block, mem, irn, left, mode, op_pin_state_pinned);
-               mem  = new_r_Proj(quot, mode_M, pn_Quot_M);
-               irn  = new_r_Proj(quot, mode, pn_Quot_res);
-               reg_jmp = new_r_Proj(quot, mode_X, pn_Quot_X_regular);
-               exc_jmp = new_r_Proj(quot, mode_X, pn_Quot_X_except);
+               div  = new_rd_Div(dbg, block, mem, irn, left, mode, op_pin_state_pinned);
+               mem  = new_r_Proj(div, mode_M, pn_Div_M);
+               irn  = new_r_Proj(div, mode, pn_Div_res);
+               if (ir_throws_exception(call)) {
+                       reg_jmp = new_r_Proj(div, mode_X, pn_Div_X_regular);
+                       exc_jmp = new_r_Proj(div, mode_X, pn_Div_X_except);
+                       ir_set_throws_exception(div, true);
+               }
        }
        DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_POW);
        replace_call(irn, call, mem, reg_jmp, exc_jmp);
@@ -478,7 +487,8 @@ static int i_mapper_one_to_zero(ir_node *call, void *ctx, int reason)
  */
 static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason)
 {
-       ir_node *val  = get_Call_param(call, 0);
+       int      changed = 0;
+       ir_node *val     = get_Call_param(call, 0);
        (void) ctx;
 
        if (is_strictConv(val)) {
@@ -497,12 +507,14 @@ static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason)
                        }
                        DBG_OPT_ALGSIM2(call, op, call, FS_OPT_RTS_SYMMETRIC);
                        set_Call_param(call, 0, val);
+                       changed = 1;
                }
        } else if (is_Minus(val)) {
                /* f(-x) = f(x) */
                val = get_Minus_op(val);
                DBG_OPT_ALGSIM2(call, val, call, FS_OPT_RTS_SYMMETRIC);
                set_Call_param(call, 0, val);
+               changed = 1;
        }
 
        if (is_Const(val) && is_Const_null(val)) {
@@ -513,9 +525,9 @@ static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason)
                ir_node *mem   = get_Call_mem(call);
                DBG_OPT_ALGSIM0(call, irn, reason);
                replace_call(irn, call, mem, NULL, NULL);
-               return 1;
+               changed = 1;
        }
-       return 0;
+       return changed;
 }  /* i_mapper_symmetric_zero_to_one */
 
 /* A mapper for the floating point log. */
@@ -642,8 +654,8 @@ static ir_node *eval_strlen(ir_graph *irg, ir_entity *ent, ir_type *res_tp)
        ir_type *tp = get_entity_type(ent);
        ir_mode *mode;
        ir_initializer_t *initializer;
-       unsigned          size;
-       unsigned          i;
+       size_t            size;
+       size_t            i;
 
        if (! is_Array_type(tp))
                return NULL;
@@ -924,9 +936,15 @@ replace_by_call:
                        /* replace the strcmp by (*x) */
                        irn = new_rd_Load(dbg, block, mem, v, mode, cons_none);
                        mem = new_r_Proj(irn, mode_M, pn_Load_M);
-                       exc = new_r_Proj(irn, mode_X, pn_Load_X_except);
-                       reg = new_r_Proj(irn, mode_X, pn_Load_X_regular);
+                       if (ir_throws_exception(call)) {
+                               exc = new_r_Proj(irn, mode_X, pn_Load_X_except);
+                               reg = new_r_Proj(irn, mode_X, pn_Load_X_regular);
+                               ir_set_throws_exception(irn, true);
+                       } else {
+                               exc = NULL;
+                               reg = NULL;
+                       }
                        irn = new_r_Proj(irn, mode, pn_Load_res);
 
                        /* conv to the result mode */
                        mode = get_type_mode(res_tp);
@@ -1113,7 +1131,6 @@ static ir_mode *get_irn_res_mode(ir_node *node)
 {
        switch (get_irn_opcode(node)) {
        case iro_Load:   return get_Load_mode(node);
-       case iro_Quot:   return get_Quot_resmode(node);
        case iro_Div:    return get_Div_resmode(node);
        case iro_Mod:    return get_Mod_resmode(node);
        default: return NULL;
@@ -1130,6 +1147,8 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
        ir_type *mtp;
        ir_node *mem, *bl, *call, *addr, *res_proj;
        ir_node **in;
+       bool     throws_exception;
+       ir_op   *op;
        ir_graph *irg;
        symconst_symbol sym;
        ir_mode *mode = get_irn_mode(node);
@@ -1170,15 +1189,17 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
                        return 0;
        }
 
-       n_res = get_method_n_ress(mtp);
+       n_res            = get_method_n_ress(mtp);
+       throws_exception = ir_throws_exception(node);
 
        /* step 0: calculate the number of needed Proj's */
        n_proj = 0;
        n_proj = LMAX(n_proj, rt->mem_proj_nr + 1);
-       n_proj = LMAX(n_proj, rt->regular_proj_nr + 1);
-       n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
-       n_proj = LMAX(n_proj, rt->exc_mem_proj_nr + 1);
        n_proj = LMAX(n_proj, rt->res_proj_nr + 1);
+       if (throws_exception) {
+               n_proj = LMAX(n_proj, rt->regular_proj_nr + 1);
+               n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
+       }
 
        if (n_proj > 0) {
                if (rt->mode != mode_T) /* must be mode_T */
@@ -1192,6 +1213,7 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
 
        /* ok, when we are here, the number of predecessors match as well as the parameter modes */
        bl = get_nodes_block(node);
+       op = get_irn_op(node);
 
        in = NULL;
        if (n_param > 0) {
@@ -1217,25 +1239,21 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
                /* we are ready */
                turn_into_tuple(node, n_proj);
 
-               for (i = 0; i < n_proj; ++i)
-                       set_Tuple_pred(node, i, new_r_Bad(irg));
                if (rt->mem_proj_nr >= 0)
                        set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
-               if (!is_NoMem(mem)) {
-                       /* Exceptions can only be handled with real memory */
-                       if (rt->regular_proj_nr >= 0)
-                               set_Tuple_pred(node, rt->regular_proj_nr, new_r_Proj(call, mode_X, pn_Call_X_regular));
-                       if (rt->exc_proj_nr >= 0)
-                               set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(call, mode_X, pn_Call_X_except));
-                       if (rt->exc_mem_proj_nr >= 0)
-                               set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
+               if (throws_exception) {
+                       set_Tuple_pred(node, op->pn_x_regular, new_r_Proj(call, mode_X, pn_Call_X_regular));
+                       set_Tuple_pred(node, op->pn_x_except, new_r_Proj(call, mode_X, pn_Call_X_except));
                }
 
-               if (rt->res_proj_nr >= 0)
-                       for (i = 0; i < n_res; ++i)
-                               set_Tuple_pred(node, rt->res_proj_nr + i,
-                               new_r_Proj(res_proj, get_type_mode(get_method_res_type(mtp, i)), i));
-                       return 1;
+               if (rt->res_proj_nr >= 0) {
+                       for (i = 0; i < n_res; ++i) {
+                               ir_mode *mode = get_type_mode(get_method_res_type(mtp, i));
+                               ir_node *proj = new_r_Proj(res_proj, mode, i);
+                               set_Tuple_pred(node, rt->res_proj_nr + i, proj);
+                       }
+               }
+               return 1;
        } else {
                /* only one return value supported */
                if (n_res > 0) {