X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Flower%2Flower_intrinsics.c;h=9051cb373b0dd3f93bbeef7dfca635f1e4b60bba;hb=5c34afb830f4a9233b659c95a9f71643f8421f86;hp=09faa9d1097777f6921082655b67ef3bc4499681;hpb=ca42e4ac671c49f7ba8e9dfffd805abf49b2c733;p=libfirm

diff --git a/ir/lower/lower_intrinsics.c b/ir/lower/lower_intrinsics.c
index 09faa9d10..9051cb373 100644
--- a/ir/lower/lower_intrinsics.c
+++ b/ir/lower/lower_intrinsics.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
@@ -48,7 +48,7 @@
 /** Walker environment. */
 typedef struct walker_env {
 	pmap           *c_map;             /**< The intrinsic call map. */
-	unsigned       nr_of_intrinsics;   /**< statistics */
+	size_t         nr_of_intrinsics;   /**< statistics */
 	i_instr_record **i_map;            /**< The intrinsic instruction map. */
 } walker_env_t;

@@ -57,7 +57,7 @@ typedef struct walker_env {
 */
 static void call_mapper(ir_node *node, void *env)
 {
-	walker_env_t *wenv = env;
+	walker_env_t *wenv = (walker_env_t*)env;
 	ir_op *op = get_irn_op(node);

 	if (op == op_Call) {
@@ -74,7 +74,7 @@ static void call_mapper(ir_node *node, void *env)
 		p = pmap_find(wenv->c_map, ent);
 		if (p) {
-			r = p->value;
+			r = (const i_call_record*)p->value;
 			wenv->nr_of_intrinsics += r->i_mapper(node, r->ctx) ? 1 : 0;
 		}
 	} else {
@@ -86,20 +86,21 @@ static void call_mapper(ir_node *node, void *env)
 					++wenv->nr_of_intrinsics;
 					break;
 				}
-				r = r->link;
+				r = (const i_instr_record*)r->link;
 			}
 		}
 	}
 } /* call_mapper */

 /* Go through all graphs and map calls to intrinsic functions. */
-unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
+size_t lower_intrinsics(i_record *list, size_t length, int part_block_used)
 {
-	int            i, n_ops = get_irp_n_opcodes();
+	size_t         i, n;
+	size_t         n_ops = get_irp_n_opcodes();
 	ir_graph       *irg;
 	pmap           *c_map = pmap_create_ex(length);
 	i_instr_record **i_map;
-	unsigned       nr_of_intrinsics = 0;
+	size_t         nr_of_intrinsics = 0;
 	walker_env_t   wenv;

 	/* we use the ir_op generic pointers here */
@@ -107,7 +108,7 @@ unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
 	memset((void *)i_map, 0, sizeof(*i_map) * n_ops);

 	/* fill a map for faster search */
-	for (i = length - 1; i >= 0; --i) {
+	for (i = 0; i < length; ++i) {
 		if (list[i].i_call.kind == INTRINSIC_CALL) {
 			pmap_insert(c_map, list[i].i_call.i_ent, (void *)&list[i].i_call);
 		} else {
@@ -122,7 +123,7 @@ unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
 	wenv.c_map = c_map;
 	wenv.i_map = i_map;

-	for (i = get_irp_n_irgs() - 1; i >= 0; --i) {
+	for (i = 0, n = get_irp_n_irgs(); i < n; ++i) {
 		irg = get_irp_irg(i);

 		if (part_block_used) {
@@ -138,13 +139,11 @@ unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
 		if (wenv.nr_of_intrinsics > 0) {
 			/* Changes detected: we might have added/removed nodes. */
-			set_irg_outs_inconsistent(irg);
 			set_irg_callee_info_state(irg, irg_callee_info_inconsistent);

 			/* Exception control flow might have changed / new block might have added. */
 			set_irg_doms_inconsistent(irg);
 			set_irg_extblk_inconsistent(irg);
-			set_irg_loopinfo_inconsistent(irg);

 			/* Calls might be removed/added. */
 			set_trouts_inconsistent();
@@ -152,9 +151,6 @@ unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
 			/* verify here */
 			irg_verify(irg, VERIFY_NORMAL);

-			/* Optimize it, tuple might be created. */
-			optimize_graph_df(irg);
-
 			nr_of_intrinsics += wenv.nr_of_intrinsics;
 		}
 	}
@@ -163,20 +159,20 @@ unsigned lower_intrinsics(i_record *list, int length, int part_block_used)
 	return nr_of_intrinsics;
 } /* lower_intrinsics */
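/* Editor's note: a minimal usage sketch for lower_intrinsics() above, not
   part of this patch. It assumes an ir_entity *abs_ent for the C abs()
   function has been looked up beforehand; i_record, INTRINSIC_CALL and
   i_mapper_abs are the interfaces appearing in this file. */
static size_t lower_abs_calls(ir_entity *abs_ent)
{
	i_record rec;
	rec.i_call.kind     = INTRINSIC_CALL;
	rec.i_call.i_ent    = abs_ent;      /* hypothetical entity of abs() */
	rec.i_call.i_mapper = i_mapper_abs; /* the mapper defined below */
	rec.i_call.ctx      = NULL;
	/* part_block_used = 0: this mapper never splits blocks */
	return lower_intrinsics(&rec, 1, 0);
}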
-struct pass_t {
+typedef struct pass_t {
 	ir_prog_pass_t pass;
-	int            part_block_used;
-	int            length;
+	int            part_block_used;
+	size_t         length;
 	i_record       list[1];
-};
+} pass_t;

 /**
 * Wrapper for running lower_intrinsics() as an ir_prog pass.
 */
 static int pass_wrapper(ir_prog *irp, void *context)
 {
-	struct pass_t *pass = context;
+	pass_t *pass = (pass_t*)context;

 	(void) irp; /* TODO: set current irp, or remove parameter */
 	lower_intrinsics(pass->list, pass->length, pass->part_block_used);
 	/* probably this pass should not run again */
@@ -193,10 +189,9 @@ static int pass_wrapper(ir_prog *irp, void *context)
 */
 ir_prog_pass_t *lower_intrinsics_pass(
 	const char *name,
-	i_record *list, int length, int part_block_used)
+	i_record *list, size_t length, int part_block_used)
 {
-	struct pass_t *pass = xmalloc(sizeof(*pass) + (length-1) * sizeof(pass->list[0]));
-
+	pass_t *const pass = XMALLOCF(pass_t, list, length);
 	memcpy(pass->list, list, sizeof(list[0]) * length);
 	pass->length          = length;
 	pass->part_block_used = part_block_used;
@@ -214,29 +209,34 @@ ir_prog_pass_t *lower_intrinsics_pass(
 * @param reg_jmp  new regular control flow, if NULL, a Jmp will be used
 * @param exc_jmp  new exception control flow, if reg_jmp == NULL, a Bad will be used
 */
-static void replace_call(ir_node *irn, ir_node *call, ir_node *mem, ir_node *reg_jmp, ir_node *exc_jmp)
+static void replace_call(ir_node *irn, ir_node *call, ir_node *mem,
+                         ir_node *reg_jmp, ir_node *exc_jmp)
 {
 	ir_node  *block = get_nodes_block(call);
 	ir_graph *irg   = get_irn_irg(block);
+	ir_node  *rest  = new_r_Tuple(block, 1, &irn);

-	if (reg_jmp == NULL) {
-
-		/* Beware: do we need here a protection against CSE? Better we do it. */
-		int old_cse = get_opt_cse();
-		set_opt_cse(0);
-		reg_jmp = new_r_Jmp(block);
-		set_opt_cse(old_cse);
-		exc_jmp = new_r_Bad(irg);
+	if (ir_throws_exception(call)) {
+		turn_into_tuple(call, pn_Call_max+1);
+		if (reg_jmp == NULL) {
+			reg_jmp = new_r_Jmp(block);
+		}
+		if (exc_jmp == NULL) {
+			exc_jmp = new_r_Bad(irg, mode_X);
+		}
+		set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
+		set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
+	} else {
+		assert(reg_jmp == NULL);
+		assert(exc_jmp == NULL);
+		turn_into_tuple(call, pn_Call_T_result+1);
+		assert(pn_Call_M <= pn_Call_T_result);
+		assert(pn_Call_X_regular > pn_Call_T_result);
+		assert(pn_Call_X_except > pn_Call_T_result);
 	}
-	irn = new_r_Tuple(block, 1, &irn);
-
-	turn_into_tuple(call, pn_Call_max);
 	set_Tuple_pred(call, pn_Call_M, mem);
-	set_Tuple_pred(call, pn_Call_X_regular, reg_jmp);
-	set_Tuple_pred(call, pn_Call_X_except, exc_jmp);
-	set_Tuple_pred(call, pn_Call_T_result, irn);
-	set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-} /* replace_call */
+	set_Tuple_pred(call, pn_Call_T_result, rest);
+}
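/* Editor's note: a minimal mapper sketch (not from this patch) showing the
   replace_call() protocol above. It maps a hypothetical pure function id(x)
   to its argument; memory is passed through unchanged, and both jump
   arguments are NULL because nothing in the replacement can throw. */
static int i_mapper_id_example(ir_node *call, void *ctx)
{
	ir_node *val = get_Call_param(call, 0);
	ir_node *mem = get_Call_mem(call);
	(void) ctx;
	replace_call(val, call, mem, NULL, NULL);
	return 1; /* reports "mapped" back to call_mapper() */
}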
 /* A mapper for the integer abs. */
 int i_mapper_abs(ir_node *call, void *ctx)
@@ -248,19 +248,18 @@
 	ir_mode  *mode     = get_irn_mode(op);
 	dbg_info *dbg      = get_irn_dbg_info(call);
 	ir_node  *zero     = new_r_Const(irg, get_mode_null(mode));
-	ir_node  *cmp      = new_rd_Cmp(dbg, block, op, zero);
-	ir_node  *cond     = new_r_Proj(cmp, mode_b, pn_Cmp_Lt);
+	ir_node  *cmp      = new_rd_Cmp(dbg, block, op, zero, ir_relation_less);
 	ir_node  *minus_op = new_rd_Minus(dbg, block, op, mode);
 	ir_node  *mux;
 	arch_allow_ifconv_func allow_ifconv = be_get_backend_param()->allow_ifconv;
 	(void) ctx;

 	/* mux allowed by backend? */
-	if (!allow_ifconv(cond, op, minus_op))
+	if (!allow_ifconv(cmp, op, minus_op))
 		return 0;

 	/* construct Mux */
-	mux = new_rd_Mux(dbg, block, cond, op, minus_op, mode);
+	mux = new_rd_Mux(dbg, block, cmp, op, minus_op, mode);
 	DBG_OPT_ALGSIM0(call, mux, FS_OPT_RTS_ABS);
 	replace_call(mux, call, mem, NULL, NULL);
 	return 1;
@@ -305,9 +304,15 @@ int i_mapper_alloca(ir_node *call, void *ctx)
 	irn    = new_rd_Alloc(dbg, block, mem, op, firm_unknown_type, stack_alloc);
 	mem    = new_rd_Proj(dbg, irn, mode_M, pn_Alloc_M);
-	no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
-	exc    = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
 	irn    = new_rd_Proj(dbg, irn, get_modeP_data(), pn_Alloc_res);
+	if (ir_throws_exception(call)) {
+		no_exc = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_regular);
+		exc    = new_rd_Proj(dbg, irn, mode_X, pn_Alloc_X_except);
+		ir_set_throws_exception(irn, true);
+	} else {
+		no_exc = NULL;
+		exc    = NULL;
+	}

 	DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_ALLOCA);
 	replace_call(irn, call, mem, no_exc, exc);
@@ -317,9 +322,9 @@
 /* A mapper for the floating point sqrt. */
 int i_mapper_sqrt(ir_node *call, void *ctx)
 {
-	ir_node *mem;
-	tarval *tv;
-	ir_node *op = get_Call_param(call, 0);
+	ir_node   *mem;
+	ir_tarval *tv;
+	ir_node   *op = get_Call_param(call, 0);
 	(void) ctx;

 	if (!is_Const(op))
@@ -340,9 +345,9 @@
 /* A mapper for the floating point cbrt. */
 int i_mapper_cbrt(ir_node *call, void *ctx)
 {
-	ir_node *mem;
-	tarval *tv;
-	ir_node *op = get_Call_param(call, 0);
+	ir_node   *mem;
+	ir_tarval *tv;
+	ir_node   *op = get_Call_param(call, 0);
 	(void) ctx;

 	if (!is_Const(op))
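/* Editor's note: the shape i_mapper_abs builds, written as source-level C
   for orientation (a sketch, not part of the patch): the abs() call becomes
   a Cmp against zero feeding a Mux, provided allow_ifconv() accepts it. */
static int abs_as_mux_ref(int x)
{
	/* graph shape: Cmp(x, 0, ir_relation_less) -> Mux(cmp, x, Minus(x));
	   like the Minus node, this ignores INT_MIN overflow */
	return (x < 0) ? -x : x;
}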
@@ -363,20 +368,22 @@
 /* A mapper for the floating point pow. */
 int i_mapper_pow(ir_node *call, void *ctx)
 {
+	ir_node  *left    = get_Call_param(call, 0);
+	ir_node  *right   = get_Call_param(call, 1);
+	ir_node  *block   = get_nodes_block(call);
+	ir_graph *irg     = get_irn_irg(block);
+	ir_node  *reg_jmp = NULL;
+	ir_node  *exc_jmp = NULL;
+	ir_node  *irn;
 	dbg_info *dbg;
 	ir_node  *mem;
-	ir_node  *left    = get_Call_param(call, 0);
-	ir_node  *right   = get_Call_param(call, 1);
-	ir_node  *block   = get_nodes_block(call);
-	ir_graph *irg     = get_irn_irg(block);
-	ir_node  *irn, *reg_jmp = NULL, *exc_jmp = NULL;
 	(void) ctx;

 	if (is_Const(left) && is_Const_one(left)) {
 		/* pow (1.0, x) = 1.0 */
 		irn = left;
 	} else if (is_Const(right)) {
-		tarval *tv = get_Const_tarval(right);
+		ir_tarval *tv = get_Const_tarval(right);
 		if (tarval_is_null(tv)) {
 			/* pow(x, 0.0) = 1.0 */
 			ir_mode *mode = get_tarval_mode(tv);
@@ -398,14 +405,17 @@ int i_mapper_pow(ir_node *call, void *ctx)
 	if (irn == NULL) {
 		ir_mode *mode = get_irn_mode(left);
-		ir_node *quot;
+		ir_node *div;

 		irn  = new_r_Const(irg, get_mode_one(mode));
-		quot = new_rd_Quot(dbg, block, mem, irn, left, mode, op_pin_state_pinned);
-		mem  = new_r_Proj(quot, mode_M, pn_Quot_M);
-		irn  = new_r_Proj(quot, mode, pn_Quot_res);
-		reg_jmp = new_r_Proj(quot, mode_X, pn_Quot_X_regular);
-		exc_jmp = new_r_Proj(quot, mode_X, pn_Quot_X_except);
+		div  = new_rd_Div(dbg, block, mem, irn, left, mode, op_pin_state_pinned);
+		mem  = new_r_Proj(div, mode_M, pn_Div_M);
+		irn  = new_r_Proj(div, mode, pn_Div_res);
+		if (ir_throws_exception(call)) {
+			reg_jmp = new_r_Proj(div, mode_X, pn_Div_X_regular);
+			exc_jmp = new_r_Proj(div, mode_X, pn_Div_X_except);
+			ir_set_throws_exception(div, true);
+		}
 	}
 	DBG_OPT_ALGSIM0(call, irn, FS_OPT_RTS_POW);
 	replace_call(irn, call, mem, reg_jmp, exc_jmp);
@@ -477,7 +487,8 @@ static int i_mapper_one_to_zero(ir_node *call, void *ctx, int reason)
 */
 static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason)
 {
-	ir_node *val = get_Call_param(call, 0);
+	int changed  = 0;
+	ir_node *val = get_Call_param(call, 0);
 	(void) ctx;

 	if (is_strictConv(val)) {
@@ -496,12 +507,14 @@ static int i_mapper_symmetric_zero_to_one(ir_node *call, void *ctx, int reason)
 			}
 			DBG_OPT_ALGSIM2(call, op, call, FS_OPT_RTS_SYMMETRIC);
 			set_Call_param(call, 0, val);
+			changed = 1;
 		}
 	} else if (is_Minus(val)) {
 		/* f(-x) = f(x) */
 		val = get_Minus_op(val);
 		DBG_OPT_ALGSIM2(call, val, call, FS_OPT_RTS_SYMMETRIC);
 		set_Call_param(call, 0, val);
+		changed = 1;
 	}

 	if (is_Const(val) && is_Const_null(val)) {
@@ -512,9 +525,9 @@
 		ir_node *mem = get_Call_mem(call);
 		DBG_OPT_ALGSIM0(call, irn, reason);
 		replace_call(irn, call, mem, NULL, NULL);
-		return 1;
+		changed = 1;
 	}
-	return 0;
+	return changed;
 } /* i_mapper_symmetric_zero_to_one */

 /* A mapper for the floating point log. */
@@ -608,7 +621,7 @@ static ir_entity *get_const_entity(ir_node *ptr)
 static bool initializer_val_is_null(ir_initializer_t *init)
 {
-	tarval *tv;
+	ir_tarval *tv;

 	if (get_initializer_kind(init) == IR_INITIALIZER_NULL)
 		return true;
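/* Editor's note: how the helper above is typically instantiated; this
   mirrors the cos mapper of this file (a sketch: FS_OPT_RTS_COS is the
   debug-reason constant assumed for cos). */
int i_mapper_cos_example(ir_node *call, void *ctx)
{
	/* cos(-x) = cos(x) and cos(0.0) = 1.0 */
	return i_mapper_symmetric_zero_to_one(call, ctx, FS_OPT_RTS_COS);
}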
@@ -641,8 +654,8 @@ static ir_node *eval_strlen(ir_graph *irg, ir_entity *ent, ir_type *res_tp)
 	ir_type *tp = get_entity_type(ent);
 	ir_mode *mode;
 	ir_initializer_t *initializer;
-	unsigned size;
-	unsigned i;
+	size_t   size;
+	size_t   i;

 	if (! is_Array_type(tp))
 		return NULL;
@@ -656,9 +669,7 @@
 		return NULL;

 	if (!has_entity_initializer(ent)) {
-		int len = 0;
-		int n;
-		int i = -1;
+		size_t i, n;

 		n = get_compound_ent_n_values(ent);
 		for (i = 0; i < n; ++i) {
@@ -669,14 +680,10 @@
 			if (is_Const_null(irn)) {
 				/* found the length */
-				len = i;
-				break;
+				ir_tarval *tv = new_tarval_from_long(i, get_type_mode(res_tp));
+				return new_r_Const(irg, tv);
 			}
 		}
-		if (len >= 0) {
-			tarval *tv = new_tarval_from_long(len, get_type_mode(res_tp));
-			return new_r_Const(irg, tv);
-		}
 		return NULL;
 	}

@@ -688,7 +695,7 @@
 	for (i = 0; i < size; ++i) {
 		ir_initializer_t *val = get_initializer_compound_value(initializer, i);
 		if (initializer_val_is_null(val)) {
-			tarval *tv = new_tarval_from_long(i, get_type_mode(res_tp));
+			ir_tarval *tv = new_tarval_from_long(i, get_type_mode(res_tp));
 			return new_r_Const(irg, tv);
 		}
 	}
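/* Editor's note: what eval_strlen() above computes, as plain C (a sketch,
   not from the patch): scan the constant char data for the first NUL and
   fold the strlen call to that index; with no terminator the function
   returns NULL and the call is left untouched. */
static long eval_strlen_ref(const char *data, size_t size)
{
	size_t i;
	for (i = 0; i < size; ++i) {
		if (data[i] == '\0')
			return (long)i; /* becomes a Const via new_tarval_from_long() */
	}
	return -1; /* corresponds to the NULL result above */
}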
@@ -738,7 +745,6 @@ static ir_node *eval_strcmp(ir_graph *irg, ir_entity *left, ir_entity *right,
 {
 	ir_type *tp;
 	ir_mode *mode;
-	int     i, n, n_r, res;

 	tp = get_entity_type(left);
 	if (! is_Array_type(tp))
 		return NULL;
@@ -766,16 +772,17 @@
 	if (!has_entity_initializer(left) && !has_entity_initializer(right)) {
 		/* code that uses deprecated compound_graph_path stuff */
+		size_t n   = get_compound_ent_n_values(left);
+		size_t n_r = get_compound_ent_n_values(right);
+		size_t i;
+		int    res = 0;

-		n   = get_compound_ent_n_values(left);
-		n_r = get_compound_ent_n_values(right);
 		if (n_r < n)
 			n = n_r;
-		res = 0;

 		for (i = 0; i < n; ++i) {
 			ir_node *irn;
 			long v_l, v_r;
-			tarval *tv;
+			ir_tarval *tv;

 			irn = get_compound_ent_value(left, i);
 			if (! is_Const(irn))
@@ -805,7 +812,7 @@
 		}
 		if (i < n) {
 			/* we found an end */
-			tarval *tv = new_tarval_from_long(res, get_type_mode(res_tp));
+			ir_tarval *tv = new_tarval_from_long(res, get_type_mode(res_tp));
 			return new_r_Const(irg, tv);
 		}
 		return NULL;
@@ -844,7 +851,7 @@ static int is_empty_string(ir_entity *ent)
 	if (!has_entity_initializer(ent)) {
 		/* code for deprecated compound_graph_path stuff */
-		int n = get_compound_ent_n_values(ent);
+		size_t n = get_compound_ent_n_values(ent);
 		if (n < 1)
 			return 0;
 		irn = get_compound_ent_value(ent, 0);
@@ -927,11 +934,17 @@
 replace_by_call:
 		mode = get_type_mode(char_tp);

 		/* replace the strcmp by (*x) */
-		irn = new_rd_Load(dbg, block, mem, v, mode, 0);
+		irn = new_rd_Load(dbg, block, mem, v, mode, cons_none);
 		mem = new_r_Proj(irn, mode_M, pn_Load_M);
-		exc = new_r_Proj(irn, mode_X, pn_Load_X_except);
-		reg = new_r_Proj(irn, mode_X, pn_Load_X_regular);
 		irn = new_r_Proj(irn, mode, pn_Load_res);
+		if (ir_throws_exception(call)) {
+			exc = new_r_Proj(irn, mode_X, pn_Load_X_except);
+			reg = new_r_Proj(irn, mode_X, pn_Load_X_regular);
+			ir_set_throws_exception(irn, true);
+		} else {
+			exc = NULL;
+			reg = NULL;
+		}

 		/* conv to the result mode */
 		mode = get_type_mode(res_tp);
@@ -1118,10 +1131,8 @@ static ir_mode *get_irn_res_mode(ir_node *node)
 {
 	switch (get_irn_opcode(node)) {
 	case iro_Load:   return get_Load_mode(node);
-	case iro_Quot:   return get_Quot_resmode(node);
 	case iro_Div:    return get_Div_resmode(node);
 	case iro_Mod:    return get_Mod_resmode(node);
-	case iro_DivMod: return get_DivMod_resmode(node);
 	default: return NULL;
 	}
 } /* get_irn_res_mode */
@@ -1136,6 +1147,8 @@ int i_mapper_RuntimeCall(ir_node *node, runtime_rt *rt)
 	ir_type  *mtp;
 	ir_node  *mem, *bl, *call, *addr, *res_proj;
 	ir_node  **in;
+	bool     throws_exception;
+	ir_op    *op;
 	ir_graph *irg;
 	symconst_symbol sym;
 	ir_mode  *mode = get_irn_mode(node);
@@ -1176,15 +1189,17 @@
 		return 0;
 	}

-	n_res = get_method_n_ress(mtp);
+	n_res            = get_method_n_ress(mtp);
+	throws_exception = ir_throws_exception(node);

 	/* step 0: calculate the number of needed Proj's */
 	n_proj = 0;
 	n_proj = LMAX(n_proj, rt->mem_proj_nr + 1);
-	n_proj = LMAX(n_proj, rt->regular_proj_nr + 1);
-	n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
-	n_proj = LMAX(n_proj, rt->exc_mem_proj_nr + 1);
 	n_proj = LMAX(n_proj, rt->res_proj_nr + 1);
+	if (throws_exception) {
+		n_proj = LMAX(n_proj, rt->regular_proj_nr + 1);
+		n_proj = LMAX(n_proj, rt->exc_proj_nr + 1);
+	}

 	if (n_proj > 0) {
 		if (rt->mode != mode_T) /* must be mode_T */
@@ -1198,6 +1213,7 @@
 	/* ok, when we are here, the number of predecessors match as well as the parameter modes */
 	bl = get_nodes_block(node);
+	op = get_irn_op(node);

 	in = NULL;
 	if (n_param > 0) {
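/* Editor's note: a sketch (not from this patch) of a runtime_rt record as
   consumed by i_mapper_RuntimeCall() above, mapping a Div node to a runtime
   routine. rt_ent (the ir_entity of that routine) is a hypothetical input;
   the field names are the ones referenced above, unused proj slots get -1. */
static int map_div_to_runtime(ir_node *div_node, ir_entity *rt_ent)
{
	runtime_rt rt;
	rt.ent             = rt_ent;  /* hypothetical runtime entity */
	rt.mode            = mode_T;  /* required as soon as projs are wired */
	rt.mem_proj_nr     = pn_Div_M;
	rt.regular_proj_nr = pn_Div_X_regular;
	rt.exc_proj_nr     = pn_Div_X_except;
	rt.exc_mem_proj_nr = -1;      /* unused */
	rt.res_proj_nr     = pn_Div_res;
	return i_mapper_RuntimeCall(div_node, &rt);
}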
@@ -1223,25 +1239,21 @@
 	/* we are ready */
 	turn_into_tuple(node, n_proj);

-	for (i = 0; i < n_proj; ++i)
-		set_Tuple_pred(node, i, new_r_Bad(irg));
 	if (rt->mem_proj_nr >= 0)
 		set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
-	if (!is_NoMem(mem)) {
-		/* Exceptions can only be handled with real memory */
-		if (rt->regular_proj_nr >= 0)
-			set_Tuple_pred(node, rt->regular_proj_nr, new_r_Proj(call, mode_X, pn_Call_X_regular));
-		if (rt->exc_proj_nr >= 0)
-			set_Tuple_pred(node, rt->exc_proj_nr, new_r_Proj(call, mode_X, pn_Call_X_except));
-		if (rt->exc_mem_proj_nr >= 0)
-			set_Tuple_pred(node, rt->mem_proj_nr, new_r_Proj(call, mode_M, pn_Call_M));
+	if (throws_exception) {
+		set_Tuple_pred(node, op->pn_x_regular, new_r_Proj(call, mode_X, pn_Call_X_regular));
+		set_Tuple_pred(node, op->pn_x_except, new_r_Proj(call, mode_X, pn_Call_X_except));
 	}
-	if (rt->res_proj_nr >= 0)
-		for (i = 0; i < n_res; ++i)
-			set_Tuple_pred(node, rt->res_proj_nr + i,
-				new_r_Proj(res_proj, get_type_mode(get_method_res_type(mtp, i)), i));
-	return 1;
+	if (rt->res_proj_nr >= 0) {
+		for (i = 0; i < n_res; ++i) {
+			ir_mode *mode = get_type_mode(get_method_res_type(mtp, i));
+			ir_node *proj = new_r_Proj(res_proj, mode, i);
+			set_Tuple_pred(node, rt->res_proj_nr + i, proj);
+		}
+	}
+	return 1;
 	} else {
 		/* only one return value supported */
 		if (n_res > 0) {
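/* Editor's note: closing sketch (not from this patch) tying the pieces
   together - an instruction record routes every Div node through the mapper
   above via the per-opcode map that lower_intrinsics() builds in call_mapper().
   INTRINSIC_INSTR and the i_instr field names are assumed from lowering.h;
   map rt_div to a hypothetical runtime_rt as sketched before this hunk. */
static int div_instr_mapper(ir_node *node, void *ctx)
{
	return i_mapper_RuntimeCall(node, (runtime_rt*)ctx);
}

static size_t lower_div_instructions(runtime_rt *rt_div)
{
	i_record rec;
	rec.i_instr.kind     = INTRINSIC_INSTR;
	rec.i_instr.op       = op_Div;
	rec.i_instr.i_mapper = div_instr_mapper;
	rec.i_instr.ctx      = rt_div;
	return lower_intrinsics(&rec, 1, 0);
}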