X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=d105777e159426551a080e6a387f65773a15d31c;hb=8ac7f010beb0c30fcbae390e6582661f9f98d417;hp=076938689aa9dcf42a55cace2f515a3530c2fd02;hpb=61adb50426dee69f0a5cfef689513a2aa41caaa5;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 076938689..d105777e1 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -1009,7 +1009,7 @@ static ir_node *equivalent_node_Div(ir_node *n)
 	if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
 		/* Turn Div into a tuple (mem, bad, a) */
 		ir_node *mem = get_Div_mem(n);
-		turn_into_tuple(n, 3);
+		turn_into_tuple(n, pn_Div_max);
 		set_Tuple_pred(n, pn_Div_M, mem);
 		set_Tuple_pred(n, pn_Div_X_except, new_Bad());        /* no exception */
 		set_Tuple_pred(n, pn_Div_res, a);
@@ -1031,7 +1031,7 @@ static ir_node *equivalent_node_DivMod(ir_node *n)
 		ir_node *mem = get_Div_mem(n);
 		ir_mode *mode = get_irn_mode(b);
 
-		turn_into_tuple(n, 4);
+		turn_into_tuple(n, pn_DivMod_max);
 		set_Tuple_pred(n, pn_DivMod_M, mem);
 		set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());        /* no exception */
 		set_Tuple_pred(n, pn_DivMod_res_div, a);
@@ -1219,7 +1219,8 @@ static ir_node *equivalent_node_Phi(ir_node *n)
 }
 
 /**
- * optimize Proj(Tuple) and gigo() for ProjX in Bad block
+ * optimize Proj(Tuple) and gigo() for ProjX in Bad block,
+ * ProjX(Load) and ProjX(Store)
  */
 static ir_node *equivalent_node_Proj(ir_node *n)
 {
@@ -1242,6 +1243,20 @@ static ir_node *equivalent_node_Proj(ir_node *n)
 			/* Remove dead control flow -- early gigo(). */
 			n = new_Bad();
 		}
+		else if (get_opt_ldst_only_null_ptr_exceptions()) {
+			ir_op *op = get_irn_op(a);
+
+			if (op == op_Load || op == op_Store) {
+				/* get the load/store address */
+				ir_node *addr = get_irn_n(a, 1);
+				if (value_not_null(addr)) {
+					/* this node may float */
+					set_irn_pinned(a, op_pin_state_floats);
+					DBG_OPT_EXC_REM(n);
+					return new_Bad();
+				}
+			}
+		}
 	}
 
 	return n;
@@ -1370,7 +1385,7 @@ static ir_node *equivalent_node_Confirm(ir_node *n)
 		 * rare case: two identical Confirms one after another,
 		 * replace the second one with the first.
 		 */
-		return pred;
+		n = pred;
 	}
 	if (pnc == pn_Cmp_Eq) {
 		ir_node *bound = get_Confirm_bound(n);
@@ -1406,6 +1421,54 @@ static ir_node *equivalent_node_CopyB(ir_node *n)
 	return n;
 }
 
+/**
+ * Optimize Bound(idx, idx, upper) into idx.
+ */
+static ir_node *equivalent_node_Bound(ir_node *n)
+{
+	ir_node *idx = get_Bound_index(n);
+	ir_node *lower = get_Bound_lower(n);
+	int ret_tuple = 0;
+
+	/* By definition lower < upper, so if idx == lower -->
+	   lower <= idx && idx < upper */
+	if (idx == lower) {
+		/* Turn Bound into a tuple (mem, bad, idx) */
+		ret_tuple = 1;
+	}
+	else {
+		ir_node *pred = skip_Proj(idx);
+
+		if (get_irn_op(pred) == op_Bound) {
+			/*
+			 * idx was bounds-checked previously; it is still valid if
+			 * lower <= pred_lower && pred_upper <= upper.
+			 */
+			ir_node *upper = get_Bound_upper(n);
+			if (get_Bound_lower(pred) == lower &&
+				get_Bound_upper(pred) == upper) {
+				/*
+				 * One could expect that we simply return the previous
+				 * Bound here. However, this would be wrong, as we could
+				 * add an exception Proj to a new location then.
+				 * So, we must turn it into a tuple
+				 */
+				ret_tuple = 1;
+			}
+		}
+	}
+	if (ret_tuple) {
+		/* Turn Bound into a tuple (mem, bad, idx) */
+		ir_node *mem = get_Bound_mem(n);
+		turn_into_tuple(n, pn_Bound_max);
+		set_Tuple_pred(n, pn_Bound_M_regular, mem);
+		set_Tuple_pred(n, pn_Bound_X_except, new_Bad());       /* no exception */
+		set_Tuple_pred(n, pn_Bound_res, idx);
+		set_Tuple_pred(n, pn_Bound_M_except, mem);
+	}
+	return n;
+}
+
 /**
  * equivalent_node() returns a node equivalent to input n. It skips all nodes that
  * perform no actual computation, as, e.g., the Id nodes. It does not create
@@ -1464,6 +1527,7 @@ static ir_op_ops *firm_set_default_equivalent_node(opcode code, ir_op_ops *ops)
 	CASE(Cmp);
 	CASE(Confirm);
 	CASE(CopyB);
+	CASE(Bound);
 	default:
 		/* leave NULL */;
 	}
@@ -1569,7 +1633,9 @@ static ir_node *transform_node_AddSub(ir_node *n)
 }
 
 /**
- * Do the AddSub optimization, then Transform Add(a,a) into Mul(a, 2)
+ * Do the AddSub optimization, then Transform
+ *   Add(a,a) -> Mul(a, 2)
+ *   Add(Mul(a, x), a) -> Mul(a, x+1)
  * if the mode is integer or float.
  * Transform Add(a,-b) into Sub(a,b).
  * Reassociation might fold this further.
@@ -1584,8 +1650,9 @@ static ir_node *transform_node_Add(ir_node *n)
 	mode = get_irn_mode(n);
 	if (mode_is_num(mode)) {
 		ir_node *a = get_Add_left(n);
+		ir_node *b = get_Add_right(n);
 
-		if (a == get_Add_right(n)) {
+		if (a == b) {
 			ir_node *block = get_irn_n(n, -1);
 
 			n = new_rd_Mul(
@@ -1597,28 +1664,88 @@ static ir_node *transform_node_Add(ir_node *n)
 				mode);
 			DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
 		}
-		else {
-			ir_node *b = get_Add_right(n);
-
-			if (get_irn_op(a) == op_Minus) {
-				n = new_rd_Sub(
-					get_irn_dbg_info(n),
-					current_ir_graph,
-					get_irn_n(n, -1),
-					b,
-					get_Minus_op(a),
-					mode);
-				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+		else if (get_irn_op(a) == op_Minus) {
+			n = new_rd_Sub(
+				get_irn_dbg_info(n),
+				current_ir_graph,
+				get_irn_n(n, -1),
+				b,
+				get_Minus_op(a),
+				mode);
+			DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+		}
+		else if (get_irn_op(b) == op_Minus) {
+			n = new_rd_Sub(
+				get_irn_dbg_info(n),
+				current_ir_graph,
+				get_irn_n(n, -1),
+				a,
+				get_Minus_op(b),
+				mode);
+			DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+		}
+		/* do NOT execute this code if reassociation is enabled, it does the inverse! */
+		else if (!get_opt_reassociation() && get_irn_op(a) == op_Mul) {
+			ir_node *ma = get_Mul_left(a);
+			ir_node *mb = get_Mul_right(a);
+
+			if (b == ma) {
+				ir_node *blk = get_irn_n(n, -1);
+				n = new_rd_Mul(
+					get_irn_dbg_info(n), current_ir_graph, blk,
+					ma,
+					new_rd_Add(
+						get_irn_dbg_info(n), current_ir_graph, blk,
+						mb,
+						new_r_Const_long(current_ir_graph, blk, mode, 1),
+						mode),
+					mode);
+				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
 			}
-			else if (get_irn_op(b) == op_Minus) {
-				n = new_rd_Sub(
-					get_irn_dbg_info(n),
-					current_ir_graph,
-					get_irn_n(n, -1),
-					a,
-					get_Minus_op(b),
-					mode);
-				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+			else if (b == mb) {
+				ir_node *blk = get_irn_n(n, -1);
+				n = new_rd_Mul(
+					get_irn_dbg_info(n), current_ir_graph, blk,
+					mb,
+					new_rd_Add(
+						get_irn_dbg_info(n), current_ir_graph, blk,
+						ma,
+						new_r_Const_long(current_ir_graph, blk, mode, 1),
+						mode),
+					mode);
+				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+			}
+		}
+		/* do NOT execute this code if reassociation is enabled, it does the inverse! */
+		else if (!get_opt_reassociation() && get_irn_op(b) == op_Mul) {
+			ir_node *ma = get_Mul_left(b);
+			ir_node *mb = get_Mul_right(b);
+
+			if (a == ma) {
+				ir_node *blk = get_irn_n(n, -1);
+				n = new_rd_Mul(
+					get_irn_dbg_info(n), current_ir_graph, blk,
+					ma,
+					new_rd_Add(
+						get_irn_dbg_info(n), current_ir_graph, blk,
+						mb,
+						new_r_Const_long(current_ir_graph, blk, mode, 1),
+						mode),
+					mode);
+				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+			}
+			else if (a == mb) {
+				ir_node *blk = get_irn_n(n, -1);
+				n = new_rd_Mul(
+					get_irn_dbg_info(n), current_ir_graph, blk,
+					mb,
+					new_rd_Add(
+						get_irn_dbg_info(n), current_ir_graph, blk,
+						ma,
+						new_r_Const_long(current_ir_graph, blk, mode, 1),
+						mode),
+					mode);
+				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
 			}
 		}
 	}
@@ -1626,25 +1753,66 @@ static ir_node *transform_node_Add(ir_node *n)
 }
 
 /**
- * Do the AddSub optimization, then Transform Sub(0,a) into Minus(a).
+ * Do the AddSub optimization, then Transform
+ *   Sub(0,a) -> Minus(a)
+ *   Sub(Mul(a, x), a) -> Mul(a, x-1)
  */
 static ir_node *transform_node_Sub(ir_node *n)
 {
 	ir_mode *mode;
 	ir_node *oldn = n;
+	ir_node *a, *b;
 
 	n = transform_node_AddSub(n);
 
 	mode = get_irn_mode(n);
-	if (mode_is_num(mode) && (classify_Const(get_Sub_left(n)) == CNST_NULL)) {
+	a = get_Sub_left(n);
+	b = get_Sub_right(n);
+	if (mode_is_num(mode) && (classify_Const(a) == CNST_NULL)) {
 		n = new_rd_Minus(
 				get_irn_dbg_info(n),
 				current_ir_graph,
 				get_irn_n(n, -1),
-				get_Sub_right(n),
+				b,
 				mode);
 		DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
 	}
+	/* do NOT execute this code if reassociation is enabled, it does the inverse! */
+	else if (!get_opt_reassociation() && get_irn_op(a) == op_Mul) {
+		ir_node *ma = get_Mul_left(a);
+		ir_node *mb = get_Mul_right(a);
+
+		if (ma == b) {
+			ir_node *blk = get_irn_n(n, -1);
+			n = new_rd_Mul(
+					get_irn_dbg_info(n),
+					current_ir_graph, blk,
+					ma,
+					new_rd_Sub(
+						get_irn_dbg_info(n),
+						current_ir_graph, blk,
+						mb,
+						new_r_Const_long(current_ir_graph, blk, mode, 1),
+						mode),
+					mode);
+			DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
+		}
+		else if (mb == b) {
+			ir_node *blk = get_irn_n(n, -1);
+			n = new_rd_Mul(
+					get_irn_dbg_info(n),
+					current_ir_graph, blk,
+					mb,
+					new_rd_Sub(
+						get_irn_dbg_info(n),
+						current_ir_graph, blk,
+						ma,
+						new_r_Const_long(current_ir_graph, blk, mode, 1),
+						mode),
+					mode);
+			DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
+		}
+	}
 	return n;
 }
@@ -1727,7 +1895,7 @@ static ir_node *transform_node_Mod(ir_node *n)
 
 		/* Turn Mod into a tuple (mem, bad, value) */
 		ir_node *mem = get_Mod_mem(n);
-		turn_into_tuple(n, 3);
+		turn_into_tuple(n, pn_Mod_max);
 		set_Tuple_pred(n, pn_Mod_M, mem);
 		set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
 		set_Tuple_pred(n, pn_Mod_res, value);
@@ -1786,7 +1954,7 @@ static ir_node *transform_node_DivMod(ir_node *n)
 	if (evaluated) { /* replace by tuple */
 		ir_node *mem = get_DivMod_mem(n);
-		turn_into_tuple(n, 4);
+		turn_into_tuple(n, pn_DivMod_max);
 		set_Tuple_pred(n, pn_DivMod_M, mem);
 		set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
 		set_Tuple_pred(n, pn_DivMod_res_div, a);
@@ -1852,7 +2020,7 @@ static ir_node *transform_node_Cond(ir_node *n)
 
 		/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */ jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n)); - turn_into_tuple(n, 2); + turn_into_tuple(n, pn_Cond_max); if (ta == tarval_b_true) { set_Tuple_pred(n, pn_Cond_false, new_Bad()); set_Tuple_pred(n, pn_Cond_true, jmp); @@ -1966,6 +2134,7 @@ static ir_node *transform_node_Proj_Div(ir_node *proj) if (proj_nr == pn_Div_X_except) { /* we found an exception handler, remove it */ + DBG_OPT_EXC_REM(proj); return new_Bad(); } else if (proj_nr == pn_Div_M) { /* the memory Proj can be removed */ @@ -1997,6 +2166,7 @@ static ir_node *transform_node_Proj_Mod(ir_node *proj) if (proj_nr == pn_Mod_X_except) { /* we found an exception handler, remove it */ + DBG_OPT_EXC_REM(proj); return new_Bad(); } else if (proj_nr == pn_Mod_M) { /* the memory Proj can be removed */ @@ -2036,6 +2206,7 @@ static ir_node *transform_node_Proj_DivMod(ir_node *proj) if (proj_nr == pn_DivMod_X_except) { /* we found an exception handler, remove it */ + DBG_OPT_EXC_REM(proj); return new_Bad(); } else if (proj_nr == pn_DivMod_M) { @@ -2964,7 +3135,7 @@ static ir_op_ops *firm_set_default_node_cmp_attr(opcode code, ir_op_ops *ops) * Compare function for two nodes in the hash table. Gets two * nodes as parameters. Returns 0 if the nodes are a cse. */ -static int identities_cmp(const void *elt, const void *key) +int identities_cmp(const void *elt, const void *key) { ir_node *a, *b; int i, irn_arity_a; @@ -3105,7 +3276,7 @@ identify_cons (pset *value_table, ir_node *n) { * Looks up the node in a hash table, enters it in the table * if it isn't there yet. */ -static ir_node * +ir_node * identify_remember (pset *value_table, ir_node *n) { ir_node *o = NULL;
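
A note on the new FS_OPT_ADD_MUL_A_X_A / FS_OPT_SUB_MUL_A_X_A cases added above: they rely only on the distributive law, a*x + a == a*(x + 1) and a*x - a == a*(x - 1). The standalone C sketch below is not libfirm code; the operand ranges are arbitrary illustration values, kept small to avoid signed overflow. It merely spells out the arithmetic identities the rewrites depend on.

#include <assert.h>

/* Illustration only: the identities behind the rewrites
 *   Add(Mul(a, x), a) -> Mul(a, x + 1)   since a*x + a == a*(x + 1)
 *   Sub(Mul(a, x), a) -> Mul(a, x - 1)   since a*x - a == a*(x - 1)
 * This is plain C over small integers, not firm IR construction. */
int main(void)
{
	int a, x;

	for (a = -8; a <= 8; ++a) {
		for (x = -8; x <= 8; ++x) {
			assert(a * x + a == a * (x + 1)); /* FS_OPT_ADD_MUL_A_X_A */
			assert(a * x - a == a * (x - 1)); /* FS_OPT_SUB_MUL_A_X_A */
		}
	}
	return 0;
}

The get_opt_reassociation() guard in the patch exists because, as the in-code comment notes, the reassociation pass performs exactly the inverse rewrite, so running both would let the two optimizations undo each other.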