X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=6261b306c41bce9e09c024e4fb17a6f0d496d5ff;hb=76a6c3b40390427bc516a943316642bb57daa465;hp=b1144c983d5baeecdcf9c9b7fe9657ee6b9c821e;hpb=5710356093f8cb5bfb9ac4952e3eaa42aba4e5ee;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index b1144c983..6261b306c 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -6,7 +6,7 @@ * Modified by: Goetz Lindenmaier * Created: * CVS-ID: $Id$ - * Copyright: (c) 1998-2003 Universität Karlsruhe + * Copyright: (c) 1998-2005 Universität Karlsruhe * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE. */ @@ -39,6 +39,7 @@ # include "irhooks.h" # include "irarch.h" # include "hashptr.h" +# include "opt_polymorphy.h" /* Make types visible to allow most efficient access */ # include "entity_t.h" @@ -441,16 +442,16 @@ static tarval *computed_value_Proj(ir_node *n) /* BEWARE: a == a is NOT always True for floating Point!!! */ /* This is a trick with the bits used for encoding the Cmp Proj numbers, the following statement is not the same: - return new_tarval_from_long (proj_nr == Eq, mode_b) */ - return new_tarval_from_long (proj_nr & Eq, mode_b); + return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */ + return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b); } else { tarval *taa = value_of(aa); tarval *tab = value_of(ab); if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */ /* strange checks... */ - pnc_number flags = tarval_cmp (taa, tab); - if (flags != False) { + pn_Cmp flags = tarval_cmp (taa, tab); + if (flags != pn_Cmp_False) { return new_tarval_from_long (proj_nr & flags, mode_b); } } else { /* check for 3.: */ @@ -478,7 +479,7 @@ static tarval *computed_value_Proj(ir_node *n) && (mode_is_reference(get_irn_mode(ab))) && (get_irn_op(aba) == op_Alloc))) /* 3.: */ - return new_tarval_from_long (proj_nr & Ne, mode_b); + return new_tarval_from_long (proj_nr & pn_Cmp_Ne, mode_b); } } break; @@ -1083,15 +1084,21 @@ static ir_node *equivalent_node_Id(ir_node *n) */ static ir_node *equivalent_node_Mux(ir_node *n) { - ir_node *sel = get_Mux_sel(n); + ir_node *oldn = n, *sel = get_Mux_sel(n); tarval *ts = value_of(sel); - if (ts == get_tarval_b_true()) - return get_Mux_true(n); - else if (ts == get_tarval_b_false()) - return get_Mux_false(n); - else if(get_Mux_false(n) == get_Mux_true(n)) - return get_Mux_true(n); + if (ts == get_tarval_b_true()) { + n = get_Mux_true(n); + DBG_OPT_ALGSIM0(oldn, n); + } + else if (ts == get_tarval_b_false()) { + n = get_Mux_false(n); + DBG_OPT_ALGSIM0(oldn, n); + } + else if(get_Mux_false(n) == get_Mux_true(n)) { + n = get_Mux_true(n); + DBG_OPT_ALGSIM0(oldn, n); + } return n; } @@ -1188,7 +1195,8 @@ optimize_preds(ir_node *n) { /** * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and - * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)) if possible. + * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)). + * If possible, remove the Conv's. */ static ir_node *transform_node_AddSub(ir_node *n) { @@ -1248,8 +1256,63 @@ static ir_node *transform_node_AddSub(ir_node *n) return n; } -#define transform_node_Add transform_node_AddSub -#define transform_node_Sub transform_node_AddSub +/** + * Do the AddSub optimization, then Transform Add(a,a) into Mul(a, 2) + * if the mode is integer or float. + * Reassociation might fold this further. 
+ */ +static ir_node *transform_node_Add(ir_node *n) +{ + ir_mode *mode; + ir_node *oldn = n; + + n = transform_node_AddSub(n); + + mode = get_irn_mode(n); + if (mode_is_num(mode)) { + ir_node *a = get_Add_left(n); + + if (a == get_Add_right(n)) { + ir_node *block = get_nodes_block(n); + + n = new_rd_Mul( + get_irn_dbg_info(n), + current_ir_graph, + block, + a, + new_r_Const_long(current_ir_graph, block, mode, 2), + mode); + DBG_OPT_ALGSIM0(oldn, n); + } + } + return n; +} + +/** + * Do the AddSub optimization, then Transform Sub(0,a) into Minus(a). + */ +static ir_node *transform_node_Sub(ir_node *n) +{ + ir_mode *mode; + ir_node *oldn = n; + + n = transform_node_AddSub(n); + + mode = get_irn_mode(n); + if (mode_is_num(mode)) { + if (classify_Const(get_Sub_left(n)) == CNST_NULL) { + n = new_rd_Minus( + get_irn_dbg_info(n), + current_ir_graph, + get_nodes_block(n), + get_Sub_right(n), + mode); + DBG_OPT_ALGSIM0(oldn, n); + } + } + + return n; +} /** Do architecture dependend optimizations on Mul nodes */ static ir_node *transform_node_Mul(ir_node *n) { @@ -1263,8 +1326,11 @@ static ir_node *transform_node_Div(ir_node *n) /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */ - if (tv != tarval_bad) + if (tv != tarval_bad) { value = new_Const(get_tarval_mode(tv), tv); + + DBG_OPT_CSTEVAL(n, value); + } else /* Try architecture dependand optimization */ value = arch_dep_replace_div_by_const(n); @@ -1287,8 +1353,11 @@ static ir_node *transform_node_Mod(ir_node *n) /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause a exception */ - if (tv != tarval_bad) + if (tv != tarval_bad) { value = new_Const(get_tarval_mode(tv), tv); + + DBG_OPT_CSTEVAL(n, value); + } else /* Try architecture dependand optimization */ value = arch_dep_replace_mod_by_const(n); @@ -1323,7 +1392,10 @@ static ir_node *transform_node_DivMod(ir_node *n) if (tb == get_mode_one(get_tarval_mode(tb))) { b = new_Const (mode, get_mode_null(mode)); evaluated = 1; - } else if (ta != tarval_bad) { + + DBG_OPT_CSTEVAL(n, b); + } + else if (ta != tarval_bad) { tarval *resa, *resb; resa = tarval_div (ta, tb); if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through @@ -1333,6 +1405,9 @@ static ir_node *transform_node_DivMod(ir_node *n) a = new_Const (mode, resa); b = new_Const (mode, resb); evaluated = 1; + + DBG_OPT_CSTEVAL(n, a); + DBG_OPT_CSTEVAL(n, b); } else { /* Try architecture dependand optimization */ arch_dep_replace_divmod_by_const(&a, &b, n); @@ -1414,6 +1489,7 @@ static ir_node *transform_node_Cond(ir_node *n) */ static ir_node *transform_node_Eor(ir_node *n) { + ir_node *oldn = n; ir_node *a = get_Eor_left(n); ir_node *b = get_Eor_right(n); @@ -1421,16 +1497,22 @@ static ir_node *transform_node_Eor(ir_node *n) && (get_irn_op(a) == op_Proj) && (get_irn_mode(a) == mode_b) && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE) - && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) + && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) { /* The Eor negates a Cmp. The Cmp has the negated result anyways! */ n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a), mode_b, get_negated_pnc(get_Proj_proj(a))); + + DBG_OPT_ALGSIM0(oldn, n); + } else if ((get_irn_mode(n) == mode_b) - && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) + && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) { /* The Eor is a Not. Replace it by a Not. */ /* ????!!!Extend to bitfield 1111111. 
*/ n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b); + DBG_OPT_ALGSIM0(oldn, n); + } + return n; } @@ -1439,15 +1521,18 @@ static ir_node *transform_node_Eor(ir_node *n) */ static ir_node *transform_node_Not(ir_node *n) { + ir_node *oldn = n; ir_node *a = get_Not_op(n); if ( (get_irn_mode(n) == mode_b) && (get_irn_op(a) == op_Proj) && (get_irn_mode(a) == mode_b) - && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) + && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) { /* We negate a Cmp. The Cmp has the negated result anyways! */ n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a), mode_b, get_negated_pnc(get_Proj_proj(a))); + DBG_OPT_ALGSIM0(oldn, n); + } return n; } @@ -1456,15 +1541,18 @@ static ir_node *transform_node_Not(ir_node *n) * Transform a Cast of a Const into a new Const */ static ir_node *transform_node_Cast(ir_node *n) { + ir_node *oldn = n; ir_node *pred = get_Cast_op(n); type *tp = get_irn_type(pred); if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) { n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred), get_Const_tarval(pred), tp); + DBG_OPT_CSTEVAL(oldn, n); } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) { n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred), get_SymConst_kind(pred), tp); + DBG_OPT_CSTEVAL(oldn, n); } return n; } @@ -1696,7 +1784,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { ir_mode *mode = get_irn_mode(or); ir_node *shl, *shr, *block; - ir_node *irn, *x, *c1, *c2, *v, *sub; + ir_node *irn, *x, *c1, *c2, *v, *sub, *n; tarval *tv1, *tv2; if (! mode_is_int(mode)) @@ -1740,7 +1828,10 @@ static ir_node *transform_node_Or_Rot(ir_node *or) /* yet, condition met */ block = get_nodes_block(or); - return new_r_Rot(current_ir_graph, block, x, c1, mode); + n = new_r_Rot(current_ir_graph, block, x, c1, mode); + + DBG_OPT_ALGSIM1(or, shl, shr, n); + return n; } else if (get_irn_op(c1) == op_Sub) { v = c2; @@ -1764,7 +1855,10 @@ static ir_node *transform_node_Or_Rot(ir_node *or) block = get_nodes_block(or); /* a Rot right is not supported, so use a rot left */ - return new_r_Rot(current_ir_graph, block, x, sub, mode); + n = new_r_Rot(current_ir_graph, block, x, sub, mode); + + DBG_OPT_ALGSIM0(or, n); + return n; } else if (get_irn_op(c2) == op_Sub) { v = c1; @@ -1785,7 +1879,10 @@ static ir_node *transform_node_Or_Rot(ir_node *or) block = get_nodes_block(or); /* a Rot Left */ - return new_r_Rot(current_ir_graph, block, x, v, mode); + n = new_r_Rot(current_ir_graph, block, x, v, mode); + + DBG_OPT_ALGSIM0(or, n); + return n; } return or; @@ -1810,7 +1907,7 @@ static ir_node *transform_node(ir_node *n); */ static ir_node * transform_node_shift(ir_node *n) { - ir_node *left; + ir_node *left, *right; tarval *tv1, *tv2, *res; ir_mode *mode; int modulo_shf, flag; @@ -1821,7 +1918,8 @@ static ir_node * transform_node_shift(ir_node *n) if (get_irn_op(left) != get_irn_op(n)) return n; - tv1 = value_of(get_binop_right(n)); + right = get_binop_right(n); + tv1 = value_of(right); if (tv1 == tarval_bad) return n; @@ -1840,7 +1938,7 @@ static ir_node * transform_node_shift(ir_node *n) if (modulo_shf > 0) { tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res)); - if (tarval_cmp(res, modulo) & Lt) + if (tarval_cmp(res, modulo) & pn_Cmp_Lt) flag = 1; } else @@ -1855,6 +1953,8 @@ static ir_node * transform_node_shift(ir_node *n) irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in); 
+ DBG_OPT_ALGSIM0(n, irn); + return transform_node(irn); } return n; @@ -1911,6 +2011,7 @@ static ir_op *firm_set_default_transform_node(ir_op *op) CASE(Proj); CASE(Or); CASE(End); + CASE(Sel); case iro_Shr: case iro_Shrs: case iro_Shl: @@ -2308,7 +2409,7 @@ optimize_node (ir_node *n) old_tp = get_irn_type(get_irn_n(n, i)); } - /* Allways optimize Phi nodes: part of the construction. */ + /* Always optimize Phi nodes: part of the construction. */ if ((!get_opt_optimize()) && (iro != iro_Phi)) return n; /* constant expression evaluation / constant folding */ @@ -2379,17 +2480,18 @@ optimize_node (ir_node *n) free the node. */ iro = get_irn_opcode(n); if (get_opt_constant_folding() || - (iro == iro_Cond) || - (iro == iro_Proj)) /* Flags tested local. */ - n = transform_node (n); + (iro == iro_Cond) || + (iro == iro_Proj) || + (iro == iro_Sel)) /* Flags tested local. */ + n = transform_node (n); /* Remove nodes with dead (Bad) input. - Run always for transformation induced Bads. */ + Run always for transformation induced Bads. */ n = gigo (n); /* Now we have a legal, useful node. Enter it in hash table for cse */ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) { - n = identify_remember (current_ir_graph->value_table, n); + n = identify_remember (current_ir_graph->value_table, n); } return n; @@ -2466,7 +2568,8 @@ optimize_in_place_2 (ir_node *n) iro = get_irn_opcode(n); if (get_opt_constant_folding() || (iro == iro_Cond) || - (iro == iro_Proj)) /* Flags tested local. */ + (iro == iro_Proj) || + (iro == iro_Sel)) /* Flags tested local. */ n = transform_node (n); /* Remove nodes with dead (Bad) input. @@ -2515,6 +2618,7 @@ ir_op *firm_set_default_operations(ir_op *op) op = firm_set_default_equivalent_node(op); op = firm_set_default_transform_node(op); op = firm_set_default_node_cmp_attr(op); + op = firm_set_default_get_type(op); return op; }
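Note (not part of the patch): the new transform_node_Add and transform_node_Sub hooks above perform the local algebraic rewrites Add(a,a) -> Mul(a,2) and Sub(0,a) -> Minus(a). The sketch below restates those two rewrites on a deliberately minimal toy expression type so the idea can be read and run outside libfirm. It is illustrative only, assuming nothing about the libfirm API: every name in it (toy_op, toy_node, toy_new, toy_transform) is hypothetical, and the patch's DBG_OPT_* statistics hooks are simply omitted.

/* Illustrative sketch only -- a toy IR, not the libfirm API.
 * It mirrors the per-opcode local rewrites added in this patch:
 *   Add(a, a) -> Mul(a, 2)
 *   Sub(0, a) -> Minus(a)
 * Error handling and memory management are intentionally minimal.
 */
#include <stdio.h>
#include <stdlib.h>

enum toy_op { TOY_CONST, TOY_VAR, TOY_ADD, TOY_SUB, TOY_MUL, TOY_MINUS };

typedef struct toy_node {
    enum toy_op op;
    long value;                     /* only meaningful for TOY_CONST */
    struct toy_node *left, *right;
} toy_node;

static toy_node *toy_new(enum toy_op op, long value, toy_node *l, toy_node *r) {
    toy_node *n = malloc(sizeof(*n));
    n->op = op; n->value = value; n->left = l; n->right = r;
    return n;
}

static int is_zero(const toy_node *n) {
    return n->op == TOY_CONST && n->value == 0;
}

/* Local simplification of one node, analogous in spirit to the
 * transform_node_*() callbacks: return a replacement node or the
 * unchanged input if no rule matches. */
static toy_node *toy_transform(toy_node *n) {
    switch (n->op) {
    case TOY_ADD:
        if (n->left == n->right)    /* Add(a, a) -> Mul(a, 2) */
            return toy_new(TOY_MUL, 0, n->left,
                           toy_new(TOY_CONST, 2, NULL, NULL));
        break;
    case TOY_SUB:
        if (is_zero(n->left))       /* Sub(0, a) -> Minus(a) */
            return toy_new(TOY_MINUS, 0, n->right, NULL);
        break;
    default:
        break;
    }
    return n;                       /* nothing matched: keep the node */
}

int main(void) {
    toy_node *a   = toy_new(TOY_VAR, 0, NULL, NULL);
    toy_node *add = toy_new(TOY_ADD, 0, a, a);
    toy_node *sub = toy_new(TOY_SUB, 0, toy_new(TOY_CONST, 0, NULL, NULL), a);

    printf("Add(a,a) rewrites to op %d (TOY_MUL   = %d)\n",
           toy_transform(add)->op, TOY_MUL);
    printf("Sub(0,a) rewrites to op %d (TOY_MINUS = %d)\n",
           toy_transform(sub)->op, TOY_MINUS);
    return 0;
}

As in the patch, the rewrite matches Add(a,a) by pointer identity of the two operands, which is the natural test once common subexpression elimination has unified equal operands; the mode check (mode_is_num) present in the real transform_node_Add is left out of this toy version.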