X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=2d222b5240ee3bf457368e5b070d130174efcf33;hb=cb91bddc9cacdab7c28e4336847bd3dc248aa549;hp=06957f9d01910094f674e5cb883a748f66def5db;hpb=00725c695f6b4f3061665262230fabe2756b2737;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 06957f9d0..2d222b524 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -692,10 +692,11 @@ static ir_node *equivalent_node_left_zero(ir_node *n)
 static ir_node *equivalent_node_symmetric_unop(ir_node *n)
 {
 	ir_node *oldn = n;
+	ir_node *pred = get_unop_op(n);
 
 	/* optimize symmetric unop */
-	if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
-		n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
+	if (get_irn_op(pred) == get_irn_op(n)) {
+		n = get_unop_op(pred); DBG_OPT_ALGSIM2;
 	}
 	return n;
 }
@@ -895,75 +896,6 @@ static ir_node *equivalent_node_Phi(ir_node *n)
 	return n;
 }
 
-/**
- * Optimize Loads after Store.
- *
- * @todo FAILS for volatile entities
- */
-static ir_node *equivalent_node_Load(ir_node *n)
-{
-	ir_node *oldn = n;
-
-	if (!get_opt_redundant_LoadStore()) return n;
-
-	/* remove unnecessary Load. */
-	ir_node *a = skip_Proj(get_Load_mem(n));
-	ir_node *b = get_Load_ptr(n);
-	ir_node *c;
-
-	/* TODO: check for volatile */
-	if (get_irn_op(a) == op_Store && get_Store_ptr(a) == b) {
-		/* We load immediately after a store -- a read after write. */
-		ir_node *mem = get_Load_mem(n);
-
-		c = get_Store_value(a);
-		turn_into_tuple(n, 3);
-		set_Tuple_pred(n, pn_Load_M, mem);
-		set_Tuple_pred(n, pn_Load_X_except, new_Bad());
-		set_Tuple_pred(n, pn_Load_res, c); DBG_OPT_RAW;
-	}
-	else if (get_irn_op(a) == op_Load && get_Load_ptr(a) == b) {
-		/* We load immediately after a Load -- a read after read. */
-		return a;
-	}
-
-	return n;
-}
-
-/**
- * Optimize store after store and load after store.
- *
- * @todo FAILS for volatile entities
- */
-static ir_node *equivalent_node_Store(ir_node *n)
-{
-	ir_node *oldn = n;
-
-	if (!get_opt_redundant_LoadStore()) return n;
-
-	/* remove unnecessary store. */
-	ir_node *a = skip_Proj(get_Store_mem(n));
-	ir_node *b = get_Store_ptr(n);
-	ir_node *c = skip_Proj(get_Store_value(n));
-
-	if (get_irn_op(a) == op_Store
-	    && get_Store_ptr(a) == b
-	    && skip_Proj(get_Store_value(a)) == c) {
-		/* We have twice exactly the same store -- a write after write. */
-		n = a; DBG_OPT_WAW;
-	} else if (get_irn_op(c) == op_Load
-		   && (a == c || skip_Proj(get_Load_mem(c)) == a)
-		   && get_Load_ptr(c) == b ) {
-		/* We just loaded the value from the same memory, i.e., the store
-		   doesn't change the memory -- a write after read. */
-		a = get_Store_mem(n);
-		turn_into_tuple(n, 2);
-		set_Tuple_pred(n, pn_Store_M, a);
-		set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR;
-	}
-	return n;
-}
-
 /**
  * optimize Proj(Tuple) and gigo for ProjX in Bad block
  */
@@ -1044,8 +976,6 @@ static ir_op *firm_set_default_equivalent_node(ir_op *op)
 	CASE(And);
 	CASE(Conv);
 	CASE(Phi);
-	CASE(Load); /* dangerous */
-	CASE(Store); /* dangerous, see todo */
 	CASE(Proj);
 	CASE(Id);
 	default:
@@ -1375,23 +1305,6 @@ static ir_node *transform_node_Proj(ir_node *proj)
 		}
 		return proj;
 
-	/*
-	 * Ugly case: due to the walk order it may happen, that a proj is visited
-	 * before the preceding Load/Store is optimized (happens in cycles).
-	 * This will lead to a Tuple that will be alive after local_optimize(), which
-	 * is bad. So, we do it here again.
-	 */
-	case iro_Load:
-	case iro_Store:
-	{
-		ir_node *old_n = n;
-		n = equivalent_node(n);
-		if (n != old_n) {
-			set_Proj_pred(proj, n);
-		}
-	}
-	break;
-
 	case iro_Tuple:
 		/* should not happen, but if it doest will optimize */
 		break;
@@ -1405,33 +1318,6 @@ static ir_node *transform_node_Proj(ir_node *proj)
 	return equivalent_node_Proj(proj);
 }
 
-/**
- * Transform a Store before a Store to the same address...
- * Both nodes must be in the same block.
- *
- * @todo Check for volatile! Moreover, what if the first store
- * has a exception handler while the other has not?
- */
-static ir_node *transform_node_Store(ir_node *store)
-{
-	ir_node *pred = skip_Proj(get_Store_mem(store));
-	ir_node *ptr = get_Store_ptr(store);
-
-	if (!get_opt_redundant_LoadStore()) return store;
-
-	if (get_irn_op(pred) == op_Store &&
-	    get_Store_ptr(pred) == ptr &&
-	    get_nodes_block(pred) == get_nodes_block(store)) {
-		/* the Store n is useless, as it is overwritten by the store store */
-		ir_node *mem = get_Store_mem(pred);
-
-		turn_into_tuple(pred, 2);
-		set_Tuple_pred(pred, pn_Store_M, mem);
-		set_Tuple_pred(pred, pn_Store_X_except, new_Bad());
-	}
-	return store;
-}
-
 /**
  * returns the operands of a commutative bin-op, if one operand is
  * a const, it is returned as the second one.
@@ -1564,7 +1450,6 @@ static ir_op *firm_set_default_transform_node(ir_op *op)
 	CASE(Eor);
 	CASE(Not);
 	CASE(Proj);
-	CASE(Store); /* dangerous, see todo */
 	CASE(Or);
 	default:
 		op->transform_node = NULL;
@@ -1643,6 +1528,23 @@ static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
 	return get_Cast_type(a) != get_Cast_type(b);
 }
 
+static int node_cmp_attr_Load(ir_node *a, ir_node *b)
+{
+	if (get_Load_volatility(a) == volatility_is_volatile ||
+	    get_Load_volatility(b) == volatility_is_volatile)
+		/* NEVER do CSE on volatile Loads */
+		return 1;
+
+	return get_Load_mode(a) != get_Load_mode(b);
+}
+
+static int node_cmp_attr_Store(ir_node *a, ir_node *b)
+{
+	/* NEVER do CSE on volatile Stores */
+	return (get_Store_volatility(a) == volatility_is_volatile ||
+	        get_Store_volatility(b) == volatility_is_volatile);
+}
+
 /**
  * set the default node attribute compare operation
  */
@@ -1665,6 +1567,8 @@ static ir_op *firm_set_default_node_cmp_attr(ir_op *op)
 	CASE(Sel);
 	CASE(Phi);
 	CASE(Cast);
+	CASE(Load);
+	CASE(Store);
 	default:
 		op->node_cmp_attr = NULL;
 	}
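
Why the new comparison hooks matter for CSE: the value-table comparison treats two nodes as identical only if their operation, inputs and attributes match, and a nonzero result from op->node_cmp_attr means "attributes differ". By returning 1 whenever either operand is volatile, node_cmp_attr_Load() and node_cmp_attr_Store() keep CSE from ever unifying volatile memory operations, which is exactly the case the removed equivalent_node_Load()/equivalent_node_Store() handlers ignored (see their "@todo FAILS for volatile entities" notes). A minimal C illustration of the user-visible effect; the function below is a hypothetical example, not part of libfirm:

/* Every access to a volatile object is an observable access, so both
 * loads below must survive even though the pointer and the memory
 * state are identical at the two reads. */
int poll_twice(volatile int *hw_status)  /* e.g. a memory-mapped status register */
{
	int a = *hw_status;  /* first read  -> first Load node in the IR  */
	int b = *hw_status;  /* second read -> second Load node in the IR */
	/* If CSE merged the two Load nodes, the second device read would
	 * silently disappear; node_cmp_attr_Load() returning 1 for volatile
	 * Loads prevents that merge. */
	return a + b;
}

The same reasoning applies to volatile Stores, which is why node_cmp_attr_Store() never reports two volatile Stores as equal.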