-/**
- * Optimize a Load that directly follows a Store or Load to the same
- * address: read after write and read after read.
- *
- * @todo relies on the front end setting the volatility attribute on
- *       accesses to volatile entities; entity-level volatility is not
- *       checked here.
- */
-static ir_node *equivalent_node_Load(ir_node *n)
-{
-  ir_node *oldn = n;  /* referenced by the DBG_OPT_* debug macros */
-
-  if (!get_opt_redundant_LoadStore()) return n;
-
-  /* Never remove a volatile Load: each access is observable behavior.
-     (Assumes the Load volatility attribute, get_Load_volatility(),
-     is available.) */
-  if (get_Load_volatility(n) == volatility_is_volatile) return n;
-
-  /* remove unnecessary Load. */
-  ir_node *a = skip_Proj(get_Load_mem(n));
-  ir_node *b = get_Load_ptr(n);
-  ir_node *c;
-
-  if (get_irn_op(a) == op_Store && get_Store_ptr(a) == b) {
-    /* We Load directly after a Store to the same address -- a read
-       after write.  Forward the stored value: turn the Load into a
-       Tuple so its Projs yield the old memory state, no exception,
-       and the stored value. */
-    ir_node *mem = get_Load_mem(n);
-
-    c = get_Store_value(a);
-    turn_into_tuple(n, 3);
-    set_Tuple_pred(n, pn_Load_M,        mem);
-    set_Tuple_pred(n, pn_Load_X_except, new_Bad());
-    set_Tuple_pred(n, pn_Load_res,      c);                 DBG_OPT_RAW;
-  }
-  else if (get_irn_op(a) == op_Load && get_Load_ptr(a) == b) {
-    /* We Load directly after a Load from the same address -- a read
-       after read.  The earlier Load is equivalent; return it so all
-       users of n are redirected to it. */
-    return a;
-  }
-
-  return n;
-}
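-
-/* Minimal sketches of source patterns the two cases above remove,
-   assuming a front end lowers them to adjacent memory operations
-   (the later node's memory input reaching the earlier node modulo
-   Proj nodes).  Illustration only, not part of the original file;
-   the example_* names are hypothetical. */
-static int example_read_after_write(int *p, int v)
-{
-  *p = v;       /* Store */
-  return *p;    /* Load directly after the Store: the stored value v
-                   is forwarded and the Load disappears */
-}
-
-static int example_read_after_read(int *p)
-{
-  int x = *p;   /* Load */
-  int y = *p;   /* Load directly after the Load: replaced by the
-                   first Load */
-  return x + y;
-}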
-
-/**
- * Optimize a Store that repeats the immediately preceding Store
- * (write after write) or that writes back a value just Loaded from
- * the same address (write after read).
- *
- * @todo same volatility caveat as equivalent_node_Load() above.
- */
-static ir_node *equivalent_node_Store(ir_node *n)
-{
-  ir_node *oldn = n;  /* referenced by the DBG_OPT_* debug macros */
-
-  if (!get_opt_redundant_LoadStore()) return n;
-
-  /* Never remove a volatile Store: each access is observable behavior.
-     (Assumes the Store volatility attribute, get_Store_volatility(),
-     is available.) */
-  if (get_Store_volatility(n) == volatility_is_volatile) return n;
-
-  /* remove unnecessary Store. */
-  ir_node *a = skip_Proj(get_Store_mem(n));
-  ir_node *b = get_Store_ptr(n);
-  ir_node *c = skip_Proj(get_Store_value(n));
-
-  if (get_irn_op(a) == op_Store
-      && get_Store_ptr(a) == b
-      && skip_Proj(get_Store_value(a)) == c) {
-    /* We Store the same value twice in a row to the same address --
-       a write after write.  The earlier Store is equivalent. */
-    n = a;                                                   DBG_OPT_WAW;
-  } else if (get_irn_op(c) == op_Load
-             && (a == c || skip_Proj(get_Load_mem(c)) == a)
-             && get_Load_ptr(c) == b) {
-    /* We Store back a value just Loaded from the same address, so the
-       Store does not change memory -- a write after read.  Turn the
-       Store into a Tuple that passes the old memory state through. */
-    a = get_Store_mem(n);
-    turn_into_tuple(n, 2);
-    set_Tuple_pred(n, pn_Store_M,        a);
-    set_Tuple_pred(n, pn_Store_X_except, new_Bad());         DBG_OPT_WAR;
-  }
-  return n;
-}
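-
-/* Minimal sketches of source patterns the two cases above remove,
-   again assuming the two operations are adjacent in the memory
-   dependency chain (modulo Proj nodes).  Illustration only, not part
-   of the original file; the example_* names are hypothetical. */
-static void example_write_after_write(int *p, int v)
-{
-  *p = v;       /* Store */
-  *p = v;       /* same value, same address: the second Store is
-                   replaced by the first */
-}
-
-static void example_write_after_read(int *p)
-{
-  int x = *p;   /* Load */
-  *p = x;       /* stores back the value just read: the Store is
-                   turned into a Tuple passing the old memory state
-                   through */
-}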
-