move load mode shrinking into ldst phase
author    Matthias Braun <matthias.braun@kit.edu>
          Mon, 27 Aug 2012 12:50:17 +0000 (14:50 +0200)
committer Matthias Braun <matthias.braun@kit.edu>
          Mon, 27 Aug 2012 12:50:17 +0000 (14:50 +0200)
Performing it as a localopt was hairy, as the optimisation depended on
correct iredges values, which were misleading/wrong while new nodes were
still being constructed.
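
For illustration only (not part of the commit; the function names are made
up): the transformation folds a truncating Conv on a Load result into a
narrower Load, roughly equivalent to this source-level sketch:

    unsigned char low_byte(unsigned int *p)
    {
            unsigned int v = *p;        /* Load with a 32-bit mode            */
            return (unsigned char)v;    /* Conv to 8 bit; v has no other user */
    }

    /* after shrinking the load mode (little-endian target) this acts like */
    unsigned char low_byte_shrunk(unsigned int *p)
    {
            return *(unsigned char*)p;  /* direct 8-bit Load */
    }
    /* on a big-endian target the low byte sits at the highest address of
     * the word, so the rewritten Load additionally gets its pointer moved
     * to ptr + (32-8)/8 = ptr + 3 */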

ir/ir/iropt.c
ir/opt/ldstopt.c

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index f9958a1..d37db02 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -5597,26 +5597,6 @@ static ir_node *transform_node_Conv(ir_node *n)
                }
        }
 
-       /* shrink mode of load if possible. */
-       if (is_Proj(a)) {
-               ir_node *pred = get_Proj_pred(a);
-               if (is_Load(pred)) {
-                       /* only do it if we are the only user (otherwise the risk is too
-                        * great that we end up with 2 loads instead of one). */
-                       ir_graph *irg = get_irn_irg(n);
-                       if (edges_activated(irg) && get_irn_n_edges(a) == 1) {
-                               ir_mode *load_mode = get_Load_mode(pred);
-                               if (!mode_is_float(load_mode) && !mode_is_float(mode) &&
-                                       get_mode_size_bits(mode) <= get_mode_size_bits(load_mode)
-                                       && !be_get_backend_param()->byte_order_big_endian) {
-                                       set_Load_mode(pred, mode);
-                                       set_irn_mode(a, mode);
-                                       return a;
-                               }
-                       }
-               }
-       }
-
        return n;
 }
 
diff --git a/ir/opt/ldstopt.c b/ir/opt/ldstopt.c
index ad7e967..e1c2c71 100644
--- a/ir/opt/ldstopt.c
+++ b/ir/opt/ldstopt.c
@@ -1487,6 +1487,44 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
        return res | DF_CHANGED;
 }  /* optimize_phi */
 
+static int optimize_conv_load(ir_node *conv)
+{
+       ir_node *op = get_Conv_op(conv);
+       if (!is_Proj(op))
+               return 0;
+       /* only do it if we are the only user (otherwise the risk is too
+        * great that we end up with 2 loads instead of one). */
+       if (get_irn_n_edges(op) > 1)
+               return 0;
+       /* shrink mode of load if possible. */
+       ir_node *load = get_Proj_pred(op);
+       if (!is_Load(load))
+               return 0;
+
+       ir_mode *mode      = get_irn_mode(conv);
+       ir_mode *load_mode = get_Load_mode(load);
+       int      bits_diff
+               = get_mode_size_bits(load_mode) - get_mode_size_bits(mode);
+       if (mode_is_float(load_mode) || mode_is_float(mode) || bits_diff < 0)
+               return 0;
+
+       if (be_get_backend_param()->byte_order_big_endian) {
+               if (bits_diff % 8 != 0)
+                       return 0;
+               ir_graph *irg      = get_irn_irg(conv);
+               ir_node  *ptr      = get_Load_ptr(load);
+               ir_mode  *ptr_mode = get_irn_mode(ptr);
+               ir_node  *delta    = new_r_Const_long(irg, ptr_mode, bits_diff/8);
+               ir_node  *block    = get_nodes_block(load);
+               ir_node  *add      = new_r_Add(block, ptr, delta, ptr_mode);
+               set_Load_ptr(load, add);
+       }
+       set_Load_mode(load, mode);
+       set_irn_mode(op, mode);
+       exchange(conv, op);
+       return DF_CHANGED;
+}
+
 /**
  * walker, do the optimizations
  */
@@ -1508,6 +1546,10 @@ static void do_load_store_optimize(ir_node *n, void *env)
                wenv->changes |= optimize_phi(n, wenv);
                break;
 
+       case iro_Conv:
+               wenv->changes |= optimize_conv_load(n);
+               break;
+
        default:
                break;
        }
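
For reference, a stand-alone sketch of the offset arithmetic in the
big-endian branch (not part of the patch; it assumes 32-bit and 16-bit
integer modes): the narrow value occupies the highest-addressed bytes of
the wide word, so the shrunk Load has to start bits_diff/8 bytes further in.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* 0x11223344 laid out in big-endian byte order */
            unsigned char mem[4] = { 0x11, 0x22, 0x33, 0x44 };

            /* what Conv(32-bit Load, 16 bit) produces: the low half-word */
            uint16_t expected = 0x11223344u & 0xffffu;

            /* what the shrunk 16-bit Load reads from ptr + (32-16)/8 = ptr + 2 */
            uint16_t shrunk = (uint16_t)((mem[2] << 8) | mem[3]);

            assert(shrunk == expected);
            return 0;
    }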