Removed ANNOUNCE macro
[libfirm] / ir / opt / ldstopt.c
index cee7142..ec09418 100644
@@ -431,6 +431,23 @@ static void reduce_adr_usage(ir_node *ptr) {
        }
 }  /* reduce_adr_usage */
 
+/**
+ * Check whether an already existing value of mode old_mode can be converted
+ * into the needed mode new_mode without loss.
+ */
+static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
+       if (old_mode == new_mode)
+               return 1;
+
+       /* if both modes are two's-complement ones, we can always convert the
+          Stored value into the needed one. */
+       if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+                 get_mode_arithmetic(old_mode) == irma_twos_complement &&
+                 get_mode_arithmetic(new_mode) == irma_twos_complement)
+               return 1;
+       return 0;
+}  /* can_use_stored_value */
+
 /**
  * Follow the memory chain as long as there are only Loads
  * and alias free Stores and try to replace current Load or Store
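(The new predicate is the heart of this change. As a standalone illustration -- hypothetical mode_desc/can_reuse names, not libfirm API -- the same size/arithmetic rule looks like this:

/* Minimal sketch of the rule can_use_stored_value() applies, written against
 * a tiny made-up mode descriptor so it can be compiled in isolation. */
#include <stdio.h>

enum arithmetic { TWOS_COMPLEMENT, IEEE754 };

struct mode_desc {
    const char      *name;
    unsigned         size_bits;
    enum arithmetic  arithmetic;
};

static int can_reuse(const struct mode_desc *old_mode,
                     const struct mode_desc *new_mode)
{
    if (old_mode == new_mode)
        return 1;
    /* a wider two's-complement value always contains the narrower one */
    return old_mode->size_bits >= new_mode->size_bits
        && old_mode->arithmetic == TWOS_COMPLEMENT
        && new_mode->arithmetic == TWOS_COMPLEMENT;
}

int main(void)
{
    struct mode_desc Is = { "Is", 32, TWOS_COMPLEMENT };  /* like int   */
    struct mode_desc Hs = { "Hs", 16, TWOS_COMPLEMENT };  /* like short */
    struct mode_desc F  = { "F",  32, IEEE754 };          /* like float */

    printf("Is -> Hs: %d\n", can_reuse(&Is, &Hs));  /* 1: reusable via a Conv  */
    printf("Hs -> Is: %d\n", can_reuse(&Hs, &Is));  /* 0: would need widening  */
    printf("Is -> F : %d\n", can_reuse(&Is, &F));   /* 0: not two's complement */
    return 0;
}
)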
@@ -459,7 +476,7 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                 * is possible a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
-                       get_irn_mode(get_Store_value(pred)) == load_mode) {
+                   can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
                        /*
                         * a Load immediately after a Store -- a read after write.
                         * We may remove the Load, if both Load & Store does not have an exception handler
@@ -472,11 +489,17 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                         * We could make it a little bit better if we would know that the exception
                         * handler of the Store jumps directly to the end...
                         */
-                       if ((!pred_info->projs[pn_Store_X_except] && !info->projs[pn_Load_X_except]) ||
+                       if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
                            get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value = get_Store_value(pred);
 
                                DBG_OPT_RAW(load, value);
+
+                               /* add a Conv if needed */
+                               if (get_irn_mode(get_Store_value(pred)) != load_mode) {
+                                       value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
+                               }
+
                                if (info->projs[pn_Load_M])
                                        exchange(info->projs[pn_Load_M], mem);
 
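(At the source level, the read-after-write case above corresponds to roughly the sketch below; variable names are invented, and it assumes a little-endian host so that the narrower reload really sees the low-order bits the inserted Conv computes:

#include <stdio.h>
#include <string.h>

int main(void)
{
    int   storage;
    int   x = 0x12345678;
    short loaded, conv;

    /* the pattern the hunk matches: a Store followed by a Load from the
     * same address, but with a narrower (two's-complement) mode */
    memcpy(&storage, &x, sizeof x);            /* Store, 32-bit mode     */
    memcpy(&loaded, &storage, sizeof loaded);  /* Load, 16-bit, same ptr */

    /* what the optimized graph computes instead of the Load:
     * Conv(stored value, load mode), i.e. a plain truncation */
    conv = (short)x;

    printf("loaded=%d conv=%d\n", loaded, conv);  /* equal on little-endian */
    return 0;
}
)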
@@ -494,7 +517,7 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                                return res | DF_CHANGED;
                        }
                } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
-                          get_Load_mode(pred) == load_mode) {
+                          can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
                         * We may remove the second Load, if it does not have an exception handler
@@ -504,29 +527,30 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                         * Here, there is no need to check if the previous Load has an exception
                         * hander because they would have exact the same exception...
                         */
-                       if (! info->projs[pn_Load_X_except] || get_nodes_block(load) == get_nodes_block(pred)) {
-                               DBG_OPT_RAR(load, pred);
+                       if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
+                               ir_node *value;
 
-                               if (pred_info->projs[pn_Load_res]) {
-                                       /* we need a data proj from the previous load for this optimization */
-                                       if (info->projs[pn_Load_res])
-                                               exchange(info->projs[pn_Load_res], pred_info->projs[pn_Load_res]);
+                               DBG_OPT_RAR(load, pred);
 
-                                       if (info->projs[pn_Load_M])
-                                               exchange(info->projs[pn_Load_M], mem);
-                               } else {
-                                       if (info->projs[pn_Load_res]) {
-                                               set_Proj_pred(info->projs[pn_Load_res], pred);
-                                               set_nodes_block(info->projs[pn_Load_res], get_nodes_block(pred));
-                                               pred_info->projs[pn_Load_res] = info->projs[pn_Load_res];
+                               /* the result is used */
+                               if (info->projs[pn_Load_res]) {
+                                       if (pred_info->projs[pn_Load_res] == NULL) {
+                                               /* the previous Load has no result Proj yet: create one */
+                                               pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
                                        }
-                                       if (info->projs[pn_Load_M]) {
-                                               /* Actually, this if should not be necessary.  Construct the Loads
-                                               properly!!! */
-                                               exchange(info->projs[pn_Load_M], mem);
+                                       value = pred_info->projs[pn_Load_res];
+
+                                       /* add a Conv if needed */
+                                       if (get_Load_mode(pred) != load_mode) {
+                                               value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
                                        }
+
+                                       exchange(info->projs[pn_Load_res], value);
                                }
 
+                               if (info->projs[pn_Load_M])
+                                       exchange(info->projs[pn_Load_M], mem);
+
                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        exchange(info->projs[pn_Load_X_except], new_Bad());
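(The read-after-read case is the same idea one step removed: the second Load's users are rewired to the first Load's result Proj, created on demand if the first Load's result was previously unused, plus a Conv when the modes differ. A rough source-level sketch with invented names and the same little-endian caveat as above:

#include <stdio.h>
#include <string.h>

int main(void)
{
    int   storage = 0x0badf00d;
    int   a;
    short b, b_opt;

    memcpy(&a, &storage, sizeof a);   /* first Load, 32-bit            */
    memcpy(&b, &storage, sizeof b);   /* second Load, 16-bit, same ptr */

    /* after the optimization the second Load is gone: its users read
     * Conv(result Proj of the first Load, narrower mode) instead */
    b_opt = (short)a;

    printf("b=%d b_opt=%d\n", b, b_opt);  /* equal on little-endian */
    return 0;
}
)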
@@ -540,12 +564,12 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                }
 
                if (get_irn_op(pred) == op_Store) {
-                       /* check if we can pass thru this store */
+                       /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
-                               ptr, load_mode, opt_non_opt);
+                               ptr, load_mode);
                        /* if the might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
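(Dropping the extra opt_non_opt argument does not change the logic here: any Store whose address may alias the loaded address still terminates the walk. A small self-contained illustration, not using the libfirm alias API, of why it must:

#include <stdio.h>

/* without proof that p and q never alias, the second read of *p cannot be
 * replaced by the first one -- the intervening write may have changed it */
static int reload(int *p, int *q)
{
    int a, b;

    a  = *p;
    *q = 42;   /* may or may not write through the same address as p    */
    b  = *p;   /* must stay a real Load unless the relation is no_alias */
    return a + b;
}

int main(void)
{
    int x = 1, y = 1;

    printf("%d\n", reload(&x, &x));  /* p aliases q: a=1, b=42 -> 43    */
    x = 1;
    printf("%d\n", reload(&x, &y));  /* distinct objects: a=1, b=1 -> 2 */
    return 0;
}
)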
@@ -779,6 +803,15 @@ static unsigned optimize_load(ir_node *load)
        return res;
 }  /* optimize_load */
 
+/**
+ * Check whether a value of mode new_mode would completely overwrite a value
+ * of mode old_mode in memory.
+ */
+static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
+{
+       return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
+}  /* is_completely_overwritten */
+
 /**
  * follow the memory chain as long as there are only Loads and alias free Stores.
  *
@@ -802,9 +835,12 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strong typed languages, not is C were the following
                 * is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
+                * However, if the mode that is written has a bigger or equal size than the old
+                * one, the old value is completely overwritten and can be killed ...
                 */
                if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
-                   get_nodes_block(pred) == block && get_irn_mode(get_Store_value(pred)) == mode) {
+                   get_nodes_block(pred) == block &&
+                   is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
                        /*
                         * a Store after a Store in the same block -- a write after write.
                         * We may remove the first Store, if it does not have an exception handler.
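(The relaxed write-after-write test means the earlier Store no longer has to use exactly the same mode, only one that the later Store completely covers. A source-level sketch with invented names:

#include <stdio.h>
#include <string.h>

int main(void)
{
    int   storage = 0;
    short dead    = 0x1111;
    int   live    = 0x22222222;

    /* earlier, narrower Store: every byte it writes is rewritten below,
     * so the optimization may remove it (write after write) */
    memcpy(&storage, &dead, sizeof dead);

    /* later Store through the same pointer, at least as wide: the new
     * is_completely_overwritten() test (32 >= 16 bits) holds */
    memcpy(&storage, &live, sizeof live);

    printf("0x%08x\n", storage);  /* 0x22222222 with or without the dead Store */
    return 0;
}
)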
@@ -839,7 +875,7 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
                                current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
-                               ptr, mode, opt_non_opt);
+                               ptr, mode);
                        /* if the might be an alias, we cannot pass this Store */
                        if (rel != no_alias)
                                break;
@@ -1098,19 +1134,19 @@ static void do_load_store_optimize(ir_node *n, void *env) {
 
        switch (get_irn_opcode(n)) {
 
-  case iro_Load:
-         wenv->changes |= optimize_load(n);
-         break;
+       case iro_Load:
+               wenv->changes |= optimize_load(n);
+               break;
 
-  case iro_Store:
-         wenv->changes |= optimize_store(n);
-         break;
+       case iro_Store:
+               wenv->changes |= optimize_store(n);
+               break;
 
-  case iro_Phi:
-         wenv->changes |= optimize_phi(n, wenv);
+       case iro_Phi:
+               wenv->changes |= optimize_phi(n, wenv);
 
-  default:
-         ;
+       default:
+               ;
        }
 }  /* do_load_store_optimize */