+/**
+ * Check whether an already existing value of mode old_mode can be reused
+ * where a value of mode new_mode is needed, without loss of information.
+ *
+ * @param old_mode  mode of the value that is already available
+ * @param new_mode  mode that is required by the consumer
+ * @return non-zero if the stored value can be used directly (possibly
+ *         via a Conv), zero otherwise
+ */
+static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
+ if (old_mode == new_mode)
+ return 1;
+
+ /* A two's-complement value that is at least as wide as the requested
+ mode can always be converted to it without losing information. */
+ return get_mode_arithmetic(old_mode) == irma_twos_complement &&
+ get_mode_arithmetic(new_mode) == irma_twos_complement &&
+ get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode);
+} /* can_use_stored_value */
+
+/**
+ * Follow the memory chain as long as there are only Loads
+ * and alias free Stores and try to replace the current Load or Store
+ * by a previous one.
+ * Note that in unreachable loops it might happen that we reach the
+ * load again, as well as we can fall into a cycle.
+ * We break such cycles using a special visited flag.
+ *
+ * INC_MASTER() must be called before diving into this function.
+ *
+ * @param load  the Load node we try to replace
+ * @param curr  current position in the memory chain (a memory predecessor)
+ * @return a bitmask of DF_CHANGED/CF_CHANGED indicating what was changed
+ */
+static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
+ unsigned res = 0;
+ ldst_info_t *info = get_irn_link(load);
+ ir_node *pred;
+ ir_node *ptr = get_Load_ptr(load);
+ ir_node *mem = get_Load_mem(load);
+ ir_mode *load_mode = get_Load_mode(load);
+
+ for (pred = curr; load != pred; ) {
+ ldst_info_t *pred_info = get_irn_link(pred);
+
+ /*
+ * BEWARE: one might think that checking the modes is useless, because
+ * if the pointers are identical, they refer to the same object.
+ * This is only true in strongly typed languages, not in C where the
+ * following is possible: a = *(ir_type1 *)p; b = *(ir_type2 *)p ...
+ */
+ if (get_irn_op(pred) == op_Store && get_Store_ptr(pred) == ptr &&
+ can_use_stored_value(get_irn_mode(get_Store_value(pred)), load_mode)) {
+ /*
+ * a Load immediately after a Store -- a read after write.
+ * We may remove the Load if both Load & Store do not have an exception
+ * handler OR they are in the same block. In the latter case the Load
+ * cannot throw an exception when the previous Store was quiet.
+ *
+ * Why do we need to check for a Store exception? If the Store cannot
+ * be executed (ROM) the exception handler might simply jump into
+ * the load block :-(
+ * We could make it a little bit better if we would know that the exception
+ * handler of the Store jumps directly to the end...
+ */
+ if ((pred_info->projs[pn_Store_X_except] == NULL && info->projs[pn_Load_X_except] == NULL) ||
+ get_nodes_block(load) == get_nodes_block(pred)) {
+ ir_node *value = get_Store_value(pred);
+
+ DBG_OPT_RAW(load, value);
+
+ /* add a Conv if the stored mode differs from the loaded mode */
+ if (get_irn_mode(get_Store_value(pred)) != load_mode) {
+ value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
+ }
+
+ if (info->projs[pn_Load_M])
+ exchange(info->projs[pn_Load_M], mem);
+
+ /* the replaced Load cannot throw anymore: kill its exception edge */
+ if (info->projs[pn_Load_X_except]) {
+ exchange( info->projs[pn_Load_X_except], new_Bad());
+ res |= CF_CHANGED;
+ }
+
+ if (info->projs[pn_Load_res])
+ exchange(info->projs[pn_Load_res], value);
+
+ exchange(load, new_Bad());
+ reduce_adr_usage(ptr);
+ return res | DF_CHANGED;
+ }
+ } else if (get_irn_op(pred) == op_Load && get_Load_ptr(pred) == ptr &&
+ can_use_stored_value(get_Load_mode(pred), load_mode)) {
+ /*
+ * a Load after a Load -- a read after read.
+ * We may remove the second Load if it does not have an exception handler
+ * OR they are in the same block. In the latter case the Load cannot
+ * throw an exception when the previous Load was quiet.
+ *
+ * Here, there is no need to check if the previous Load has an exception
+ * handler because both would have exactly the same exception...
+ */
+ if (info->projs[pn_Load_X_except] == NULL || get_nodes_block(load) == get_nodes_block(pred)) {
+ ir_node *value;
+
+ DBG_OPT_RAR(load, pred);
+
+ /* the result is used */
+ if (info->projs[pn_Load_res]) {
+ if (pred_info->projs[pn_Load_res] == NULL) {
+ /* create a new Proj again */
+ pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
+ }
+ value = pred_info->projs[pn_Load_res];
+
+ /* add a Conv if the previous Load's mode differs from ours */
+ if (get_Load_mode(pred) != load_mode) {
+ value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
+ }
+
+ exchange(info->projs[pn_Load_res], value);
+ }
+
+ if (info->projs[pn_Load_M])
+ exchange(info->projs[pn_Load_M], mem);
+
+ /* the replaced Load cannot throw anymore: kill its exception edge */
+ if (info->projs[pn_Load_X_except]) {
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ res |= CF_CHANGED;
+ }
+
+ exchange(load, new_Bad());
+ reduce_adr_usage(ptr);
+ return res |= DF_CHANGED;
+ }
+ }
+
+ if (get_irn_op(pred) == op_Store) {
+ /* check if we can pass through this Store */
+ ir_alias_relation rel = get_alias_relation(
+ current_ir_graph,
+ get_Store_ptr(pred),
+ get_irn_mode(get_Store_value(pred)),
+ ptr, load_mode);
+ /* if there might be an alias, we cannot pass this Store */
+ if (rel != no_alias)
+ break;
+ pred = skip_Proj(get_Store_mem(pred));
+ } else if (get_irn_op(pred) == op_Load) {
+ pred = skip_Proj(get_Load_mem(pred));
+ } else {
+ /* follow only Load chains */
+ break;
+ }
+
+ /* check for cycles */
+ if (NODE_VISITED(pred_info))
+ break;
+ MARK_NODE(pred_info);
+ }
+
+ if (get_irn_op(pred) == op_Sync) {
+ int i;
+
+ /* handle all Sync predecessors: try every incoming memory chain */
+ for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
+ res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
+ if (res)
+ break;
+ }
+ }
+
+ return res;
+} /* follow_Mem_chain */