+/* forward */
+static void reduce_adr_usage(ir_node *ptr);
+
+/**
+ * Update a Load that may have lost its users.
+ */
+static void handle_load_update(ir_node *load) {
+ ldst_info_t *info = get_irn_link(load);
+
+ /* do NOT touch volatile loads for now */
+ if (get_Load_volatility(load) == volatility_is_volatile)
+ return;
+
+ if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
+ ir_node *ptr = get_Load_ptr(load);
+ ir_node *mem = get_Load_mem(load);
+
+ /* a Load whose value is neither used nor exception checked, remove it */
+ exchange(info->projs[pn_Load_M], mem);
+ if (info->projs[pn_Load_X_regular])
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ kill_node(load);
+ reduce_adr_usage(ptr);
+ }
+} /* handle_load_update */
+
+/**
+ * A use of an address node has vanished. Check if this was a Proj
+ * node and update the counters.
+ */
+static void reduce_adr_usage(ir_node *ptr) {
+ if (is_Proj(ptr)) {
+ if (get_irn_n_edges(ptr) <= 0) {
+ /* this Proj is dead now */
+ ir_node *pred = get_Proj_pred(ptr);
+
+ if (is_Load(pred)) {
+ ldst_info_t *info = get_irn_link(pred);
+ info->projs[get_Proj_proj(ptr)] = NULL;
+
+ /* this node lost its result proj, handle that */
+ handle_load_update(pred);
+ }
+ }
+ }
+} /* reduce_adr_usage */
+
+/**
+ * Check, if an already existing value of mode old_mode can be converted
+ * into the needed one new_mode without loss.
+ */
+static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
+ if (old_mode == new_mode)
+ return 1;
+
+ /* if both modes are two-complement ones, we can always convert the
+ Stored value into the needed one. */
+ if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+ get_mode_arithmetic(old_mode) == irma_twos_complement &&
+ get_mode_arithmetic(new_mode) == irma_twos_complement)
+ return 1;
+ return 0;
+} /* can_use_stored_value */
+
+/**
+ * Check whether a Call is at least pure, ie. does only read memory.
+ */
+static unsigned is_Call_pure(ir_node *call) {
+ ir_type *call_tp = get_Call_type(call);
+ unsigned prop = get_method_additional_properties(call_tp);
+
+ /* check first the call type */
+ if ((prop & (mtp_property_const|mtp_property_pure)) == 0) {
+ /* try the called entity */
+ ir_node *ptr = get_Call_ptr(call);
+
+ if (is_Global(ptr)) {
+ ir_entity *ent = get_Global_entity(ptr);
+
+ prop = get_entity_additional_properties(ent);
+ }
+ }
+ return (prop & (mtp_property_const|mtp_property_pure)) != 0;
+} /* is_Call_pure */
+
/**
 * Decompose the address expression @p ptr into a base address plus a
 * constant byte offset.
 *
 * Walks the address computation downwards, folding Add/Sub nodes with
 * a Const right operand and Sel nodes (constant array indexes or
 * fixed-layout compound members) into the offset.  Stops at the first
 * node it cannot fold; that node becomes the base.
 *
 * @param ptr      the address expression to decompose
 * @param pOffset  out parameter: receives the accumulated byte offset
 *
 * @return the base address node the offset is relative to
 */
static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
{
	ir_mode *mode = get_irn_mode(ptr);
	long offset = 0;

	/* TODO: long might not be enough, we should probably use some tarval thingy... */
	for (;;) {
		if (is_Add(ptr)) {
			ir_node *l = get_Add_left(ptr);
			ir_node *r = get_Add_right(ptr);

			/* only fold (base + const) computed in the address mode */
			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset += get_tarval_long(get_Const_tarval(r));
			ptr = l;
		} else if (is_Sub(ptr)) {
			ir_node *l = get_Sub_left(ptr);
			ir_node *r = get_Sub_right(ptr);

			if (get_irn_mode(l) != mode || !is_Const(r))
				break;

			offset -= get_tarval_long(get_Const_tarval(r));
			ptr = l;
		} else if (is_Sel(ptr)) {
			ir_entity *ent = get_Sel_entity(ptr);
			ir_type *tp = get_entity_owner(ent);

			if (is_Array_type(tp)) {
				int size;
				ir_node *index;

				/* only one dimensional arrays yet */
				if (get_Sel_n_indexs(ptr) != 1)
					break;
				index = get_Sel_index(ptr, 0);
				if (! is_Const(index))
					break;

				/* the element size is only known for a fixed layout */
				tp = get_entity_type(ent);
				if (get_type_state(tp) != layout_fixed)
					break;

				size = get_type_size_bytes(tp);
				offset += size * get_tarval_long(get_Const_tarval(index));
			} else {
				/* compound member: add its fixed offset inside the owner */
				if (get_type_state(tp) != layout_fixed)
					break;
				offset += get_entity_offset(ent);
			}
			/* continue below the Sel */
			ptr = get_Sel_ptr(ptr);
		} else
			break;
	}

	*pOffset = offset;
	return ptr;
}
+
/**
 * Try to replace the value of @p load by the value written by a
 * previous Store through the same base address (read after write).
 *
 * The Load may read a sub-range of the stored bytes; in that case the
 * stored value is shifted and converted to the Load mode.  This only
 * works for two's complement modes (and the shift direction is
 * currently little-endian only, see FIXME below).
 *
 * @param load           the candidate Load
 * @param load_base_ptr  base address of the Load (see get_base_and_offset())
 * @param load_offset    byte offset of the Load relative to the base
 * @param store          the previous Store
 *
 * @return 0 if nothing was changed, otherwise a mask of
 *         DF_CHANGED/CF_CHANGED bits
 */
static int try_load_after_store(ir_node *load,
		ir_node *load_base_ptr, long load_offset, ir_node *store)
{
	ldst_info_t *info;
	ir_node *store_ptr = get_Store_ptr(store);
	long store_offset;
	ir_node *store_base_ptr = get_base_and_offset(store_ptr, &store_offset);
	ir_node *store_value;
	ir_mode *store_mode;
	ir_node *load_ptr;
	ir_mode *load_mode;
	long load_mode_len;
	long store_mode_len;
	long delta;
	int res;

	/* the Store must write through the same base address */
	if (load_base_ptr != store_base_ptr)
		return 0;

	load_mode = get_Load_mode(load);
	load_mode_len = get_mode_size_bytes(load_mode);
	store_mode = get_irn_mode(get_Store_value(store));
	store_mode_len = get_mode_size_bytes(store_mode);
	delta = load_offset - store_offset;
	store_value = get_Store_value(store);

	if (delta != 0 || store_mode != load_mode) {
		/* the loaded bytes must lie completely inside the stored bytes */
		if (delta < 0 || delta + load_mode_len > store_mode_len)
			return 0;

		/* extracting a part of the value works only for integer modes */
		if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
			get_mode_arithmetic(load_mode) != irma_twos_complement)
			return 0;


		/* produce a shift to adjust offset delta */
		if (delta > 0) {
			ir_node *cnst;

			/* FIXME: only true for little endian */
			cnst = new_Const_long(mode_Iu, delta * 8);
			store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
									store_value, cnst, store_mode);
		}

		/* add a Conv if needed */
		if (store_mode != load_mode) {
			store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
									 store_value, load_mode);
		}
	}

	DBG_OPT_RAW(load, store_value);

	/* reroute the memory users of the Load around it */
	info = get_irn_link(load);
	if (info->projs[pn_Load_M])
		exchange(info->projs[pn_Load_M], get_Load_mem(load));

	res = 0;
	/* no exception */
	if (info->projs[pn_Load_X_except]) {
		exchange( info->projs[pn_Load_X_except], new_Bad());
		res |= CF_CHANGED;
	}
	if (info->projs[pn_Load_X_regular]) {
		exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
		res |= CF_CHANGED;
	}

	/* the result is replaced by the (possibly shifted/converted) stored value */
	if (info->projs[pn_Load_res])
		exchange(info->projs[pn_Load_res], store_value);

	load_ptr = get_Load_ptr(load);
	kill_node(load);
	/* the address lost one user; it may be dead now as well */
	reduce_adr_usage(load_ptr);
	return res | DF_CHANGED;
}
+
/**
 * Follow the memory chain as long as there are only Loads,
 * alias free Stores, and constant Calls and try to replace the
 * current Load by a previous one.
 * Note that in unreachable loops it might happen that we reach
 * load again, as well as we can fall into a cycle.
 * We break such cycles using a special visited flag.
 *
 * INC_MASTER() must be called before diving into this function.
 *
 * @param load  the Load to optimize
 * @param curr  the current position in the memory chain (a memory producer)
 *
 * @return a mask of DF_CHANGED/CF_CHANGED bits, 0 if nothing changed
 */
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
	unsigned res = 0;
	ldst_info_t *info = get_irn_link(load);
	ir_node *pred;
	ir_node *ptr = get_Load_ptr(load);
	ir_node *mem = get_Load_mem(load);
	ir_mode *load_mode = get_Load_mode(load);

	/* walk the memory chain upwards until we come back to the Load itself */
	for (pred = curr; load != pred; ) {
		ldst_info_t *pred_info = get_irn_link(pred);

		/*
		 * a Load immediately after a Store -- a read after write.
		 * We may remove the Load, if both Load & Store does not have an
		 * exception handler OR they are in the same MacroBlock. In the latter
		 * case the Load cannot throw an exception when the previous Store was
		 * quiet.
		 *
		 * Why we need to check for Store Exception? If the Store cannot
		 * be executed (ROM) the exception handler might simply jump into
		 * the load MacroBlock :-(
		 * We could make it a little bit better if we would know that the
		 * exception handler of the Store jumps directly to the end...
		 */
		if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
				&& info->projs[pn_Load_X_except] == NULL)
				|| get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
		{
			long load_offset;
			ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
			int changes = try_load_after_store(load, base_ptr, load_offset, pred);

			if (changes != 0)
				return res | changes;
		} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
		           can_use_stored_value(get_Load_mode(pred), load_mode)) {
			/*
			 * a Load after a Load -- a read after read.
			 * We may remove the second Load, if it does not have an exception handler
			 * OR they are in the same MacroBlock. In the latter case the Load cannot
			 * throw an exception when the previous Load was quiet.
			 *
			 * Here, there is no need to check if the previous Load has an exception
			 * hander because they would have exact the same exception...
			 */
			if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
				ir_node *value;

				DBG_OPT_RAR(load, pred);

				/* the result is used */
				if (info->projs[pn_Load_res]) {
					if (pred_info->projs[pn_Load_res] == NULL) {
						/* create a new Proj again */
						pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
					}
					value = pred_info->projs[pn_Load_res];

					/* add a Conv if needed */
					if (get_Load_mode(pred) != load_mode) {
						value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
					}

					exchange(info->projs[pn_Load_res], value);
				}

				/* reroute memory users around the removed Load */
				if (info->projs[pn_Load_M])
					exchange(info->projs[pn_Load_M], mem);

				/* no exception */
				if (info->projs[pn_Load_X_except]) {
					exchange(info->projs[pn_Load_X_except], new_Bad());
					res |= CF_CHANGED;
				}
				if (info->projs[pn_Load_X_regular]) {
					exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
					res |= CF_CHANGED;
				}

				kill_node(load);
				/* the address lost one user; it may be dead now as well */
				reduce_adr_usage(ptr);
				return res |= DF_CHANGED;
			}
		}

		/* advance upwards in the memory chain */
		if (is_Store(pred)) {
			/* check if we can pass through this store */
			ir_alias_relation rel = get_alias_relation(
				current_ir_graph,
				get_Store_ptr(pred),
				get_irn_mode(get_Store_value(pred)),
				ptr, load_mode);
			/* if there might be an alias, we cannot pass this Store */
			if (rel != ir_no_alias)
				break;
			pred = skip_Proj(get_Store_mem(pred));
		} else if (is_Load(pred)) {
			/* Loads never write memory, always safe to pass */
			pred = skip_Proj(get_Load_mem(pred));
		} else if (is_Call(pred)) {
			if (is_Call_pure(pred)) {
				/* The called graph is at least pure, so there are no Store's
				   in it. We can handle it like a Load and skip it. */
				pred = skip_Proj(get_Call_mem(pred));
			} else {
				/* there might be Store's in the graph, stop here */
				break;
			}
		} else {
			/* follow only Load chains */
			break;
		}

		/* check for cycles */
		if (NODE_VISITED(pred_info))
			break;
		MARK_NODE(pred_info);
	}

	if (is_Sync(pred)) {
		int i;

		/* handle all Sync predecessors */
		for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
			res |= follow_Mem_chain(load, skip_Proj(get_Sync_pred(pred, i)));
			if (res)
				return res;
		}
	}

	return res;
} /* follow_Mem_chain */
+
+/*
+ * Check if we can replace the load by a given const from
+ * the const code irg.
+ */
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
+ ir_mode *c_mode = get_irn_mode(c);
+ ir_mode *l_mode = get_Load_mode(load);
+ ir_node *res = NULL;
+
+ if (c_mode != l_mode) {
+ /* check, if the mode matches OR can be easily converted info */
+ if (is_reinterpret_cast(c_mode, l_mode)) {
+ /* we can safely cast */
+ dbg_info *dbg = get_irn_dbg_info(load);
+ ir_node *block = get_nodes_block(load);
+
+ /* copy the value from the const code irg and cast it */
+ res = copy_const_value(dbg, c);
+ res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
+ }
+ } else {
+ /* copy the value from the const code irg */
+ res = copy_const_value(get_irn_dbg_info(load), c);
+ }
+ return res;
+} /* can_replace_load_by_const */
+