+/**
+ * Check whether a Load can be replaced by a constant value c.
+ *
+ * @return a copy of c (converted to the Load's mode if necessary),
+ *         or NULL if the modes are incompatible
+ */
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
+ ir_mode *c_mode = get_irn_mode(c);
+ ir_mode *l_mode = get_Load_mode(load);
+ ir_node *res = NULL;
+
+ if (c_mode != l_mode) {
+ /* check if the mode matches or can easily be converted into it */
+ if (is_reinterpret_cast(c_mode, l_mode)) {
+ /* we can safely cast */
+ dbg_info *dbg = get_irn_dbg_info(load);
+ ir_node *block = get_nodes_block(load);
+
+ /* copy the value from the const code irg and cast it */
+ res = copy_const_value(dbg, c);
+ res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
+ }
+ } else {
+ /* copy the value from the const code irg */
+ res = copy_const_value(get_irn_dbg_info(load), c);
+ }
+ return res;
+} /* can_replace_load_by_const */
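+
+/*
+ * Illustration (not a definitive statement of is_reinterpret_cast's rules):
+ * if a Load of mode Iu reads an address known to hold a Const of mode Is,
+ * both modes have the same size, so the Load's value can be computed as
+ * Conv(Const, Iu); if the modes match exactly, a plain copy of the Const
+ * suffices.
+ */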
+
+/**
+ * optimize a Load
+ *
+ * @param load the Load node
+ */
+static unsigned optimize_load(ir_node *load)
+{
+ ldst_info_t *info = get_irn_link(load);
+ ir_node *mem, *ptr, *value;
+ ir_entity *ent;
+ long dummy;
+ unsigned res = 0;
+
+ /* do NOT touch volatile loads for now */
+ if (get_Load_volatility(load) == volatility_is_volatile)
+ return 0;
+
+ /* the address of the load to be optimized */
+ ptr = get_Load_ptr(load);
+
+ /*
+ * Check if we can remove the exception from a Load:
+ * This can be done if the address comes from a Sel(Alloc) and
+ * the Sel type is a subtype of the allocated type.
+ *
+ * This optimizes some often used OO constructs,
+ * like x = new O; x->t;
+ */
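+ /* i.e. (illustration): for ptr = Sel(...(Proj(Alloc))), the
+ * Proj_X_except of the Load can be exchanged with Bad and the
+ * Proj_X_regular with a plain Jmp, as done below. */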
+ if (info->projs[pn_Load_X_except]) {
+ ir_node *addr = ptr;
+
+ /* find base address */
+ while (is_Sel(addr))
+ addr = get_Sel_ptr(addr);
+ if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
+ /* simple case: a direct load after an Alloc. A Firm Alloc throws
+ * an exception in case of out-of-memory, so there is no way for this
+ * load to raise an exception.
+ * This code is constructed by the "exception lowering" in the Jack compiler.
+ */
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+ }
+
+ /* The mem of the Load. Must still be returned after optimization. */
+ mem = get_Load_mem(load);
+
+ if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
+ /* a Load whose value is neither used nor exception-checked: remove it */
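+ /* e.g. a Load created for "(void)*p;" whose result Proj was removed
+ * by earlier optimizations: rerouting its memory Proj to the Load's
+ * own memory input makes the Load itself dead (illustration) */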
+ exchange(info->projs[pn_Load_M], mem);
+
+ if (info->projs[pn_Load_X_regular]) {
+ /* should not happen, but if it does, remove it */
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ res |= CF_CHANGED;
+ }
+ kill_node(load);
+ reduce_adr_usage(ptr);
+ return res | DF_CHANGED;
+ }
+
+ /* Load from a constant polymorphic field, where we can resolve
+ polymorphism. */
+ value = transform_polymorph_Load(load);
+ if (value == load) {
+ value = NULL;
+ /* check if we can determine the entity that will be loaded */
+ ent = find_constant_entity(ptr);
+ if (ent != NULL &&
+ allocation_static == get_entity_allocation(ent) &&
+ visibility_external_allocated != get_entity_visibility(ent)) {
+ /* a static allocation that is not external: there should be NO exception
+ * when loading even if we cannot replace the load itself. */
+
+ /* no exception, clear the info field as it might be checked later again */
+ if (info->projs[pn_Load_X_except]) {
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+
+ if (variability_constant == get_entity_variability(ent)) {
+ if (is_atomic_entity(ent)) {
+ /* Might not be atomic after lowering of Sels. In this case we
+ * could still load, but it's more complicated. */
+ /* simple case: we load the content of a constant value:
+ * replace it by the constant itself */
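+ /* e.g. for "static const int x = 42;" a Load from the address of x
+ * yields the Const 42 (illustration) */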
+ value = get_atomic_ent_value(ent);
+ } else if (ent->has_initializer) {
+ /* new style initializer */
+ value = find_compound_ent_value(ptr);
+ } else {
+ /* old style initializer */
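+ /* e.g. for "static const int a[2] = {1, 2};" the accessed path of
+ * a Load from &a[1] resolves to the Const 2 (illustration) */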
+ compound_graph_path *path = get_accessed_path(ptr);
+
+ if (path != NULL) {
+ assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+
+ value = get_compound_ent_value_by_path(ent, path);
+ DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
+ free_compound_graph_path(path);
+ }
+ }
+ if (value != NULL)
+ value = can_replace_load_by_const(load, value);
+ }
+ }
+ }
+ if (value != NULL) {
+ /* we completely replace the load by this value */
+ if (info->projs[pn_Load_X_except]) {
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_M]) {
+ exchange(info->projs[pn_Load_M], mem);
+ res |= DF_CHANGED;
+ }
+ if (info->projs[pn_Load_res]) {
+ exchange(info->projs[pn_Load_res], value);
+ res |= DF_CHANGED;
+ }
+ kill_node(load);
+ reduce_adr_usage(ptr);
+ return res;
+ }
+
+ /* Check if the address of this load is used more than once.
+ * If not, this load cannot be removed in any case. */
+ if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
+ return res;
+
+ /*
+ * follow the memory chain as long as there are only Loads
+ * and try to replace current Load or Store by a previous one.
+ * Note that in unreachable loops it might happen that we reach
+ * this load again, or that we fall into a cycle.
+ * We break such cycles using a special visited flag.
+ */
+ INC_MASTER();
+ res = follow_Mem_chain(load, skip_Proj(mem));
+ return res;
+} /* optimize_load */
+
+/**
+ * Check whether a value of mode new_mode would completely overwrite a value
+ * of mode old_mode in memory.
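+ *
+ * Example: a 32-bit store on top of an earlier 16-bit store to the same
+ * address overwrites the old value completely.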
+ */
+static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
+{
+ return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
+} /* is_completely_overwritten */
+
+/**
+ * Check whether small is a part of large (starting at same address).
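+ *
+ * Example: if small is Conv(large) converting a 64-bit twos-complement
+ * value down to 32 bit, the bytes of small are contained in those of
+ * large (endianness aside, see the FIXME below).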
+ */
+static int is_partially_same(ir_node *small, ir_node *large)
+{
+ ir_mode *sm = get_irn_mode(small);
+ ir_mode *lm = get_irn_mode(large);
+
+ /* FIXME: Check endianness */
+ return is_Conv(small) && get_Conv_op(small) == large
+ && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
+ && get_mode_arithmetic(sm) == irma_twos_complement
+ && get_mode_arithmetic(lm) == irma_twos_complement;
+} /* is_partially_same */
+
+/**
+ * Follow the memory chain as long as there are only Loads and alias-free Stores.
+ *
+ * INC_MASTER() must be called before diving into the chain.
+ */
+static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
+ unsigned res = 0;
+ ldst_info_t *info = get_irn_link(store);
+ ir_node *pred;
+ ir_node *ptr = get_Store_ptr(store);
+ ir_node *mem = get_Store_mem(store);
+ ir_node *value = get_Store_value(store);
+ ir_mode *mode = get_irn_mode(value);
+ ir_node *block = get_nodes_block(store);
+ ir_node *mblk = get_Block_MacroBlock(block);
+
+ for (pred = curr; pred != store;) {
+ ldst_info_t *pred_info = get_irn_link(pred);
+
+ /*
+ * BEWARE: one might think that checking the modes is useless, because
+ * if the pointers are identical, they refer to the same object.
+ * This is only true in strongly typed languages, not in C, where the
+ * following is possible: *(ir_type1 *)p = a; *(ir_type2 *)p = b; ...
+ * However, if the size of the mode that is written is bigger than or
+ * equal to the size of the old one, the old value is completely
+ * overwritten and can be killed ...
+ */
+ if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
+ get_nodes_MacroBlock(pred) == mblk) {
+ /*
+ * a Store after a Store in the same MacroBlock -- a write after write.
+ */
+
+ /*
+ * We may remove the first Store, if the old value is completely
+ * overwritten or the old value is a part of the new value,
+ * and if it does not have an exception handler.
+ *
+ * TODO: What if both have the same exception handler?
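+ *
+ * Example: in "*p = x; *p = y;" with y at least as wide as x, the
+ * first Store is dead and can be removed.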
+ */
+ if (get_Store_volatility(pred) != volatility_is_volatile
+ && !pred_info->projs[pn_Store_X_except]) {
+ ir_node *predvalue = get_Store_value(pred);
+ ir_mode *predmode = get_irn_mode(predvalue);
+
+ if (is_completely_overwritten(predmode, mode)
+ || is_partially_same(predvalue, value)) {
+ DBG_OPT_WAW(pred, store);
+ exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
+ kill_node(pred);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
+ }
+
+ /*
+ * We may remove the Store, if the old value already contains
+ * the new value, and if it does not have an exception handler.
+ *
+ * TODO: What if both have the same exception handler?
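+ *
+ * Example: after "*(long*)p = v;", a later "*(short*)p = (short)v;"
+ * only rewrites bytes that are already in memory (illustration;
+ * subject to the endianness check noted in is_partially_same).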
+ */
+ if (get_Store_volatility(store) != volatility_is_volatile
+ && !info->projs[pn_Store_X_except]) {
+ ir_node *predvalue = get_Store_value(pred);
+
+ if (is_partially_same(value, predvalue)) {
+ DBG_OPT_WAW(pred, store);
+ exchange(info->projs[pn_Store_M], mem);
+ kill_node(store);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
+ }
+ } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
+ value == pred_info->projs[pn_Load_res]) {
+ /*
+ * a Store of a value just loaded from the same address
+ * -- a write after read.
+ * We may remove the Store, if it does not have an exception
+ * handler.
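+ *
+ * Example: "x = *p; *p = x;" -- the Store writes back the value just
+ * loaded and can be removed.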
+ */
+ if (! info->projs[pn_Store_X_except]) {
+ DBG_OPT_WAR(store, pred);
+ exchange(info->projs[pn_Store_M], mem);
+ kill_node(store);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
+ }
+
+ if (is_Store(pred)) {
+ /* check if we can pass through this store */
+ ir_alias_relation rel = get_alias_relation(
+ current_ir_graph,
+ get_Store_ptr(pred),
+ get_irn_mode(get_Store_value(pred)),
+ ptr, mode);
+ /* if there might be an alias, we cannot pass this Store */
+ if (rel != ir_no_alias)
+ break;
+ pred = skip_Proj(get_Store_mem(pred));
+ } else if (is_Load(pred)) {
+ ir_alias_relation rel = get_alias_relation(
+ current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
+ ptr, mode);
+ if (rel != ir_no_alias)
+ break;
+
+ pred = skip_Proj(get_Load_mem(pred));
+ } else {
+ /* follow only Load and Store chains */
+ break;
+ }
+
+ /* check for cycles */
+ if (NODE_VISITED(pred_info))
+ break;
+ MARK_NODE(pred_info);
+ }
+
+ if (is_Sync(pred)) {
+ int i;
+
+ /* handle all Sync predecessors */
+ for (i = get_Sync_n_preds(pred) - 1; i >= 0; --i) {
+ res |= follow_Mem_chain_for_Store(store, skip_Proj(get_Sync_pred(pred, i)));
+ if (res)
+ break;
+ }
+ }
+ return res;
+} /* follow_Mem_chain_for_Store */
+
+/** find entity used as base for an address calculation */
+static ir_entity *find_entity(ir_node *ptr)