+ ldst_info_t *info = get_irn_link(load);
+ ir_node *mem, *ptr, *new_node;
+ ir_entity *ent;
+ unsigned res = 0;
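+ /* res collects the CF_CHANGED/DF_CHANGED flags that are returned to the caller */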
+
+ /* do NOT touch volatile loads for now */
+ if (get_Load_volatility(load) == volatility_is_volatile)
+ return 0;
+
+ /* the address of the load to be optimized */
+ ptr = get_Load_ptr(load);
+
+ /*
+ * Check if we can remove the exception edge from a Load:
+ * This can be done if the address comes from a Sel(Alloc) and
+ * the Sel type is a subtype of the allocated type.
+ *
+ * This optimizes some frequently used OO constructs,
+ * like x = new O; x->t;
+ */
+ if (info->projs[pn_Load_X_except]) {
+ if (is_Sel(ptr)) {
+ /* skip the memory Proj to get at the Alloc node itself */
+ ir_node *alloc = skip_Proj(get_Sel_mem(ptr));
+
+ /* FIXME: works with the current FE, but better use the base */
+ if (is_Alloc(alloc)) {
+ /* ok, check the types */
+ ir_entity *ent = get_Sel_entity(ptr);
+ ir_type *s_type = get_entity_type(ent);
+ ir_type *a_type = get_Alloc_type(alloc);
+
+ if (is_SubClass_of(s_type, a_type)) {
+ /* ok, condition met: there can't be an exception because
+ * Alloc guarantees that enough memory was allocated */
+
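+ /* exchange() reroutes all users of the old node to the new one: the exception
+ * Proj becomes Bad, the regular control-flow Proj a plain Jmp, hence CF_CHANGED */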
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+ }
+ } else if (is_Alloc(skip_Proj(skip_Cast(ptr)))) {
+ /* simple case: a direct Load after an Alloc. Firm's Alloc throws
+ * an exception in the out-of-memory case itself, so this Load can
+ * never raise one.
+ * Such code is constructed by the "exception lowering" in the Jack compiler.
+ */
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+ }
+
+ /* The memory predecessor of the Load: users of its memory Proj must still be routed to it after optimization. */
+ mem = get_Load_mem(load);
+
+ if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
+ /* a Load whose value is neither used nor exception-checked: remove it */
+ if (info->projs[pn_Load_M])
+ exchange(info->projs[pn_Load_M], mem);
+
+ if (info->projs[pn_Load_X_regular]) {
+ /* should not happen, but if it does, replace it by a plain Jmp */
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ res |= CF_CHANGED;
+ }
+ exchange(load, new_Bad());
+ reduce_adr_usage(ptr);
+ return res | DF_CHANGED;
+ }
+
+ /* Load from a constant polymorphic field, where we can resolve
+ polymorphism. */
+ new_node = transform_node_Load(load);
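+ /* transform_node_Load() either returns the Load unchanged or a replacement
+ * value; in the latter case reroute all Projs of the old Load and remove it */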
+ if (new_node != load) {
+ if (info->projs[pn_Load_M]) {
+ exchange(info->projs[pn_Load_M], mem);
+ info->projs[pn_Load_M] = NULL;
+ }
+ if (info->projs[pn_Load_X_except]) {
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_res])
+ exchange(info->projs[pn_Load_res], new_node);
+
+ exchange(load, new_Bad());
+ reduce_adr_usage(ptr);
+ return res | DF_CHANGED;
+ }
+
+ /* check if we can determine the entity that will be loaded */
+ ent = find_constant_entity(ptr);
+ if (ent) {
+ if ((allocation_static == get_entity_allocation(ent)) &&
+ (visibility_external_allocated != get_entity_visibility(ent))) {
+ /* a static allocation that is not external: there should be NO exception
+ * when loading. */
+
+ /* no exception, clear the info field as it might be checked later again */
+ if (info->projs[pn_Load_X_except]) {
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
+
+ if (variability_constant == get_entity_variability(ent)) {
+ if (is_atomic_entity(ent)) {
+ /* Simple case: the Load reads an atomic constant entity, so we can
+ * replace it by the constant value itself.
+ * (After lowering of Sels the entity might not be atomic anymore;
+ * we could still load then, but it is more complicated.) */
+
+ /* route memory users around the Load */
+ if (info->projs[pn_Load_M]) {
+ exchange(info->projs[pn_Load_M], mem);
+ res |= DF_CHANGED;
+ }
+ /* replace the result by a copy of the entity's constant value */
+ if (info->projs[pn_Load_res]) {
+ ir_node *c = copy_const_value(get_irn_dbg_info(load), get_atomic_ent_value(ent));
+
+ DBG_OPT_RC(load, c);
+ exchange(info->projs[pn_Load_res], c);
+ res |= DF_CHANGED;
+ }
+ exchange(load, new_Bad());
+ reduce_adr_usage(ptr);
+ return res;
+ } else {
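+ /* compound entity: try to reconstruct the access path (the chain of Sel
+ * entities and array indices) so the value can be looked up in the
+ * entity's initializer */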
+ compound_graph_path *path = get_accessed_path(ptr);
+
+ if (path) {
+ ir_node *c;
+
+ assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+ /*
+ {
+ int j;
+ for (j = 0; j < get_compound_graph_path_length(path); ++j) {
+ ir_entity *node = get_compound_graph_path_node(path, j);
+ fprintf(stdout, ".%s", get_entity_name(node));
+ if (is_Array_type(get_entity_owner(node)))
+ fprintf(stdout, "[%d]", get_compound_graph_path_array_index(path, j));
+ }
+ printf("\n");
+ }
+ */
+
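+ /* fetch the initializer value addressed by the reconstructed path */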
+ c = get_compound_ent_value_by_path(ent, path);
+ free_compound_graph_path(path);
+
+ /* printf(" cons: "); DDMN(c); */
+
+ if (info->projs[pn_Load_M]) {
+ exchange(info->projs[pn_Load_M], mem);
+ res |= DF_CHANGED;
+ }
+ if (info->projs[pn_Load_res]) {
+ exchange(info->projs[pn_Load_res], copy_const_value(get_irn_dbg_info(load), c));
+ res |= DF_CHANGED;
+ }
+ exchange(load, new_Bad());
+ reduce_adr_usage(ptr);
+ return res;
+ } else {
+ /* We cannot determine a correct access path. E.g., in jack, we load
+ a byte from an object to generate an exception. Happens in the test program
+ Reflectiontest.
+ printf(">>>>>>>>>>>>> Found access to constant entity %s in function %s\n", get_entity_name(ent),
+ get_entity_name(get_irg_entity(current_ir_graph)));
+ printf(" load: "); DDMN(load);
+ printf(" ptr: "); DDMN(ptr);
+ */
+ }
+ }
+ }
+ }
+ }
+
+ /* Check whether the address of this Load is used more than once.
+ * If not, this Load cannot be removed by the memory-chain walk below anyway. */
+ if (get_irn_n_uses(ptr) <= 1)
+ return res;
+
+ /*
+ * Follow the memory chain as long as there are only Loads
+ * and try to replace the current Load by a previous one.
+ * Note that in unreachable loops we might reach this Load again
+ * or fall into a cycle.
+ * We break such cycles using a special visited flag.
+ */
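+ /* INC_MASTER() starts a new generation of that visited flag before the walk */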
+ INC_MASTER();
+ res = follow_Mem_chain(load, skip_Proj(mem));
+ return res;
+} /* optimize_load */