* @author Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <string.h>
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
-#include "array.h"
+#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
#include "irtools.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
-#include "xmalloc.h"
#include "irphase_t.h"
#include "irgopt.h"
#include "debug.h"
{
for (;;) {
if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
- ir_entity *ent = get_SymConst_entity(ptr);
- if (variability_constant == get_entity_variability(ent))
- return ent;
- return NULL;
+ return get_SymConst_entity(ptr);
} else if (is_Sel(ptr)) {
ir_entity *ent = get_Sel_entity(ptr);
ir_type *tp = get_entity_owner(ent);
if (is_SymConst(ptr)) {
/* a SymConst. If the depth is 0, this is an access to a global
* entity and we don't need a component path, else we know
- * at least it's length.
+ * at least its length.
*/
assert(get_SymConst_kind(ptr) == symconst_addr_ent);
root = get_SymConst_entity(ptr);
set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
}
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
- ir_mode *mode;
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ ir_mode *mode = get_irn_mode(ptr);
+ tarval *tmp;
- if (is_Const(r)) {
+ if (is_Const(r) && get_irn_mode(l) == mode) {
ptr = l;
tv = get_Const_tarval(r);
} else {
}
ptr_arith:
mode = get_tarval_mode(tv);
+ tmp = tv;
/* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
if (is_Sel(ptr)) {
size = get_type_size_bytes(get_entity_type(ent));
sz = new_tarval_from_long(size, mode);
- tv_index = tarval_div(tv, sz);
- tv = tarval_mod(tv, sz);
+ tv_index = tarval_div(tmp, sz);
+ tmp = tarval_mod(tmp, sz);
- if (tv_index == tarval_bad || tv == tarval_bad)
+ if (tv_index == tarval_bad || tmp == tarval_bad)
return NULL;
assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
/* ok, bounds check finished */
++idx;
}
- if (! tarval_is_null(tv)) {
+ if (! tarval_is_null(tmp)) {
/* access to some struct/union member */
return NULL;
}
* valid, if the graph is in phase_high and _no_ address computation is used.
*/
static compound_graph_path *get_accessed_path(ir_node *ptr) {
- return rec_get_accessed_path(ptr, 0);
+ compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
+ return gr;
} /* get_accessed_path */
typedef struct path_entry {
static void reduce_adr_usage(ir_node *ptr);
/**
- * Update a Load that may lost it's usage.
+ * Update a Load that may have lost its users.
*/
static void handle_load_update(ir_node *load) {
ldst_info_t *info = get_irn_link(load);
ir_node *ptr = get_Load_ptr(load);
ir_node *mem = get_Load_mem(load);
- /* a Load which value is neither used nor exception checked, remove it */
+ /* a Load whose value is neither used nor exception checked, remove it */
exchange(info->projs[pn_Load_M], mem);
if (info->projs[pn_Load_X_regular])
exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
} /* handle_load_update */
/**
- * A Use of an address node is vanished. Check if this was a Proj
+ * A use of an address node has vanished. Check if this was a Proj
* node and update the counters.
*/
static void reduce_adr_usage(ir_node *ptr) {
ldst_info_t *info = get_irn_link(pred);
info->projs[get_Proj_proj(ptr)] = NULL;
- /* this node lost it's result proj, handle that */
+ /* this node lost its result proj, handle that */
handle_load_update(pred);
}
}
store_mode = get_irn_mode(get_Store_value(store));
store_mode_len = get_mode_size_bytes(store_mode);
delta = load_offset - store_offset;
- if (delta < 0 || delta + load_mode_len > store_mode_len)
- return 0;
+ store_value = get_Store_value(store);
- if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
- get_mode_arithmetic(load_mode) != irma_twos_complement)
- return 0;
+ if (delta != 0 || store_mode != load_mode) {
+ if (delta < 0 || delta + load_mode_len > store_mode_len)
+ return 0;
- store_value = get_Store_value(store);
+ if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
+ get_mode_arithmetic(load_mode) != irma_twos_complement)
+ return 0;
- /* produce a shift to adjust offset delta */
- if (delta > 0) {
- ir_node *cnst;
- /* FIXME: only true for little endian */
- cnst = new_Const_long(mode_Iu, delta * 8);
- store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
- store_value, cnst, store_mode);
- }
+ /* produce a shift to adjust offset delta */
+ if (delta > 0) {
+ ir_node *cnst;
- /* add an convert if needed */
- if (store_mode != load_mode) {
- store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
- store_value, load_mode);
+ /* FIXME: only true for little endian */
+ cnst = new_Const_long(mode_Iu, delta * 8);
+ store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
+ store_value, cnst, store_mode);
+ }
+
+		/* add a convert if needed */
+ if (store_mode != load_mode) {
+ store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
+ store_value, load_mode);
+ }
}
DBG_OPT_RAW(load, store_value);
}
/* The mem of the Load. Must still be returned after optimization. */
- mem = get_Load_mem(load);
+ mem = get_Load_mem(load);
if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
/* a Load which value is neither used nor exception checked, remove it */
value = NULL;
/* check if we can determine the entity that will be loaded */
ent = find_constant_entity(ptr);
- if (ent != NULL) {
- if ((allocation_static == get_entity_allocation(ent)) &&
- (visibility_external_allocated != get_entity_visibility(ent))) {
- /* a static allocation that is not external: there should be NO exception
- * when loading even if we cannot replace the load itself. */
-
- /* no exception, clear the info field as it might be checked later again */
- if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
- info->projs[pn_Load_X_except] = NULL;
- res |= CF_CHANGED;
- }
- if (info->projs[pn_Load_X_regular]) {
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
- info->projs[pn_Load_X_regular] = NULL;
- res |= CF_CHANGED;
- }
+ if (ent != NULL &&
+ allocation_static == get_entity_allocation(ent) &&
+ visibility_external_allocated != get_entity_visibility(ent)) {
+ /* a static allocation that is not external: there should be NO exception
+ * when loading even if we cannot replace the load itself. */
+
+ /* no exception, clear the info field as it might be checked later again */
+ if (info->projs[pn_Load_X_except]) {
+ exchange(info->projs[pn_Load_X_except], new_Bad());
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
- if (variability_constant == get_entity_variability(ent)) {
- if (is_atomic_entity(ent)) {
- /* Might not be atomic after
- lowering of Sels. In this
- case we could also load, but
- it's more complicated. */
- /* more simpler case: we load the content of a constant value:
- * replace it by the constant itself
- */
- value = get_atomic_ent_value(ent);
- } else {
- if (ent->has_initializer) {
- /* new style initializer */
- value = find_compound_ent_value(ptr);
- } else {
- /* old style initializer */
- compound_graph_path *path = get_accessed_path(ptr);
-
- if (path != NULL) {
- assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
-
- value = get_compound_ent_value_by_path(ent, path);
- free_compound_graph_path(path);
- }
- }
+ if (variability_constant == get_entity_variability(ent)) {
+ if (is_atomic_entity(ent)) {
+ /* Might not be atomic after lowering of Sels. In this case we
+ * could also load, but it's more complicated. */
+				/* simpler case: we load the content of a constant value:
+ * replace it by the constant itself */
+ value = get_atomic_ent_value(ent);
+ } else if (ent->has_initializer) {
+ /* new style initializer */
+ value = find_compound_ent_value(ptr);
+ } else {
+ /* old style initializer */
+ compound_graph_path *path = get_accessed_path(ptr);
+
+ if (path != NULL) {
+ assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+
+ value = get_compound_ent_value_by_path(ent, path);
+ DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
+ free_compound_graph_path(path);
}
- if (value != NULL)
- value = can_replace_load_by_const(load, value);
}
+ if (value != NULL)
+ value = can_replace_load_by_const(load, value);
}
}
}
return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
} /* is_completely_overwritten */
+/**
+ * Check whether the value of small is contained in large (both starting at
+ * the same address): small must be a Conv of large to a strictly smaller
+ * two's-complement mode.
+ */
+static int is_partially_same(ir_node *small, ir_node *large)
+{
+ ir_mode *sm = get_irn_mode(small);
+ ir_mode *lm = get_irn_mode(large);
+
+ /* FIXME: Check endianness */
+ return is_Conv(small) && get_Conv_op(small) == large
+ && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
+ && get_mode_arithmetic(sm) == irma_twos_complement
+ && get_mode_arithmetic(lm) == irma_twos_complement;
+} /* is_partially_same */
+
/**
* follow the memory chain as long as there are only Loads and alias free Stores.
*
* if the pointers are identical, they refer to the same object.
* This is only true in strong typed languages, not is C were the following
* is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
- * However, if the mode that is written have a bigger or equal size the the old
- * one, the old value is completely overwritten and can be killed ...
+ * However, if the size of the mode that is written is greater than or
+ * equal to the size of the old one, the old value is completely
+ * overwritten and can be killed ...
*/
if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
- get_nodes_MacroBlock(pred) == mblk &&
- is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
+ get_nodes_MacroBlock(pred) == mblk) {
/*
* a Store after a Store in the same MacroBlock -- a write after write.
- * We may remove the first Store, if it does not have an exception handler.
+ */
+
+ /*
+ * We may remove the first Store, if the old value is completely
+ * overwritten or the old value is a part of the new value,
+ * and if it does not have an exception handler.
*
* TODO: What, if both have the same exception handler ???
*/
- if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
- DBG_OPT_WAW(pred, store);
- exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
- kill_node(pred);
- reduce_adr_usage(ptr);
- return DF_CHANGED;
+ if (get_Store_volatility(pred) != volatility_is_volatile
+ && !pred_info->projs[pn_Store_X_except]) {
+ ir_node *predvalue = get_Store_value(pred);
+ ir_mode *predmode = get_irn_mode(predvalue);
+
+ if(is_completely_overwritten(predmode, mode)
+ || is_partially_same(predvalue, value)) {
+ DBG_OPT_WAW(pred, store);
+ exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
+ kill_node(pred);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
+ }
+
+ /*
+ * We may remove the Store, if the old value already contains
+ * the new value, and if it does not have an exception handler.
+ *
+ * TODO: What, if both have the same exception handler ???
+ */
+ if (get_Store_volatility(store) != volatility_is_volatile
+ && !info->projs[pn_Store_X_except]) {
+ ir_node *predvalue = get_Store_value(pred);
+
+ if(is_partially_same(value, predvalue)) {
+ DBG_OPT_WAW(pred, store);
+ exchange(info->projs[pn_Store_M], mem);
+ kill_node(store);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
}
} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
value == pred_info->projs[pn_Load_res]) {
ir_node *pred = get_irn_n(irn, i);
node_entry *o = get_irn_ne(pred, env);
- if (irn_not_visited(pred)) {
+ if (!irn_visited(pred)) {
dfs(pred, env);
node->low = MIN(node->low, o->low);
}
ir_node *pred = get_fragile_op_mem(irn);
node_entry *o = get_irn_ne(pred, env);
- if (irn_not_visited(pred)) {
+ if (!irn_visited(pred)) {
dfs(pred, env);
node->low = MIN(node->low, o->low);
}
ir_node *pred = get_Proj_pred(irn);
node_entry *o = get_irn_ne(pred, env);
- if (irn_not_visited(pred)) {
+ if (!irn_visited(pred)) {
dfs(pred, env);
node->low = MIN(node->low, o->low);
}
for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
ir_node *ka = get_End_keepalive(end, i);
- if (is_Phi(ka) && irn_not_visited(ka))
+ if (is_Phi(ka) && !irn_visited(ka))
dfs(ka, env);
}
current_ir_graph = rem;
/*
* do the load store optimization
*/
-void optimize_load_store(ir_graph *irg) {
+int optimize_load_store(ir_graph *irg) {
walk_env_t env;
FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
have Bad() predecessors. */
set_irg_doms_inconsistent(irg);
}
+ return (int) env.changes;
} /* optimize_load_store */