/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
* @file
* @brief Load/Store optimizations.
* @author Michael Beck
- * @version $Id$
*/
#include "config.h"
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
+#include "irtools.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irhooks.h"
#include "iredges.h"
#include "irpass.h"
-#include "opt_polymorphy.h"
#include "irmemory.h"
-#include "irphase_t.h"
+#include "irnodehashmap.h"
#include "irgopt.h"
#include "set.h"
+#include "be.h"
#include "debug.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
-#ifdef DO_CACHEOPT
-#include "cacheopt/cachesim.h"
-#endif
-
#undef IMAX
-#define IMAX(a,b) ((a) > (b) ? (a) : (b))
+#define IMAX(a,b) ((a) > (b) ? (a) : (b))
-#define MAX_PROJ IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
+#define MAX_PROJ IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
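+/* pn_Load_max, pn_Store_max and pn_Call_max are the largest valid Proj
+ * numbers of their nodes (inclusive), so an array indexed by Proj number
+ * needs MAX_PROJ+1 slots: with a hypothetical pn_Call_max of 4 the valid
+ * indices are 0..4, i.e. 5 entries (see ldst_info_t::projs below). The
+ * casts presumably just give the three distinct enum constants a common
+ * type inside the comparison. */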
enum changes_t {
DF_CHANGED = 1, /**< data flow changed */
/** A Load/Store info. */
typedef struct ldst_info_t {
- ir_node *projs[MAX_PROJ]; /**< list of Proj's of this node */
+ ir_node *projs[MAX_PROJ+1]; /**< list of Proj's of this node */
ir_node *exc_block; /**< the exception block if available */
int exc_idx; /**< predecessor index in the exception block */
unsigned visited; /**< visited counter for breaking loops */
*/
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
- ldst_info_t *info = get_irn_link(node);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
if (! info) {
info = OALLOCZ(obst, ldst_info_t);
*/
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
- block_info_t *info = get_irn_link(node);
+ block_info_t *info = (block_info_t*)get_irn_link(node);
if (! info) {
info = OALLOCZ(obst, block_info_t);
return 0;
} /* update_exc */
-/** Return the number of uses of an address node */
-#define get_irn_n_uses(adr) get_irn_n_edges(adr)
-
/**
* walker, collects all Load/Store/Proj nodes
*
*/
static void collect_nodes(ir_node *node, void *env)
{
- ir_opcode opcode = get_irn_opcode(node);
+ walk_env_t *wenv = (walk_env_t *)env;
+ unsigned opcode = get_irn_opcode(node);
ir_node *pred, *blk, *pred_blk;
ldst_info_t *ldst_info;
- walk_env_t *wenv = env;
if (opcode == iro_Proj) {
pred = get_Proj_pred(node);
if (is_Proj(proj)) {
pred = get_Proj_pred(proj);
- is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
+ is_exc = is_x_except_Proj(proj);
}
/* ignore Bad predecessors, they will be removed later */
int i, n;
for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
- ir_node *bound;
- tarval *tlower, *tupper;
- ir_node *index = get_Sel_index(ptr, i);
- tarval *tv = computed_value(index);
+ ir_node *bound;
+ ir_tarval *tlower, *tupper;
+ ir_node *index = get_Sel_index(ptr, i);
+ ir_tarval *tv = computed_value(index);
/* check if the index is constant */
if (tv == tarval_bad)
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
ir_node *l = get_Sub_left(ptr);
ir_node *r = get_Sub_right(ptr);
- if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
+ if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
ptr = l;
else
return NULL;
return get_tarval_long(get_Const_tarval(index));
} /* get_Sel_array_index_long */
-/**
- * Returns the accessed component graph path for an
- * node computing an address.
- *
- * @param ptr the node computing the address
- * @param depth current depth in steps upward from the root
- * of the address
- */
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
-{
- compound_graph_path *res = NULL;
- ir_entity *root, *field, *ent;
- int path_len, pos, idx;
- tarval *tv;
- ir_type *tp;
-
- if (is_SymConst(ptr)) {
- /* a SymConst. If the depth is 0, this is an access to a global
- * entity and we don't need a component path, else we know
- * at least its length.
- */
- assert(get_SymConst_kind(ptr) == symconst_addr_ent);
- root = get_SymConst_entity(ptr);
- res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
- } else if (is_Sel(ptr)) {
- /* it's a Sel, go up until we find the root */
- res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
- if (res == NULL)
- return NULL;
-
- /* fill up the step in the path at the current position */
- field = get_Sel_entity(ptr);
- path_len = get_compound_graph_path_length(res);
- pos = path_len - depth - 1;
- set_compound_graph_path_node(res, pos, field);
-
- if (is_Array_type(get_entity_owner(field))) {
- assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
- set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
- }
- } else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
- ir_mode *mode = get_irn_mode(ptr);
- tarval *tmp;
-
- if (is_Const(r) && get_irn_mode(l) == mode) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
- }
-ptr_arith:
- mode = get_tarval_mode(tv);
- tmp = tv;
-
- /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
- if (is_Sel(ptr)) {
- field = get_Sel_entity(ptr);
- } else {
- field = get_SymConst_entity(ptr);
- }
- idx = 0;
- for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index, *tlower, *tupper;
- ir_node *bound;
-
- tp = get_entity_type(ent);
- if (! is_Array_type(tp))
- break;
- ent = get_array_element_entity(tp);
- size = get_type_size_bytes(get_entity_type(ent));
- sz = new_tarval_from_long(size, mode);
-
- tv_index = tarval_div(tmp, sz);
- tmp = tarval_mod(tmp, sz);
-
- if (tv_index == tarval_bad || tmp == tarval_bad)
- return NULL;
-
- assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
- bound = get_array_lower_bound(tp, 0);
- tlower = computed_value(bound);
- bound = get_array_upper_bound(tp, 0);
- tupper = computed_value(bound);
-
- if (tlower == tarval_bad || tupper == tarval_bad)
- return NULL;
-
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
- return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
- return NULL;
-
- /* ok, bounds check finished */
- ++idx;
- }
- if (! tarval_is_null(tmp)) {
- /* access to some struct/union member */
- return NULL;
- }
-
- /* should be at least ONE array */
- if (idx == 0)
- return NULL;
-
- res = rec_get_accessed_path(ptr, depth + idx);
- if (res == NULL)
- return NULL;
-
- path_len = get_compound_graph_path_length(res);
- pos = path_len - depth - idx;
-
- for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index;
- long index;
-
- tp = get_entity_type(ent);
- if (! is_Array_type(tp))
- break;
- ent = get_array_element_entity(tp);
- set_compound_graph_path_node(res, pos, ent);
-
- size = get_type_size_bytes(get_entity_type(ent));
- sz = new_tarval_from_long(size, mode);
-
- tv_index = tarval_div(tv, sz);
- tv = tarval_mod(tv, sz);
-
- /* worked above, should work again */
- assert(tv_index != tarval_bad && tv != tarval_bad);
-
- /* bounds already checked above */
- index = get_tarval_long(tv_index);
- set_compound_graph_path_array_index(res, pos, index);
- ++pos;
- }
- } else if (is_Sub(ptr)) {
- ir_node *l = get_Sub_left(ptr);
- ir_node *r = get_Sub_right(ptr);
-
- ptr = l;
- tv = get_Const_tarval(r);
- tv = tarval_neg(tv);
- goto ptr_arith;
- }
- return res;
-} /* rec_get_accessed_path */
-
-/**
- * Returns an access path or NULL. The access path is only
- * valid, if the graph is in phase_high and _no_ address computation is used.
- */
-static compound_graph_path *get_accessed_path(ir_node *ptr)
-{
- compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
- return gr;
-} /* get_accessed_path */
-
typedef struct path_entry {
ir_entity *ent;
struct path_entry *next;
- long index;
+ size_t index;
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
path_entry entry, *p;
ir_entity *ent, *field;
ir_initializer_t *initializer;
- tarval *tv;
+ ir_tarval *tv;
ir_type *tp;
- unsigned n;
+ size_t n;
entry.next = next;
if (is_SymConst(ptr)) {
continue;
}
}
- if (p->index >= (int) n)
+ if (p->index >= n)
return NULL;
initializer = get_initializer_compound_value(initializer, p->index);
assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
} else {
- int i, n_members = get_compound_n_members(tp);
+ size_t i, n_members = get_compound_n_members(tp);
for (i = 0; i < n_members; ++i) {
if (get_compound_member(tp, i) == field)
break;
}
return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
ir_mode *mode;
unsigned pos;
- if (is_Const(r)) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
/* fill them up */
pos = 0;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index, *tlower, *tupper;
- long index;
- ir_node *bound;
+ unsigned size;
+ ir_tarval *sz, *tv_index, *tlower, *tupper;
+ long index;
+ ir_node *bound;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
*/
static void handle_load_update(ir_node *load)
{
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
/* do NOT touch volatile loads for now */
if (get_Load_volatility(load) == volatility_is_volatile)
/* this Proj is dead now */
pred = get_Proj_pred(ptr);
if (is_Load(pred)) {
- ldst_info_t *info = get_irn_link(pred);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
info->projs[get_Proj_proj(ptr)] = NULL;
/* this node lost its result proj, handle that */
*/
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
+ unsigned old_size;
+ unsigned new_size;
if (old_mode == new_mode)
- return 1;
+ return true;
+
+ old_size = get_mode_size_bits(old_mode);
+ new_size = get_mode_size_bits(new_mode);
/* if both modes are two's complement ones, we can always convert the
- Stored value into the needed one. */
- if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+ Stored value into the needed one. (On big-endian machines we currently
+ only support this for modes of the same size.) */
+ if (old_size >= new_size &&
get_mode_arithmetic(old_mode) == irma_twos_complement &&
- get_mode_arithmetic(new_mode) == irma_twos_complement)
- return 1;
- return 0;
-} /* can_use_stored_value */
+ get_mode_arithmetic(new_mode) == irma_twos_complement &&
+ (!be_get_backend_param()->byte_order_big_endian
+ || old_size == new_size)) {
+ return true;
+ }
+ return false;
+}
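+
+/* Illustration with hypothetical values: a 32-bit Store of 0x11223344
+ * followed by an 8-bit Load of the same address yields 0x44 on a
+ * little-endian target (the low-order byte sits at the lowest address)
+ * but 0x11 on a big-endian one, so reusing the stored value is only safe
+ * on big endian when both modes have the same size; on little endian a
+ * truncating Conv of the stored value is enough. */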
/**
- * Check whether a Call is at least pure, ie. does only read memory.
+ * Check whether a Call is at least pure, i.e. only reads memory.
*/
static unsigned is_Call_pure(ir_node *call)
{
/* try the called entity */
ir_node *ptr = get_Call_ptr(call);
- if (is_Global(ptr)) {
- ir_entity *ent = get_Global_entity(ptr);
+ if (is_SymConst_addr_ent(ptr)) {
+ ir_entity *ent = get_SymConst_entity(ptr);
prop = get_entity_additional_properties(ent);
}
store_value = get_Store_value(store);
if (delta != 0 || store_mode != load_mode) {
- if (delta < 0 || delta + load_mode_len > store_mode_len)
+ /* TODO: implement for big-endian */
+ if (delta < 0 || delta + load_mode_len > store_mode_len
+ || (be_get_backend_param()->byte_order_big_endian
+ && load_mode_len != store_mode_len))
return 0;
if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
/* produce a shift to adjust offset delta */
if (delta > 0) {
ir_node *cnst;
+ ir_graph *irg = get_irn_irg(load);
- /* FIXME: only true for little endian */
- cnst = new_Const_long(mode_Iu, delta * 8);
+ cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
store_value = new_r_Shr(get_nodes_block(load),
store_value, cnst, store_mode);
}
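+ /* Illustration (little endian, hypothetical values): after a 32-bit
+ * Store of 0x11223344 at address p, a 16-bit Load at p+2 must read
+ * 0x1122; the shift above recovers this as store_value >> (delta * 8)
+ * = 0x11223344 >> 16, which can then be truncated to the load mode.
+ * The delta + load_mode_len > store_mode_len check above guarantees
+ * that the loaded bytes lie completely inside the stored value. */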
DBG_OPT_RAW(load, store_value);
- info = get_irn_link(load);
+ info = (ldst_info_t*)get_irn_link(load);
if (info->projs[pn_Load_M])
exchange(info->projs[pn_Load_M], get_Load_mem(load));
res = 0;
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange( info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
unsigned res = 0;
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *pred;
ir_node *ptr = get_Load_ptr(load);
ir_node *mem = get_Load_mem(load);
ir_mode *load_mode = get_Load_mode(load);
for (pred = curr; load != pred; ) {
- ldst_info_t *pred_info = get_irn_link(pred);
+ ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
/*
* a Load immediately after a Store -- a read after write.
* We may remove the Load if both Load & Store do not have an
- * exception handler OR they are in the same MacroBlock. In the latter
+ * exception handler OR they are in the same Block. In the latter
* case the Load cannot throw an exception when the previous Store was
* quiet.
*
* Why do we need to check for a Store exception? If the Store cannot
* be executed (ROM) the exception handler might simply jump into
- * the load MacroBlock :-(
+ * the load Block :-(
* We could make it a little bit better if we would know that the
* exception handler of the Store jumps directly to the end...
*/
if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
&& info->projs[pn_Load_X_except] == NULL)
- || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
+ || get_nodes_block(load) == get_nodes_block(pred)))
{
long load_offset;
ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
can_use_stored_value(get_Load_mode(pred), load_mode)) {
/*
* a Load after a Load -- a read after read.
- * We may remove the second Load, if it does not have an exception handler
- * OR they are in the same MacroBlock. In the later case the Load cannot
- * throw an exception when the previous Load was quiet.
+ * We may remove the second Load, if it does not have an exception
+ * handler OR they are in the same Block. In the latter case
+ * the Load cannot throw an exception when the previous Load was
+ * quiet.
*
- * Here, there is no need to check if the previous Load has an exception
- * hander because they would have exact the same exception...
+ * Here, there is no need to check if the previous Load has an
+ * exception handler because they would have exactly the same
+ * exception...
+ *
+ * TODO: implement load-after-load with different mode for big
+ * endian
*/
- if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
+ if (info->projs[pn_Load_X_except] == NULL
+ || get_nodes_block(load) == get_nodes_block(pred)) {
ir_node *value;
DBG_OPT_RAR(load, pred);
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, load_mode);
*/
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
- ir_mode *c_mode = get_irn_mode(c);
- ir_mode *l_mode = get_Load_mode(load);
- ir_node *res = NULL;
+ ir_mode *c_mode = get_irn_mode(c);
+ ir_mode *l_mode = get_Load_mode(load);
+ ir_node *block = get_nodes_block(load);
+ dbg_info *dbgi = get_irn_dbg_info(load);
+ ir_node *res = copy_const_value(dbgi, c, block);
if (c_mode != l_mode) {
/* check if the mode matches OR can be easily converted into it */
if (is_reinterpret_cast(c_mode, l_mode)) {
- /* we can safely cast */
- dbg_info *dbg = get_irn_dbg_info(load);
- ir_node *block = get_nodes_block(load);
-
/* copy the value from the const code irg and cast it */
- res = copy_const_value(dbg, c);
- res = new_rd_Conv(dbg, block, res, l_mode);
+ res = new_rd_Conv(dbgi, block, res, l_mode);
+ } else {
+ return NULL;
}
- } else {
- /* copy the value from the const code irg */
- res = copy_const_value(get_irn_dbg_info(load), c);
}
return res;
-} /* can_replace_load_by_const */
+}
/**
* optimize a Load
*/
static unsigned optimize_load(ir_node *load)
{
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *mem, *ptr, *value;
ir_entity *ent;
long dummy;
return res | DF_CHANGED;
}
- /* Load from a constant polymorphic field, where we can resolve
- polymorphism. */
- value = transform_polymorph_Load(load);
- if (value == load) {
- value = NULL;
- /* check if we can determine the entity that will be loaded */
- ent = find_constant_entity(ptr);
- if (ent != NULL
- && get_entity_visibility(ent) != ir_visibility_external) {
- /* a static allocation that is not external: there should be NO
- * exception when loading even if we cannot replace the load itself.
- */
-
- /* no exception, clear the info field as it might be checked later again */
- if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
- info->projs[pn_Load_X_except] = NULL;
- res |= CF_CHANGED;
- }
- if (info->projs[pn_Load_X_regular]) {
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
- info->projs[pn_Load_X_regular] = NULL;
- res |= CF_CHANGED;
- }
-
- if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
- if (ent->initializer != NULL) {
- /* new style initializer */
- value = find_compound_ent_value(ptr);
- } else if (entity_has_compound_ent_values(ent)) {
- /* old style initializer */
- compound_graph_path *path = get_accessed_path(ptr);
+ value = NULL;
+ /* check if we can determine the entity that will be loaded */
+ ent = find_constant_entity(ptr);
+ if (ent != NULL
+ && get_entity_visibility(ent) != ir_visibility_external) {
+ /* a static allocation that is not external: there should be NO
+ * exception when loading even if we cannot replace the load itself.
+ */
- if (path != NULL) {
- assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+ /* no exception, clear the info field as it might be checked later again */
+ if (info->projs[pn_Load_X_except]) {
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
- value = get_compound_ent_value_by_path(ent, path);
- DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
- free_compound_graph_path(path);
- }
+ if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
+ if (has_entity_initializer(ent)) {
+ /* new style initializer */
+ value = find_compound_ent_value(ptr);
+ }
+ if (value != NULL) {
+ ir_graph *irg = get_irn_irg(load);
+ value = can_replace_load_by_const(load, value);
+ if (value != NULL && is_Sel(ptr)) {
+ /* frontend has inserted masking operations after bitfield accesses,
+ * so we might have to shift the const. */
+ unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
+ ir_tarval *tv_old = get_Const_tarval(value);
+ ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
+ ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
+ value = new_r_Const(irg, tv_new);
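+ /* illustration with a hypothetical bitfield member: for a bit
+ * remainder of 3 and an initializer value of 5 the containing word
+ * holds 5 << 3 == 40, so the Load is replaced by the shifted
+ * constant and the mask/shift the frontend emitted around the Load
+ * still extracts 5. */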
}
- if (value != NULL)
- value = can_replace_load_by_const(load, value);
}
}
}
if (value != NULL) {
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
/* Check, if the address of this load is used more than once.
* If not, this Load cannot be removed in any case. */
- if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
+ if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(get_base_and_offset(ptr, &dummy)) <= 1)
return res;
/*
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
unsigned res = 0;
- ldst_info_t *info = get_irn_link(store);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
ir_node *pred;
ir_node *ptr = get_Store_ptr(store);
ir_node *mem = get_Store_mem(store);
ir_node *value = get_Store_value(store);
ir_mode *mode = get_irn_mode(value);
ir_node *block = get_nodes_block(store);
- ir_node *mblk = get_Block_MacroBlock(block);
for (pred = curr; pred != store;) {
- ldst_info_t *pred_info = get_irn_link(pred);
+ ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
/*
* BEWARE: one might think that checking the modes is useless, because
* killed ...
*/
if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
- get_nodes_MacroBlock(pred) == mblk) {
+ get_nodes_block(pred) == block) {
/*
- * a Store after a Store in the same MacroBlock -- a write after write.
+ * a Store after a Store in the same Block -- a write after write.
*/
/*
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, mode);
pred = skip_Proj(get_Store_mem(pred));
} else if (is_Load(pred)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
+ get_Load_ptr(pred), get_Load_mode(pred),
ptr, mode);
if (rel != ir_no_alias)
break;
/* a store to an entity which is never read is unnecessary */
if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
- ldst_info_t *info = get_irn_link(store);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
if (info->projs[pn_Store_X_except] == NULL) {
DB((dbg, LEVEL_1, " Killing useless %+F to never read entity %+F\n", store, entity));
exchange(info->projs[pn_Store_M], get_Store_mem(store));
/* Check, if the address of this Store is used more than once.
* If not, this Store cannot be removed in any case. */
- if (get_irn_n_uses(ptr) <= 1)
+ if (get_irn_n_edges(ptr) <= 1)
return 0;
mem = get_Store_mem(store);
return follow_Mem_chain_for_Store(store, skip_Proj(mem));
} /* optimize_store */
+/* check if a node has more than one real user. Keepalive edges do not count as
+ * real users */
+static bool has_multiple_users(const ir_node *node)
+{
+ unsigned real_users = 0;
+ foreach_out_edge(node, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ if (is_End(user))
+ continue;
+ ++real_users;
+ if (real_users > 1)
+ return true;
+ }
+ return false;
+}
+
/**
* walker, optimizes Phi after Stores to identical places:
* Does the following optimization:
static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
{
int i, n;
- ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+ ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+#ifdef DO_CACHEOPT
+ ir_node *old_store;
+#endif
ir_mode *mode;
ir_node **inM, **inD, **projMs;
int *idx;
/* must be only one user */
projM = get_Phi_pred(phi, 0);
- if (get_irn_n_edges(projM) != 1)
+ if (has_multiple_users(projM))
return 0;
store = skip_Proj(projM);
+#ifdef DO_CACHEOPT
old_store = store;
+#endif
if (!is_Store(store))
return 0;
block = get_nodes_block(store);
- /* abort on dead blocks */
- if (is_Block_dead(block))
- return 0;
-
/* check if the block is post dominated by Phi-block
and has no exception exit */
- bl_info = get_irn_link(block);
+ bl_info = (block_info_t*)get_irn_link(block);
if (bl_info->flags & BLOCK_HAS_EXC)
return 0;
/* this is the address of the store */
ptr = get_Store_ptr(store);
mode = get_irn_mode(get_Store_value(store));
- info = get_irn_link(store);
+ info = (ldst_info_t*)get_irn_link(store);
exc = info->exc_block;
for (i = 1; i < n; ++i) {
ir_node *pred = get_Phi_pred(phi, i);
- if (get_irn_n_edges(pred) != 1)
+ if (has_multiple_users(pred))
return 0;
pred = skip_Proj(pred);
if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
return 0;
- info = get_irn_link(pred);
+ info = (ldst_info_t*)get_irn_link(pred);
/* check, if all stores have the same exception flow */
if (exc != info->exc_block)
return 0;
- /* abort on dead blocks */
block = get_nodes_block(pred);
- if (is_Block_dead(block))
- return 0;
/* check if the block is post dominated by Phi-block
and has no exception exit. Note that block must be different from
Phi-block, else we would move a Store from the end of a block to its
start... */
- bl_info = get_irn_link(block);
+ bl_info = (block_info_t*)get_irn_link(block);
if (bl_info->flags & BLOCK_HAS_EXC)
return 0;
if (block == phi_block || ! block_postdominates(phi_block, block))
assert(is_Proj(projMs[i]));
store = get_Proj_pred(projMs[i]);
- info = get_irn_link(store);
+ info = (ldst_info_t*)get_irn_link(store);
inM[i] = get_Store_mem(store);
inD[i] = get_Store_value(store);
}
/* fourth step: create the Store */
- store = new_rd_Store(db, block, phiM, ptr, phiD, 0);
+ store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
#ifdef DO_CACHEOPT
co_set_irn_name(store, co_get_irn_ident(old_store));
#endif
return res | DF_CHANGED;
} /* optimize_phi */
+static int optimize_conv_load(ir_node *conv)
+{
+ ir_node *op = get_Conv_op(conv);
+ if (!is_Proj(op))
+ return 0;
+ /* only do this if we are the only user (otherwise the risk is too
+ * great that we end up with 2 Loads instead of one). */
+ if (has_multiple_users(op))
+ return 0;
+ ir_node *load = get_Proj_pred(op);
+ if (!is_Load(load))
+ return 0;
+
+ /* shrink the mode of the Load if possible */
+ ir_mode *mode = get_irn_mode(conv);
+ ir_mode *load_mode = get_Load_mode(load);
+ int bits_diff
+ = get_mode_size_bits(load_mode) - get_mode_size_bits(mode);
+ if (mode_is_float(load_mode) || mode_is_float(mode) || bits_diff < 0)
+ return 0;
+
+ if (be_get_backend_param()->byte_order_big_endian) {
+ if (bits_diff % 8 != 0)
+ return 0;
+ ir_graph *irg = get_irn_irg(conv);
+ ir_node *ptr = get_Load_ptr(load);
+ ir_mode *ptr_mode = get_irn_mode(ptr);
+ ir_node *delta = new_r_Const_long(irg, ptr_mode, bits_diff/8);
+ ir_node *block = get_nodes_block(load);
+ ir_node *add = new_r_Add(block, ptr, delta, ptr_mode);
+ set_Load_ptr(load, add);
+ }
+ set_Load_mode(load, mode);
+ set_irn_mode(op, mode);
+ exchange(conv, op);
+ return DF_CHANGED;
+}
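+
+/* Illustration with hypothetical modes: when a 32-bit Load is only used by
+ * a Conv to a 16-bit mode, shrinking the Load keeps the address unchanged
+ * on little endian, because the 16 low-order bits already live at the
+ * original address; on big endian they live bits_diff/8 == 2 bytes further
+ * on, which the extra Add on the Load pointer above accounts for. */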
+
/**
* walker, do the optimizations
*/
static void do_load_store_optimize(ir_node *n, void *env)
{
- walk_env_t *wenv = env;
+ walk_env_t *wenv = (walk_env_t*)env;
switch (get_irn_opcode(n)) {
wenv->changes |= optimize_phi(n, wenv);
break;
+ case iro_Conv:
+ wenv->changes |= optimize_conv_load(n);
+ break;
+
default:
- ;
+ break;
}
} /* do_load_store_optimize */
/** A scc. */
typedef struct scc {
- ir_node *head; /**< the head of the list */
+ ir_node *head; /**< the head of the list */
} scc;
/** A node entry. */
/** A loop entry. */
typedef struct loop_env {
- ir_phase ph; /**< the phase object */
- ir_node **stack; /**< the node stack */
- int tos; /**< tos index */
- unsigned nextDFSnum; /**< the current DFS number */
- unsigned POnum; /**< current post order number */
-
- unsigned changes; /**< a bitmask of graph changes */
+ ir_nodehashmap_t map;
+ struct obstack obst;
+ ir_node **stack; /**< the node stack */
+ size_t tos; /**< tos index */
+ unsigned nextDFSnum; /**< the current DFS number */
+ unsigned POnum; /**< current post order number */
+
+ unsigned changes; /**< a bitmask of graph changes */
} loop_env;
/**
*/
static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
{
- ir_phase *ph = &env->ph;
- node_entry *e = phase_get_irn_data(&env->ph, irn);
+ node_entry *e = ir_nodehashmap_get(node_entry, &env->map, irn);
- if (! e) {
- e = phase_alloc(ph, sizeof(*e));
+ if (e == NULL) {
+ e = OALLOC(&env->obst, node_entry);
memset(e, 0, sizeof(*e));
- phase_set_irn_data(ph, irn, e);
+ ir_nodehashmap_insert(&env->map, irn, e);
}
return e;
} /* get_irn_ne */
node_entry *e;
if (env->tos == ARR_LEN(env->stack)) {
- int nlen = ARR_LEN(env->stack) * 2;
+ size_t nlen = ARR_LEN(env->stack) * 2;
ARR_RESIZE(ir_node *, env->stack, nlen);
}
env->stack[env->tos++] = n;
*/
static int cmp_avail_entry(const void *elt, const void *key, size_t size)
{
- const avail_entry_t *a = elt;
- const avail_entry_t *b = key;
+ const avail_entry_t *a = (const avail_entry_t*)elt;
+ const avail_entry_t *b = (const avail_entry_t*)key;
(void) size;
return a->ptr != b->ptr || a->mode != b->mode;
*/
static unsigned hash_cache_entry(const avail_entry_t *entry)
{
- return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
+ return get_irn_idx(entry->ptr) * 9 + hash_ptr(entry->mode);
} /* hash_cache_entry */
/**
static void move_loads_out_of_loops(scc *pscc, loop_env *env)
{
ir_node *phi, *load, *next, *other, *next_other;
- ir_entity *ent;
int j;
phi_entry *phi_list = NULL;
set *avail;
- avail = new_set(cmp_avail_entry, 8);
-
/* collect all outer memories */
for (phi = pscc->head; phi != NULL; phi = next) {
node_entry *ne = get_irn_ne(phi, env);
if (pe->pscc != ne->pscc) {
/* not in the same SCC, is region const */
- phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));
+ phi_entry *pe = OALLOC(&env->obst, phi_entry);
pe->phi = phi;
pe->pos = j;
if (phi_list->next != NULL)
return;
+ avail = new_set(cmp_avail_entry, 8);
+
for (load = pscc->head; load; load = next) {
ir_mode *load_mode;
node_entry *ne = get_irn_ne(load, env);
next = ne->next;
if (is_Load(load)) {
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *ptr = get_Load_ptr(load);
/* for now, we cannot handle Loads with exceptions */
continue;
/* for now, we can only move Loads of global entities (SymConst addresses) */
- if (! is_Global(ptr))
+ if (! is_SymConst_addr_ent(ptr))
continue;
- ent = get_Global_entity(ptr);
load_mode = get_Load_mode(load);
for (other = pscc->head; other != NULL; other = next_other) {
node_entry *ne = get_irn_ne(other, env);
if (is_Store(other)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(other),
get_irn_mode(get_Store_value(other)),
ptr, load_mode);
entry.ptr = ptr;
entry.mode = load_mode;
- res = set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+ res = set_find(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
if (res != NULL) {
irn = res->load;
} else {
- irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, 0);
+ irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
entry.load = irn;
- set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+ (void)set_insert(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
DB((dbg, LEVEL_1, " Created %+F in %+F\n", irn, pred));
}
pe->load = irn;
- ninfo = get_ldst_info(irn, phase_obst(&env->ph));
+ ninfo = get_ldst_info(irn, &env->obst);
ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
- set_Phi_pred(phi, pos, mem);
+ if (res == NULL) {
+ /* only rewire the Phi if the Load was created above: if irn came
+ * from the avail cache, there might already be other Loads between
+ * phi and irn, so do not set the Phi predecessor again. */
+ set_Phi_pred(phi, pos, mem);
+ }
ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
}
node->low = MIN(o->DFSnum, node->low);
}
} else if (is_fragile_op(irn)) {
- ir_node *pred = get_fragile_op_mem(irn);
+ ir_node *pred = get_memop_mem(irn);
node_entry *o = get_irn_ne(pred, env);
if (!irn_visited(pred)) {
}
if (node->low == node->DFSnum) {
- scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
+ scc *pscc = OALLOC(&env->obst, scc);
ir_node *x;
pscc->head = NULL;
*/
static void do_dfs(ir_graph *irg, loop_env *env)
{
- ir_graph *rem = current_ir_graph;
ir_node *endblk, *end;
int i;
- current_ir_graph = irg;
inc_irg_visited(irg);
/* visit all memory nodes */
ir_node *pred = get_Block_cfgpred(endblk, i);
pred = skip_Proj(pred);
- if (is_Return(pred))
+ if (is_Return(pred)) {
dfs(get_Return_mem(pred), env);
- else if (is_Raise(pred))
+ } else if (is_Raise(pred)) {
dfs(get_Raise_mem(pred), env);
- else if (is_fragile_op(pred))
- dfs(get_fragile_op_mem(pred), env);
- else {
+ } else if (is_fragile_op(pred)) {
+ dfs(get_memop_mem(pred), env);
+ } else if (is_Bad(pred)) {
+ /* ignore non-optimized block predecessor */
+ } else {
assert(0 && "Unknown EndBlock predecessor");
}
}
if (is_Phi(ka) && !irn_visited(ka))
dfs(ka, env);
}
- current_ir_graph = rem;
} /* do_dfs */
/**
env.nextDFSnum = 0;
env.POnum = 0;
env.changes = 0;
- phase_init(&env.ph, irg, phase_irn_init_default);
+ ir_nodehashmap_init(&env.map);
+ obstack_init(&env.obst);
/* calculate the SCC's and drive loop optimization. */
do_dfs(irg, &env);
DEL_ARR_F(env.stack);
- phase_deinit(&env.ph);
+ obstack_free(&env.obst, NULL);
+ ir_nodehashmap_destroy(&env.map);
return env.changes;
} /* optimize_loops */
/*
* do the load store optimization
*/
-int optimize_load_store(ir_graph *irg)
+void optimize_load_store(ir_graph *irg)
{
walk_env_t env;
+ assure_irg_properties(irg,
+ IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+ | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
+ | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
+ | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+ | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
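+ /* a rough sketch of why these are requested: out edges drive the node
+ * walkers and has_multiple_users(), split critical edges provide landing
+ * pads for the exception control flow, and the entity usage information
+ * feeds the alias queries and the "never read entity" check. */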
+
FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_pinned(irg) != op_pin_state_floats &&
"LoadStore optimization needs pinned graph");
- /* we need landing pads */
- remove_critical_cf_edges(irg);
-
- edges_assure(irg);
-
- /* for Phi optimization post-dominators are needed ... */
- assure_postdoms(irg);
-
if (get_opt_alias_analysis()) {
- assure_irg_entity_usage_computed(irg);
assure_irp_globals_entity_usage_computed();
}
obstack_free(&env.obst, NULL);
- /* Handle graph state */
- if (env.changes) {
- set_irg_outs_inconsistent(irg);
- set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
- }
-
- if (env.changes & CF_CHANGED) {
- /* is this really needed: Yes, control flow changed, block might
- have Bad() predecessors. */
- set_irg_doms_inconsistent(irg);
- }
- return env.changes != 0;
-} /* optimize_load_store */
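+ /* keep all properties if nothing changed, keep only the control-flow
+ * related ones if just the data flow changed, and confirm nothing when
+ * the control flow itself was modified: */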
+ confirm_irg_properties(irg,
+ env.changes
+ ? env.changes & CF_CHANGED
+ ? IR_GRAPH_PROPERTIES_NONE
+ : IR_GRAPH_PROPERTIES_CONTROL_FLOW
+ : IR_GRAPH_PROPERTIES_ALL);
+}
ir_graph_pass_t *optimize_load_store_pass(const char *name)
{
- return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
+ return def_graph_pass(name ? name : "ldst", optimize_load_store);
} /* optimize_load_store_pass */