/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* @author Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <string.h>
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
-#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irflag_t.h"
-#include "array.h"
+#include "array_t.h"
#include "irhooks.h"
#include "iredges.h"
-#include "irtools.h"
+#include "irpass.h"
#include "opt_polymorphy.h"
#include "irmemory.h"
-#include "xmalloc.h"
#include "irphase_t.h"
#include "irgopt.h"
+#include "set.h"
+#include "be.h"
#include "debug.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
-#ifdef DO_CACHEOPT
-#include "cacheopt/cachesim.h"
-#endif
-
#undef IMAX
-#define IMAX(a,b) ((a) > (b) ? (a) : (b))
+#define IMAX(a,b) ((a) > (b) ? (a) : (b))
-#define MAX_PROJ IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
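+/* the pn_*_max values are of different enum types; cast to long so IMAX can compare them */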
+#define MAX_PROJ IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
enum changes_t {
DF_CHANGED = 1, /**< data flow changed */
/**
* walker environment
*/
-typedef struct _walk_env_t {
+typedef struct walk_env_t {
struct obstack obst; /**< list of all stores */
unsigned changes; /**< a bitmask of graph changes */
} walk_env_t;
/** A Load/Store info. */
-typedef struct _ldst_info_t {
+typedef struct ldst_info_t {
ir_node *projs[MAX_PROJ]; /**< list of Proj's of this node */
ir_node *exc_block; /**< the exception block if available */
int exc_idx; /**< predecessor index in the exception block */
/**
* a Block info.
*/
-typedef struct _block_info_t {
+typedef struct block_info_t {
unsigned flags; /**< flags for the block */
} block_info_t;
/**
* get the Load/Store info of a node
*/
-static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
- ldst_info_t *info = get_irn_link(node);
+static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
+{
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
if (! info) {
- info = obstack_alloc(obst, sizeof(*info));
- memset(info, 0, sizeof(*info));
+ info = OALLOCZ(obst, ldst_info_t);
set_irn_link(node, info);
}
return info;
/**
* get the Block info of a node
*/
-static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
- block_info_t *info = get_irn_link(node);
+static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
+{
+ block_info_t *info = (block_info_t*)get_irn_link(node);
if (! info) {
- info = obstack_alloc(obst, sizeof(*info));
- memset(info, 0, sizeof(*info));
+ info = OALLOCZ(obst, block_info_t);
set_irn_link(node, info);
}
return info;
*/
static void collect_nodes(ir_node *node, void *env)
{
- ir_opcode opcode = get_irn_opcode(node);
+ walk_env_t *wenv = (walk_env_t *)env;
+ unsigned opcode = get_irn_opcode(node);
ir_node *pred, *blk, *pred_blk;
ldst_info_t *ldst_info;
- walk_env_t *wenv = env;
if (opcode == iro_Proj) {
pred = get_Proj_pred(node);
{
for (;;) {
if (is_SymConst(ptr) && get_SymConst_kind(ptr) == symconst_addr_ent) {
- ir_entity *ent = get_SymConst_entity(ptr);
- if (variability_constant == get_entity_variability(ent))
- return ent;
- return NULL;
+ return get_SymConst_entity(ptr);
} else if (is_Sel(ptr)) {
ir_entity *ent = get_Sel_entity(ptr);
ir_type *tp = get_entity_owner(ent);
int i, n;
for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
- ir_node *bound;
- tarval *tlower, *tupper;
- ir_node *index = get_Sel_index(ptr, i);
- tarval *tv = computed_value(index);
+ ir_node *bound;
+ ir_tarval *tlower, *tupper;
+ ir_node *index = get_Sel_index(ptr, i);
+ ir_tarval *tv = computed_value(index);
/* check if the index is constant */
if (tv == tarval_bad)
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
}
}
- if (variability_constant == get_entity_variability(ent))
+ if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
return ent;
/* try next */
ir_node *l = get_Sub_left(ptr);
ir_node *r = get_Sub_right(ptr);
- if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
+ if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
ptr = l;
else
return NULL;
/**
* Return the Selection index of a Sel node from dimension n
*/
-static long get_Sel_array_index_long(ir_node *n, int dim) {
+static long get_Sel_array_index_long(ir_node *n, int dim)
+{
ir_node *index = get_Sel_index(n, dim);
assert(is_Const(index));
return get_tarval_long(get_Const_tarval(index));
* @param depth current depth in steps upward from the root
* of the address
*/
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
+{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
- int path_len, pos, idx;
- tarval *tv;
+ size_t path_len, pos, idx;
+ ir_tarval *tv;
ir_type *tp;
if (is_SymConst(ptr)) {
/* a SymConst. If the depth is 0, this is an access to a global
* entity and we don't need a component path, else we know
- * at least it's length.
+ * at least its length.
*/
assert(get_SymConst_kind(ptr) == symconst_addr_ent);
root = get_SymConst_entity(ptr);
set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
}
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
- ir_mode *mode = get_irn_mode(ptr);
- tarval *tmp;
-
- if (is_Const(r) && get_irn_mode(l) == mode) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ ir_mode *mode;
+ ir_tarval *tmp;
+
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
}
idx = 0;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index, *tlower, *tupper;
- ir_node *bound;
+ unsigned size;
+ ir_tarval *sz, *tv_index, *tlower, *tupper;
+ ir_node *bound;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
pos = path_len - depth - idx;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index;
- long index;
+ unsigned size;
+ ir_tarval *sz, *tv_index;
+ long index;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
* Returns an access path or NULL. The access path is only
* valid, if the graph is in phase_high and _no_ address computation is used.
*/
-static compound_graph_path *get_accessed_path(ir_node *ptr) {
+static compound_graph_path *get_accessed_path(ir_node *ptr)
+{
compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
return gr;
} /* get_accessed_path */
typedef struct path_entry {
ir_entity *ent;
struct path_entry *next;
- long index;
+ size_t index;
} path_entry;
-static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
+static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
+{
path_entry entry, *p;
ir_entity *ent, *field;
ir_initializer_t *initializer;
- tarval *tv;
+ ir_tarval *tv;
ir_type *tp;
- unsigned n;
+ size_t n;
entry.next = next;
if (is_SymConst(ptr)) {
continue;
}
}
- if (p->index >= (int) n)
+ if (p->index >= n)
return NULL;
initializer = get_initializer_compound_value(initializer, p->index);
assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
} else {
- int i, n_members = get_compound_n_members(tp);
+ size_t i, n_members = get_compound_n_members(tp);
for (i = 0; i < n_members; ++i) {
if (get_compound_member(tp, i) == field)
break;
}
return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
ir_mode *mode;
unsigned pos;
- if (is_Const(r)) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
/* fill them up */
pos = 0;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index, *tlower, *tupper;
- long index;
- ir_node *bound;
+ unsigned size;
+ ir_tarval *sz, *tv_index, *tlower, *tupper;
+ long index;
+ ir_node *bound;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
return NULL;
}
-static ir_node *find_compound_ent_value(ir_node *ptr) {
+static ir_node *find_compound_ent_value(ir_node *ptr)
+{
return rec_find_compound_ent_value(ptr, NULL);
}
static void reduce_adr_usage(ir_node *ptr);
/**
- * Update a Load that may lost it's usage.
+ * Update a Load that may have lost its users.
*/
-static void handle_load_update(ir_node *load) {
- ldst_info_t *info = get_irn_link(load);
+static void handle_load_update(ir_node *load)
+{
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
/* do NOT touch volatile loads for now */
if (get_Load_volatility(load) == volatility_is_volatile)
ir_node *ptr = get_Load_ptr(load);
ir_node *mem = get_Load_mem(load);
- /* a Load which value is neither used nor exception checked, remove it */
+ /* a Load whose value is neither used nor exception checked, remove it */
exchange(info->projs[pn_Load_M], mem);
if (info->projs[pn_Load_X_regular])
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
kill_node(load);
reduce_adr_usage(ptr);
}
} /* handle_load_update */
/**
- * A Use of an address node is vanished. Check if this was a Proj
+ * A use of an address node has vanished. Check if this was a Proj
* node and update the counters.
*/
-static void reduce_adr_usage(ir_node *ptr) {
- if (is_Proj(ptr)) {
- if (get_irn_n_edges(ptr) <= 0) {
- /* this Proj is dead now */
- ir_node *pred = get_Proj_pred(ptr);
-
- if (is_Load(pred)) {
- ldst_info_t *info = get_irn_link(pred);
- info->projs[get_Proj_proj(ptr)] = NULL;
-
- /* this node lost it's result proj, handle that */
- handle_load_update(pred);
- }
- }
+static void reduce_adr_usage(ir_node *ptr)
+{
+ ir_node *pred;
+ if (!is_Proj(ptr))
+ return;
+ if (get_irn_n_edges(ptr) > 0)
+ return;
+
+ /* this Proj is dead now */
+ pred = get_Proj_pred(ptr);
+ if (is_Load(pred)) {
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
+ info->projs[get_Proj_proj(ptr)] = NULL;
+
+ /* this node lost its result proj, handle that */
+ handle_load_update(pred);
}
} /* reduce_adr_usage */
* Check, if an already existing value of mode old_mode can be converted
* into the needed one new_mode without loss.
*/
-static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
+static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
+{
+ unsigned old_size;
+ unsigned new_size;
if (old_mode == new_mode)
- return 1;
+ return true;
+
+ old_size = get_mode_size_bits(old_mode);
+ new_size = get_mode_size_bits(new_mode);
/* if both modes are two's-complement ones, we can always convert the
- Stored value into the needed one. */
- if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+ Stored value into the needed one. (on big endian machines we currently
+ only support this for modes of the same size) */
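+ /* e.g. on little endian a value stored in 32-bit mode_Is can satisfy a load in 8-bit mode_Bs */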
+ if (old_size >= new_size &&
get_mode_arithmetic(old_mode) == irma_twos_complement &&
- get_mode_arithmetic(new_mode) == irma_twos_complement)
- return 1;
- return 0;
-} /* can_use_stored_value */
+ get_mode_arithmetic(new_mode) == irma_twos_complement &&
+ (!be_get_backend_param()->byte_order_big_endian
+ || old_size == new_size)) {
+ return true;
+ }
+ return false;
+}
/**
- * Check whether a Call is at least pure, ie. does only read memory.
+ * Check whether a Call is at least pure, i.e. only reads memory.
*/
-static unsigned is_Call_pure(ir_node *call) {
+static unsigned is_Call_pure(ir_node *call)
+{
ir_type *call_tp = get_Call_type(call);
unsigned prop = get_method_additional_properties(call_tp);
store_mode = get_irn_mode(get_Store_value(store));
store_mode_len = get_mode_size_bytes(store_mode);
delta = load_offset - store_offset;
- if (delta < 0 || delta + load_mode_len > store_mode_len)
- return 0;
+ store_value = get_Store_value(store);
- if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
- get_mode_arithmetic(load_mode) != irma_twos_complement)
- return 0;
+ if (delta != 0 || store_mode != load_mode) {
+ /* TODO: implement for big-endian */
+ if (delta < 0 || delta + load_mode_len > store_mode_len
+ || (be_get_backend_param()->byte_order_big_endian
+ && load_mode_len != store_mode_len))
+ return 0;
- store_value = get_Store_value(store);
+ if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
+ get_mode_arithmetic(load_mode) != irma_twos_complement)
+ return 0;
- /* produce a shift to adjust offset delta */
- if (delta > 0) {
- ir_node *cnst;
- /* FIXME: only true for little endian */
- cnst = new_Const_long(mode_Iu, delta * 8);
- store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
- store_value, cnst, store_mode);
- }
+ /* produce a shift to adjust offset delta */
+ if (delta > 0) {
+ ir_node *cnst;
+ ir_graph *irg = get_irn_irg(load);
+
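+ /* delta is in bytes; on little endian the wanted bytes start at bit delta * 8 of the stored value */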
+ cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
+ store_value = new_r_Shr(get_nodes_block(load),
+ store_value, cnst, store_mode);
+ }
- /* add an convert if needed */
- if (store_mode != load_mode) {
- store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
- store_value, load_mode);
+ /* add a conversion if needed */
+ if (store_mode != load_mode) {
+ store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
+ }
}
DBG_OPT_RAW(load, store_value);
- info = get_irn_link(load);
+ info = (ldst_info_t*)get_irn_link(load);
if (info->projs[pn_Load_M])
exchange(info->projs[pn_Load_M], get_Load_mem(load));
res = 0;
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange( info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
- exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
res |= CF_CHANGED;
}
*
* INC_MASTER() must be called before diving into the memory chain.
*/
-static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
+static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
+{
unsigned res = 0;
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *pred;
ir_node *ptr = get_Load_ptr(load);
ir_node *mem = get_Load_mem(load);
ir_mode *load_mode = get_Load_mode(load);
for (pred = curr; load != pred; ) {
- ldst_info_t *pred_info = get_irn_link(pred);
+ ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
/*
* a Load immediately after a Store -- a read after write.
* We may remove the Load, if both Load & Store do not have an
- * exception handler OR they are in the same MacroBlock. In the latter
+ * exception handler OR they are in the same Block. In the latter
* case the Load cannot throw an exception when the previous Store was
* quiet.
*
* Why do we need to check for a Store exception? If the Store cannot
* be executed (ROM) the exception handler might simply jump into
- * the load MacroBlock :-(
+ * the load Block :-(
* We could make it a little bit better if we would know that the
* exception handler of the Store jumps directly to the end...
*/
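+ /* Source pattern: "*p = x; y = *p;" -- the Load can be replaced by x. */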
if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
&& info->projs[pn_Load_X_except] == NULL)
- || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
+ || get_nodes_block(load) == get_nodes_block(pred)))
{
long load_offset;
ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
can_use_stored_value(get_Load_mode(pred), load_mode)) {
/*
* a Load after a Load -- a read after read.
- * We may remove the second Load, if it does not have an exception handler
- * OR they are in the same MacroBlock. In the later case the Load cannot
- * throw an exception when the previous Load was quiet.
+ * We may remove the second Load, if it does not have an exception
+ * handler OR they are in the same Block. In the latter case
+ * the Load cannot throw an exception when the previous Load was
+ * quiet.
*
- * Here, there is no need to check if the previous Load has an exception
- * hander because they would have exact the same exception...
+ * Here, there is no need to check if the previous Load has an
+ * exception handler because they would have exactly the same
+ * exception...
+ *
+ * TODO: implement load-after-load with different mode for big
+ * endian
*/
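+ /* Source pattern: "x = *p; ... y = *p;" with no aliasing write in between -- reuse the first result. */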
- if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
+ if (info->projs[pn_Load_X_except] == NULL
+ || get_nodes_block(load) == get_nodes_block(pred)) {
ir_node *value;
DBG_OPT_RAR(load, pred);
if (info->projs[pn_Load_res]) {
if (pred_info->projs[pn_Load_res] == NULL) {
/* create a new Proj again */
- pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
+ pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
}
value = pred_info->projs[pn_Load_res];
/* add a conversion if needed */
if (get_Load_mode(pred) != load_mode) {
- value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
+ value = new_r_Conv(get_nodes_block(load), value, load_mode);
}
exchange(info->projs[pn_Load_res], value);
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
- exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
res |= CF_CHANGED;
}
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, load_mode);
* Check if we can replace the load by a given const from
* the const code irg.
*/
-ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
- ir_mode *c_mode = get_irn_mode(c);
- ir_mode *l_mode = get_Load_mode(load);
- ir_node *res = NULL;
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
+{
+ ir_mode *c_mode = get_irn_mode(c);
+ ir_mode *l_mode = get_Load_mode(load);
+ ir_node *block = get_nodes_block(load);
+ dbg_info *dbgi = get_irn_dbg_info(load);
+ ir_node *res = copy_const_value(dbgi, c, block);
if (c_mode != l_mode) {
/* check if the mode matches OR can be easily converted into it */
if (is_reinterpret_cast(c_mode, l_mode)) {
- /* we can safely cast */
- dbg_info *dbg = get_irn_dbg_info(load);
- ir_node *block = get_nodes_block(load);
-
/* copy the value from the const code irg and cast it */
- res = copy_const_value(dbg, c);
- res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
+ res = new_rd_Conv(dbgi, block, res, l_mode);
- }
- } else {
- /* copy the value from the const code irg */
- res = copy_const_value(get_irn_dbg_info(load), c);
+ } else {
+ return NULL;
+ }
}
return res;
-} /* can_replace_load_by_const */
+}
/**
* optimize a Load
*/
static unsigned optimize_load(ir_node *load)
{
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *mem, *ptr, *value;
ir_entity *ent;
long dummy;
/* the address of the load to be optimized */
ptr = get_Load_ptr(load);
- /*
- * Check if we can remove the exception from a Load:
- * This can be done, if the address is from an Sel(Alloc) and
- * the Sel type is a subtype of the allocated type.
- *
- * This optimizes some often used OO constructs,
- * like x = new O; x->t;
- */
- if (info->projs[pn_Load_X_except]) {
- ir_node *addr = ptr;
-
- /* find base address */
- while (is_Sel(addr))
- addr = get_Sel_ptr(addr);
- if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
- /* simple case: a direct load after an Alloc. Firm Alloc throw
- * an exception in case of out-of-memory. So, there is no way for an
- * exception in this load.
- * This code is constructed by the "exception lowering" in the Jack compiler.
- */
- exchange(info->projs[pn_Load_X_except], new_Bad());
- info->projs[pn_Load_X_except] = NULL;
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
- info->projs[pn_Load_X_regular] = NULL;
- res |= CF_CHANGED;
- }
- }
-
/* The mem of the Load. Must still be returned after optimization. */
- mem = get_Load_mem(load);
+ mem = get_Load_mem(load);
- if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
- /* a Load which value is neither used nor exception checked, remove it */
+ if (info->projs[pn_Load_res] == NULL
+ && info->projs[pn_Load_X_except] == NULL) {
+ /* the value is never used and we don't care about exceptions, remove */
exchange(info->projs[pn_Load_M], mem);
if (info->projs[pn_Load_X_regular]) {
/* should not happen, but if it does, remove it */
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
res |= CF_CHANGED;
}
kill_node(load);
value = NULL;
/* check if we can determine the entity that will be loaded */
ent = find_constant_entity(ptr);
- if (ent != NULL) {
- if ((allocation_static == get_entity_allocation(ent)) &&
- (visibility_external_allocated != get_entity_visibility(ent))) {
- /* a static allocation that is not external: there should be NO exception
- * when loading even if we cannot replace the load itself. */
+ if (ent != NULL
+ && get_entity_visibility(ent) != ir_visibility_external) {
+ /* a static allocation that is not external: there should be NO
+ * exception when loading even if we cannot replace the load itself.
+ */
- /* no exception, clear the info field as it might be checked later again */
- if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
- info->projs[pn_Load_X_except] = NULL;
- res |= CF_CHANGED;
- }
- if (info->projs[pn_Load_X_regular]) {
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
- info->projs[pn_Load_X_regular] = NULL;
- res |= CF_CHANGED;
- }
+ /* no exception, clear the info field as it might be checked later again */
+ if (info->projs[pn_Load_X_except]) {
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
+ info->projs[pn_Load_X_except] = NULL;
+ res |= CF_CHANGED;
+ }
+ if (info->projs[pn_Load_X_regular]) {
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
+ info->projs[pn_Load_X_regular] = NULL;
+ res |= CF_CHANGED;
+ }
- if (variability_constant == get_entity_variability(ent)) {
- if (is_atomic_entity(ent)) {
- /* Might not be atomic after
- lowering of Sels. In this
- case we could also load, but
- it's more complicated. */
- /* more simpler case: we load the content of a constant value:
- * replace it by the constant itself
- */
- value = get_atomic_ent_value(ent);
- } else {
- if (ent->has_initializer) {
- /* new style initializer */
- value = find_compound_ent_value(ptr);
- } else {
- /* old style initializer */
- compound_graph_path *path = get_accessed_path(ptr);
-
- if (path != NULL) {
- assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
-
- value = get_compound_ent_value_by_path(ent, path);
- DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
- free_compound_graph_path(path);
- }
- }
+ if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
+ if (ent->initializer != NULL) {
+ /* new style initializer */
+ value = find_compound_ent_value(ptr);
+ } else if (entity_has_compound_ent_values(ent)) {
+ /* old style initializer */
+ compound_graph_path *path = get_accessed_path(ptr);
+
+ if (path != NULL) {
+ assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+
+ value = get_compound_ent_value_by_path(ent, path);
+ DB((dbg, LEVEL_1, " Constant access at %F%F resulted in %+F\n", ent, path, value));
+ free_compound_graph_path(path);
}
- if (value != NULL)
- value = can_replace_load_by_const(load, value);
}
+ if (value != NULL)
+ value = can_replace_load_by_const(load, value);
}
}
}
if (value != NULL) {
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
- exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+ exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
info->projs[pn_Load_X_regular] = NULL;
res |= CF_CHANGED;
}
return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
} /* is_completely_overwritten */
+/**
+ * Check whether small is a part of large (starting at same address).
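+ * That is, small == Conv(large) into a strictly narrower two's-complement
+ * mode, so small carries the low-order part of large's value.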
+ */
+static int is_partially_same(ir_node *small, ir_node *large)
+{
+ ir_mode *sm = get_irn_mode(small);
+ ir_mode *lm = get_irn_mode(large);
+
+ /* FIXME: Check endianness */
+ return is_Conv(small) && get_Conv_op(small) == large
+ && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
+ && get_mode_arithmetic(sm) == irma_twos_complement
+ && get_mode_arithmetic(lm) == irma_twos_complement;
+} /* is_partially_same */
+
/**
* follow the memory chain as long as there are only Loads and alias free Stores.
*
* INC_MASTER() must be called before diving into the memory chain.
*/
-static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
+static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
+{
unsigned res = 0;
- ldst_info_t *info = get_irn_link(store);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
ir_node *pred;
ir_node *ptr = get_Store_ptr(store);
ir_node *mem = get_Store_mem(store);
ir_node *value = get_Store_value(store);
ir_mode *mode = get_irn_mode(value);
ir_node *block = get_nodes_block(store);
- ir_node *mblk = get_Block_MacroBlock(block);
for (pred = curr; pred != store;) {
- ldst_info_t *pred_info = get_irn_link(pred);
+ ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
/*
* BEWARE: one might think that checking the modes is useless, because
* if the pointers are identical, they refer to the same object.
* This is only true in strongly typed languages, not in C where the following
* is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
- * However, if the mode that is written have a bigger or equal size the the old
- * one, the old value is completely overwritten and can be killed ...
+ * However, if the size of the mode that is written is bigger than or equal to
+ * the size of the old one, the old value is completely overwritten and can be
+ * killed ...
*/
if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
- get_nodes_MacroBlock(pred) == mblk &&
- is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
+ get_nodes_block(pred) == block) {
/*
- * a Store after a Store in the same MacroBlock -- a write after write.
- * We may remove the first Store, if it does not have an exception handler.
+ * a Store after a Store in the same Block -- a write after write.
+ */
+
+ /*
+ * We may remove the first Store, if the old value is completely
+ * overwritten or the old value is a part of the new value,
+ * and if it does not have an exception handler.
*
* TODO: What, if both have the same exception handler ???
*/
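+ /* e.g. "*p = a; *p = b;" -- the first Store is overwritten and can go. */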
- if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
- DBG_OPT_WAW(pred, store);
- exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
- kill_node(pred);
- reduce_adr_usage(ptr);
- return DF_CHANGED;
+ if (get_Store_volatility(pred) != volatility_is_volatile
+ && !pred_info->projs[pn_Store_X_except]) {
+ ir_node *predvalue = get_Store_value(pred);
+ ir_mode *predmode = get_irn_mode(predvalue);
+
+ if (is_completely_overwritten(predmode, mode)
+ || is_partially_same(predvalue, value)) {
+ DBG_OPT_WAW(pred, store);
+ exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
+ kill_node(pred);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
+ }
+
+ /*
+ * We may remove the Store, if the old value already contains
+ * the new value, and if it does not have an exception handler.
+ *
+ * TODO: What, if both have the same exception handler ???
+ */
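+ /* e.g. "*(int*)p = x; *(char*)p = (char)x;" -- the second, narrower Store adds nothing. */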
+ if (get_Store_volatility(store) != volatility_is_volatile
+ && !info->projs[pn_Store_X_except]) {
+ ir_node *predvalue = get_Store_value(pred);
+
+ if (is_partially_same(value, predvalue)) {
+ DBG_OPT_WAW(pred, store);
+ exchange(info->projs[pn_Store_M], mem);
+ kill_node(store);
+ reduce_adr_usage(ptr);
+ return DF_CHANGED;
+ }
}
} else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
value == pred_info->projs[pn_Load_res]) {
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, mode);
pred = skip_Proj(get_Store_mem(pred));
} else if (is_Load(pred)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
+ get_Load_ptr(pred), get_Load_mode(pred),
ptr, mode);
if (rel != ir_no_alias)
break;
/** find entity used as base for an address calculation */
static ir_entity *find_entity(ir_node *ptr)
{
- switch(get_irn_opcode(ptr)) {
+ switch (get_irn_opcode(ptr)) {
case iro_SymConst:
return get_SymConst_entity(ptr);
case iro_Sel: {
*
* @param store the Store node
*/
-static unsigned optimize_store(ir_node *store) {
+static unsigned optimize_store(ir_node *store)
+{
ir_node *ptr;
ir_node *mem;
ir_entity *entity;
/* a store to an entity which is never read is unnecessary */
if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
- ldst_info_t *info = get_irn_link(store);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
if (info->projs[pn_Store_X_except] == NULL) {
+ DB((dbg, LEVEL_1, " Killing useless %+F to never read entity %+F\n", store, entity));
exchange(info->projs[pn_Store_M], get_Store_mem(store));
kill_node(store);
reduce_adr_usage(ptr);
block = get_nodes_block(store);
- /* abort on dead blocks */
- if (is_Block_dead(block))
- return 0;
-
/* check if the block is post dominated by Phi-block
and has no exception exit */
- bl_info = get_irn_link(block);
+ bl_info = (block_info_t*)get_irn_link(block);
if (bl_info->flags & BLOCK_HAS_EXC)
return 0;
/* this is the address of the store */
ptr = get_Store_ptr(store);
mode = get_irn_mode(get_Store_value(store));
- info = get_irn_link(store);
+ info = (ldst_info_t*)get_irn_link(store);
exc = info->exc_block;
for (i = 1; i < n; ++i) {
if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
return 0;
- info = get_irn_link(pred);
+ info = (ldst_info_t*)get_irn_link(pred);
/* check, if all stores have the same exception flow */
if (exc != info->exc_block)
return 0;
- /* abort on dead blocks */
block = get_nodes_block(pred);
- if (is_Block_dead(block))
- return 0;
/* check if the block is post dominated by Phi-block
and has no exception exit. Note that block must be different from
Phi-block, else we would move a Store from the End of a block to its
Start... */
- bl_info = get_irn_link(block);
+ bl_info = (block_info_t*)get_irn_link(block);
if (bl_info->flags & BLOCK_HAS_EXC)
return 0;
if (block == phi_block || ! block_postdominates(phi_block, block))
assert(is_Proj(projMs[i]));
store = get_Proj_pred(projMs[i]);
- info = get_irn_link(store);
+ info = (ldst_info_t*)get_irn_link(store);
inM[i] = get_Store_mem(store);
inD[i] = get_Store_value(store);
block = get_nodes_block(phi);
/* second step: create a new memory Phi */
- phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
+ phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
/* third step: create a new data Phi */
- phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
+ phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
/* rewire memory and kill the node */
for (i = n - 1; i >= 0; --i) {
ir_node *proj = projMs[i];
- if(is_Proj(proj)) {
+ if (is_Proj(proj)) {
ir_node *store = get_Proj_pred(proj);
exchange(proj, inM[i]);
kill_node(store);
}
/* fourth step: create the Store */
- store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
+ store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
#ifdef DO_CACHEOPT
co_set_irn_name(store, co_get_irn_ident(old_store));
#endif
- projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
+ projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
info = get_ldst_info(store, &wenv->obst);
info->projs[pn_Store_M] = projM;
/* fifth step: repair exception flow */
if (exc) {
- ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
+ ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
info->projs[pn_Store_X_except] = projX;
info->exc_block = exc;
/**
* walker, do the optimizations
*/
-static void do_load_store_optimize(ir_node *n, void *env) {
- walk_env_t *wenv = env;
+static void do_load_store_optimize(ir_node *n, void *env)
+{
+ walk_env_t *wenv = (walk_env_t*)env;
switch (get_irn_opcode(n)) {
/** A scc. */
typedef struct scc {
- ir_node *head; /**< the head of the list */
+ ir_node *head; /**< the head of the list */
} scc;
/** A node entry. */
typedef struct node_entry {
unsigned DFSnum; /**< the DFS number of this node */
unsigned low; /**< the low number of this node */
- ir_node *header; /**< the header of this node */
int in_stack; /**< flag, set if the node is on the stack */
ir_node *next; /**< link to the next node in the same scc */
scc *pscc; /**< the scc of this node */
typedef struct loop_env {
ir_phase ph; /**< the phase object */
ir_node **stack; /**< the node stack */
- int tos; /**< tos index */
+ size_t tos; /**< tos index */
unsigned nextDFSnum; /**< the current DFS number */
unsigned POnum; /**< current post order number */
/**
* Gets the node_entry of a node
*/
-static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
+static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
+{
ir_phase *ph = &env->ph;
- node_entry *e = phase_get_irn_data(&env->ph, irn);
+ node_entry *e = (node_entry*)phase_get_irn_data(&env->ph, irn);
if (! e) {
- e = phase_alloc(ph, sizeof(*e));
+ e = (node_entry*)phase_alloc(ph, sizeof(*e));
memset(e, 0, sizeof(*e));
phase_set_irn_data(ph, irn, e);
}
* @param env the loop environment
* @param n the node to push
*/
-static void push(loop_env *env, ir_node *n) {
+static void push(loop_env *env, ir_node *n)
+{
node_entry *e;
if (env->tos == ARR_LEN(env->stack)) {
- int nlen = ARR_LEN(env->stack) * 2;
+ size_t nlen = ARR_LEN(env->stack) * 2;
ARR_RESIZE(ir_node *, env->stack, nlen);
}
env->stack[env->tos++] = n;
*
* @return The topmost node
*/
-static ir_node *pop(loop_env *env) {
+static ir_node *pop(loop_env *env)
+{
ir_node *n = env->stack[--env->tos];
node_entry *e = get_irn_ne(n, env);
* @param irn the node to check
* @param header_block the header block of the induction variable
*/
-static int is_rc(ir_node *irn, ir_node *header_block) {
+static int is_rc(ir_node *irn, ir_node *header_block)
+{
ir_node *block = get_nodes_block(irn);
return (block != header_block) && block_dominates(block, header_block);
phi_entry *next;
};
+/**
+ * An entry in the avail set.
+ */
+typedef struct avail_entry_t {
+ ir_node *ptr; /**< the address pointer */
+ ir_mode *mode; /**< the load mode */
+ ir_node *load; /**< the associated Load */
+} avail_entry_t;
+
+/**
+ * Compare two avail entries.
+ */
+static int cmp_avail_entry(const void *elt, const void *key, size_t size)
+{
+ const avail_entry_t *a = (const avail_entry_t*)elt;
+ const avail_entry_t *b = (const avail_entry_t*)key;
+ (void) size;
+
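+ /* set convention: return 0 iff both entries describe the same (ptr, mode) pair */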
+ return a->ptr != b->ptr || a->mode != b->mode;
+} /* cmp_avail_entry */
+
+/**
+ * Calculate the hash value of an avail entry.
+ */
+static unsigned hash_cache_entry(const avail_entry_t *entry)
+{
+ return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
+} /* hash_cache_entry */
+
/**
* Move loops out of loops if possible.
*
* @param pscc the loop described by an SCC
* @param env the loop environment
*/
-static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
+static void move_loads_out_of_loops(scc *pscc, loop_env *env)
+{
ir_node *phi, *load, *next, *other, *next_other;
ir_entity *ent;
int j;
phi_entry *phi_list = NULL;
+ set *avail;
+
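+ /* maps (address, mode) to a Load already placed in a predecessor block,
+ * so the same value is not loaded more than once when moved out of the loop */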
+ avail = new_set(cmp_avail_entry, 8);
/* collect all outer memories */
for (phi = pscc->head; phi != NULL; phi = next) {
if (! is_Phi(phi))
continue;
- assert(get_irn_mode(phi) == mode_M && "DFS geturn non-memory Phi");
+ assert(get_irn_mode(phi) == mode_M && "DFS return non-memory Phi");
for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
ir_node *pred = get_irn_n(phi, j);
if (pe->pscc != ne->pscc) {
/* not in the same SCC, is region const */
- phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));
+ phi_entry *pe = (phi_entry*)phase_alloc(&env->ph, sizeof(*pe));
pe->phi = phi;
pe->pos = j;
/* no Phis no fun */
assert(phi_list != NULL && "DFS found a loop without Phi");
+ /* for now, we cannot handle more than one input (only reducible cf) */
+ if (phi_list->next != NULL)
+ return;
+
for (load = pscc->head; load; load = next) {
ir_mode *load_mode;
node_entry *ne = get_irn_ne(load, env);
next = ne->next;
if (is_Load(load)) {
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *ptr = get_Load_ptr(load);
/* for now, we cannot handle Loads with exceptions */
if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
continue;
- /* for now, we can only handle Load(Global) */
+ /* for now, we can only move Load(Global) */
if (! is_Global(ptr))
continue;
- ent = get_Global_entity(ptr);
+ ent = get_Global_entity(ptr);
load_mode = get_Load_mode(load);
for (other = pscc->head; other != NULL; other = next_other) {
node_entry *ne = get_irn_ne(other, env);
if (is_Store(other)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(other),
get_irn_mode(get_Store_value(other)),
ptr, load_mode);
if (rel != ir_no_alias)
break;
}
- /* only pure Calls are allowed here, so ignore them */
+ /* only Phis and pure Calls are allowed here, so ignore them */
}
if (other == NULL) {
- ldst_info_t *ninfo;
+ ldst_info_t *ninfo = NULL;
phi_entry *pe;
dbg_info *db;
- /* for now, we cannot handle more than one input */
- if (phi_list->next != NULL)
- return;
-
/* yep, no aliasing Store found, Load can be moved */
DB((dbg, LEVEL_1, " Found a Load that could be moved: %+F\n", load));
ir_node *blk = get_nodes_block(phi);
ir_node *pred = get_Block_cfgpred_block(blk, pos);
ir_node *irn, *mem;
+ avail_entry_t entry, *res;
- pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
+ entry.ptr = ptr;
+ entry.mode = load_mode;
+ res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+ if (res != NULL) {
+ irn = res->load;
+ } else {
+ irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
+ entry.load = irn;
+ set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+ DB((dbg, LEVEL_1, " Created %+F in %+F\n", irn, pred));
+ }
+ pe->load = irn;
ninfo = get_ldst_info(irn, phase_obst(&env->ph));
- ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
- set_Phi_pred(phi, pos, mem);
-
- ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
+ ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
+ if (res == NULL) {
+ /* only rewire the Phi if we created a fresh Load above; if irn came
+ * from the avail cache, do not set the Phi pred again -- there might
+ * already be other Loads between phi and irn. */
+ set_Phi_pred(phi, pos, mem);
+ }
- DB((dbg, LEVEL_1, " Created %+F in %+F\n", irn, pred));
+ ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
}
/* now kill the old Load */
}
}
}
+ del_set(avail);
} /* move_loads_out_of_loops */
/**
* @param pscc the SCC
* @param env the loop environment
*/
-static void process_loop(scc *pscc, loop_env *env) {
+static void process_loop(scc *pscc, loop_env *env)
+{
ir_node *irn, *next, *header = NULL;
node_entry *b, *h = NULL;
int j, only_phi, num_outside, process = 0;
next = e->next;
b = get_irn_ne(block, env);
- if (header) {
+ if (header != NULL) {
if (h->POnum < b->POnum) {
header = block;
h = b;
}
- }
- else {
+ } else {
header = block;
h = b;
}
/* not a memory loop */
goto fail;
}
- if (! out_rc) {
+ if (out_rc == NULL) {
+ /* first region constant */
out_rc = pred;
++num_outside;
} else if (out_rc != pred) {
+ /* another region constant */
++num_outside;
}
}
for (irn = pscc->head; irn; irn = next) {
node_entry *e = get_irn_ne(irn, env);
next = e->next;
- e->header = NULL;
exchange(irn, out_rc);
}
env->changes |= DF_CHANGED;
return;
}
- /* set the header for every node in this scc */
+#ifdef DEBUG_libfirm
for (irn = pscc->head; irn; irn = next) {
node_entry *e = get_irn_ne(irn, env);
- e->header = header;
next = e->next;
DB((dbg, LEVEL_2, " %+F,", irn));
}
DB((dbg, LEVEL_2, "\n"));
-
+#endif
move_loads_out_of_loops(pscc, env);
fail:
* @param pscc the SCC
* @param env the loop environment
*/
-static void process_scc(scc *pscc, loop_env *env) {
+static void process_scc(scc *pscc, loop_env *env)
+{
ir_node *head = pscc->head;
node_entry *e = get_irn_ne(head, env);
ir_node *pred = get_irn_n(irn, i);
node_entry *o = get_irn_ne(pred, env);
- if (irn_not_visited(pred)) {
+ if (!irn_visited(pred)) {
dfs(pred, env);
node->low = MIN(node->low, o->low);
}
ir_node *pred = get_fragile_op_mem(irn);
node_entry *o = get_irn_ne(pred, env);
- if (irn_not_visited(pred)) {
+ if (!irn_visited(pred)) {
dfs(pred, env);
node->low = MIN(node->low, o->low);
}
ir_node *pred = get_Proj_pred(irn);
node_entry *o = get_irn_ne(pred, env);
- if (irn_not_visited(pred)) {
+ if (!irn_visited(pred)) {
dfs(pred, env);
node->low = MIN(node->low, o->low);
}
}
if (node->low == node->DFSnum) {
- scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
+ scc *pscc = (scc*)phase_alloc(&env->ph, sizeof(*pscc));
ir_node *x;
pscc->head = NULL;
* @param irg the graph to process
* @param env the loop environment
*/
-static void do_dfs(ir_graph *irg, loop_env *env) {
- ir_graph *rem = current_ir_graph;
+static void do_dfs(ir_graph *irg, loop_env *env)
+{
ir_node *endblk, *end;
int i;
- current_ir_graph = irg;
inc_irg_visited(irg);
/* visit all memory nodes */
dfs(get_Raise_mem(pred), env);
else if (is_fragile_op(pred))
dfs(get_fragile_op_mem(pred), env);
+ else if (is_Bad(pred))
+ /* ignore non-optimized block predecessor */;
else {
assert(0 && "Unknown EndBlock predecessor");
}
for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
ir_node *ka = get_End_keepalive(end, i);
- if (is_Phi(ka) && irn_not_visited(ka))
+ if (is_Phi(ka) && !irn_visited(ka))
dfs(ka, env);
}
- current_ir_graph = rem;
} /* do_dfs */
-/**
- * Initialize new phase data. We do this always explicit, so return NULL here
- */
-static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
- (void)ph;
- (void)irn;
- (void)data;
- return NULL;
-} /* init_loop_data */
-
/**
* Optimize Loads/Stores in loops.
*
* @param irg the graph
*/
-static int optimize_loops(ir_graph *irg) {
+static int optimize_loops(ir_graph *irg)
+{
loop_env env;
env.stack = NEW_ARR_F(ir_node *, 128);
env.nextDFSnum = 0;
env.POnum = 0;
env.changes = 0;
- phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
+ phase_init(&env.ph, irg, phase_irn_init_default);
/* calculate the SCC's and drive loop optimization. */
do_dfs(irg, &env);
DEL_ARR_F(env.stack);
- phase_free(&env.ph);
+ phase_deinit(&env.ph);
return env.changes;
} /* optimize_loops */
/*
* do the load store optimization
*/
-void optimize_load_store(ir_graph *irg) {
+int optimize_load_store(ir_graph *irg)
+{
walk_env_t env;
FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
have Bad() predecessors. */
set_irg_doms_inconsistent(irg);
}
+ return env.changes != 0;
} /* optimize_load_store */
+
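+/**
+ * Wrap optimize_load_store() as a graph pass; the pass reports whether
+ * the graph was changed.
+ */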
+ir_graph_pass_t *optimize_load_store_pass(const char *name)
+{
+ return def_graph_pass_ret(name ? name : "ldst", optimize_load_store);
+} /* optimize_load_store_pass */