#include "irphase_t.h"
#include "irgopt.h"
#include "set.h"
+#include "be.h"
#include "debug.h"
/** The debug handle. */
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
-#ifdef DO_CACHEOPT
-#include "cacheopt/cachesim.h"
-#endif
-
#undef IMAX
#define IMAX(a,b) ((a) > (b) ? (a) : (b))
if (is_Proj(proj)) {
pred = get_Proj_pred(proj);
- is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
+ is_exc = is_x_except_Proj(proj);
}
/* ignore Bad predecessors, they will be removed later */
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
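+ /* give up unless tlower <= tv <= tupper, i.e. the access is provably in range */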
- if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
* @param depth current depth in steps upward from the root
* of the address
*/
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
- int path_len, pos, idx;
+ size_t path_len, pos, idx;
ir_tarval *tv;
ir_type *tp;
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
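+/**
+ * An entry of the access path used by rec_find_compound_ent_value():
+ * the entity selected at this step and the index used within it.
+ */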
typedef struct path_entry {
ir_entity *ent;
struct path_entry *next;
- long index;
+ size_t index;
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
ir_initializer_t *initializer;
ir_tarval *tv;
ir_type *tp;
- unsigned n;
+ size_t n;
entry.next = next;
if (is_SymConst(ptr)) {
continue;
}
}
- if (p->index >= (int) n)
+ if (p->index >= n)
return NULL;
initializer = get_initializer_compound_value(initializer, p->index);
assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
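+ /* make the index zero-based by subtracting the array's lower bound */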
entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
} else {
- int i, n_members = get_compound_n_members(tp);
+ size_t i, n_members = get_compound_n_members(tp);
for (i = 0; i < n_members; ++i) {
if (get_compound_member(tp, i) == field)
break;
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
*/
static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
{
+ unsigned old_size;
+ unsigned new_size;
if (old_mode == new_mode)
- return 1;
+ return true;
+
+ old_size = get_mode_size_bits(old_mode);
+ new_size = get_mode_size_bits(new_mode);
/* if both modes are two's complement ones, we can always convert the
- Stored value into the needed one. */
- if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+ Stored value into the needed one. (On big-endian machines we currently
+ only support this for modes of the same size.) */
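+ /* e.g. on little endian an 8-bit load can directly reuse a 32-bit stored
+  * value, because the first byte in memory is the least significant one */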
+ if (old_size >= new_size &&
get_mode_arithmetic(old_mode) == irma_twos_complement &&
- get_mode_arithmetic(new_mode) == irma_twos_complement)
- return 1;
- return 0;
-} /* can_use_stored_value */
+ get_mode_arithmetic(new_mode) == irma_twos_complement &&
+ (!be_get_backend_param()->byte_order_big_endian
+ || old_size == new_size)) {
+ return true;
+ }
+ return false;
+}
/**
- * Check whether a Call is at least pure, ie. does only read memory.
+ * Check whether a Call is at least pure, i.e. only reads memory.
*/
static unsigned is_Call_pure(ir_node *call)
{
store_value = get_Store_value(store);
if (delta != 0 || store_mode != load_mode) {
- if (delta < 0 || delta + load_mode_len > store_mode_len)
+ /* TODO: implement for big-endian */
+ if (delta < 0 || delta + load_mode_len > store_mode_len
+ || (be_get_backend_param()->byte_order_big_endian
+ && load_mode_len != store_mode_len))
return 0;
if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
ir_node *cnst;
ir_graph *irg = get_irn_irg(load);
- /* FIXME: only true for little endian */
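+ /* little endian: a load delta bytes behind the Store address reads the
+  * bits starting at delta * 8, so extract them with a right shift */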
cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
store_value = new_r_Shr(get_nodes_block(load),
store_value, cnst, store_mode);
/* no exception */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
* Here, there is no need to check if the previous Load has an
* exception handler because they would have exactly the same
* exception...
+ *
+ * TODO: implement load-after-load with different mode for big
+ * endian
*/
if (info->projs[pn_Load_X_except] == NULL
|| get_nodes_block(load) == get_nodes_block(pred)) {
/* no exception */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
/* no exception, clear the info field as it might be checked later again */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(load);
- exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
block = get_nodes_block(store);
- /* abort on dead blocks */
- if (is_Block_dead(block))
- return 0;
-
/* check if the block is post dominated by Phi-block
and has no exception exit */
bl_info = (block_info_t*)get_irn_link(block);
if (exc != info->exc_block)
return 0;
- /* abort on dead blocks */
block = get_nodes_block(pred);
- if (is_Block_dead(block))
- return 0;
/* check if the block is post dominated by Phi-block
and has no exception exit. Note that block must be different from
ninfo = get_ldst_info(irn, phase_obst(&env->ph));
ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
- set_Phi_pred(phi, pos, mem);
+ if (res == NULL) {
+ /* only set the Phi pred for a newly created irn: if irn comes from
+  * the cache, there might already be other Loads between phi and irn,
+  * so do not set the Phi pred again.
+  */
+ set_Phi_pred(phi, pos, mem);
+ }
ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
}
dfs(get_Raise_mem(pred), env);
else if (is_fragile_op(pred))
dfs(get_fragile_op_mem(pred), env);
+ else if (is_Bad(pred))
+ /* ignore non-optimized block predecessor */;
else {
assert(0 && "Unknown EndBlock predecessor");
}
/* Handle graph state */
if (env.changes) {
- set_irg_outs_inconsistent(irg);
set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
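+ /* the out edges are no longer valid after the changes, deactivate them */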
+ edges_deactivate(irg);
}
if (env.changes & CF_CHANGED) {