/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
#include "irpass.h"
/* maximum number of output Proj's */
-#define MAX_PROJ (pn_Load_max > pn_Store_max ? pn_Load_max : pn_Store_max)
+#define MAX_PROJ ((long)pn_Load_max > (long)pn_Store_max ? (long)pn_Load_max : (long)pn_Store_max)
/**
* Mapping an address to an dense ID.
memop_t *next; /**< links to the next memory op in the block in forward order. */
memop_t *prev; /**< links to the previous memory op in the block in forward order. */
unsigned flags; /**< memop flags */
- ir_node *projs[MAX_PROJ]; /**< Projs of this memory op */
+ ir_node *projs[MAX_PROJ+1]; /**< Projs of this memory op */
};
/**
memop_t **curr_id_2_memop; /**< current map of address ids to memops */
unsigned curr_adr_id; /**< number for address mapping */
unsigned n_mem_ops; /**< number of memory operations (Loads/Stores) */
- unsigned rbs_size; /**< size of all bitsets in bytes */
+ size_t rbs_size; /**< size of all bitsets in bytes */
int max_cfg_preds; /**< maximum number of block cfg predecessors */
int changed; /**< Flags for changed graph state */
#ifdef DEBUG_libfirm
*/
static void dump_curr(block_t *bl, const char *s)
{
- unsigned end = env.rbs_size - 1;
- unsigned pos;
- int i;
+ size_t end = env.rbs_size - 1;
+ size_t pos;
+ int i;
DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
i = 0;
{
assert(is_Block(block));
- return get_irn_link(block);
+ return (block_t*)get_irn_link(block);
} /* get_block_entry */
/** Get the memop entry for a memory operation node */
static memop_t *get_irn_memop(const ir_node *irn)
{
assert(! is_Block(irn));
- return get_irn_link(irn);
+ return (memop_t*)get_irn_link(irn);
} /* get_irn_memop */
/**
goto restart;
}
- entry = ir_nodemap_get(&env.adr_map, adr);
+ entry = (address_entry*)ir_nodemap_get(&env.adr_map, adr);
if (entry == NULL) {
/* new address */
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
* @param depth current depth in steps upward from the root
* of the address
*/
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
{
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
- int path_len, pos, idx;
+ size_t path_len, pos, idx;
ir_tarval *tv;
ir_type *tp;
set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
}
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
- ir_mode *mode = get_irn_mode(ptr);
+ ir_mode *mode;
ir_tarval *tmp;
- if (is_Const(r) && get_irn_mode(l) == mode) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
typedef struct path_entry {
ir_entity *ent;
struct path_entry *next;
- long index;
+ size_t index;
} path_entry;
static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
ir_initializer_t *initializer;
ir_tarval *tv;
ir_type *tp;
- unsigned n;
+ size_t n;
entry.next = next;
if (is_SymConst(ptr)) {
continue;
}
}
- if (p->index >= (int) n)
+ if (p->index >= n)
return NULL;
initializer = get_initializer_compound_value(initializer, p->index);
assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
} else {
- int i, n_members = get_compound_n_members(tp);
+ size_t i, n_members = get_compound_n_members(tp);
for (i = 0; i < n_members; ++i) {
if (get_compound_member(tp, i) == field)
break;
}
return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
- ir_mode *mode;
+ ir_mode *mode;
unsigned pos;
- if (is_Const(r)) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
for (ent = field;;) {
unsigned size;
ir_tarval *sz, *tv_index, *tlower, *tupper;
- long index;
+ size_t index;
ir_node *bound;
tp = get_entity_type(ent);
if (tlower == tarval_bad || tupper == tarval_bad)
return NULL;
- if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+ if (tarval_cmp(tv_index, tlower) == ir_relation_less)
return NULL;
- if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+ if (tarval_cmp(tupper, tv_index) == ir_relation_less)
return NULL;
/* ok, bounds check finished */
/* no exception, clear the m fields as it might be checked later again */
if (m->projs[pn_Load_X_except]) {
ir_graph *irg = get_irn_irg(ptr);
- exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
+ exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
m->projs[pn_Load_X_except] = NULL;
m->flags &= ~FLAG_EXCEPTION;
env.changed = 1;
} /* update_Call_memop */
/**
- * Update a memop for a Div/Mod/Quot/DivMod.
+ * Update a memop for a Div/Mod.
*
* @param m the memop
*/
-static void update_DivOp_memop(memop_t *m)
+static void update_Div_memop(memop_t *m)
{
ir_node *div = m->node;
int i;
continue;
switch (get_Proj_proj(proj)) {
- case pn_Generic_X_except:
+ case pn_Div_X_except:
m->flags |= FLAG_EXCEPTION;
break;
- case pn_Generic_M:
+ case pn_Div_M:
m->mem = proj;
break;
}
}
-} /* update_DivOp_memop */
+}
+
+/**
+ * Update a memop for a Mod.
+ *
+ * Walks the out edges of the Mod node and records its results in the
+ * memop: sets FLAG_EXCEPTION if a pn_Mod_X_except Proj exists and
+ * remembers the memory result Proj (pn_Mod_M) in m->mem.
+ *
+ * @param m  the memop; m->node must be a Mod node
+ */
+static void update_Mod_memop(memop_t *m)
+{
+ ir_node *div = m->node;
+ int i;
+
+ for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
+ ir_node *proj = get_irn_out(div, i);
+
+ /* beware of keep edges */
+ if (is_End(proj))
+ continue;
+
+ switch (get_Proj_proj(proj)) {
+ case pn_Mod_X_except:
+ /* the Mod can raise an exception: remember that */
+ m->flags |= FLAG_EXCEPTION;
+ break;
+ case pn_Mod_M:
+ /* the memory result of the Mod */
+ m->mem = proj;
+ break;
+ }
+ }
+}
/**
* Update a memop for a Phi.
*/
static void update_Phi_memop(memop_t *m)
{
- /* the Phi is it's own mem */
+ /* the Phi is its own mem */
m->mem = m->node;
} /* update_Phi_memop */
/* we can those to find the memory edge */
break;
case iro_Div:
- case iro_DivMod:
- case iro_Quot:
+ update_Div_memop(op);
+ break;
case iro_Mod:
- update_DivOp_memop(op);
+ update_Mod_memop(op);
break;
case iro_Builtin:
*/
static void kill_memops(const value_t *value)
{
- unsigned end = env.rbs_size - 1;
- unsigned pos;
+ size_t end = env.rbs_size - 1;
+ size_t pos;
for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
memop_t *op = env.curr_id_2_memop[pos];
ir_node *succ = get_Block_cfg_out(block, 0);
block_t *succ_bl = get_block_entry(succ);
int pred_pos = get_Block_cfgpred_pos(succ, block);
- unsigned end = env.rbs_size - 1;
- unsigned pos;
+ size_t end = env.rbs_size - 1;
+ size_t pos;
kill_all();
proj = op->projs[pn_Load_X_except];
if (proj != NULL) {
ir_graph *irg = get_irn_irg(load);
- exchange(proj, new_r_Bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
}
proj = op->projs[pn_Load_X_regular];
if (proj != NULL) {
proj = op->projs[pn_Store_X_except];
if (proj != NULL) {
ir_graph *irg = get_irn_irg(store);
- exchange(proj, new_r_Bad(irg));
+ exchange(proj, new_r_Bad(irg, mode_X));
}
proj = op->projs[pn_Store_X_regular];
if (proj != NULL) {
{
ir_node *block = bl->block;
int i, n = get_Block_n_cfgpreds(block);
- unsigned end = env.rbs_size - 1;
- unsigned pos;
+ size_t end = env.rbs_size - 1;
DB((dbg, LEVEL_3, "processing %+F\n", block));
if (n > 1) {
ir_node **ins;
- int pos;
+ size_t pos;
NEW_ARR_A(ir_node *, ins, n);
}
/*
* Ensure that all values are in the map: build Phi's if necessary:
- * Note: the last bit is the sentinel and ALWAYS set, so start with -2.
+ * Note: the last bit is the sentinel and ALWAYS set, so stop the loop before it.
*/
- for (pos = env.rbs_size - 2; pos >= 0; --pos) {
+ for (pos = 0; pos < env.rbs_size - 1; ++pos) {
if (! rbitset_is_set(env.curr_set, pos))
env.curr_id_2_memop[pos] = NULL;
else {
}
if (n > 1) {
+ size_t pos;
+
/* check for partly redundant values */
for (pos = rbitset_next(bl->anticL_in, 0, 1);
pos < end;