/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "ircons_t.h"
#include "irgmod.h"
#include "irgwalk.h"
-#include "irvrfy.h"
#include "tv_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#endif
#undef IMAX
-#define IMAX(a,b) ((a) > (b) ? (a) : (b))
+#define IMAX(a,b) ((a) > (b) ? (a) : (b))
-#define MAX_PROJ IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
+#define MAX_PROJ IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
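Note on the casts above: pn_Load_max, pn_Store_max and pn_Call_max are constants of three distinct enum types. C mixes them in the conditional operator without complaint, but C++ compilers warn about (and newer standards deprecate) arithmetic between unrelated enum types, hence the (long) casts. A standalone sketch with made-up enums (pn_A/pn_B are hypothetical, not libFirm's):

    enum pn_A { pn_A_max = 3 };
    enum pn_B { pn_B_max = 5 };

    #define IMAX(a,b) ((a) > (b) ? (a) : (b))

    /* mixes two enum types: fine in C, warned about / deprecated in C++ */
    /* long m = IMAX(pn_A_max, pn_B_max); */

    /* casting both operands to one arithmetic type keeps both front ends quiet */
    long m = IMAX((long)pn_A_max, (long)pn_B_max);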
enum changes_t {
DF_CHANGED = 1, /**< data flow changed */
/**
* walker environment
*/
-typedef struct _walk_env_t {
+typedef struct walk_env_t {
struct obstack obst; /**< list of all stores */
unsigned changes; /**< a bitmask of graph changes */
} walk_env_t;
/** A Load/Store info. */
-typedef struct _ldst_info_t {
+typedef struct ldst_info_t {
ir_node *projs[MAX_PROJ]; /**< list of Proj's of this node */
ir_node *exc_block; /**< the exception block if available */
int exc_idx; /**< predecessor index in the exception block */
/**
* a Block info.
*/
-typedef struct _block_info_t {
+typedef struct block_info_t {
unsigned flags; /**< flags for the block */
} block_info_t;
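A note on the three tag renames above (walk_env_t, ldst_info_t, block_info_t): identifiers beginning with an underscore at file scope are reserved for the implementation in C (C99 7.1.3), and C++ reserves them in the global namespace. Since struct tags live in their own namespace, the tag can simply reuse the typedef name, which is valid in both languages:

    /* sketch: tag and typedef name may coincide, in C and C++ alike */
    typedef struct walk_env_t walk_env_t;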
*/
static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
{
- ldst_info_t *info = get_irn_link(node);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
if (! info) {
info = OALLOCZ(obst, ldst_info_t);
*/
static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
{
- block_info_t *info = get_irn_link(node);
+ block_info_t *info = (block_info_t*)get_irn_link(node);
if (! info) {
info = OALLOCZ(obst, block_info_t);
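The casts on get_irn_link's result follow the same C++-compatibility theme: C converts void* to any object pointer type implicitly, C++ does not. A self-contained illustration of the pattern:

    #include <stdlib.h>

    typedef struct info_t { unsigned flags; } info_t;

    static info_t *get_info(void *link)
    {
        info_t *info = (info_t*)link;   /* cast required under C++ */
        if (info == NULL)
            info = (info_t*)calloc(1, sizeof(*info)); /* zeroed, like OALLOCZ */
        return info;
    }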
*/
static void collect_nodes(ir_node *node, void *env)
{
- ir_opcode opcode = get_irn_opcode(node);
+ walk_env_t *wenv = (walk_env_t *)env;
+ unsigned opcode = get_irn_opcode(node);
ir_node *pred, *blk, *pred_blk;
ldst_info_t *ldst_info;
- walk_env_t *wenv = env;
if (opcode == iro_Proj) {
pred = get_Proj_pred(node);
int i, n;
for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
- ir_node *bound;
- tarval *tlower, *tupper;
- ir_node *index = get_Sel_index(ptr, i);
- tarval *tv = computed_value(index);
+ ir_node *bound;
+ ir_tarval *tlower, *tupper;
+ ir_node *index = get_Sel_index(ptr, i);
+ ir_tarval *tv = computed_value(index);
/* check if the index is constant */
if (tv == tarval_bad)
ir_node *l = get_Sub_left(ptr);
ir_node *r = get_Sub_right(ptr);
- if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
+ if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
ptr = l;
else
return NULL;
compound_graph_path *res = NULL;
ir_entity *root, *field, *ent;
int path_len, pos, idx;
- tarval *tv;
+ ir_tarval *tv;
ir_type *tp;
if (is_SymConst(ptr)) {
set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
}
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
- ir_mode *mode = get_irn_mode(ptr);
- tarval *tmp;
-
- if (is_Const(r) && get_irn_mode(l) == mode) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ ir_mode *mode;
+ ir_tarval *tmp;
+
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r) && get_irn_mode(l) == get_irn_mode(ptr)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
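The new inner block around l and r is not cosmetic: ptr_arith is a goto target, and C++ rejects a jump that bypasses the initialization of a variable still in scope at the label (C merely leaves such a variable indeterminate). The braces end the scope of l and r before the label. Reduced standalone example of the rule and the fix:

    int f(int cond)
    {
        if (cond)
            goto skip;    /* C++ error: jump crosses x's initialization */
        int x = 42;
        (void)x;
    skip:
        return 0;         /* x is still in scope here */
    }

    int g(int cond)
    {
        if (cond)
            goto skip;
        {
            int x = 42;
            (void)x;
        }                 /* x's scope ends here ... */
    skip:
        return 0;         /* ... so the jump is legal in C++ too */
    }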
}
idx = 0;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index, *tlower, *tupper;
- ir_node *bound;
+ unsigned size;
+ ir_tarval *sz, *tv_index, *tlower, *tupper;
+ ir_node *bound;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
pos = path_len - depth - idx;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index;
- long index;
+ unsigned size;
+ ir_tarval *sz, *tv_index;
+ long index;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
path_entry entry, *p;
ir_entity *ent, *field;
ir_initializer_t *initializer;
- tarval *tv;
+ ir_tarval *tv;
ir_type *tp;
unsigned n;
}
return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
} else if (is_Add(ptr)) {
- ir_node *l = get_Add_left(ptr);
- ir_node *r = get_Add_right(ptr);
ir_mode *mode;
unsigned pos;
- if (is_Const(r)) {
- ptr = l;
- tv = get_Const_tarval(r);
- } else {
- ptr = r;
- tv = get_Const_tarval(l);
+ {
+ ir_node *l = get_Add_left(ptr);
+ ir_node *r = get_Add_right(ptr);
+ if (is_Const(r)) {
+ ptr = l;
+ tv = get_Const_tarval(r);
+ } else {
+ ptr = r;
+ tv = get_Const_tarval(l);
+ }
}
ptr_arith:
mode = get_tarval_mode(tv);
/* fill them up */
pos = 0;
for (ent = field;;) {
- unsigned size;
- tarval *sz, *tv_index, *tlower, *tupper;
- long index;
- ir_node *bound;
+ unsigned size;
+ ir_tarval *sz, *tv_index, *tlower, *tupper;
+ long index;
+ ir_node *bound;
tp = get_entity_type(ent);
if (! is_Array_type(tp))
*/
static void handle_load_update(ir_node *load)
{
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
/* do NOT touch volatile loads for now */
if (get_Load_volatility(load) == volatility_is_volatile)
/* this Proj is dead now */
pred = get_Proj_pred(ptr);
if (is_Load(pred)) {
- ldst_info_t *info = get_irn_link(pred);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
info->projs[get_Proj_proj(ptr)] = NULL;
/* this node lost its result proj, handle that */
/* produce a shift to adjust offset delta */
if (delta > 0) {
ir_node *cnst;
+ ir_graph *irg = get_irn_irg(load);
/* FIXME: only true for little endian */
- cnst = new_Const_long(mode_Iu, delta * 8);
+ cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
store_value = new_r_Shr(get_nodes_block(load),
store_value, cnst, store_mode);
}
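Two things in this hunk: the constant is now built against an explicit graph instead of the implicit global, and the delta*8 shift still assumes little-endian byte numbering (a load at byte offset delta needs a right shift by delta*8 there; big-endian would need a different amount, hence the FIXME above). The constructor migration pattern, as used here:

    /* old, reads the global current_ir_graph behind the scenes:
     *     cnst = new_Const_long(mode_Iu, delta * 8);
     * new, graph recovered from a node at hand:
     *     ir_graph *irg = get_irn_irg(load);
     *     cnst = new_r_Const_long(irg, mode_Iu, delta * 8);
     */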
DBG_OPT_RAW(load, store_value);
- info = get_irn_link(load);
+ info = (ldst_info_t*)get_irn_link(load);
if (info->projs[pn_Load_M])
exchange(info->projs[pn_Load_M], get_Load_mem(load));
res = 0;
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange( info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
{
unsigned res = 0;
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *pred;
ir_node *ptr = get_Load_ptr(load);
ir_node *mem = get_Load_mem(load);
ir_mode *load_mode = get_Load_mode(load);
for (pred = curr; load != pred; ) {
- ldst_info_t *pred_info = get_irn_link(pred);
+ ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
/*
* a Load immediately after a Store -- a read after write.
* We may remove the Load, if both Load & Store does not have an
- * exception handler OR they are in the same MacroBlock. In the latter
+ * exception handler OR they are in the same Block. In the latter
* case the Load cannot throw an exception when the previous Store was
* quiet.
*
* Why we need to check for Store Exception? If the Store cannot
* be executed (ROM) the exception handler might simply jump into
- * the load MacroBlock :-(
+ * the load Block :-(
* We could make it a little bit better if we would know that the
* exception handler of the Store jumps directly to the end...
*/
if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
&& info->projs[pn_Load_X_except] == NULL)
- || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
+ || get_nodes_block(load) == get_nodes_block(pred)))
{
long load_offset;
ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
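Seen at the source level, the read-after-write case handled here looks like this (illustrative C, not Firm IR):

    int raw(int *p)
    {
        *p = 7;        /* Store                                    */
        return *p;     /* Load after Store: folded to 7, provided  */
    }                  /* the exception conditions above hold      */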
can_use_stored_value(get_Load_mode(pred), load_mode)) {
/*
* a Load after a Load -- a read after read.
- * We may remove the second Load, if it does not have an exception handler
- * OR they are in the same MacroBlock. In the later case the Load cannot
- * throw an exception when the previous Load was quiet.
+ * We may remove the second Load, if it does not have an exception
+ * handler OR they are in the same Block. In the latter case
+ * the Load cannot throw an exception when the previous Load was
+ * quiet.
*
- * Here, there is no need to check if the previous Load has an exception
- * hander because they would have exact the same exception...
+ * Here, there is no need to check if the previous Load has an
+ * exception handler because they would have exactly the same
+ * exception...
*/
- if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
+ if (info->projs[pn_Load_X_except] == NULL
+ || get_nodes_block(load) == get_nodes_block(pred)) {
ir_node *value;
DBG_OPT_RAR(load, pred);
/* no exception */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
res |= CF_CHANGED;
}
if (info->projs[pn_Load_X_regular]) {
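And the read-after-read counterpart, where the second Load is replaced by the first one's result:

    int rar(const int *p)
    {
        int a = *p;    /* first Load                    */
        int b = *p;    /* second Load: reuses a's value */
        return a + b;
    }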
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, load_mode);
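get_alias_relation loses its ir_graph parameter throughout this patch; the graph is implied by the pointer nodes themselves. Call sites shrink accordingly:

    /* old: rel = get_alias_relation(current_ir_graph, ptr1, mode1, ptr2, mode2);
     * new: rel = get_alias_relation(ptr1, mode1, ptr2, mode2);                  */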
*/
ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
{
- ir_mode *c_mode = get_irn_mode(c);
- ir_mode *l_mode = get_Load_mode(load);
- ir_node *res = NULL;
+ ir_mode *c_mode = get_irn_mode(c);
+ ir_mode *l_mode = get_Load_mode(load);
+ ir_node *block = get_nodes_block(load);
+ dbg_info *dbgi = get_irn_dbg_info(load);
+ ir_node *res = copy_const_value(dbgi, c, block);
if (c_mode != l_mode) {
/* check, if the mode matches OR can be easily converted info */
if (is_reinterpret_cast(c_mode, l_mode)) {
- /* we can safely cast */
- dbg_info *dbg = get_irn_dbg_info(load);
- ir_node *block = get_nodes_block(load);
-
/* copy the value from the const code irg and cast it */
- res = copy_const_value(dbg, c);
- res = new_rd_Conv(dbg, block, res, l_mode);
+ res = new_rd_Conv(dbgi, block, res, l_mode);
+ } else {
+ return NULL;
}
- } else {
- /* copy the value from the const code irg */
- res = copy_const_value(get_irn_dbg_info(load), c);
}
return res;
-} /* can_replace_load_by_const */
+}
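With this restructuring, copy_const_value now takes the target block and runs unconditionally; the mode check then either wraps the copy in a Conv (for bit-compatible modes) or rejects the replacement by returning NULL. A hedged usage sketch:

    /* ir_node *val = can_replace_load_by_const(load, c);
     * if (val != NULL) {
     *     ... exchange the Load's result Proj with val ...
     * }                                                    */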
/**
* optimize a Load
*/
static unsigned optimize_load(ir_node *load)
{
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *mem, *ptr, *value;
ir_entity *ent;
long dummy;
/* no exception, clear the info field as it might be checked later again */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
if (value != NULL) {
/* we completely replace the load by this value */
if (info->projs[pn_Load_X_except]) {
- exchange(info->projs[pn_Load_X_except], new_Bad());
+ ir_graph *irg = get_irn_irg(load);
+ exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
info->projs[pn_Load_X_except] = NULL;
res |= CF_CHANGED;
}
static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
{
unsigned res = 0;
- ldst_info_t *info = get_irn_link(store);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
ir_node *pred;
ir_node *ptr = get_Store_ptr(store);
ir_node *mem = get_Store_mem(store);
ir_node *value = get_Store_value(store);
ir_mode *mode = get_irn_mode(value);
ir_node *block = get_nodes_block(store);
- ir_node *mblk = get_Block_MacroBlock(block);
for (pred = curr; pred != store;) {
- ldst_info_t *pred_info = get_irn_link(pred);
+ ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
/*
* BEWARE: one might think that checking the modes is useless, because
* killed ...
*/
if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
- get_nodes_MacroBlock(pred) == mblk) {
+ get_nodes_block(pred) == block) {
/*
- * a Store after a Store in the same MacroBlock -- a write after write.
+ * a Store after a Store in the same Block -- a write after write.
*/
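Source-level shape of the write-after-write case:

    void waw(int *p)
    {
        *p = 1;    /* older Store: dead once the second one is seen */
        *p = 2;    /* surviving Store                               */
    }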
/*
if (is_Store(pred)) {
/* check if we can pass through this store */
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(pred),
get_irn_mode(get_Store_value(pred)),
ptr, mode);
pred = skip_Proj(get_Store_mem(pred));
} else if (is_Load(pred)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
+ get_Load_ptr(pred), get_Load_mode(pred),
ptr, mode);
if (rel != ir_no_alias)
break;
/* a store to an entity which is never read is unnecessary */
if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
- ldst_info_t *info = get_irn_link(store);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
if (info->projs[pn_Store_X_except] == NULL) {
DB((dbg, LEVEL_1, " Killing useless %+F to never read entity %+F\n", store, entity));
exchange(info->projs[pn_Store_M], get_Store_mem(store));
/* check if the block is post dominated by Phi-block
and has no exception exit */
- bl_info = get_irn_link(block);
+ bl_info = (block_info_t*)get_irn_link(block);
if (bl_info->flags & BLOCK_HAS_EXC)
return 0;
/* this is the address of the store */
ptr = get_Store_ptr(store);
mode = get_irn_mode(get_Store_value(store));
- info = get_irn_link(store);
+ info = (ldst_info_t*)get_irn_link(store);
exc = info->exc_block;
for (i = 1; i < n; ++i) {
if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
return 0;
- info = get_irn_link(pred);
+ info = (ldst_info_t*)get_irn_link(pred);
/* check, if all stores have the same exception flow */
if (exc != info->exc_block)
and has no exception exit. Note that block must be different from
Phi-block, else we would move a Store from end End of a block to its
Start... */
- bl_info = get_irn_link(block);
+ bl_info = (block_info_t*)get_irn_link(block);
if (bl_info->flags & BLOCK_HAS_EXC)
return 0;
if (block == phi_block || ! block_postdominates(phi_block, block))
assert(is_Proj(projMs[i]));
store = get_Proj_pred(projMs[i]);
- info = get_irn_link(store);
+ info = (ldst_info_t*)get_irn_link(store);
inM[i] = get_Store_mem(store);
inD[i] = get_Store_value(store);
}
/* fourth step: create the Store */
- store = new_rd_Store(db, block, phiM, ptr, phiD, 0);
+ store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
#ifdef DO_CACHEOPT
co_set_irn_name(store, co_get_irn_ident(old_store));
#endif
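The bare 0 in the Store constructor's last argument becomes the named flag cons_none; that parameter is a construction-flags value (marking e.g. volatile or unaligned accesses), so the named constant documents that no special semantics are requested:

    /* store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
     * versus, e.g., a volatile store:
     * store = new_rd_Store(db, block, phiM, ptr, phiD, cons_volatile); */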
*/
static void do_load_store_optimize(ir_node *n, void *env)
{
- walk_env_t *wenv = env;
+ walk_env_t *wenv = (walk_env_t*)env;
switch (get_irn_opcode(n)) {
/** A scc. */
typedef struct scc {
- ir_node *head; /**< the head of the list */
+ ir_node *head; /**< the head of the list */
} scc;
/** A node entry. */
typedef struct loop_env {
ir_phase ph; /**< the phase object */
ir_node **stack; /**< the node stack */
- int tos; /**< tos index */
+ size_t tos; /**< tos index */
unsigned nextDFSnum; /**< the current DFS number */
unsigned POnum; /**< current post order number */
static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
{
ir_phase *ph = &env->ph;
- node_entry *e = phase_get_irn_data(&env->ph, irn);
+ node_entry *e = (node_entry*)phase_get_irn_data(&env->ph, irn);
if (! e) {
- e = phase_alloc(ph, sizeof(*e));
+ e = (node_entry*)phase_alloc(ph, sizeof(*e));
memset(e, 0, sizeof(*e));
phase_set_irn_data(ph, irn, e);
}
node_entry *e;
if (env->tos == ARR_LEN(env->stack)) {
- int nlen = ARR_LEN(env->stack) * 2;
+ size_t nlen = ARR_LEN(env->stack) * 2;
ARR_RESIZE(ir_node *, env->stack, nlen);
}
env->stack[env->tos++] = n;
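tos and the resize length move from int to size_t, matching ARR_LEN's type and avoiding signed/unsigned comparison warnings; the stack still grows geometrically. The same push in self-contained C (error handling elided for brevity):

    #include <stdlib.h>

    typedef struct stack_t {
        void **data;
        size_t tos;   /* top-of-stack index, size_t like ARR_LEN's result */
        size_t cap;
    } stack_t;

    static void push(stack_t *s, void *n)
    {
        if (s->tos == s->cap) {            /* full: double the capacity */
            s->cap  = s->cap ? 2 * s->cap : 16;
            s->data = (void**)realloc(s->data, s->cap * sizeof(*s->data));
        }
        s->data[s->tos++] = n;
    }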
*/
static int cmp_avail_entry(const void *elt, const void *key, size_t size)
{
- const avail_entry_t *a = elt;
- const avail_entry_t *b = key;
+ const avail_entry_t *a = (const avail_entry_t*)elt;
+ const avail_entry_t *b = (const avail_entry_t*)key;
(void) size;
return a->ptr != b->ptr || a->mode != b->mode;
if (pe->pscc != ne->pscc) {
/* not in the same SCC, is region const */
- phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));
+ phi_entry *pe = (phi_entry*)phase_alloc(&env->ph, sizeof(*pe));
pe->phi = phi;
pe->pos = j;
next = ne->next;
if (is_Load(load)) {
- ldst_info_t *info = get_irn_link(load);
+ ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
ir_node *ptr = get_Load_ptr(load);
/* for now, we cannot handle Loads with exceptions */
if (is_Store(other)) {
ir_alias_relation rel = get_alias_relation(
- current_ir_graph,
get_Store_ptr(other),
get_irn_mode(get_Store_value(other)),
ptr, load_mode);
/* only Phis and pure Calls are allowed here, so ignore them */
}
if (other == NULL) {
- ldst_info_t *ninfo;
+ ldst_info_t *ninfo = NULL;
phi_entry *pe;
dbg_info *db;
entry.ptr = ptr;
entry.mode = load_mode;
- res = set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+ res = (avail_entry_t*)set_find(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
if (res != NULL) {
irn = res->load;
} else {
- irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, 0);
+ irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
entry.load = irn;
set_insert(avail, &entry, sizeof(entry), hash_cache_entry(&entry));
DB((dbg, LEVEL_1, " Created %+F in %+F\n", irn, pred));
}
if (node->low == node->DFSnum) {
- scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
+ scc *pscc = (scc*)phase_alloc(&env->ph, sizeof(*pscc));
ir_node *x;
pscc->head = NULL;
*/
static void do_dfs(ir_graph *irg, loop_env *env)
{
- ir_graph *rem = current_ir_graph;
ir_node *endblk, *end;
int i;
- current_ir_graph = irg;
inc_irg_visited(irg);
/* visit all memory nodes */
if (is_Phi(ka) && !irn_visited(ka))
dfs(ka, env);
}
- current_ir_graph = rem;
} /* do_dfs */
-/**
- * Initialize new phase data. We do this always explicit, so return NULL here
- */
-static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data)
-{
- (void)ph;
- (void)irn;
- (void)data;
- return NULL;
-} /* init_loop_data */
-
/**
* Optimize Loads/Stores in loops.
*
env.nextDFSnum = 0;
env.POnum = 0;
env.changes = 0;
- phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
+ phase_init(&env.ph, irg, phase_irn_init_default);
/* calculate the SCC's and drive loop optimization. */
do_dfs(irg, &env);
DEL_ARR_F(env.stack);
- phase_free(&env.ph);
+ phase_deinit(&env.ph);
return env.changes;
} /* optimize_loops */
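The tail of the patch finishes the de-globalization: do_dfs no longer saves and restores current_ir_graph, the trivial init_loop_data callback (it only returned NULL) is dropped, and the phase object is set up with the stock per-node initializer and torn down with phase_deinit:

    /* old: phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH,
     *                 init_loop_data, NULL);
     *      ...
     *      phase_free(&env.ph);
     * new: phase_init(&env.ph, irg, phase_irn_init_default);
     *      ...
     *      phase_deinit(&env.ph);                                    */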