/* Attempt to reduce register pressure and reduce code size
for hoisted nodes. */
-#define HOIST_HIGH 0
+#define HOIST_HIGH 1
#define COMMON_DOM 1
/* Seamless implementation of handling loads and generally memory
TODO Broken for yet unknown reasons. */
#define OPTIMIZE_NODES 0
-#define OLD_DIVMODS 0
-
-/* NIY Choose to be optimized nodes in a more sophisticated way
- to reduce number of newly introduced phi nodes. */
-#define BETTER_GREED 0
-
/** Additional info we need for every block. */
typedef struct block_info {
int infinite_loops;
} gvnpre_statistics;
-gvnpre_statistics *gvnpre_stats = NULL;
+static gvnpre_statistics *gvnpre_stats = NULL;
-static void init_stats()
+static void init_stats(void)
{
gvnpre_stats = XMALLOCZ(gvnpre_statistics);
}
-static void free_stats()
+static void free_stats(void)
{
free(gvnpre_stats);
gvnpre_stats = NULL;
}
-static void print_stats()
+static void print_stats(void)
{
gvnpre_statistics *stats = gvnpre_stats;
DB((dbg, LEVEL_1, "replaced : %d\n", stats->replaced));
/* memops are not the same, even if we want to optimize them
   we have to take the order into account */
- if (is_memop(a) || is_memop(b)) {
+ if (is_memop(a) || is_memop(b) ||
+ get_irn_mode(a) == mode_T ||
+ get_irn_mode(b) == mode_T) {
/* Loads with the same predecessors are the same value;
this should only happen after phi translation. */
if ((! is_Load(a) || ! is_Load(b)) && (! is_Store(a) || ! is_Store(b)))
if (is_Phi(n))
return 1;
-#if LOADS || OLD_DIVMODS || DIVMODS
+#if LOADS || DIVMODS
if (is_Proj(n) && mode != mode_X && mode != mode_T)
return 1;
#else
ir_nodehashmap_insert(map, node, trans);
}
-#if OLD_DIVMODS
-/* Helper function to compare the values of pred and avail_pred. */
-static unsigned match_pred(ir_node *pred, ir_node *avail_pred, ir_node *block, int pos)
-{
- ir_node *avail_value = identify(avail_pred);
- ir_node *pred_block = get_Block_cfgpred_block(block, pos);
- ir_node *trans_pred = get_translated(pred_block, pred);
- ir_node *value;
-
- if (trans_pred == NULL)
- trans_pred = pred;
- value = identify(trans_pred);
-
- DB((dbg, LEVEL_3, "manual compare %+F %+F\n", pred, avail_pred));
-
- return (value == avail_value);
-}
-
-/**
- * Does phi translation for redundant Div/Mod nodes only.
- * Returns NULL for non-redundant node, which needs to be phi translated.
- */
-static ir_node *phi_translate_divmod(ir_node *divmod, ir_node *block, int pos)
-{
- ir_node *mem = get_memop_mem(divmod);
- ir_node *trans = get_translated_pred(block, pos, mem);
-
- if (trans == NULL)
- trans = mem;
-
- /* no partial redundancy if this is a mode_M phi */
- if (is_Proj(trans)) {
- /* The last memory operation in predecessor block */
- ir_node *avail_op = get_Proj_pred(trans);
-
- if (get_irn_op(divmod) == get_irn_op(avail_op)) {
- unsigned left, right;
-
- if (is_Div(avail_op)) {
- if (get_Div_resmode(divmod) == get_Div_resmode(avail_op) &&
- get_Div_no_remainder(divmod) == get_Div_no_remainder(avail_op)) {
-
- left = match_pred(get_Div_left(divmod), get_Div_left(avail_op), block, pos);
- right = match_pred(get_Div_right(divmod), get_Div_right(avail_op), block, pos);
-
- if (left && right)
- return avail_op;
- }
- } else if (is_Mod(avail_op)) {
- if (get_Mod_resmode(divmod) == get_Mod_resmode(avail_op)) {
-
- left = match_pred(get_Mod_left(divmod), get_Mod_left(avail_op), block, pos);
- right = match_pred(get_Mod_right(divmod), get_Mod_right(avail_op), block, pos);
-
- if (left && right)
- return avail_op;
- }
- }
- }
- }
- return NULL;
-}
-#endif
-
/**
* Translates an expression above a Phi.
*
}
arity = get_irn_arity(node);
-#if OLD_DIVMODS
- if (is_Div(node) || is_Mod(node)) {
- ir_node *avail_op = phi_translate_divmod(node, block, pos);
- if (avail_op)
- return avail_op;
- }
-#endif
-
needed = 0;
in = ALLOCANZ(ir_node *, arity);
}
}
-#if BETTER_GREED
- /* value is redundant from last iteration,
- but has not been removed from antic_in (is not optimized) */
- if (! environ->first_iter && is_redundant(block, expr))
- return mode;
-#endif
-
/* If it is not the same value already existing along every predecessor
and it is defined by some predecessor then it is partially redundant. */
if (! partially_redundant || fully_redundant)
#endif
} /* update_new_set */
-#if BETTER_GREED
-/*
- * Returns redundant flag of node irn in block block.
- */
-static unsigned is_redundant(ir_node *block, ir_node *irn)
-{
- (void) block;
- (void) irn;
-
- /* TODO Needs to use a flag, because antic_done should only be used
- if node is finally processed by insert_nodes. */
- return 0;
-}
-#endif
-
/**
* Checks if hoisting irn is greedy.
* Greedy hoisting means that there are non partially redundant nodes
ir_node *idom;
int pos;
ir_valueset_iterator_t iter;
-#if BETTER_GREED
- plist_t *stack;
-#endif
/* only blocks */
if (! is_Block(block))
return;
}
-#if BETTER_GREED
- stack = plist_new();
- foreach_valueset(info->antic_in, value, expr, iter) {
- /* inverse topologic */
- plist_insert_front(stack, expr);
- }
-#endif
-
	/* This is the main reason antic_in is preferred over antic_out;
we may iterate over every anticipated value first and not
over the predecessor blocks. */
continue;
}
-#if !BETTER_GREED
if (is_hoisting_greedy(expr, block)) {
DB((dbg, LEVEL_2, "greedy\n"));
continue;
}
-#endif
mode = is_partially_redundant(block, expr, value);
if (mode == NULL)
continue;
-#if BETTER_GREED
- if (is_hoisting_greedy(expr, block)) {
- DB((dbg, LEVEL_2, "Better greed: greedy\n"));
- continue;
- }
-#endif
-
-#if LOADS || OLD_DIVMODS || DIVMODS
+#if LOADS || DIVMODS
/* save old mode_M phis to remove keepalive edges later */
if (is_memop(expr)) {
ir_node *mem = get_memop_mem(expr);
ir_valueset_insert(info->antic_done, value, expr);
env->changes |= 1;
}
-
-#if BETTER_GREED
- /* TODO Unfinished
- Better greed first determines which values are redundant
- and decides then which to take.
- insert_nodes needs to be split up for that. The cycle could be
- for each block: flag redundant nodes,
- use heuristic to adjust these flags (also consider antic_done),
- do insert nodes.
- This way we could decide if we should hoist a non redundant node,
- if all its successors are redundant.
- Or we might try to minimize the cut along hoisted nodes and their
- non redundant successors.
- */
- if (env->changes) {
- plist_element_t *it;
- /* iterate in inverse topological order */
- foreach_plist(stack, it) {
- ir_node *irn = (ir_node *)plist_element_get_value(it);
- ir_node *block = get_nodes_block(irn);
- int j;
- char redundant = 1;
-
- /* does irn only have redundant successors? */
-
- foreach_out_edge(irn, edge) {
- ir_node *succ = get_edge_src_irn(edge);
-
- /* if succ and irn are in the same block */
- if (get_nodes_block(succ) == block && is_redundant(block, succ)) {
- continue;
- } else {
- redundant = 0;
- break;
- }
- }
-
- if (redundant)
- flag_redundant(irn, 1);
- }
- }
- plist_free(stack);
-#endif
-
}
#if HOIST_HIGH