}
/**
- * Optimize Loads after Store.
- *
- * @todo FAILS for volatile entities
- */
-static ir_node *equivalent_node_Load(ir_node *n)
-{
- ir_node *oldn = n;
-
- if (!get_opt_redundant_LoadStore()) return n;
-
- /* remove unnecessary Load. */
- ir_node *a = skip_Proj(get_Load_mem(n));
- ir_node *b = get_Load_ptr(n);
- ir_node *c;
-
- /* TODO: check for volatile */
- if (get_irn_op(a) == op_Store && get_Store_ptr(a) == b) {
- /* We load immediately after a store -- a read after write. */
- ir_node *mem = get_Load_mem(n);
-
- c = get_Store_value(a);
- turn_into_tuple(n, 3);
- set_Tuple_pred(n, pn_Load_M, mem);
- set_Tuple_pred(n, pn_Load_res, c);
- set_Tuple_pred(n, pn_Load_X_except, new_Bad()); DBG_OPT_RAW;
- }
- return n;
-}
-
-/**
- * Optimize store after store and load after store.
- *
- * @todo FAILS for volatile entities
+ * optimize Proj(Tuple) and gigo for ProjX in Bad block
*/
-static ir_node *equivalent_node_Store(ir_node *n)
-{
- ir_node *oldn = n;
-
- if (!get_opt_redundant_LoadStore()) return n;
-
- /* remove unnecessary store. */
- ir_node *a = skip_Proj(get_Store_mem(n));
- ir_node *b = get_Store_ptr(n);
- ir_node *c = skip_Proj(get_Store_value(n));
-
- if (get_irn_op(a) == op_Store
- && get_Store_ptr(a) == b
- && skip_Proj(get_Store_value(a)) == c) {
- /* We have twice exactly the same store -- a write after write. */
- n = a; DBG_OPT_WAW;
- } else if (get_irn_op(c) == op_Load
- && (a == c || skip_Proj(get_Load_mem(c)) == a)
- && get_Load_ptr(c) == b ) {
- /* We just loaded the value from the same memory, i.e., the store
- doesn't change the memory -- a write after read. */
- a = get_Store_mem(n);
- turn_into_tuple(n, 2);
- set_Tuple_pred(n, pn_Store_M, a);
- set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR;
- }
- return n;
-}
-
static ir_node *equivalent_node_Proj(ir_node *n)
{
ir_node *oldn = n;
n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
} else {
assert(0); /* This should not happen! */
-// n = new_Bad();
- //dump_ir_block_graph(current_ir_graph, "-CRASH");
- //printf(">>>%d\n", get_irn_node_nr(n));
- //exit(1);
+ n = new_Bad();
}
} else if (get_irn_mode(n) == mode_X &&
is_Bad(get_nodes_block(n))) {
return n;
}
-/*
-case iro_Mod, Quot, DivMod
- DivMod allocates new nodes --> it's treated in transform node.
- What about Quot, DivMod?
-*/
-
/**
* equivalent_node() returns a node equivalent to input n. It skips all nodes that
* perform no actual computation, as, e.g., the Id nodes. It does not create
CASE(And);
CASE(Conv);
CASE(Phi);
- CASE(Load); /* dangerous */
- CASE(Store); /* dangerous, see todo */
CASE(Proj);
CASE(Id);
default:
/**
* Transform a Div/Mod/DivMod with a non-zero constant. Must be
- * done here to avoid that this optimization runs more than once...
+ * done here instead of equivalent node because it creates new
+ * nodes.
*/
static ir_node *transform_node_Proj(ir_node *proj)
{
}
return proj;
+ case iro_Tuple:
+ /* should not happen, but if it does we will optimize it */
+ break;
+
default:
/* do nothing */
return proj;
return equivalent_node_Proj(proj);
}
-/**
- * Transform a Store before a Store to the same address...
- * Both nodes must be in the same block.
- *
- * @todo Check for volatile! Moreover, what if the first store
- * has a exception handler while the other has not?
- */
-static ir_node *transform_node_Store(ir_node *store)
-{
- ir_node *pred = skip_Proj(get_Store_mem(store));
- ir_node *ptr = get_Store_ptr(store);
-
- if (!get_opt_redundant_LoadStore()) return store;
-
- if (get_irn_op(pred) == op_Store &&
- get_Store_ptr(pred) == ptr &&
- get_nodes_block(pred) == get_nodes_block(store)) {
- /* the Store n is useless, as it is overwritten by the store store */
- ir_node *mem = get_Store_mem(pred);
-
- turn_into_tuple(pred, 2);
- set_Tuple_pred(pred, pn_Store_M, mem);
- set_Tuple_pred(pred, pn_Store_X_except, new_Bad());
- }
- return store;
-}
-
/**
* returns the operands of a commutative bin-op, if one operand is
* a const, it is returned as the second one.
CASE(Eor);
CASE(Not);
CASE(Proj);
- CASE(Store); /* dangerous, see todo */
CASE(Or);
default:
op->transform_node = NULL;
tv = computed_value (n);
if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/*
- * we MUST copy the node here temparary, because it's still needed
+ * we MUST copy the node here temporary, because it's still needed
* for DBG_OPT_ALGSIM0
*/
int node_size = offsetof(ir_node, attr) + n->op->attr_size;
}
/* remove unnecessary nodes */
- /*if (get_opt_constant_folding()) */
if (get_opt_constant_folding() ||
(iro == iro_Phi) || /* always optimize these nodes. */
(iro == iro_Id) || /* ... */
set_irg_pinned(current_ir_graph, op_pin_state_floats);
if (get_irg_outs_state(current_ir_graph) == outs_consistent)
set_irg_outs_inconsistent(current_ir_graph);
+
/* Maybe we could also test whether optimizing the node can
change the control graph. */
if (get_irg_dom_state(current_ir_graph) == dom_consistent)