#include "archop.h"
#include "opt_polymorphy.h"
#include "opt_confirms.h"
+#include "irtools.h"
/* Make types visible to allow most efficient access */
# include "entity_t.h"
*/
static tarval *computed_value_SymConst(ir_node *n)
{
- if ((get_SymConst_kind(n) == symconst_size) &&
- (get_type_state(get_SymConst_type(n))) == layout_fixed)
- return new_tarval_from_long(get_type_size_bytes(get_SymConst_type(n)), get_irn_mode(n));
+ ir_type *type;
+
+ /* the size/alignment of a type is a compile-time constant only
+ once the type's layout has been fixed */
+ switch (get_SymConst_kind(n)) {
+ case symconst_type_size:
+ type = get_SymConst_type(n);
+ if (get_type_state(type) == layout_fixed)
+ return new_tarval_from_long(get_type_size_bytes(type), get_irn_mode(n));
+ break;
+ case symconst_type_align:
+ type = get_SymConst_type(n);
+ if (get_type_state(type) == layout_fixed)
+ return new_tarval_from_long(get_type_alignment_bytes(type), get_irn_mode(n));
+ break;
+ default:
+ /* other SymConst kinds (addresses etc.) are not evaluable here */
+ break;
+ }
 return tarval_bad;
}
return new_tarval_from_long(proj_nr & pn_Cmp_Ne, mode_b);
}
}
-
return computed_value_Cmp_Confirm(a, aa, ab, proj_nr);
}
*/
static tarval *computed_value_Psi(ir_node *n)
{
+ /* a Psi with a single condition is represented as a Mux;
+ delegate to the Mux evaluator in that case */
+ if (is_Mux(n))
+ return computed_value_Mux(n);
 return tarval_bad;
}
}
}
else if (get_opt_unreachable_code() &&
- (n != current_ir_graph->start_block) &&
- (n != current_ir_graph->end_block) ) {
+ (n != get_irg_start_block(current_ir_graph)) &&
+ (n != get_irg_end_block(current_ir_graph)) ) {
int i;
/* If all inputs are dead, this block is dead too, except if it is
/**
* Returns a equivalent node for a Jmp, a Bad :-)
- * Of course this only happens if the Block of the Jmp is Bad.
+ * Of course this only happens if the Block of the Jmp is dead.
*/
static ir_node *equivalent_node_Jmp(ir_node *n)
{
return n;
}
-/* Same for op_Raise */
+/** Raise is handled in the same way as Jmp. */
#define equivalent_node_Raise equivalent_node_Jmp
return n;
}
+/**
+ * Eor is commutative and has neutral 0.
+ */
#define equivalent_node_Eor equivalent_node_neutral_zero
/*
return n;
}
-/* Not(Not(x)) == x */
+/** Not(Not(x)) == x */
#define equivalent_node_Not equivalent_node_idempotent_unop
-/* --x == x */ /* ??? Is this possible or can --x raise an
+/** --x == x ??? Is this possible or can --x raise an
 out of bounds exception if min != max? */
#define equivalent_node_Minus equivalent_node_idempotent_unop
/* @@@ fliegt 'raus, sollte aber doch immer wahr sein!!!
assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
if ((is_Block_dead(block)) || /* Control dead */
- (block == current_ir_graph->start_block)) /* There should be no Phi nodes */
- return new_Bad(); /* in the Start Block. */
+ (block == get_irg_start_block(current_ir_graph))) /* There should be no Phi nodes */
+ return new_Bad(); /* in the Start Block. */
if (n_preds == 0) return n; /* Phi of dead Region without predecessors. */
}
/**
- * Returns a equivalent node of a Psi: if a condition is true
+ * Returns an equivalent node of a Psi: if a condition is true
* and all previous conditions are false we know its value.
* If all conditions are false its value is the default one.
*/
static ir_node *equivalent_node_Psi(ir_node *n) {
+ /* a one-condition Psi is a Mux; reuse the Mux equivalence rules */
+ if (is_Mux(n))
+ return equivalent_node_Mux(n);
 return n;
}
* lower <= pred_lower && pred_upper <= upper.
*/
ir_node *upper = get_Bound_upper(n);
- if (get_Bound_lower(pred) == lower &&
- get_Bound_upper(pred) == upper) {
- /*
- * One could expect that we simple return the previous
- * Bound here. However, this would be wrong, as we could
- * add an exception Proj to a new location than.
- * So, we must turn in into a tuple
- */
- ret_tuple = 1;
- }
+ if (get_Bound_lower(pred) == lower &&
+ get_Bound_upper(pred) == upper) {
+ /*
+ * One could expect that we simply return the previous
+ * Bound here. However, this would be wrong, as we could
+ * add an exception Proj to a new location then.
+ * So, we must turn it into a tuple
+ */
+ ret_tuple = 1;
+ }
}
}
if (ret_tuple) {
/* Turn Bound into a tuple (mem, bad, idx) */
ir_node *mem = get_Bound_mem(n);
turn_into_tuple(n, pn_Bound_max);
- set_Tuple_pred(n, pn_Bound_M_regular, mem);
- set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Bound_res, idx);
- set_Tuple_pred(n, pn_Bound_M_except, mem);
+ set_Tuple_pred(n, pn_Bound_M, mem);
+ set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Bound_res, idx);
}
return n;
}
} /* end switch */
}
+/**
+ * Returns non-zero if all Phi predecessors are constants
+ */
+static int is_const_Phi(ir_node *phi) {
+ int i;
+
+ for (i = get_irn_arity(phi) - 1; i >= 0; --i)
+ if (! is_Const(get_irn_n(phi, i)))
+ return 0;
+ /* every predecessor is a Const; note that a Phi0 (zero
+ predecessors) is also reported as constant here */
+ return 1;
+}
+
/**
* Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
* SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
proj_nr = get_inversed_pnc(proj_nr);
changed |= 1;
}
- else if (left > right) {
+ else if (get_irn_idx(left) > get_irn_idx(right)) {
ir_node *t = left;
left = right;
}
}
+/**
+ * Move Confirms down through Phi nodes.
+ */
+static ir_node *transform_node_Phi(ir_node *phi) {
+ int i, n;
+ ir_mode *mode = get_irn_mode(phi);
+
+ /* only pointer-mode Phis are handled: Confirms carry range/alias
+ information for addresses */
+ if (mode_is_reference(mode)) {
+ n = get_irn_arity(phi);
+
+ /* Beware of Phi0 */
+ if (n > 0) {
+ ir_node *pred = get_irn_n(phi, 0);
+ ir_node *bound, *new_Phi, *block, **in;
+ pn_Cmp pnc;
+
+ if (! is_Confirm(pred))
+ return phi;
+
+ /* remember bound and relation of the first Confirm; all other
+ predecessors must match them exactly */
+ bound = get_Confirm_bound(pred);
+ pnc = get_Confirm_cmp(pred);
+
+ NEW_ARR_A(ir_node *, in, n);
+ in[0] = get_Confirm_value(pred);
+
+ for (i = 1; i < n; ++i) {
+ pred = get_irn_n(phi, i);
+
+ if (! is_Confirm(pred) ||
+ get_Confirm_bound(pred) != bound ||
+ get_Confirm_cmp(pred) != pnc)
+ return phi;
+ in[i] = get_Confirm_value(pred);
+ }
+ /* move the Confirm nodes "behind" the Phi */
+ block = get_irn_n(phi, -1);
+ new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi));
+ return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc);
+ }
+ }
+ return phi;
+}
+
/**
* returns the operands of a commutative bin-op, if one operand is
* a const, it is returned as the second one.
CASE(Not);
CASE(Cast);
CASE(Proj);
+ CASE(Phi);
CASE(Sel);
CASE(Or);
CASE(Shr);
* nodes are extremely time critical because of their frequent use in
* constant string arrays.
*/
-static INLINE ir_node *
-identify (pset *value_table, ir_node *n)
+static INLINE ir_node *identify(pset *value_table, ir_node *n)
{
ir_node *o = NULL;
ir_node *r = get_binop_right(n);
/* for commutative operators perform a OP b == b OP a */
- if (l > r) {
+ if (get_irn_idx(l) > get_irn_idx(r)) {
set_binop_left(n, r);
set_binop_right(n, l);
}
}
}
- o = pset_find (value_table, n, ir_node_hash (n));
+ o = pset_find(value_table, n, ir_node_hash (n));
if (!o) return n;
DBG_OPT_CSE(n, o);
* optimization is performed. The flag turning on procedure global cse could
* be changed between two allocations. This way we are safe.
*/
-static INLINE ir_node *
-identify_cons (pset *value_table, ir_node *n) {
+static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify(value_table, n);
* Looks up the node in a hash table, enters it in the table
* if it isn't there yet.
*/
-ir_node *
-identify_remember (pset *value_table, ir_node *n)
+ir_node *identify_remember(pset *value_table, ir_node *n)
{
ir_node *o = NULL;
return o;
}
-void
-add_identities (pset *value_table, ir_node *node) {
- if (get_opt_cse() && (get_irn_opcode(node) != iro_Block))
- identify_remember (value_table, node);
+/* Add a node to the identities value table; Blocks are never
+ entered, as they are no CSE candidates. */
+void add_identities(pset *value_table, ir_node *node) {
+ if (get_opt_cse() && is_no_Block(node))
+ identify_remember(value_table, node);
+}
+
+/* Visit each node in the value table of a graph. */
+void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env) {
+ ir_node *node;
+ ir_graph *rem = current_ir_graph;
+
+ /* temporarily switch current_ir_graph so the callback operates
+ on the right graph, then restore the caller's graph */
+ current_ir_graph = irg;
+ foreach_pset(irg->value_table, node)
+ visit(node, env);
+ current_ir_graph = rem;
+}
/**
* garbage in, garbage out. If a node has a dead input, i.e., the
* Bad node is input to the node, return the Bad node.
*/
-static INLINE ir_node *
-gigo (ir_node *node)
+static INLINE ir_node *gigo(ir_node *node)
{
int i, irn_arity;
ir_op *op = get_irn_op(node);
if (is_Bad(pred))
return new_Bad();
+#if 0
+ /* Propagating Unknowns here seems to be a bad idea, because
+ sometimes we need a node as an input and did not want that
+ it kills its user.
+ However, it might be useful to move this into a later phase
+ (if you think optimizing such code is useful). */
if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
return new_Unknown(get_irn_mode(node));
+#endif
}
}
#if 0
return node;
}
-
/**
* These optimizations deallocate nodes from the obstack.
* It can only be called if it is guaranteed that no other nodes
*
* current_ir_graph must be set to the graph of the node!
*/
-ir_node *
-optimize_node(ir_node *n)
+ir_node *optimize_node(ir_node *n)
{
tarval *tv;
ir_node *oldn = n;
edges_node_deleted(n, current_ir_graph);
/* evaluation was successful -- replace the node. */
- obstack_free(current_ir_graph->obst, n);
+ irg_kill_node(current_ir_graph, n);
nw = new_Const(get_tarval_mode (tv), tv);
if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
edges_node_deleted(oldn, current_ir_graph);
/* We found an existing, better node, so we can deallocate the old node. */
- obstack_free (current_ir_graph->obst, oldn);
-
+ irg_kill_node(current_ir_graph, oldn);
return n;
}
* nodes lying on the obstack. Remove these by a dead node elimination,
* i.e., a copying garbage collection.
*/
-ir_node *
-optimize_in_place_2 (ir_node *n)
+ir_node *optimize_in_place_2(ir_node *n)
{
tarval *tv;
ir_node *oldn = n;
/**
* Wrapper for external use, set proper status bits after optimization.
*/
-ir_node *
-optimize_in_place (ir_node *n)
+ir_node *optimize_in_place(ir_node *n)
{
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);