*/
#ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
+#endif
+
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
#endif
# include "irnode_t.h"
# include "irflag_t.h"
# include "firmstat.h"
# include "irarch.h"
+# include "hashptr.h"
/* Make types visible to allow most efficient access */
# include "entity_t.h"
tarval *tb;
/* a - a */
- if (a == b)
+ if (a == b && !is_Bad(a))
return get_tarval_null(get_irn_mode(n));
ta = value_of(a);
ab = get_Cmp_right(a);
proj_nr = get_Proj_proj(n);
- if (aa == ab) { /* 1.: */
+ if (aa == ab && !mode_is_float(get_irn_mode(aa))) { /* 1.: */
+  /* BEWARE: a == a is NOT always true for floating-point values (NaN)! */
/* This is a trick with the bits used for encoding the Cmp
Proj numbers, the following statement is not the same:
return new_tarval_from_long (proj_nr == Eq, mode_b) */
return tarval_bad;
}
+/**
+ * Calculate the value of a Mux: it can be evaluated if the
+ * sel operand is a known constant boolean and the selected
+ * input is known.
+ *
+ * @param n  the Mux node
+ * @return the tarval of the selected input, or tarval_bad if
+ *         the selector (or the selected input) is not constant
+ */
+static tarval *computed_value_Mux(ir_node *n)
+{
+  ir_node *sel = get_Mux_sel(n);
+  tarval *ts = value_of(sel);
+
+  if (ts == get_tarval_b_true()) {
+    /* selector is constant true: result is the value of the "true" input */
+    ir_node *v = get_Mux_true(n);
+    return value_of(v);
+  }
+  else if (ts == get_tarval_b_false()) {
+    /* selector is constant false: result is the value of the "false" input */
+    ir_node *v = get_Mux_false(n);
+    return value_of(v);
+  }
+  return tarval_bad;
+}
+
/**
* If the parameter n can be computed, return its value, else tarval_bad.
* Performs constant folding.
CASE(Rot);
CASE(Conv);
CASE(Proj);
+ CASE(Mux);
default:
op->computed_value = NULL;
}
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad();
+ n = set_Block_dead(n);
DBG_OPT_DEAD(oldn, n);
} else if (get_opt_control_flow_straightening()) {
n = predblock;
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad();
+ n = set_Block_dead(n);
DBG_OPT_DEAD(oldn, n);
}
}
} else if (get_opt_unreachable_code() &&
(n != current_ir_graph->start_block) &&
(n != current_ir_graph->end_block) ) {
- int i;
+ int i, n_cfg = get_Block_n_cfgpreds(n);
+
/* If all inputs are dead, this block is dead too, except if it is
the start or end block. This is a step of unreachable code
elimination */
- for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
- if (!is_Bad(get_Block_cfgpred(n, i))) break;
+ for (i = 0; i < n_cfg; i++) {
+ ir_node *pred = get_Block_cfgpred(n, i);
+ ir_node *pred_blk;
+
+ if (is_Bad(pred)) continue;
+ pred_blk = get_nodes_block(pred);
+
+ if (is_Block_dead(pred_blk)) continue;
+
+ if (pred_blk != n) {
+ /* really found a living input */
+ break;
+ }
}
- if (i == get_Block_n_cfgpreds(n))
- n = new_Bad();
+ if (i == n_cfg)
+ n = set_Block_dead(n);
}
return n;
{
/* GL: Why not same for op_Raise?? */
/* unreachable code elimination */
- if (is_Bad(get_nodes_block(n)))
+ if (is_Block_dead(get_nodes_block(n)))
n = new_Bad();
return n;
if (n_mode == b_mode) {
if (n_mode == mode_b) {
n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n);
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
if (smaller_mode(b_mode, a_mode)){
n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n);
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
}
}
return n;
}
+/**
+ * A Cast may be removed if the type of the previous node
+ * is already the type of the Cast.
+ */
static ir_node *equivalent_node_Cast(ir_node *n) {
ir_node *pred = get_Cast_op(n);
if (get_irn_type(pred) == get_Cast_type(n))
return n;
}
+/* Several optimizations:
+ - no Phi in start block.
+ - remove Id operators that are inputs to Phi
+ - fold Phi-nodes, iff they have only one predecessor except
+ themselves.
+*/
static ir_node *equivalent_node_Phi(ir_node *n)
{
- /* Several optimizations:
- - no Phi in start block.
- - remove Id operators that are inputs to Phi
- - fold Phi-nodes, iff they have only one predecessor except
- themselves.
- */
int i, n_preds;
ir_node *oldn = n;
block = get_nodes_block(n);
/* @@@ fliegt 'raus, sollte aber doch immer wahr sein!!!
assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
- if ((is_Bad(block)) || /* Control dead */
+ if ((is_Block_dead(block)) || /* Control dead */
(block == current_ir_graph->start_block)) /* There should be no Phi nodes */
return new_Bad(); /* in the Start Block. */
n = new_Bad();
}
} else if (get_irn_mode(n) == mode_X &&
- is_Bad(get_nodes_block(n))) {
+ is_Block_dead(get_nodes_block(n))) {
/* Remove dead control flow -- early gigo. */
n = new_Bad();
}
return n;
}
+/**
+ * Optimize a Mux whose selector is a known constant boolean:
+ * such a Mux is equivalent to the selected input node.
+ *
+ * @param n  the Mux node
+ * @return the "true" input if the selector is constant true, the
+ *         "false" input if it is constant false, else n unchanged
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+  ir_node *sel = get_Mux_sel(n);
+  tarval *ts = value_of(sel);
+
+  if (ts == get_tarval_b_true())
+    return get_Mux_true(n);
+  else if (ts == get_tarval_b_false())
+    return get_Mux_false(n);
+
+  return n;
+}
+
/**
* equivalent_node() returns a node equivalent to input n. It skips all nodes that
* perform no actual computation, as, e.g., the Id nodes. It does not create
CASE(Phi);
CASE(Proj);
CASE(Id);
+ CASE(Mux);
default:
op->equivalent_node = NULL;
}
} /* end switch */
}
+/**
+ * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
+ * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)) if possible:
+ * strip a Conv on either operand of a pointer Add/Sub when the
+ * Conv only changes signedness between two's-complement integer
+ * modes of exactly the reference mode's bit width.  Address
+ * arithmetic can be viewed as either signed or unsigned, so the
+ * Conv is a no-op there.
+ *
+ * @param n  an Add or Sub node (shared handler, see the
+ *           transform_node_Add/transform_node_Sub aliases below)
+ * @return n, possibly with an operand replaced in place
+ */
+static ir_node *transform_node_AddSub(ir_node *n)
+{
+  ir_mode *mode = get_irn_mode(n);
+
+  /* only pointer (reference mode) arithmetic is handled */
+  if (mode_is_reference(mode)) {
+    ir_node *left  = get_binop_left(n);
+    ir_node *right = get_binop_right(n);
+    int ref_bits   = get_mode_size_bits(mode);
+
+    /* check the left operand for a removable sign-changing Conv */
+    if (get_irn_op(left) == op_Conv) {
+      ir_mode *mode = get_irn_mode(left);
+      int bits      = get_mode_size_bits(mode);
+
+      if (ref_bits == bits &&
+          mode_is_int(mode) &&
+          get_mode_arithmetic(mode) == irma_twos_complement) {
+        ir_node *pre      = get_Conv_op(left);
+        ir_mode *pre_mode = get_irn_mode(pre);
+
+        if (mode_is_int(pre_mode) &&
+            get_mode_size_bits(pre_mode) == bits &&
+            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
+          /* ok, this conv just changes to sign, moreover the calculation
+           * is done with same number of bits as our address mode, so
+           * we can ignore the conv as address calculation can be viewed
+           * as either signed or unsigned
+           */
+          set_binop_left(n, pre);
+        }
+      }
+    }
+
+    /* same check, mirrored, for the right operand */
+    if (get_irn_op(right) == op_Conv) {
+      ir_mode *mode = get_irn_mode(right);
+      int bits      = get_mode_size_bits(mode);
+
+      if (ref_bits == bits &&
+          mode_is_int(mode) &&
+          get_mode_arithmetic(mode) == irma_twos_complement) {
+        ir_node *pre      = get_Conv_op(right);
+        ir_mode *pre_mode = get_irn_mode(pre);
+
+        if (mode_is_int(pre_mode) &&
+            get_mode_size_bits(pre_mode) == bits &&
+            get_mode_arithmetic(pre_mode) == irma_twos_complement) {
+          /* ok, this conv just changes to sign, moreover the calculation
+           * is done with same number of bits as our address mode, so
+           * we can ignore the conv as address calculation can be viewed
+           * as either signed or unsigned
+           */
+          set_binop_right(n, pre);
+        }
+      }
+    }
+  }
+  return n;
+}
+
+/* Add and Sub share the same handler. */
+#define transform_node_Add transform_node_AddSub
+#define transform_node_Sub transform_node_AddSub
+
/** Do architecture dependent optimizations on Mul nodes */
static ir_node *transform_node_Mul(ir_node *n) {
return arch_dep_replace_mul_with_shifts(n);
return n;
}
+/**
+ * Transform an Eor.
+ */
static ir_node *transform_node_Eor(ir_node *n)
{
ir_node *a = get_Eor_left(n);
if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
- get_Const_tarval(pred), tp);
+ get_Const_tarval(pred), tp);
} else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred),
- get_SymConst_kind(pred), tp);
+ get_SymConst_kind(pred), tp);
}
return n;
}
* Transform a Div/Mod/DivMod with a non-zero constant. Must be
* done here instead of equivalent node because it creates new
* nodes.
- * Removes the exceptions and routes the memory to the initial mem.
+ * Removes the exceptions and routes the memory to the NoMem node.
*
* Further, it optimizes jump tables by removing all impossible cases.
*/
if (proj_nr == pn_Div_X_except) {
/* we found an exception handler, remove it */
return new_Bad();
- }
- else {
- /* the memory Proj can be removed */
+ } else {
+ /* the memory Proj can be removed */
ir_node *res = get_Div_mem(n);
- set_Div_mem(n, get_irg_initial_mem(current_ir_graph));
-
- if (proj_nr == pn_Div_M)
+ set_Div_mem(n, get_irg_no_mem(current_ir_graph));
+ if (proj_nr == pn_Div_M)
return res;
}
}
if (proj_nr == pn_Mod_X_except) {
/* we found an exception handler, remove it */
return new_Bad();
- }
- else {
- /* the memory Proj can be removed */
+ } else {
+ /* the memory Proj can be removed */
ir_node *res = get_Mod_mem(n);
- set_Mod_mem(n, get_irg_initial_mem(current_ir_graph));
+ set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
if (proj_nr == pn_Mod_M)
- return res;
+ return res;
}
}
break;
return new_Bad();
}
else {
- /* the memory Proj can be removed */
+ /* the memory Proj can be removed */
ir_node *res = get_DivMod_mem(n);
- set_DivMod_mem(n, get_irg_initial_mem(current_ir_graph));
+ set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
if (proj_nr == pn_DivMod_M)
- return res;
+ return res;
}
}
break;
break
switch (op->code) {
+ CASE(Add);
+ CASE(Sub);
CASE(Mul);
CASE(Div);
CASE(Mod);
return (get_irn_call_attr(a) != get_irn_call_attr(b));
}
-/** Compares the attributes of two FuncCall nodes. */
-static int node_cmp_attr_FuncCall(ir_node *a, ir_node *b)
-{
- return (get_irn_funccall_attr(a) != get_irn_funccall_attr(b));
-}
-
/** Compares the attributes of two Sel nodes. */
static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
{
CASE(Free);
CASE(SymConst);
CASE(Call);
- CASE(FuncCall);
CASE(Sel);
CASE(Phi);
CASE(Cast);
return 0;
}
-#define ADDR_TO_VAL(p) (((unsigned)(p)) >> 3)
-
/*
* Calculate a hash value of a node.
*/
if (node->op == op_Const) {
/* special value for const, as they only differ in their tarval. */
- h = ADDR_TO_VAL(node->attr.con.tv);
- h = 9*h + ADDR_TO_VAL(get_irn_mode(node));
+ h = HASH_PTR(node->attr.con.tv);
+ h = 9*h + HASH_PTR(get_irn_mode(node));
} else if (node->op == op_SymConst) {
/* special value for const, as they only differ in their symbol. */
- h = ADDR_TO_VAL(node->attr.i.sym.type_p);
- h = 9*h + ADDR_TO_VAL(get_irn_mode(node));
+ h = HASH_PTR(node->attr.i.sym.type_p);
+ h = 9*h + HASH_PTR(get_irn_mode(node));
} else {
/* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
/* consider all in nodes... except the block if not a control flow. */
for (i = is_cfop(node) ? -1 : 0; i < irn_arity; i++) {
- h = 9*h + ADDR_TO_VAL(get_irn_intra_n(node, i));
+ h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
}
/* ...mode,... */
- h = 9*h + ADDR_TO_VAL(get_irn_mode(node));
+ h = 9*h + HASH_PTR(get_irn_mode(node));
/* ...and code */
- h = 9*h + ADDR_TO_VAL(get_irn_op(node));
+ h = 9*h + HASH_PTR(get_irn_op(node));
}
return h;
if (get_irn_mode(node) == mode_X) {
ir_node *block = get_nodes_block(node);
if (op == op_End) return node; /* Don't optimize End, may have Bads. */
+
if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
irn_arity = get_irn_arity(block);
for (i = 0; i < irn_arity; i++) {
blocks predecessors is dead. */
if ( op != op_Block && op != op_Phi && op != op_Tuple) {
irn_arity = get_irn_arity(node);
- for (i = -1; i < irn_arity; i++) {
+
+ if (is_Block_dead(get_nodes_block(node)))
+ return new_Bad();
+
+ for (i = 0; i < irn_arity; i++) {
if (is_Bad(get_irn_n(node, i))) {
return new_Bad();
}
oldn = alloca(node_size);
memcpy(oldn, n, node_size);
- CLONE_ARR_A(ir_node *, oldn->in, n->in);
+ CLONE_ARR_A(ir_node *, oldn->in, n->in);
- /* ARG, copy the in array, we need it for statistics */
- memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
+ /* ARG, copy the in array, we need it for statistics */
+ memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
/* evaluation was successful -- replace the node. */
obstack_free (current_ir_graph->obst, n);
n = new_Const (get_tarval_mode (tv), tv);
- if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
- set_Const_type(n, old_tp);
+ if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
+ set_Const_type(n, old_tp);
DBG_OPT_CSTEVAL(oldn, n);
return n;
}
/* evaluation was successful -- replace the node. */
n = new_Const (get_tarval_mode (tv), tv);
- if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
- set_Const_type(n, old_tp);
+ if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
+ set_Const_type(n, old_tp);
DBG_OPT_CSTEVAL(oldn, n);
return n;