*/
#ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
+#endif
+
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
#endif
# include "irnode_t.h"
# include "irflag_t.h"
# include "firmstat.h"
# include "irarch.h"
+# include "hashptr.h"
/* Make types visible to allow most efficient access */
# include "entity_t.h"
-# ifdef DO_HEAPANALYSIS
-/* heapanal can't cope with NoMems */
-# else /* if defined DO_HEAPANALYSIS */
-# define USE_NOMEM
-# endif /* defined DO_HEAPANALYSIS */
-
/**
* Trivial INLINEable routine for copy propagation.
* Does follow Ids, needed to optimize INLINEd code.
tarval *tb;
/* a - a */
- if (a == b)
+ if (a == b && !is_Bad(a))
return get_tarval_null(get_irn_mode(n));
ta = value_of(a);
return tarval_bad;
}
+/**
+ * Calculate the value of a Mux node: it can be evaluated when the
+ * sel operand is a known boolean constant and the thereby selected
+ * input is itself constant.
+ *
+ * @param n  the Mux node
+ * @return the tarval of the selected input, or tarval_bad if the
+ *         selector (or the selected input) is not constant
+ */
+static tarval *computed_value_Mux(ir_node *n)
+{
+ ir_node *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ /* selector is constant true: the Mux computes its true input */
+ if (ts == get_tarval_b_true()) {
+ ir_node *v = get_Mux_true(n);
+ return value_of(v);
+ }
+ /* selector is constant false: the Mux computes its false input */
+ else if (ts == get_tarval_b_false()) {
+ ir_node *v = get_Mux_false(n);
+ return value_of(v);
+ }
+ /* selector not constant (or Bad): cannot be evaluated here */
+ return tarval_bad;
+}
+
/**
* If the parameter n can be computed, return its value, else tarval_bad.
* Performs constant folding.
CASE(Rot);
CASE(Conv);
CASE(Proj);
+ CASE(Mux);
default:
op->computed_value = NULL;
}
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad();
+ n = set_Block_dead(n);
DBG_OPT_DEAD(oldn, n);
} else if (get_opt_control_flow_straightening()) {
n = predblock;
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad();
+ n = set_Block_dead(n);
DBG_OPT_DEAD(oldn, n);
}
}
} else if (get_opt_unreachable_code() &&
(n != current_ir_graph->start_block) &&
(n != current_ir_graph->end_block) ) {
- int i;
+ int i, n_cfg = get_Block_n_cfgpreds(n);
+
/* If all inputs are dead, this block is dead too, except if it is
the start or end block. This is a step of unreachable code
elimination */
- for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
- if (!is_Bad(get_Block_cfgpred(n, i))) break;
+ for (i = 0; i < n_cfg; i++) {
+ ir_node *pred = get_Block_cfgpred(n, i);
+ ir_node *pred_blk;
+
+ if (is_Bad(pred)) continue;
+ pred_blk = get_nodes_block(pred);
+
+ if (is_Block_dead(pred_blk)) continue;
+
+ if (pred_blk != n) {
+ /* really found a living input */
+ break;
+ }
}
- if (i == get_Block_n_cfgpreds(n))
- n = new_Bad();
+ if (i == n_cfg)
+ n = set_Block_dead(n);
}
return n;
{
/* GL: Why not same for op_Raise?? */
/* unreachable code elimination */
- if (is_Bad(get_nodes_block(n)))
+ if (is_Block_dead(get_nodes_block(n)))
n = new_Bad();
return n;
return n;
}
+/**
+ * A Cast may be removed if the type of the previous node
+ * is already to type of the Cast.
+ */
static ir_node *equivalent_node_Cast(ir_node *n) {
ir_node *pred = get_Cast_op(n);
if (get_irn_type(pred) == get_Cast_type(n))
return n;
}
+/* Several optimizations:
+ - no Phi in start block.
+ - remove Id operators that are inputs to Phi
+ - fold Phi-nodes, iff they have only one predecessor except
+ themselves.
+*/
static ir_node *equivalent_node_Phi(ir_node *n)
{
- /* Several optimizations:
- - no Phi in start block.
- - remove Id operators that are inputs to Phi
- - fold Phi-nodes, iff they have only one predecessor except
- themselves.
- */
int i, n_preds;
ir_node *oldn = n;
block = get_nodes_block(n);
/* @@@ fliegt 'raus, sollte aber doch immer wahr sein!!!
assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
- if ((is_Bad(block)) || /* Control dead */
+ if ((is_Block_dead(block)) || /* Control dead */
(block == current_ir_graph->start_block)) /* There should be no Phi nodes */
return new_Bad(); /* in the Start Block. */
n = new_Bad();
}
} else if (get_irn_mode(n) == mode_X &&
- is_Bad(get_nodes_block(n))) {
+ is_Block_dead(get_nodes_block(n))) {
/* Remove dead control flow -- early gigo. */
n = new_Bad();
}
return n;
}
+/**
+ * Optimize a Mux whose selector is a constant boolean: such a Mux is
+ * equivalent to the selected input node and can be replaced by it.
+ *
+ * @param n  the Mux node
+ * @return the selected input if the selector is constant,
+ *         otherwise n unchanged
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+ ir_node *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ /* constant-true selector: the Mux IS its true input */
+ if (ts == get_tarval_b_true())
+ return get_Mux_true(n);
+ /* constant-false selector: the Mux IS its false input */
+ else if (ts == get_tarval_b_false())
+ return get_Mux_false(n);
+
+ return n;
+}
+
/**
* equivalent_node() returns a node equivalent to input n. It skips all nodes that
* perform no actual computation, as, e.g., the Id nodes. It does not create
CASE(Phi);
CASE(Proj);
CASE(Id);
+ CASE(Mux);
default:
op->equivalent_node = NULL;
}
} /* end switch */
}
+/**
+ * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
+ * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)) if possible.
+ *
+ * For address (reference-mode) arithmetic a Conv that only flips the
+ * signedness of an integer operand — same bit width as the address
+ * mode, two's complement on both sides — is a no-op and is stripped,
+ * exposing the Conv's operand directly to the Add/Sub.
+ *
+ * @param n  an Add or Sub node (shared handler, see the #defines below)
+ * @return n, possibly with one or both operands rewired in place
+ */
+static ir_node *transform_node_AddSub(ir_node *n)
+{
+ ir_mode *mode = get_irn_mode(n);
+
+ /* only address arithmetic is handled; plain integer Add/Sub is left alone */
+ if (mode_is_reference(mode)) {
+ ir_node *left = get_binop_left(n);
+ ir_node *right = get_binop_right(n);
+ int ref_bits = get_mode_size_bits(mode);
+
+ /* left operand: strip a pure sign-changing Conv.
+ NOTE: the inner 'mode' deliberately shadows the outer (reference) mode. */
+ if (get_irn_op(left) == op_Conv) {
+ ir_mode *mode = get_irn_mode(left);
+ int bits = get_mode_size_bits(mode);
+
+ if (ref_bits == bits &&
+ mode_is_int(mode) &&
+ get_mode_arithmetic(mode) == irma_twos_complement) {
+ ir_node *pre = get_Conv_op(left);
+ ir_mode *pre_mode = get_irn_mode(pre);
+
+ if (mode_is_int(pre_mode) &&
+ get_mode_size_bits(pre_mode) == bits &&
+ get_mode_arithmetic(pre_mode) == irma_twos_complement) {
+ /* ok, this conv just changes to sign, moreover the calculation
+ * is done with same number of bits as our address mode, so
+ * we can ignore the conv as address calculation can be viewed
+ * as either signed or unsigned
+ */
+ set_binop_left(n, pre);
+ }
+ }
+ }
+
+ /* right operand: identical check, mirrored for the second input */
+ if (get_irn_op(right) == op_Conv) {
+ ir_mode *mode = get_irn_mode(right);
+ int bits = get_mode_size_bits(mode);
+
+ if (ref_bits == bits &&
+ mode_is_int(mode) &&
+ get_mode_arithmetic(mode) == irma_twos_complement) {
+ ir_node *pre = get_Conv_op(right);
+ ir_mode *pre_mode = get_irn_mode(pre);
+
+ if (mode_is_int(pre_mode) &&
+ get_mode_size_bits(pre_mode) == bits &&
+ get_mode_arithmetic(pre_mode) == irma_twos_complement) {
+ /* ok, this conv just changes to sign, moreover the calculation
+ * is done with same number of bits as our address mode, so
+ * we can ignore the conv as address calculation can be viewed
+ * as either signed or unsigned
+ */
+ set_binop_right(n, pre);
+ }
+ }
+ }
+ }
+ return n;
+}
+
+/* Add and Sub share the same Conv-stripping transformation. */
+#define transform_node_Add transform_node_AddSub
+#define transform_node_Sub transform_node_AddSub
+
/** Do architecture dependend optimizations on Mul nodes */
static ir_node *transform_node_Mul(ir_node *n) {
return arch_dep_replace_mul_with_shifts(n);
return n;
}
+/**
+ * Transform an Eor.
+ */
static ir_node *transform_node_Eor(ir_node *n)
{
ir_node *a = get_Eor_left(n);
} else {
/* the memory Proj can be removed */
ir_node *res = get_Div_mem(n);
-# ifdef USE_NOMEM
set_Div_mem(n, get_irg_no_mem(current_ir_graph));
-# endif /* defined USE_NOMEM */
if (proj_nr == pn_Div_M)
return res;
}
} else {
/* the memory Proj can be removed */
ir_node *res = get_Mod_mem(n);
-# ifdef USE_NOMEM
set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
-# endif /* defined USE_NOMEM */
if (proj_nr == pn_Mod_M)
return res;
}
else {
/* the memory Proj can be removed */
ir_node *res = get_DivMod_mem(n);
-# ifdef USE_NOMEM
set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
-# endif /* defined USE_NOMEM */
if (proj_nr == pn_DivMod_M)
return res;
}
break
switch (op->code) {
+ CASE(Add);
+ CASE(Sub);
CASE(Mul);
CASE(Div);
CASE(Mod);
return 0;
}
-#define ADDR_TO_VAL(p) (((unsigned)(p)) >> 3)
-
/*
* Calculate a hash value of a node.
*/
if (node->op == op_Const) {
/* special value for const, as they only differ in their tarval. */
- h = ADDR_TO_VAL(node->attr.con.tv);
- h = 9*h + ADDR_TO_VAL(get_irn_mode(node));
+ h = HASH_PTR(node->attr.con.tv);
+ h = 9*h + HASH_PTR(get_irn_mode(node));
} else if (node->op == op_SymConst) {
/* special value for const, as they only differ in their symbol. */
- h = ADDR_TO_VAL(node->attr.i.sym.type_p);
- h = 9*h + ADDR_TO_VAL(get_irn_mode(node));
+ h = HASH_PTR(node->attr.i.sym.type_p);
+ h = 9*h + HASH_PTR(get_irn_mode(node));
} else {
/* hash table value = 9*(9*(9*(9*(9*arity+in[0])+in[1])+ ...)+mode)+code */
/* consider all in nodes... except the block if not a control flow. */
for (i = is_cfop(node) ? -1 : 0; i < irn_arity; i++) {
- h = 9*h + ADDR_TO_VAL(get_irn_intra_n(node, i));
+ h = 9*h + HASH_PTR(get_irn_intra_n(node, i));
}
/* ...mode,... */
- h = 9*h + ADDR_TO_VAL(get_irn_mode(node));
+ h = 9*h + HASH_PTR(get_irn_mode(node));
/* ...and code */
- h = 9*h + ADDR_TO_VAL(get_irn_op(node));
+ h = 9*h + HASH_PTR(get_irn_op(node));
}
return h;
if (get_irn_mode(node) == mode_X) {
ir_node *block = get_nodes_block(node);
if (op == op_End) return node; /* Don't optimize End, may have Bads. */
+
if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
irn_arity = get_irn_arity(block);
for (i = 0; i < irn_arity; i++) {
blocks predecessors is dead. */
if ( op != op_Block && op != op_Phi && op != op_Tuple) {
irn_arity = get_irn_arity(node);
- for (i = -1; i < irn_arity; i++) {
+
+ if (is_Block_dead(get_nodes_block(node)))
+ return new_Bad();
+
+ for (i = 0; i < irn_arity; i++) {
if (is_Bad(get_irn_n(node, i))) {
return new_Bad();
}