*/
#ifdef HAVE_CONFIG_H
-# include <config.h>
+# include "config.h"
+#endif
+
+#ifdef HAVE_ALLOCA_H
+#include <alloca.h>
+#endif
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
#endif
# include "irnode_t.h"
/* Make types visible to allow most efficient access */
# include "entity_t.h"
-# ifdef DO_HEAPANALYSIS
-/* heapanal can't cope with NoMems */
-# else /* if defined DO_HEAPANALYSIS */
-# define USE_NOMEM
-# endif /* defined DO_HEAPANALYSIS */
-
/**
* Trivial INLINEable routine for copy propagation.
* Does follow Ids, needed to optimize INLINEd code.
return tarval_bad;
}
+/**
+ * Calculates the value of a Mux: it can be evaluated when the sel
+ * input is a known constant, selecting which data input to return.
+ */
+static tarval *computed_value_Mux(ir_node *n)
+{
+ ir_node *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ if (ts == get_tarval_b_true()) {
+ ir_node *v = get_Mux_true(n);
+ return value_of(v);
+ }
+ else if (ts == get_tarval_b_false()) {
+ ir_node *v = get_Mux_false(n);
+ return value_of(v);
+ }
+ return tarval_bad;
+}
+
/**
* If the parameter n can be computed, return its value, else tarval_bad.
* Performs constant folding.
CASE(Rot);
CASE(Conv);
CASE(Proj);
+ CASE(Mux);
default:
op->computed_value = NULL;
}
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad();
+ n = set_Block_dead(n);
DBG_OPT_DEAD(oldn, n);
} else if (get_opt_control_flow_straightening()) {
n = predblock;
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad();
+ n = set_Block_dead(n);
DBG_OPT_DEAD(oldn, n);
}
}
} else if (get_opt_unreachable_code() &&
(n != current_ir_graph->start_block) &&
(n != current_ir_graph->end_block) ) {
- int i;
+ int i, n_cfg = get_Block_n_cfgpreds(n);
+
/* If all inputs are dead, this block is dead too, except if it is
the start or end block. This is a step of unreachable code
elimination */
- for (i = 0; i < get_Block_n_cfgpreds(n); i++) {
- if (!is_Bad(get_Block_cfgpred(n, i))) break;
+ for (i = 0; i < n_cfg; i++) {
+ ir_node *pred = get_Block_cfgpred(n, i);
+ ir_node *pred_blk;
+
+ if (is_Bad(pred)) continue;
+ pred_blk = get_nodes_block(pred);
+
+ if (is_Block_dead(pred_blk)) continue;
+
+ if (pred_blk != n) {
+ /* really found a living input */
+ break;
+ }
}
- if (i == get_Block_n_cfgpreds(n))
- n = new_Bad();
+ if (i == n_cfg)
+ n = set_Block_dead(n);
}
return n;
{
/* GL: Why not same for op_Raise?? */
/* unreachable code elimination */
- if (is_Bad(get_nodes_block(n)))
+ if (is_Block_dead(get_nodes_block(n)))
n = new_Bad();
return n;
block = get_nodes_block(n);
/* @@@ fliegt 'raus, sollte aber doch immer wahr sein!!!
assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
- if ((is_Bad(block)) || /* Control dead */
+ if ((is_Block_dead(block)) || /* Control dead */
(block == current_ir_graph->start_block)) /* There should be no Phi nodes */
return new_Bad(); /* in the Start Block. */
n = new_Bad();
}
} else if (get_irn_mode(n) == mode_X &&
- is_Bad(get_nodes_block(n))) {
+ is_Block_dead(get_nodes_block(n))) {
/* Remove dead control flow -- early gigo. */
n = new_Bad();
}
return n;
}
+/**
+ * Optimizes a Mux: if sel is a known constant, the Mux is replaced by the selected input.
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+ ir_node *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ if (ts == get_tarval_b_true())
+ return get_Mux_true(n);
+ else if (ts == get_tarval_b_false())
+ return get_Mux_false(n);
+
+ return n;
+}
+
/**
* equivalent_node() returns a node equivalent to input n. It skips all nodes that
* perform no actual computation, as, e.g., the Id nodes. It does not create
CASE(Phi);
CASE(Proj);
CASE(Id);
+ CASE(Mux);
default:
op->equivalent_node = NULL;
}
} else {
/* the memory Proj can be removed */
ir_node *res = get_Div_mem(n);
-# ifdef USE_NOMEM
set_Div_mem(n, get_irg_no_mem(current_ir_graph));
-# endif /* defined USE_NOMEM */
if (proj_nr == pn_Div_M)
return res;
}
} else {
/* the memory Proj can be removed */
ir_node *res = get_Mod_mem(n);
-# ifdef USE_NOMEM
set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
-# endif /* defined USE_NOMEM */
if (proj_nr == pn_Mod_M)
return res;
}
else {
/* the memory Proj can be removed */
ir_node *res = get_DivMod_mem(n);
-# ifdef USE_NOMEM
set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
-# endif /* defined USE_NOMEM */
if (proj_nr == pn_DivMod_M)
return res;
}
if (get_irn_mode(node) == mode_X) {
ir_node *block = get_nodes_block(node);
if (op == op_End) return node; /* Don't optimize End, may have Bads. */
+
if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
irn_arity = get_irn_arity(block);
for (i = 0; i < irn_arity; i++) {
blocks predecessors is dead. */
if ( op != op_Block && op != op_Phi && op != op_Tuple) {
irn_arity = get_irn_arity(node);
- for (i = -1; i < irn_arity; i++) {
+
+ if (is_Block_dead(get_nodes_block(node)))
+ return new_Bad();
+
+ for (i = 0; i < irn_arity; i++) {
if (is_Bad(get_irn_n(node, i))) {
return new_Bad();
}