# include "irnode_t.h"
# include "irgraph_t.h"
+# include "irmode_t.h"
# include "iropt_t.h"
# include "ircons.h"
# include "irgmod.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
+# include "firmstat.h"
/* Make types visible to allow most efficient access */
# include "entity_t.h"
This should be true, as the block is matured before optimize is called.
But what about Phi-cycles with the Phi0/Id that could not be resolved?
Remaining Phi nodes are just Ids. */
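+ /* Illustration (a sketch, not part of this change): control flow
+    straightening below merges a block whose only cfg predecessor is a
+    Jmp into the block that Jmp lives in, by returning the predecessor
+    block as the equivalent node:
+
+      BlockA { ...; Jmp; }  -->  BlockB { ... }
+
+    collapses into a single block containing both parts. */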
- if ((get_Block_n_cfgpreds(n) == 1) &&
- (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
- (get_opt_control_flow_straightening())) {
- n = get_nodes_Block(get_Block_cfgpred(n, 0)); DBG_OPT_STG;
-
- } else if ((get_Block_n_cfgpreds(n) == 2) &&
- (get_opt_control_flow_weak_simplification())) {
+ if ((get_Block_n_cfgpreds(n) == 1) &&
+ (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
+ ir_node *predblock = get_nodes_Block(get_Block_cfgpred(n, 0));
+ if (predblock == oldn) {
+ /* Jmp jumps into the block it is in -- remove the self cycle. */
+ n = new_Bad(); DBG_OPT_DEAD;
+ } else if (get_opt_control_flow_straightening()) {
+ n = predblock; DBG_OPT_STG;
+ }
+ }
+ else if ((get_Block_n_cfgpreds(n) == 1) &&
+ (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
+ ir_node *predblock = get_nodes_Block(get_Block_cfgpred(n, 0));
+ if (predblock == oldn) {
+ /* The Cond's Proj jumps into the block it is in -- remove the self cycle. */
+ n = new_Bad(); DBG_OPT_DEAD;
+ }
+ }
+ else if ((get_Block_n_cfgpreds(n) == 2) &&
+ (get_opt_control_flow_weak_simplification())) {
/* Test whether Cond jumps twice to this block
@@@ we could do this also with two loops finding two preds from several ones. */
ir_node *a = get_Block_cfgpred(n, 0);
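+ /* A sketch of the @@@ idea above (hypothetical, not part of this
+    change): find two preds stemming from the same Cond among
+    arbitrarily many, instead of handling only the two-pred case:
+
+      int i, j;
+      for (i = 0; i < get_Block_n_cfgpreds(n); ++i) {
+        ir_node *pi = skip_Proj(get_Block_cfgpred(n, i));
+        if (get_irn_op(pi) != op_Cond) continue;
+        for (j = i + 1; j < get_Block_n_cfgpreds(n); ++j)
+          if (skip_Proj(get_Block_cfgpred(n, j)) == pi)
+            ... both preds are Projs of the same Cond ...
+      }
+ */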
static ir_node *equivalent_node_Or(ir_node *n)
{
+ ir_node *oldn = n;
+
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
/* remove a v a */
- if (a == b)
- n = a;
+ if (a == b) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
return n;
}
+/**
+ * Optimize operations that are commutative and have a neutral element 0,
+ * e.g. a + 0 = 0 + a = a.
+ */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
ir_node *oldn = n;
return equivalent_node_neutral_zero(n);
}
+/**
+ * Optimize operations that are not commutative but where a 0 on the
+ * right is neutral, so the result is the left operand, e.g. a - 0 = a.
+ * Only the right predecessor needs testing.
+ */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
ir_node *oldn = n;
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
- /* optimize operations that are not commutative but have neutral 0 on left. Test only one predecessor. */
if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) {
n = a; DBG_OPT_ALGSIM1;
}
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, a);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
}
return n;
}
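+/*
+ * Effect of the tuple replacement above (sketch): consumers reach the
+ * Div's results through Projs, so once the Projs are folded through
+ * the Tuple
+ *
+ *   Proj(n, pn_Div_res)       becomes  a       (the known result)
+ *   Proj(n, pn_Div_M)         becomes  mem     (memory unchanged)
+ *   Proj(n, pn_Div_X_except)  becomes  Bad     (cannot raise)
+ */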
n_preds = get_Phi_n_preds(n);
- block = get_nodes_Block(n);
+ block = get_nodes_block(n);
/* @@@ removed, but it really should always hold!!!
assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
if ((is_Bad(block)) || /* Control dead */
value that is known at a certain point. This is useful for
dataflow analysis. */
if (n_preds == 2) {
- ir_node *a = follow_Id (get_Phi_pred(n, 0));
- ir_node *b = follow_Id (get_Phi_pred(n, 1));
+ ir_node *a = get_Phi_pred(n, 0);
+ ir_node *b = get_Phi_pred(n, 1);
if ( (get_irn_op(a) == op_Confirm)
&& (get_irn_op(b) == op_Confirm)
- && (follow_Id (get_irn_n(a, 0)) == follow_Id(get_irn_n(b, 0)))
+ && (get_irn_n(a, 0) == get_irn_n(b, 0))
&& (get_irn_n(a, 1) == get_irn_n (b, 1))
&& (a->data.num == (~b->data.num & irpn_True) )) {
- return follow_Id (get_irn_n(a, 0));
+ return get_irn_n(a, 0);
}
}
#endif
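+ /* The Confirm rule above, by example (a sketch): with
+  *   a = Confirm(x, c, <)    on one path and
+  *   b = Confirm(x, c, >=)   on the other,
+  * the relations are complementary, so Phi(a, b) carries no
+  * information beyond x itself and x can be returned. */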
+ /* If the Block has a Bad pred, we also have one. */
+ for (i = 0; i < n_preds; ++i)
+ if (is_Bad (get_Block_cfgpred(block, i)))
+ set_Phi_pred(n, i, new_Bad());
+
/* Find first non-self-referencing input */
for (i = 0; i < n_preds; ++i) {
- first_val = follow_Id(get_Phi_pred(n, i));
- /* skip Id's */
- set_Phi_pred(n, i, first_val);
+ first_val = get_Phi_pred(n, i);
if ( (first_val != n) /* not self pointer */
- && (get_irn_op(first_val) != op_Bad) /* value not dead */
- && !(is_Bad (get_Block_cfgpred(block, i))) ) { /* not dead control flow */
- break; /* then found first value. */
+#if 1
+ && (get_irn_op(first_val) != op_Bad)
+#endif
+ ) { /* value not dead */
+ break; /* then found first value. */
}
}
- /* follow_Id () for rest of inputs, determine if any of these
-    are non-self-referencing */
+ /* Scan the rest of the inputs and determine whether any of them
+    is non-self-referencing (Ids are skipped inside get_Phi_pred). */
while (++i < n_preds) {
- scnd_val = follow_Id(get_Phi_pred(n, i));
- /* skip Id's */
- set_Phi_pred(n, i, scnd_val);
+ scnd_val = get_Phi_pred(n, i);
if ( (scnd_val != n)
&& (scnd_val != first_val)
+#if 1
&& (get_irn_op(scnd_val) != op_Bad)
- && !(is_Bad (get_Block_cfgpred(block, i))) ) {
+#endif
+ ) {
break;
}
}
if (i >= n_preds) {
n = first_val; DBG_OPT_PHI;
} else {
- /* skip the remaining Ids. */
- while (++i < n_preds) {
- set_Phi_pred(n, i, follow_Id(get_Phi_pred(n, i)));
- }
+ /* Skipping the remaining Ids happens inside get_Phi_pred. */
+ /* Walking the rest is superfluous: we already visit all preds
+    above when propagating the Block's Bads.
+    while (++i < n_preds) get_Phi_pred(n, i); */
}
return n;
}
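+/*
+ * Behaviour of the scan above, by example (sketch):
+ *
+ *   Phi(a, a)          -->  a    (only one distinct value)
+ *   Phi(a, n, a)       -->  a    (self references, n itself, are skipped)
+ *   Phi(a, Bad, a)     -->  a    (dead inputs are skipped)
+ *   Phi(a, b)          stays: two distinct live values reach the block.
+ */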
doesn't change the memory -- a write after read. */
a = get_Store_mem(n);
turn_into_tuple(n, 2);
- set_Tuple_pred(n, 0, a);
- set_Tuple_pred(n, 1, new_Bad()); DBG_OPT_WAR;
+ set_Tuple_pred(n, pn_Store_M, a);
+ set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR;
}
return n;
}
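+/*
+ * The write-after-read case above, by example (sketch): if the stored
+ * value was just loaded from the same address, the Store
+ *
+ *   Store(M, p, v)   becomes   Tuple(M, Bad)
+ *
+ * i.e. the memory state before the Store is passed through and the
+ * exception successor is dead.
+ */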
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, new_Const(get_tarval_mode(ta), ta));
}
return n;
}
/* Turn Mod into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, new_Const(get_tarval_mode(ta), ta));
}
return n;
}
b = new_Const (mode, resb);
evaluated = 1;
}
- } else if (ta == get_mode_null(get_tarval_mode(ta))) {
+ } else if (ta == get_mode_null(mode)) {
b = a;
evaluated = 1;
}
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
turn_into_tuple(n, 4);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad()); /* no exception */
- set_Tuple_pred(n, 2, a);
- set_Tuple_pred(n, 3, b);
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_res_mod, b);
assert(get_nodes_Block(n));
}
jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
turn_into_tuple(n, 2);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, 0, new_Bad());
- set_Tuple_pred(n, 1, jmp);
+ set_Tuple_pred(n, pn_Cond_false, new_Bad());
+ set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
- set_Tuple_pred(n, 0, jmp);
- set_Tuple_pred(n, 1, new_Bad());
+ set_Tuple_pred(n, pn_Cond_false, jmp);
+ set_Tuple_pred(n, pn_Cond_true, new_Bad());
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
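+ /* The constant-Cond replacement above, by example (sketch): for
+  * Cond(Const true) the successor Projs fold through the Tuple to
+  *
+  *   Proj(n, pn_Cond_true)   -->  Jmp   (always taken)
+  *   Proj(n, pn_Cond_false)  -->  Bad   (never taken)
+  */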
CASE(Call);
CASE(FuncCall);
CASE(Sel);
+ CASE(Phi);
+ CASE(Cast);
default:
op->node_cmp_attr = NULL;
}
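+/* Mechanism, sketched under the usual CASE expansion (an assumption
+ * about the macro, which is not shown here): CASE(Phi) installs a
+ * node_cmp_attr_Phi callback on the op, so CSE can compare node
+ * attributes as well; ops falling into the default branch are
+ * compared on op, mode and predecessors only. */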
optimize_node (ir_node *n)
{
tarval *tv;
- ir_node *old_n = n;
+ ir_node *oldn = n;
opcode iro = get_irn_opcode(n);
-/* Allways optimize Phi nodes: part of the construction. */
+/* Always optimize Phi nodes: part of the construction. */
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* constants can not be evaluated */
- if (get_irn_op(n) != op_Const) {
+ if (iro != iro_Const) {
/* try to evaluate */
tv = computed_value (n);
if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
- /* evaluation was succesful -- replace the node. */
+ /*
+ * we MUST copy the node here temporarily, because it is still
+ * needed for DBG_OPT_ALGSIM0
+ */
+ ir_node x = *n;
+ oldn = &x;
+ /* evaluation was successful -- replace the node. */
obstack_free (current_ir_graph->obst, n);
- return new_Const (get_tarval_mode (tv), tv);
+ n = new_Const (get_tarval_mode (tv), tv);
+ DBG_OPT_ALGSIM0;
+ return n;
}
}
}
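+ /* Constant folding above, by example (sketch): for n = Add(Const 2,
+  * Const 3), computed_value() yields the tarval 5 and n is replaced by
+  * a Const built from it; the stack copy of n keeps oldn valid for the
+  * DBG_OPT_ALGSIM0 hook after obstack_free(). */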
if (get_opt_cse())
n = identify_cons (current_ir_graph->value_table, n);
- if (n != old_n) {
+ if (n != oldn) {
/* We found an existing, better node, so we can deallocate the old node. */
- obstack_free (current_ir_graph->obst, old_n);
+ obstack_free (current_ir_graph->obst, oldn);
+
+ return n;
}
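+ /* CSE above, by example (sketch): if a structurally identical node --
+  * same op, mode and predecessors, e.g. a second Add(a, b) -- already
+  * exists in the graph's value table, identify_cons() returns that
+  * node and the freshly built n is released from the obstack. */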
/* Some more constant expression evaluation that does not allow to
optimize_in_place_2 (ir_node *n)
{
tarval *tv;
- ir_node *old_n = n;
+ ir_node *oldn = n;
opcode iro = get_irn_opcode(n);
if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;
/* try to evaluate */
tv = computed_value (n);
if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
- /* evaluation was succesful -- replace the node. */
+ /* evaluation was successful -- replace the node. */
n = new_Const (get_tarval_mode (tv), tv);
- __dbg_info_merge_pair(n, old_n, dbg_const_eval);
+ DBG_OPT_ALGSIM0;
return n;
}
}