# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
+# include "firmstat.h"
/* Make types visible to allow the most efficient access */
# include "entity_t.h"
static ir_node *equivalent_node_Or(ir_node *n)
{
+ ir_node *oldn = n;
+
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
/* remove a v a -- Or is idempotent */
- if (a == b)
- n = a;
+ if (a == b) {
+ n = a; DBG_OPT_ALGSIM1;
+ }
return n;
}
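/* Contract of the equivalent_node_* handlers: they allocate no new nodes,
   they only return an already existing node that is equivalent to n -- here
   the operand itself, so e.g. "v | v" simply collapses to v. */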
+/**
+ * Optimize operations that are commutative and have a neutral element 0,
+ * i.e. a op 0 = 0 op a = a.
+ */
static ir_node *equivalent_node_neutral_zero(ir_node *n)
{
 ir_node *oldn = n;
 ir_node *a = get_binop_left(n);
 ir_node *b = get_binop_right(n);
 /* if either predecessor is 0, the other one is the result */
 if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) { n = a; DBG_OPT_ALGSIM1; }
 else if (tarval_classify (computed_value (a)) == TV_CLASSIFY_NULL) { n = b; DBG_OPT_ALGSIM1; }
 return n;
}
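/* At the source level this covers, e.g., "x + 0", "0 + x" and "x | 0":
   the constant 0 predecessor is dropped and x is returned unchanged. */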
+/**
+ * Optimize operations that are not commutative but where a 0 on the
+ * right is neutral, i.e. a op 0 = a: the left operand survives.
+ * Only the right predecessor needs to be tested.
+ */
static ir_node *equivalent_node_left_zero(ir_node *n)
{
ir_node *oldn = n;
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
- /* optimize operations that are not commutative but have neutral 0 on left. Test only one predecessor. */
if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) {
n = a; DBG_OPT_ALGSIM1;
}
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, a);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
}
return n;
}
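/* Why a Tuple is a valid replacement: a mode_T node such as Div is only
   accessed through Proj nodes selecting one result each.  After
   turn_into_tuple() every Proj picks the matching Tuple predecessor
   (pn_Div_M, pn_Div_res, ...), and Proj-of-Tuple pairs are folded away
   by later optimization passes. */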
/* This Store writes back the value just loaded from the same address, so it
   doesn't change the memory -- a write after read. */
a = get_Store_mem(n);
turn_into_tuple(n, 2);
- set_Tuple_pred(n, 0, a);
- set_Tuple_pred(n, 1, new_Bad()); DBG_OPT_WAR;
+ set_Tuple_pred(n, pn_Store_M, a);
+ set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR;
}
return n;
}
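/* Example: in "v = *p; *p = v;" the second access is such a write after
   read -- the Store's memory Proj is rerouted to the prior memory state a
   and its exception Proj becomes Bad. */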
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, new_Const(get_tarval_mode(ta), ta));
}
return n;
}
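/* Here ta already holds the evaluated quotient, e.g. the tarval 3 for a
   constant "6 / 2".  The divisor is a known non-null constant, so no
   exception is possible and X_except may safely be Bad. */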
/* Turn Mod into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, new_Const(get_tarval_mode(ta), ta));
}
return n;
}
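/* Same scheme as for Div above, with ta holding the evaluated remainder;
   the named pn_Mod_* constants replace the bare indices 0/1/2 and must
   match the Proj numbers that consumers of the Mod node already carry. */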
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
turn_into_tuple(n, 4);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad()); /* no exception */
- set_Tuple_pred(n, 2, a);
- set_Tuple_pred(n, 3, b);
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_res_mod, b);
assert(get_nodes_Block(n));
}
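/* DivMod delivers quotient and remainder from a single node, hence the
   4-entry tuple; at this point a and b already name the precomputed
   result nodes, not the original operands. */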
jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
turn_into_tuple(n, 2);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, 0, new_Bad());
- set_Tuple_pred(n, 1, jmp);
+ set_Tuple_pred(n, pn_Cond_false, new_Bad());
+ set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
- set_Tuple_pred(n, 0, jmp);
- set_Tuple_pred(n, 1, new_Bad());
+ set_Tuple_pred(n, pn_Cond_false, jmp);
+ set_Tuple_pred(n, pn_Cond_true, new_Bad());
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
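/* A Cond on a known Boolean, e.g. from "while (1)", degenerates into an
   unconditional Jmp on the taken Proj and Bad on the other, so the dead
   successor block loses this control-flow predecessor.  The keepalive
   edge is required because an endless loop is no longer reachable
   backwards from the End node. */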
ir_node *
optimize_node (ir_node *n)
{
tarval *tv;
- ir_node *old_n = n;
+ ir_node *oldn = n;
opcode iro = intern_get_irn_opcode(n);
/* Always optimize Phi nodes: they are part of the construction. */
/* try to evaluate */
tv = computed_value (n);
if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
+ /*
+ * we MUST copy the node here temporarily, because it is still needed
+ * for DBG_OPT_ALGSIM0 after obstack_free() below releases its storage
+ */
+ ir_node x = *n;
+ oldn = &x;
/* evaluation was successful -- replace the node. */
obstack_free (current_ir_graph->obst, n);
- return new_Const (get_tarval_mode (tv), tv);
+ n = new_Const (get_tarval_mode (tv), tv);
+ DBG_OPT_ALGSIM0;
+ return n;
}
}
}
if (get_opt_cse())
n = identify_cons (current_ir_graph->value_table, n);
- if (n != old_n) {
+ if (n != oldn) {
/* We found an existing, better node, so we can deallocate the old node. */
- obstack_free (current_ir_graph->obst, old_n);
+ obstack_free (current_ir_graph->obst, oldn);
+
+ return n;
}
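/* identify_cons() looks n up in the graph's value table (a hash over
   opcode, mode and operands); under CSE an older identical node is
   returned instead.  Freeing oldn is legal here only because it is the
   youngest allocation on the obstack: obstack_free(obst, p) releases p
   and everything allocated after it. */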
/* Some more constant expression evaluation that does not allow to
   free the node. */
ir_node *
optimize_in_place_2 (ir_node *n)
{
tarval *tv;
- ir_node *old_n = n;
+ ir_node *oldn = n;
opcode iro = intern_get_irn_opcode(n);
if (!get_opt_optimize() && (intern_get_irn_op(n) != op_Phi)) return n;
/* try to evaluate */
tv = computed_value (n);
if ((intern_get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was successful -- replace the node. */
n = new_Const (get_tarval_mode (tv), tv);
- __dbg_info_merge_pair(n, old_n, dbg_const_eval);
+ DBG_OPT_ALGSIM0;
return n;
}
}