* Copyright: (c) 1998-2004 Universität Karlsruhe
* Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
*/
-
#ifdef HAVE_CONFIG_H
-# include "config.h"
+#include "config.h"
#endif
-# include "irnode_t.h"
-# include "irgraph_t.h"
-# include "irmode_t.h"
-# include "iropt_t.h"
-# include "ircons_t.h"
-# include "irgmod.h"
-# include "dbginfo.h"
-# include "iropt_dbg.h"
-# include "irflag_t.h"
-# include "irgwalk.h"
-# include "reassoc_t.h"
-# include "irhooks.h"
-# include "irloop.h"
-# include "debug.h"
-
-static firm_dbg_module_t *dbg;
+#include "irnode_t.h"
+#include "irgraph_t.h"
+#include "irmode_t.h"
+#include "iropt_t.h"
+#include "ircons_t.h"
+#include "irgmod.h"
+#include "dbginfo.h"
+#include "iropt_dbg.h"
+#include "irflag_t.h"
+#include "irgwalk.h"
+#include "reassoc_t.h"
+#include "irhooks.h"
+#include "irloop.h"
+#include "debug.h"
+
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef struct _walker_t {
int changes; /* set, if a reassociation take place */
typedef enum {
NO_CONSTANT = 0, /**< node is not constant */
REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
- CONST_EXPR = 4 /**< node is a constant expression in the current context,
+ REGION_CONST = 4 /**< node is a constant expression in the current context,
use 4 here to simplify implementation of get_comm_Binop_ops() */
} const_class_t;
/**
* returns whether a node is constant ie is a constant or
- * is loop invariant
+ * is loop invariant (called region constant)
*
* @param n the node to be checked for constant
* @param block a block that might be in a loop
if (op == op_Const)
return REAL_CONSTANT;
+
+ /* although SymConsts are of course real constants, we cannot
+    fold them, so handle them like region constants */
if (op == op_SymConst)
- return CONST_EXPR;
- if (is_loop_invariant(n, block))
- return CONST_EXPR;
+ return REGION_CONST;
+
+ /*
+ * Beware: Bad nodes are always loop-invariant, but
+ * cannot be handled by later code, so filter them out here.
+ */
+ if (! is_Bad(n) && is_loop_invariant(n, block))
+ return REGION_CONST;
return NO_CONSTANT;
}
/**
* returns the operands of a commutative bin-op, if one operand is
- * a constant in the current context, it is returned as the second one.
+ * a region constant, it is returned as the second one.
*
* Beware: Real constants must be returned with higher priority than
- * constant expression, because they might be folded.
+ * region constants, because they might be folded.
*/
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
{
assert(is_op_commutative(get_irn_op(binop)));
switch (class_a + 2*class_b) {
- case REAL_CONSTANT + 2*NO_CONSTANT:
case REAL_CONSTANT + 2*REAL_CONSTANT:
- case REAL_CONSTANT + 2*CONST_EXPR:
- case CONST_EXPR + 2*NO_CONSTANT:
+ /* if both operands are real constants, one might be a
+ * pointer constant like NULL; return the other one
+ */
+ if (mode_is_reference(get_irn_mode(op_a))) {
+ *a = op_a;
+ *c = op_b;
+ }
+ else {
+ *a = op_b;
+ *c = op_a;
+ }
+ break;
+ case REAL_CONSTANT + 2*NO_CONSTANT:
+ case REAL_CONSTANT + 2*REGION_CONST:
+ case REGION_CONST + 2*NO_CONSTANT:
*a = op_b;
*c = op_a;
break;
static int reassoc_Sub(ir_node **in)
{
ir_node *n = *in;
- ir_node *block = get_nodes_block(n);
ir_node *right = get_Sub_right(n);
+ ir_mode *rmode = get_irn_mode(right);
+ ir_node *block;
- /* FIXME: Do not apply this rule for unsigned Sub's because our code
- * generation is currently buggy :-)
- */
- if (! mode_is_signed(get_irn_mode(n)))
- return 0;
+ /* cannot handle SubIs(P, P) */
+ if (mode_is_reference(rmode))
+ return 0;
+
+ block = get_nodes_block(n);
/* handles rule R6:
* convert x - c => (-c) + x
* */
if (get_const_class(right, block) == REAL_CONSTANT) {
ir_node *left = get_Sub_left(n);
- ir_node *block = get_nodes_block(n);
- ir_mode *mode = get_irn_mode(n);
- dbg_info *dbi = get_irn_dbg_info(n);
+ ir_mode *mode;
+ dbg_info *dbi;
ir_node *irn, *c;
switch (get_const_class(left, block)) {
irn = optimize_in_place(n);
if (irn != n) {
exchange(n, irn);
- *in = irn;
+ *in = irn;
return 1;
}
return 0;
/* already constant, nothing to do */
return 0;
}
+ mode = get_irn_mode(n);
+ dbi = get_irn_dbg_info(n);
- c = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
- irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, mode);
+ /* Beware of SubP(P, Is) */
+ c = new_r_Const(current_ir_graph, block, rmode, get_mode_null(rmode));
+ irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, rmode);
irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
get_Sub_left(n), c, get_Sub_left(n), c));
exchange(n, irn);
- *in = irn;
+ *in = irn;
return 1;
}
* reassociate a commutative Binop
*
* BEWARE: this rule leads to a potential loop, if
- * two operands are are constant expressions and the third is a
+ * two operands are region constants and the third is a
* constant, so avoid this situation.
*/
static int reassoc_commutative(ir_node **node)
c_t2 = get_const_class(t2, block);
if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
- ((((c_c1 ^ c_c2 ^ c_t2) & CONST_EXPR) == 0) || ((c_c1 & c_c2 & c_t2) == CONST_EXPR)) ) {
+ ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
/* All three are constant and either all are constant expressions or two of them are:
* then applying this rule would lead into a cycle
*
DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => (%n .%s. %n) .%s. %n\n",
c1, get_irn_opname(n), c2, get_irn_opname(n),
- t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2));
+ t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2));
/*
* In some rare cases it can really happen that we get the same node back.
* This might be happen in dead loops, were the Phi nodes are already gone away.
*/
if (n != irn) {
exchange(n, irn);
- *node = irn;
+ *node = irn;
return 1;
}
}
t1 = get_binop_left(add_sub);
t2 = get_binop_right(add_sub);
- in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
- in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
+ /* we can only apply multiplication rules on integer arithmetic */
+ if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
+ in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
+ in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
- mode = get_mode_from_ops(in[0], in[1]);
- irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
+ mode = get_mode_from_ops(in[0], in[1]);
+ irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
- DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
- t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
- exchange(n, irn);
- *node = irn;
+ /* In some cases it might happen that the new irn is equal to the old
+ * one, for instance in:
+ * (x - 1) * y == x * y - y
+ * which will be transformed back by simpler optimizations.
+ * We could switch simple optimizations off, but this only happens if y
+ * is a loop-invariant expression and then it is not clear whether the
+ * new form is better.
+ * So, we keep the old one.
+ */
+ if (irn != n) {
+ DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
+ t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
+ exchange(n, irn);
+ *node = irn;
- return 1;
+ return 1;
+ }
+ }
}
return 0;
}
hook_reassociate(1);
- /* Reassociation must run until a fixpoint is reached. */
+ /* reassociation must run until a fixpoint is reached. */
do {
ir_op *op = get_irn_op(n);
ir_mode *mode = get_irn_mode(n);
res = 0;
/* reassociation works only for integer or reference modes */
- if (op->reassociate && (mode_is_int(mode) || mode_is_reference(mode))) {
- res = op->reassociate(&n);
+ if (op->ops.reassociate && (mode_is_int(mode) || mode_is_reference(mode))) {
+ res = op->ops.reassociate(&n);
- wenv->changes |= res;
+ wenv->changes |= res;
}
} while (res == 1);
irg_loopinfo_state state;
assert(get_irg_phase_state(irg) != phase_building);
+ assert(get_irg_pinned(irg) != op_pin_state_floats &&
+ "Reassociation needs pinned graph to work properly");
/* reassociation needs constant folding */
if (!get_opt_reassociation() || !get_opt_constant_folding())
/* Handle graph state */
if (env.changes) {
- if (get_irg_outs_state(irg) == outs_consistent)
- set_irg_outs_inconsistent(irg);
+ set_irg_outs_inconsistent(irg);
set_irg_loopinfo_inconsistent(irg);
}
}
+/* Sets the default reassociation operation for an ir_op_ops. */
+ir_op_ops *firm_set_default_reassoc(opcode code, ir_op_ops *ops)
+{
+#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
+
+ switch (code) {
+ CASE(Mul);
+ CASE(Add);
+ CASE(Sub);
+ CASE(And);
+ CASE(Or);
+ CASE(Eor);
+ default:
+ /* keep ops->reassociate NULL: no reassociation rule for this opcode */;
+ }
+
+ return ops;
+#undef CASE
+}
+
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
{
-#define INIT(a) op_##a->reassociate = reassoc_##a;
- INIT(Mul);
- INIT(Add);
- INIT(Sub);
- INIT(And);
- INIT(Or);
- INIT(Eor);
-#undef INIT
-
- dbg = firm_dbg_register("firm.opt.reassoc");
+ FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
}