3 * File name: ir/opt/reassoc.c
4 * Purpose: Reassociation
8 * Copyright: (c) 1998-2007 Universität Karlsruhe
9 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
16 #include "irgraph_t.h"
22 #include "iropt_dbg.h"
25 #include "reassoc_t.h"
/* Debug module handle; only present in debug builds (DEBUG_ONLY). */
30 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* NOTE(review): the source lines in this view are sampled; the typedef
 * below is missing its closing lines, and the const_class_t enum is
 * missing its `typedef enum` header — they are elided, not deleted. */
/* Environment passed to the reassociation walker. */
32 typedef struct _walker_t {
33 int changes; /* set, if a reassociation took place */
/* Classification of a node relative to a given block.
 * The values are chosen so that sums of the form a + 2*b are unique,
 * which get_comm_Binop_ops() relies on for its switch. */
37 NO_CONSTANT = 0, /**< node is not constant */
38 REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
39 REGION_CONST = 4 /**< node is a constant expression in the current context,
40 use 4 here to simplify implementation of get_comm_Binop_ops() */
44 * returns whether a node is constant ie is a constant or
45 * is loop invariant (called region constant)
47 * @param n the node to be checked for constant
48 * @param block a block that might be in a loop
/* Returns one of NO_CONSTANT, REAL_CONSTANT or REGION_CONST for n.
 * NOTE(review): several body lines (including the actual return
 * statements for each classification) are elided in this view. */
50 static const_class_t get_const_class(ir_node *n, ir_node *block)
52 ir_op *op = get_irn_op(n);
57 /* although SymConst's are of course real constants, we cannot
58 fold them, so handle them like region constants */
59 if (op == op_SymConst)
/* Bad nodes would pass the loop-invariance test but break later code,
 * so they must be filtered out before classifying as REGION_CONST. */
63 * Beware: Bad nodes are always loop-invariant, but
64 * cannot be handled in later code, so filter them here.
66 if (! is_Bad(n) && is_loop_invariant(n, block))
70 } /* get_const_class */
73 * returns the operands of a commutative bin-op, if one operand is
74 * a region constant, it is returned as the second one.
76 * Beware: Real constants must be returned with higher priority than
77 * region constants, because they might be folded.
/* Splits a commutative binop into (*a, *c) where *c is the "more
 * constant" operand.  NOTE(review): the case bodies of the switch and
 * the fall-through/default assignments are elided in this view. */
79 static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
81 ir_node *op_a = get_binop_left(binop);
82 ir_node *op_b = get_binop_right(binop);
83 ir_node *block = get_nodes_block(binop);
84 int class_a = get_const_class(op_a, block);
85 int class_b = get_const_class(op_b, block);
/* Only commutative ops may have their operands swapped. */
87 assert(is_op_commutative(get_irn_op(binop)));
/* class_a + 2*class_b is unique per (class_a, class_b) pair because
 * REGION_CONST was chosen as 4 (see const_class_t). */
89 switch (class_a + 2*class_b) {
90 case REAL_CONSTANT + 2*REAL_CONSTANT:
91 /* if both are constants, one might be a
92 * pointer constant like NULL, return the other */
94 if (mode_is_reference(get_irn_mode(op_a))) {
/* For these mixed cases the constant operand goes into *c
 * (bodies elided in this view). */
102 case REAL_CONSTANT + 2*NO_CONSTANT:
103 case REAL_CONSTANT + 2*REGION_CONST:
104 case REGION_CONST + 2*NO_CONSTANT:
113 } /* get_comm_Binop_ops */
116 * reassociate a Sub: x - c = (-c) + x
/* Rewrites x - c into (-c) + x so follow-up rules on commutative Add
 * can fire.  Returns non-zero if the graph was changed.
 * NOTE(review): parts of the body (declarations of n/block/c/..., the
 * exchange of the old node, and the return) are elided in this view. */
118 static int reassoc_Sub(ir_node **in)
121 ir_node *right = get_Sub_right(n);
122 ir_mode *rmode = get_irn_mode(right);
125 /* cannot handle SubIs(P, P) */
126 if (mode_is_reference(rmode))
129 block = get_nodes_block(n);
132 * convert x - c => (-c) + x
134 * As there is NO real Minus in Firm it makes no sense to do this
135 * for non-real constants yet.
/* Only transform when the subtrahend is a foldable Const. */
137 if (get_const_class(right, block) == REAL_CONSTANT) {
138 ir_node *left = get_Sub_left(n);
143 switch (get_const_class(left, block)) {
145 irn = optimize_in_place(n);
155 /* already constant, nothing to do */
158 mode = get_irn_mode(n);
159 dbi = get_irn_dbg_info(n);
/* Build -c as (0 - c) in the mode of the right operand; the Sub's own
 * mode may differ (Beware of SubP(P, Is)). */
161 /* Beware of SubP(P, Is) */
162 c = new_r_Const(current_ir_graph, block, rmode, get_mode_null(rmode));
163 irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, rmode);
165 irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
167 DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
168 get_Sub_left(n), c, get_Sub_left(n), c));
178 /** Retrieve a mode from the operands. We need this, because
179 * Add and Sub are allowed to operate on (P, Is)
/* Picks the result mode for a rebuilt binop: a reference mode takes
 * priority over an integer mode.  NOTE(review): the return statements
 * are elided in this view — presumably the reference mode is returned
 * when either operand has one, else m2; confirm against the full file. */
181 static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
185 m1 = get_irn_mode(op1);
186 if (mode_is_reference(m1))
189 m2 = get_irn_mode(op2);
190 if (mode_is_reference(m2))
196 } /* get_mode_from_ops */
199 * reassociate a commutative Binop
201 * BEWARE: this rule leads to a potential loop, if
202 * two operands are region constants and the third is a
203 * constant, so avoid this situation.
/* Applies c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x for commutative
 * OPs.  Returns non-zero if the graph was changed.
 * NOTE(review): several body lines (declarations of n/t1/c1/t2/c2, the
 * Bad-node test, the in[] assignments, the exchange of the old node and
 * the return paths) are elided in this view. */
205 static int reassoc_commutative(ir_node **node)
208 ir_op *op = get_irn_op(n);
209 ir_node *block = get_nodes_block(n);
212 get_comm_Binop_ops(n, &t1, &c1);
/* The rule only applies when the non-constant operand is itself the
 * same kind of binop. */
214 if (get_irn_op(t1) == op) {
216 const_class_t c_c1, c_c2, c_t2;
218 get_comm_Binop_ops(t1, &t2, &c2);
220 /* do not optimize Bad nodes, will fail later */
224 c_c1 = get_const_class(c1, block);
225 c_c2 = get_const_class(c2, block);
226 c_t2 = get_const_class(t2, block);
/* Cycle guard: if all three operands are constant-ish and either all
 * or exactly two are region constants, applying the rule would just
 * oscillate between equivalent forms — bail out.  The bit tricks work
 * because REGION_CONST is a single bit (value 4). */
228 if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
229 ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
230 /* All three are constant and either all are constant expressions or two of them are:
231 * then applying this rule would lead into a cycle
233 * Note that if t2 is a constant so is c2 hence we save one test.
238 if ((c_c1 != NO_CONSTANT) & (c_c2 != NO_CONSTANT)) {
239 /* handles rules R7, R8, R9, R10:
240 * convert c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x
242 ir_node *irn, *in[2];
243 ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
245 /* It might happen, that c1 and c2 have different modes, for instance Is and Iu.
/* Unify the modes of the two constants before folding them. */
248 if (mode_c1 != mode_c2) {
249 if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
250 /* get the bigger one */
251 if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
252 c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
253 else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
254 c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
/* Non-integer mode mismatch: convert whichever side is the
 * foldable real constant. */
256 /* Try to cast the real const */
257 if (c_c1 == REAL_CONSTANT)
258 c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
260 c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
/* First build (c1 .OP. c2) — optimize_node may fold it to a single
 * constant — then combine it with t2. */
268 mode = get_mode_from_ops(in[0], in[1]);
269 in[0] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
272 mode = get_mode_from_ops(in[0], in[1]);
273 irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
275 DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => (%n .%s. %n) .%s. %n\n",
276 c1, get_irn_opname(n), c2, get_irn_opname(n),
277 t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2));
279 * In some rare cases it can really happen that we get the same node back.
280 * This might happen in dead loops, where the Phi nodes are already gone away.
291 } /* reassoc_commutative */
/* Add, And, Or and Eor are all commutative; they share the generic
 * commutative reassociation handler. */
293 #define reassoc_Add reassoc_commutative
294 #define reassoc_And reassoc_commutative
295 #define reassoc_Or reassoc_commutative
296 #define reassoc_Eor reassoc_commutative
299 * reassociate using distributive law for Mul and Add/Sub
/* Distributes a constant factor over an Add/Sub operand:
 * c * (t1 +/- t2) => (c * t1) +/- (c * t2) (rules R11..R20).
 * Returns non-zero if the graph was changed.
 * NOTE(review): the declarations of n/op, the exchange of the old node
 * and the return paths are elided in this view. */
301 static int reassoc_Mul(ir_node **node)
304 ir_node *add_sub, *c;
/* Mul is commutative, so first try the generic commutative rule. */
307 if (reassoc_commutative(&n))
310 get_comm_Binop_ops(n, &add_sub, &c);
311 op = get_irn_op(add_sub);
313 /* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
314 if (op == op_Add || op == op_Sub) {
315 ir_mode *mode = get_irn_mode(n);
316 ir_node *irn, *block, *t1, *t2, *in[2];
318 block = get_nodes_block(n);
319 t1 = get_binop_left(add_sub);
320 t2 = get_binop_right(add_sub);
322 /* we can only apply multiplication rules on integer arithmetic */
323 if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
324 in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
325 in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
/* Rebuild the outer Add/Sub over the two new Muls; optimize_node may
 * fold parts of it immediately. */
327 mode = get_mode_from_ops(in[0], in[1]);
328 irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
330 /* In some cases it might happen that the new irn is equal the old one, for
332 * (x - 1) * y == x * y - y
333 * will be transformed back by simpler optimization
334 * We could switch simple optimizations off, but this only happens iff y
335 * is a loop-invariant expression and it is not clear if the new form
337 * So, we keep the old one.
340 DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
341 t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
353 * The walker for the reassociation.
/* Per-node walker callback: repeatedly applies the op's reassociate
 * handler until no more changes occur, accumulating the change flag in
 * the walker environment.  NOTE(review): the skip/return paths and the
 * fixpoint loop construct are elided in this view. */
355 static void do_reassociation(ir_node *n, void *env)
357 walker_t *wenv = env;
360 if (is_no_Block(n)) {
361 ir_node *blk = get_nodes_block(n);
363 if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
364 /* We are in a dead block, do not optimize or we may fall into an endless
365 loop. We check this here instead of requiring that all dead blocks are
366 removed, which (NOTE: word lost here — presumably dead-node elimination)
367 or cf_opt do not guarantee yet. */
372 /* reassociation must run until a fixpoint is reached. */
374 ir_op *op = get_irn_op(n);
375 ir_mode *mode = get_irn_mode(n);
379 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
380 if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
/* Dispatch to the per-opcode handler registered in firm_set_default_reassoc. */
383 if (op->ops.reassociate) {
384 res = op->ops.reassociate(&n);
386 wenv->changes |= res;
392 } /* do_reassociation */
395 * do the reassociation
/* Public entry point: runs the reassociation pass on irg.
 * Requires a pinned, fully-built graph; needs dominance and control-flow
 * loop information, which it (re)computes if missing.
 * NOTE(review): the walker_t env initialization, the dominance
 * computation call and some early returns are elided in this view. */
397 void optimize_reassociation(ir_graph *irg)
400 irg_loopinfo_state state;
402 assert(get_irg_phase_state(irg) != phase_building);
403 assert(get_irg_pinned(irg) != op_pin_state_floats &&
404 "Reassociation needs pinned graph to work properly");
406 /* reassociation needs constant folding */
407 if (!get_opt_reassociation() || !get_opt_constant_folding())
410 /* we use dominance to detect dead blocks */
414 * Calculate loop info, so we could identify loop-invariant
415 * code and treat it like a constant.
416 * We only need control flow loops here but can handle generic
417 * INTRA info as well.
419 state = get_irg_loopinfo_state(irg);
/* Rebuild loop info if it is inter-procedural or not both constructed
 * and valid. */
420 if ((state & loopinfo_inter) ||
421 (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
422 construct_cf_backedges(irg);
426 /* now we have collected enough information, optimize */
427 irg_walk_graph(irg, NULL, do_reassociation, &env);
429 /* Handle graph state */
/* The pass creates/exchanges nodes, so outs and loop info are stale. */
431 set_irg_outs_inconsistent(irg);
432 set_irg_loopinfo_inconsistent(irg);
434 } /* optimize_reassociation */
436 /* Sets the default reassociation operation for an ir_op_ops. */
/* Registers the per-opcode reassociate callback (reassoc_Add, reassoc_Sub,
 * reassoc_Mul, ...) for the given opcode.  NOTE(review): the switch over
 * `code` that uses CASE, the #undef CASE and the return are elided in
 * this view. */
437 ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
439 #define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
454 } /* firm_set_default_reassoc */
456 /* initialize the reassociation by adding operations to some opcodes */
/* One-time module init: registers the "firm.opt.reassoc" debug channel
 * used by the DBG() statements in this file. */
457 void firm_init_reassociation(void)
459 FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
460 } /* firm_init_reassociation */