2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Reassociation
23 * @author Michael Beck
31 #include "irgraph_t.h"
37 #include "iropt_dbg.h"
40 #include "reassoc_t.h"
/* Debug module handle; compiled in only for debug builds. */
45 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* Walker environment: accumulates whether any rewrite happened. */
47 typedef struct _walker_t {
48 	int changes; /* set, if a reassociation took place */
/* Constant classification used to order operands of commutative ops.
 * NOTE(review): the enum header line is missing from this extract —
 * verify the full declaration against upstream reassoc.c. */
52 	NO_CONSTANT = 0, /**< node is not constant */
53 	REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
54 	REGION_CONST = 4 /**< node is a constant expression in the current context,
55 	use 4 here to simplify implementation of get_comm_Binop_ops() */
59 * returns whether a node is constant ie is a constant or
60 * is loop invariant (called region constant)
62 * @param n the node to be checked for constant
63 * @param block a block that might be in a loop
/**
 * Classify node @p n relative to @p block: NO_CONSTANT, REAL_CONSTANT
 * (a foldable Const) or REGION_CONST (loop-invariant in this context).
 * NOTE(review): several body lines (returns, braces) are missing from
 * this extract — original line numbering jumps 67→72, 74→78, 81→85.
 */
65 static const_class_t get_const_class(ir_node *n, ir_node *block)
67 	ir_op *op = get_irn_op(n);
72 	/* although SymConst's are of course real constant, we cannot
73 	   fold them, so handle them like region constants */
74 	if (op == op_SymConst)
78 	 * Beware: Bad nodes are always loop-invariant, but
79 	 * cannot be handled in later code, so filter them here.
81 	if (! is_Bad(n) && is_loop_invariant(n, block))
85 } /* get_const_class */
88 * returns the operands of a commutative bin-op, if one operand is
89 * a region constant, it is returned as the second one.
91 * Beware: Real constants must be returned with higher priority than
92 * region constants, because they might be folded.
/**
 * Split a commutative binop into (*a, *c) so that the "more constant"
 * operand ends up in *c; real constants win over region constants
 * because only they can be folded.  The switch key class_a + 2*class_b
 * relies on REGION_CONST == 4 to keep all combinations distinct.
 * NOTE(review): the switch bodies/assignments are missing from this
 * extract (numbering jumps 109→117, 119→128) — verify upstream.
 */
94 static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
96 	ir_node *op_a = get_binop_left(binop);
97 	ir_node *op_b = get_binop_right(binop);
98 	ir_node *block = get_nodes_block(binop);
99 	int class_a = get_const_class(op_a, block);
100 	int class_b = get_const_class(op_b, block);
102 	assert(is_op_commutative(get_irn_op(binop)));
104 	switch (class_a + 2*class_b) {
105 	case REAL_CONSTANT + 2*REAL_CONSTANT:
106 		/* if both are constants, one might be a
107 		 * pointer constant like NULL, return the other
109 		if (mode_is_reference(get_irn_mode(op_a))) {
117 	case REAL_CONSTANT + 2*NO_CONSTANT:
118 	case REAL_CONSTANT + 2*REGION_CONST:
119 	case REGION_CONST + 2*NO_CONSTANT:
128 } /* get_comm_Binop_ops */
131 * reassociate a Sub: x - c = (-c) + x
/**
 * Reassociate a Sub: rewrite x - c into (-c) + x so the constant
 * moves to a canonical position for later folding.
 * Returns non-zero iff the node was changed (fixpoint driver contract).
 * NOTE(review): this extract drops many body lines (e.g. the variable
 * declarations around original lines 134-135, the switch cases after
 * 158, and the exchange/return tail after 183) — verify upstream.
 */
133 static int reassoc_Sub(ir_node **in)
136 	ir_node *right = get_Sub_right(n);
137 	ir_mode *rmode = get_irn_mode(right);
140 	/* cannot handle SubIs(P, P) */
141 	if (mode_is_reference(rmode))
144 	block = get_nodes_block(n);
147 	 * convert x - c => (-c) + x
149 	 * As there is NO real Minus in Firm it makes no sense to do this
150 	 * for non-real constants yet.
152 	if (get_const_class(right, block) == REAL_CONSTANT) {
153 		ir_node *left = get_Sub_left(n);
158 		switch (get_const_class(left, block)) {
160 			irn = optimize_in_place(n);
170 			/* already constant, nothing to do */
173 		mode = get_irn_mode(n);
174 		dbi = get_irn_dbg_info(n);
176 		/* Beware of SubP(P, Is): build -c as (0 - c) in the right operand's mode */
177 		c = new_r_Const(current_ir_graph, block, rmode, get_mode_null(rmode));
178 		irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, rmode);
180 		irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
182 		DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
183 			get_Sub_left(n), c, get_Sub_left(n), c));
193 /** Retrieve a mode from the operands. We need this, because
194 * Add and Sub are allowed to operate on (P, Is)
/** Retrieve a mode from the operands. We need this, because
 * Add and Sub are allowed to operate on (P, Is): if either operand
 * has a reference mode, that mode wins; otherwise the (asserted-equal,
 * per upstream) integer mode is returned.
 * NOTE(review): the return statements are missing from this extract
 * (numbering jumps 201→204, 205→211) — verify upstream.
 */
196 static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
200 	m1 = get_irn_mode(op1);
201 	if (mode_is_reference(m1))
204 	m2 = get_irn_mode(op2);
205 	if (mode_is_reference(m2))
211 } /* get_mode_from_ops */
214 * reassociate a commutative Binop
216 * BEWARE: this rule leads to a potential loop, if
217 * two operands are region constants and the third is a
218 * constant, so avoid this situation.
/**
 * Reassociate a commutative binop: c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x
 * (rules R7-R10), folding the two constants into one.
 *
 * BEWARE: this rule leads to a potential loop, if
 * two operands are region constants and the third is a
 * constant, so avoid this situation (checked via the class bits below).
 *
 * NOTE(review): this extract is missing several lines (declarations
 * after 220, the Bad-node bail-out around 235-239, the in[] setup
 * around 275-283, and the exchange/return tail after 295) — verify
 * against upstream reassoc.c before relying on the control flow.
 */
220 static int reassoc_commutative(ir_node **node)
223 	ir_op *op = get_irn_op(n);
224 	ir_node *block = get_nodes_block(n);
227 	get_comm_Binop_ops(n, &t1, &c1);
229 	if (get_irn_op(t1) == op) {
231 		const_class_t c_c1, c_c2, c_t2;
233 		get_comm_Binop_ops(t1, &t2, &c2);
235 		/* do not optimize Bad nodes, will fail later */
239 		c_c1 = get_const_class(c1, block);
240 		c_c2 = get_const_class(c2, block);
241 		c_t2 = get_const_class(t2, block);
		/* Cycle guard: bail out when all three are constants and the
		 * REGION_CONST parities show the rewrite would oscillate. */
243 		if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
244 			((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
245 			/* All three are constant and either all are constant expressions or two of them are:
246 			 * then applying this rule would lead into a cycle
248 			 * Note that if t2 is a constant so is c2 hence we save one test.
253 		if ((c_c1 != NO_CONSTANT) & (c_c2 != NO_CONSTANT)) {
254 			/* handles rules R7, R8, R9, R10:
255 			 * convert c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x
257 			ir_node *irn, *in[2];
258 			ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
260 			/* It might happen, that c1 and c2 have different modes, for instance Is and Iu.
263 			if (mode_c1 != mode_c2) {
264 				if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
265 					/* get the bigger one */
266 					if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
267 						c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
268 					else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
269 						c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
271 					/* Try to cast the real const */
272 					if (c_c1 == REAL_CONSTANT)
273 						c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
275 						c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
			/* Build (c1 .OP. c2) first, then combine with t2. */
283 			mode = get_mode_from_ops(in[0], in[1]);
284 			in[0] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
287 			mode = get_mode_from_ops(in[0], in[1]);
288 			irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
290 			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => (%n .%s. %n) .%s. %n\n",
291 				c1, get_irn_opname(n), c2, get_irn_opname(n),
292 				t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2));
294 			 * In some rare cases it can really happen that we get the same node back.
295 			 * This might happen in dead loops, where the Phi nodes are already gone away.
306 } /* reassoc_commutative */
/* Add, And, Or and Eor are commutative, so all share the generic handler. */
308 #define reassoc_Add  reassoc_commutative
309 #define reassoc_And  reassoc_commutative
310 #define reassoc_Or   reassoc_commutative
311 #define reassoc_Eor  reassoc_commutative
314 * reassociate using distributive law for Mul and Add/Sub
/**
 * Reassociate using the distributive law for Mul with Add/Sub:
 * (t1 ± t2) * c => (c * t1) ± (c * t2)  (rules R11-R20).
 * Tries the commutative rules first.  Returns non-zero iff changed.
 * NOTE(review): this extract drops several lines (declarations after
 * 319, the early return after 322, and the exchange/return tail after
 * 356) — verify against upstream before relying on control flow.
 */
316 static int reassoc_Mul(ir_node **node)
319 	ir_node *add_sub, *c;
322 	if (reassoc_commutative(&n))
325 	get_comm_Binop_ops(n, &add_sub, &c);
326 	op = get_irn_op(add_sub);
328 	/* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
329 	if (op == op_Add || op == op_Sub) {
330 		ir_mode *mode = get_irn_mode(n);
331 		ir_node *irn, *block, *t1, *t2, *in[2];
333 		block = get_nodes_block(n);
334 		t1 = get_binop_left(add_sub);
335 		t2 = get_binop_right(add_sub);
337 		/* we can only apply multiplication rules on integer arithmetic */
338 		if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
339 			in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
340 			in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
342 			mode = get_mode_from_ops(in[0], in[1]);
343 			irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
345 			/* In some cases it might happen that the new irn is equal the old one, for
347 			 * (x - 1) * y == x * y - y
348 			 * will be transformed back by simpler optimization
349 			 * We could switch simple optimizations off, but this only happens iff y
350 			 * is a loop-invariant expression and that it is not clear if the new form
352 			 * So, we keep the old one.
355 			DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
356 				t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
368 * The walker for the reassociation.
/**
 * The walker for the reassociation: skips dead blocks, then calls the
 * opcode's reassociate hook repeatedly until a fixpoint is reached,
 * recording any change in the walker environment.
 * NOTE(review): this extract drops lines (e.g. the return inside the
 * dead-block branch after 381 and the do/while wrapper around 389) —
 * verify against upstream.
 */
370 static void do_reassociation(ir_node *n, void *env)
372 	walker_t *wenv = env;
375 	if (is_no_Block(n)) {
376 		ir_node *blk = get_nodes_block(n);
378 		if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
379 			/* We are in a dead block, do not optimize or we may fall into an endless
380 			   loop. We check this here instead of requiring that all dead blocks are removed
381 			   which or cf_opt do not guarantee yet. */
387 	/* reassociation must run until a fixpoint is reached. */
389 		ir_op *op = get_irn_op(n);
390 		ir_mode *mode = get_irn_mode(n);
394 		/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
395 		if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
398 		if (op->ops.reassociate) {
399 			res = op->ops.reassociate(&n);
401 			wenv->changes |= res;
407 } /* do_reassociation */
410 * do the reassociation
/**
 * Run reassociation on graph @p irg.  Requires a pinned, fully built
 * graph; needs constant folding enabled, dominance (to detect dead
 * blocks) and intra-procedural loop info (to detect region constants).
 * Marks outs and loop info inconsistent afterwards.
 * NOTE(review): this extract drops lines (env setup, dominance
 * computation around 425-426, early returns) — verify upstream.
 */
412 void optimize_reassociation(ir_graph *irg)
415 	irg_loopinfo_state state;
417 	assert(get_irg_phase_state(irg) != phase_building);
418 	assert(get_irg_pinned(irg) != op_pin_state_floats &&
419 		"Reassociation needs pinned graph to work properly");
421 	/* reassociation needs constant folding */
422 	if (!get_opt_reassociation() || !get_opt_constant_folding())
425 	/* we use dominance to detect dead blocks */
429 	 * Calculate loop info, so we could identify loop-invariant
430 	 * code and treat it like a constant.
431 	 * We only need control flow loops here but can handle generic
432 	 * INTRA info as well.
434 	state = get_irg_loopinfo_state(irg);
435 	if ((state & loopinfo_inter) ||
436 		(state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
437 		construct_cf_backedges(irg);
441 	/* now we have collected enough information, optimize */
442 	irg_walk_graph(irg, NULL, do_reassociation, &env);
444 	/* Handle graph state */
446 		set_irg_outs_inconsistent(irg);
447 		set_irg_loopinfo_inconsistent(irg);
449 } /* optimize_reassociation */
451 /* Sets the default reassociation operation for an ir_op_ops. */
/* Sets the default reassociation operation for an ir_op_ops.
 * NOTE(review): the switch over @p code (original lines 455-468) is
 * missing from this extract — only the CASE helper macro is visible. */
452 ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
454 #define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
469 } /* firm_set_default_reassoc */
471 /* initialize the reassociation by adding operations to some opcodes */
/* initialize the reassociation by registering its debug module */
472 void firm_init_reassociation(void)
474 	FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
475 } /* firm_init_reassociation */