2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Reassociation
23 * @author Michael Beck
32 #include "irgraph_t.h"
36 #include "iropt_dbg.h"
39 #include "reassoc_t.h"
/* Debug module handle for this pass; registered in firm_init_reassociation(). */
44 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* Walker environment: records whether any reassociation changed the graph
 * (consumed by optimize_reassociation() to invalidate graph state).
 * NOTE(review): the closing "} walker_t;" line is elided from this excerpt. */
46 typedef struct _walker_t {
47 	int changes; /* set, if a reassociation take place */
/* Operand classification used by get_comm_Binop_ops()/get_const_class().
 * NOTE(review): the enum's opening "typedef enum { ... }" line is elided here. */
51 	NO_CONSTANT = 0, /**< node is not constant */
52 	REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
53 	REGION_CONST = 4 /**< node is a constant expression in the current context,
54 	use 4 here to simplify implementation of get_comm_Binop_ops() */
58 * returns whether a node is constant ie is a constant or
59 * is loop invariant (called region constant)
61 * @param n the node to be checked for constant
62 * @param block a block that might be in a loop
/* @return NO_CONSTANT, REAL_CONSTANT or REGION_CONST (see const_class_t).
 * NOTE(review): several lines (opening brace, Const test, return statements)
 * are elided from this excerpt — do not edit without the full source. */
64 static const_class_t get_const_class(ir_node *n, ir_node *block)
66 	ir_op *op = get_irn_op(n);
/* SymConst: constant, but not foldable, so classify as region constant only. */
71 	/* although SymConst's are of course real constant, we cannot
72 	fold them, so handle them like region constants */
73 	if (op == op_SymConst)
77 	* Beware: Bad nodes are always loop-invariant, but
78 	* cannot handled in later code, so filter them here.
/* Loop-invariant (and not Bad) nodes are region constants in this block. */
80 	if (! is_Bad(n) && is_loop_invariant(n, block))
84 } /* get_const_class */
87 * returns the operands of a commutative bin-op, if one operand is
88 * a region constant, it is returned as the second one.
90 * Beware: Real constants must be returned with higher priority than
91 * region constants, because they might be folded.
/* @param binop a commutative binary operation
 * @param a     out: the non-constant (or lower-priority) operand
 * @param c     out: the constant operand, if any
 * NOTE(review): the switch's case bodies and the default path are elided
 * from this excerpt. */
93 static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
95 	ir_node *op_a = get_binop_left(binop);
96 	ir_node *op_b = get_binop_right(binop);
97 	ir_node *block = get_nodes_block(binop);
98 	int class_a = get_const_class(op_a, block);
99 	int class_b = get_const_class(op_b, block);
101 	assert(is_op_commutative(get_irn_op(binop)));
/* class_a + 2*class_b yields a distinct value per (class_a, class_b) pair
 * because REGION_CONST is 4 (see const_class_t) — enables a flat switch. */
103 	switch (class_a + 2*class_b) {
104 	case REAL_CONSTANT + 2*REAL_CONSTANT:
105 		/* if both are constants, one might be a
106 		 * pointer constant like NULL, return the other
/* Prefer the non-pointer constant as the foldable one. */
108 		if (mode_is_reference(get_irn_mode(op_a))) {
116 	case REAL_CONSTANT + 2*NO_CONSTANT:
117 	case REAL_CONSTANT + 2*REGION_CONST:
118 	case REGION_CONST + 2*NO_CONSTANT:
127 } /* get_comm_Binop_ops */
130 * reassociate a Sub: x - c = (-c) + x
/* @param in  in/out: the Sub node to transform (replaced on success)
 * @return non-zero if the graph was changed
 * NOTE(review): the body refers to `n`, whose declaration (presumably
 * `ir_node *n = *in;`) is elided from this excerpt, along with several
 * other lines (returns, exchange of the node). */
132 static int reassoc_Sub(ir_node **in)
135 	ir_node *right = get_Sub_right(n);
136 	ir_mode *rmode = get_irn_mode(right);
139 	/* cannot handle SubIs(P, P) */
140 	if (mode_is_reference(rmode))
143 	block = get_nodes_block(n);
146 	* convert x - c => (-c) + x
148 	* As there is NO real Minus in Firm it makes no sense to do this
149 	* for non-real constants yet.
/* Only rewrite when the right operand is a foldable Const. */
151 	if (get_const_class(right, block) == REAL_CONSTANT) {
152 		ir_node *left = get_Sub_left(n);
157 		switch (get_const_class(left, block)) {
159 			irn = optimize_in_place(n);
169 			/* already constant, nothing to do */
172 		mode = get_irn_mode(n);
173 		dbi = get_irn_dbg_info(n);
175 		/* Beware of SubP(P, Is) */
/* Build -c as (0 - c) in the right operand's mode, then add it to left. */
176 		c = new_r_Const(current_ir_graph, block, rmode, get_mode_null(rmode));
177 		irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, rmode);
179 		irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
181 		DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
182 			get_Sub_left(n), c, get_Sub_left(n), c));
195 /** Retrieve a mode from the operands. We need this, because
196 * Add and Sub are allowed to operate on (P, Is)
/* @param op1 first operand
 * @param op2 second operand
 * @return presumably the reference mode if either operand has one,
 *         otherwise op1's mode — TODO confirm; the return statements
 *         are elided from this excerpt. */
198 static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
202 	m1 = get_irn_mode(op1);
203 	if (mode_is_reference(m1))
206 	m2 = get_irn_mode(op2);
207 	if (mode_is_reference(m2))
213 } /* get_mode_from_ops */
216 * reassociate a commutative Binop
218 * BEWARE: this rule leads to a potential loop, if
219 * two operands are region constants and the third is a
220 * constant, so avoid this situation.
/* Rewrites c1 .OP. (c2 .OP. x) into (c1 .OP. c2) .OP. x so the constant
 * part can be folded.
 * @param node in/out: the commutative binop (replaced on success)
 * @return non-zero if the graph was changed
 * NOTE(review): this excerpt elides several lines (declaration of n/t1/c1,
 * the in[] assignments, the final exchange/return path). */
222 static int reassoc_commutative(ir_node **node)
225 	ir_op *op = get_irn_op(n);
226 	ir_node *block = get_nodes_block(n);
229 	get_comm_Binop_ops(n, &t1, &c1);
/* Only applicable when the non-constant operand is the same kind of binop. */
231 	if (get_irn_op(t1) == op) {
233 		const_class_t c_c1, c_c2, c_t2;
235 		get_comm_Binop_ops(t1, &t2, &c2);
237 		/* do not optimize Bad nodes, will fail later */
241 		c_c1 = get_const_class(c1, block);
242 		c_c2 = get_const_class(c2, block);
243 		c_t2 = get_const_class(t2, block);
/* Cycle guard: bail out when all three operands are constants and the
 * REGION_CONST parity condition below holds (see function header). */
245 		if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
246 			((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
247 			/* All three are constant and either all are constant expressions or two of them are:
248 			* then applying this rule would lead into a cycle
250 			* Note that if t2 is a constant so is c2 hence we save one test.
255 		if ((c_c1 != NO_CONSTANT) & (c_c2 != NO_CONSTANT)) {
256 			/* handles rules R7, R8, R9, R10:
257 			* convert c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x
259 			ir_node *irn, *in[2];
260 			ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
262 			/* It might happen, that c1 and c2 have different modes, for instance Is and Iu.
/* Unify operand modes before combining the constants: widen to the
 * larger integer mode, otherwise convert the foldable Const's side. */
265 			if (mode_c1 != mode_c2) {
266 				if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
267 					/* get the bigger one */
268 					if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
269 						c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
270 					else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
271 						c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
273 					/* Try to cast the real const */
274 					if (c_c1 == REAL_CONSTANT)
275 						c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
277 						c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
/* Build (c1 .OP. c2) first, then combine with t2; optimize_node() may
 * fold the constant subexpression immediately. */
285 			mode = get_mode_from_ops(in[0], in[1]);
286 			in[0] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
289 			mode = get_mode_from_ops(in[0], in[1]);
290 			irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
292 			DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => (%n .%s. %n) .%s. %n\n",
293 			c1, get_irn_opname(n), c2, get_irn_opname(n),
294 			t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2));
296 			* In some rare cases it can really happen that we get the same node back.
297 			* This might be happen in dead loops, were the Phi nodes are already gone away.
308 } /* reassoc_commutative */
/* All commutative integer binops share the same reassociation handler. */
310 #define reassoc_Add reassoc_commutative
311 #define reassoc_And reassoc_commutative
312 #define reassoc_Or reassoc_commutative
313 #define reassoc_Eor reassoc_commutative
316 * reassociate using distributive law for Mul and Add/Sub
/* Rewrites (t1 ± t2) * c into (c * t1) ± (c * t2).
 * @param node in/out: the Mul node (replaced on success)
 * @return non-zero if the graph was changed
 * NOTE(review): this excerpt elides several lines (declaration of n,
 * early returns, the exchange of the old node). */
318 static int reassoc_Mul(ir_node **node)
321 	ir_node *add_sub, *c;
/* First try the generic commutative rule; done if it already fired. */
324 	if (reassoc_commutative(&n))
327 	get_comm_Binop_ops(n, &add_sub, &c);
328 	op = get_irn_op(add_sub);
330 	/* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
331 	if (op == op_Add || op == op_Sub) {
332 		ir_mode *mode = get_irn_mode(n);
333 		ir_node *irn, *block, *t1, *t2, *in[2];
335 		block = get_nodes_block(n);
336 		t1 = get_binop_left(add_sub);
337 		t2 = get_binop_right(add_sub);
339 		/* we can only multiplication rules on integer arithmetic */
340 		if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
/* Distribute the constant over both addends, then rebuild the Add/Sub. */
341 			in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
342 			in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
344 			mode = get_mode_from_ops(in[0], in[1]);
345 			irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
347 			/* In some cases it might happen that the new irn is equal the old one, for
349 			* (x - 1) * y == x * y - y
350 			* will be transformed back by simpler optimization
351 			* We could switch simple optimizations off, but this only happens iff y
352 			* is a loop-invariant expression and that it is not clear if the new form
354 			* So, we let the old one.
357 			DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
358 				t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
370 * The walker for the reassociation.
/* Applies the per-opcode reassociate hook to every node, repeating until
 * no further change occurs (fixpoint), and accumulates the change flag
 * into the walker environment.
 * @param n   the node being visited
 * @param env the walker_t environment (changes flag)
 * NOTE(review): the fixpoint loop construct and early returns are elided
 * from this excerpt. */
372 static void do_reassociation(ir_node *n, void *env)
374 	walker_t *wenv = env;
377 	if (is_no_Block(n)) {
378 		ir_node *blk = get_nodes_block(n);
380 		if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
381 			/* We are in a dead block, do not optimize or we may fall into an endless
382 			loop. We check this here instead of requiring that all dead blocks are removed
383 			which or cf_opt do not guarantee yet. */
389 	/* reassociation must run until a fixpoint is reached. */
391 		ir_op *op = get_irn_op(n);
392 		ir_mode *mode = get_irn_mode(n);
396 		/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
397 		if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
/* Dispatch to the opcode-specific rule (reassoc_Sub, reassoc_commutative, ...). */
400 		if (op->ops.reassociate) {
401 			res = op->ops.reassociate(&n);
403 			wenv->changes |= res;
409 } /* do_reassociation */
412 * do the reassociation
/* Public entry point: runs the reassociation walker over the whole graph.
 * Requires a pinned, fully-built graph; needs dominance info (dead-block
 * detection) and control-flow loop info (region-constant detection).
 * NOTE(review): this excerpt elides several lines (env initialization,
 * early return, dominance computation, the changes check before
 * invalidating outs/loopinfo). */
414 void optimize_reassociation(ir_graph *irg)
417 	irg_loopinfo_state state;
419 	assert(get_irg_phase_state(irg) != phase_building);
420 	assert(get_irg_pinned(irg) != op_pin_state_floats &&
421 		"Reassociation needs pinned graph to work properly");
423 	/* reassociation needs constant folding */
424 	if (!get_opt_reassociation() || !get_opt_constant_folding())
427 	/* we use dominance to detect dead blocks */
431 	* Calculate loop info, so we could identify loop-invariant
432 	* code and threat it like a constant.
433 	* We only need control flow loops here but can handle generic
434 	* INTRA info as well.
436 	state = get_irg_loopinfo_state(irg);
/* Rebuild CF loop info unless intra-procedural info is already valid. */
437 	if ((state & loopinfo_inter) ||
438 		(state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
439 		construct_cf_backedges(irg);
443 	/* now we have collected enough information, optimize */
444 	irg_walk_graph(irg, NULL, do_reassociation, &env);
446 	/* Handle graph state */
/* Reassociation may change node structure: outs and loop info become stale. */
448 	set_irg_outs_inconsistent(irg);
449 	set_irg_loopinfo_inconsistent(irg);
451 } /* optimize_reassociation */
453 /* Sets the default reassociation operation for an ir_op_ops. */
/* @param code opcode to install a handler for
 * @param ops  the ops structure to fill in
 * @return presumably ops — TODO confirm; the switch body with the CASE
 *         invocations and the return are elided from this excerpt. */
454 ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
456 #define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
471 } /* firm_set_default_reassoc */
473 /* initialize the reassociation by adding operations to some opcodes */
/* Registers this pass's debug module ("firm.opt.reassoc") with the debug
 * facility. NOTE(review): the opening brace line is elided from this excerpt. */
474 void firm_init_reassociation(void)
476 	FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
477 } /* firm_init_reassociation */