2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Reassociation
23 * @author Michael Beck
32 #include "irgraph_t.h"
37 #include "iropt_dbg.h"
40 #include "reassoc_t.h"
45 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* NOTE(review): this chunk is a line-numbered dump with lines missing —
 * the closing "} walker_t;" of the struct and the enum header of
 * const_class_t are absent here. Verify against upstream libFirm reassoc.c. */
/** Walker environment: accumulates whether any reassociation happened. */
47 typedef struct _walker_t {
48 int changes; /* set, if a reassociation takes place */
/* Constness classification used by the reassociation rules
 * (values of enum const_class_t; enum header missing in this view). */
52 NO_CONSTANT = 0, /**< node is not constant */
53 REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
54 REGION_CONST = 4 /**< node is a constant expression in the current context,
55 use 4 here to simplify implementation of get_comm_Binop_ops() */
59 * returns whether a node is constant, i.e. is a Const node or
60 * is loop invariant (called a region constant)
62 * @param n the node to be checked for constness
63 * @param block a block that might be in a loop
/* NOTE(review): body truncated in this dump — the Const check and the
 * return statements (original lines 66, 68-71, 75-77, 80, 82-84) are
 * missing; only the SymConst and loop-invariance tests are visible. */
65 static const_class_t get_const_class(ir_node *n, ir_node *block)
67 ir_op *op = get_irn_op(n);
72 /* although SymConst's are of course real constants, we cannot
73 fold them, so handle them like region constants */
74 if (op == op_SymConst)
78 * Beware: Bad nodes are always loop-invariant, but
79 * cannot be handled in later code, so filter them here.
81 if (! is_Bad(n) && is_loop_invariant(n, block))
85 } /* get_const_class */
88 * returns the operands of a commutative bin-op; if one operand is
89 * a region constant, it is returned as the second one.
91 * Beware: Real constants must be returned with higher priority than
92 * region constants, because they might be folded.
/* @param binop the commutative binary operation
 * @param a out: the non-constant (or lower-priority) operand
 * @param c out: the constant operand, if any
 * NOTE(review): truncated — the assignments to *a/*c inside the switch
 * (original lines 108-127) are largely missing from this view. */
94 static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
96 ir_node *op_a = get_binop_left(binop);
97 ir_node *op_b = get_binop_right(binop);
98 ir_node *block = get_nodes_block(binop);
99 int class_a = get_const_class(op_a, block);
100 int class_b = get_const_class(op_b, block);
102 assert(is_op_commutative(get_irn_op(binop)));
/* encode the (class_a, class_b) pair into a single switch value;
 * REGION_CONST == 4 keeps the combinations distinct */
104 switch (class_a + 2*class_b) {
105 case REAL_CONSTANT + 2*REAL_CONSTANT:
106 /* if both are constants, one might be a
107 * pointer constant like NULL, return the other
109 if (mode_is_reference(get_irn_mode(op_a))) {
117 case REAL_CONSTANT + 2*NO_CONSTANT:
118 case REAL_CONSTANT + 2*REGION_CONST:
119 case REGION_CONST + 2*NO_CONSTANT:
128 } /* get_comm_Binop_ops */
131 * reassociate a Sub: x - c = (-c) + x
/* @param in in/out: pointer to the Sub node; may be replaced by the new Add
 * @return non-zero if a reassociation took place (presumably — the return
 *         statements are missing from this truncated view; verify upstream)
 * NOTE(review): declarations of n, block, mode, dbi, c, irn and several
 * control-flow lines are missing in this dump. */
133 static int reassoc_Sub(ir_node **in)
136 ir_node *right = get_Sub_right(n);
137 ir_mode *rmode = get_irn_mode(right);
140 /* cannot handle SubIs(P, P) */
141 if (mode_is_reference(rmode))
144 block = get_nodes_block(n);
147 * convert x - c => (-c) + x
149 * As there is NO real Minus in Firm it makes no sense to do this
150 * for non-real constants yet.
152 if (get_const_class(right, block) == REAL_CONSTANT) {
153 ir_node *left = get_Sub_left(n);
/* classify the left operand; constant-left cases need no rewrite */
158 switch (get_const_class(left, block)) {
160 irn = optimize_in_place(n);
170 /* already constant, nothing to do */
173 mode = get_irn_mode(n);
174 dbi = get_irn_dbg_info(n);
176 /* Beware of SubP(P, Is): build -c as (0 - c) in the right operand's mode */
177 c = new_r_Const(current_ir_graph, block, rmode, get_mode_null(rmode));
178 irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, rmode);
180 irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
182 DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
183 get_Sub_left(n), c, get_Sub_left(n), c));
196 /** Retrieve a mode from the operands. We need this, because
197 * Add and Sub are allowed to operate on (P, Is)
/* Returns the reference mode if either operand is a pointer, otherwise a
 * shared mode of the operands — the actual return statements (original
 * lines 205-213) are missing from this truncated view. */
199 static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
203 m1 = get_irn_mode(op1);
204 if (mode_is_reference(m1))
207 m2 = get_irn_mode(op2);
208 if (mode_is_reference(m2))
214 } /* get_mode_from_ops */
217 * reassociate a commutative Binop
219 * BEWARE: this rule leads to a potential loop, if
220 * two operands are region constants and the third is a
221 * constant, so avoid this situation.
/* @param node in/out: pointer to the commutative binop; may be replaced
 * @return non-zero on change (presumably — returns truncated in this view)
 * NOTE(review): declarations of n, t1, t2, c1, c2 and several statements
 * (original lines 224-231, 235-241, 250-255, 279-292, 296-308) are
 * missing from this dump; do not edit the logic without the full source. */
223 static int reassoc_commutative(ir_node **node)
226 ir_op *op = get_irn_op(n);
227 ir_node *block = get_nodes_block(n);
/* split n into its non-constant part t1 and constant part c1 */
230 get_comm_Binop_ops(n, &t1, &c1);
/* only reassociate when the inner node uses the same operation */
232 if (get_irn_op(t1) == op) {
234 const_class_t c_c1, c_c2, c_t2;
236 get_comm_Binop_ops(t1, &t2, &c2);
238 /* do not optimize Bad nodes, will fail later */
242 c_c1 = get_const_class(c1, block);
243 c_c2 = get_const_class(c2, block);
244 c_t2 = get_const_class(t2, block);
/* cycle guard: bitwise tricks over the const_class_t encoding
 * (REAL_CONSTANT = 1, REGION_CONST = 4) detect the all-constant /
 * two-region-constant situations that would ping-pong forever */
246 if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
247 ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
248 /* All three are constant and either all are constant expressions or two of them are:
249 * then applying this rule would lead into a cycle
251 * Note that if t2 is a constant so is c2, hence we save one test.
256 if ((c_c1 != NO_CONSTANT) & (c_c2 != NO_CONSTANT)) {
257 /* handles rules R7, R8, R9, R10:
258 * convert c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x
260 ir_node *irn, *in[2];
261 ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
263 /* It might happen, that c1 and c2 have different modes, for instance Is and Iu.
/* unify the modes by converting to the wider integer mode, or — for
 * mixed real/region constants — by casting the real constant */
266 if (mode_c1 != mode_c2) {
267 if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
268 /* get the bigger one */
269 if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
270 c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
271 else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
272 c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
274 /* Try to cast the real const */
275 if (c_c1 == REAL_CONSTANT)
276 c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
278 c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
/* build (c1 .OP. c2) first, then combine with t2 */
286 mode = get_mode_from_ops(in[0], in[1]);
287 in[0] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
290 mode = get_mode_from_ops(in[0], in[1]);
291 irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
293 DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => (%n .%s. %n) .%s. %n\n",
294 c1, get_irn_opname(n), c2, get_irn_opname(n),
295 t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2));
297 * In some rare cases it can really happen that we get the same node back.
298 * This might happen in dead loops, where the Phi nodes are already gone.
309 } /* reassoc_commutative */
/* Add, And, Or and Eor are commutative; they all share the generic
 * commutative reassociation handler above. */
311 #define reassoc_Add reassoc_commutative
312 #define reassoc_And reassoc_commutative
313 #define reassoc_Or reassoc_commutative
314 #define reassoc_Eor reassoc_commutative
317 * reassociate using the distributive law for Mul and Add/Sub:
 * c * (t1 .OP. t2) => (c * t1) .OP. (c * t2)
/* @param node in/out: pointer to the Mul node
 * @return non-zero on change (return statements truncated in this view)
 * NOTE(review): declarations of n/op and several lines (original
 * 320-327, 344-357, 360-369) are missing from this dump. */
319 static int reassoc_Mul(ir_node **node)
322 ir_node *add_sub, *c;
/* first try the plain commutative rules; if they fired we are done */
325 if (reassoc_commutative(&n))
328 get_comm_Binop_ops(n, &add_sub, &c);
329 op = get_irn_op(add_sub);
331 /* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
332 if (op == op_Add || op == op_Sub) {
333 ir_mode *mode = get_irn_mode(n);
334 ir_node *irn, *block, *t1, *t2, *in[2];
336 block = get_nodes_block(n);
337 t1 = get_binop_left(add_sub);
338 t2 = get_binop_right(add_sub);
340 /* we can only apply multiplication rules on integer arithmetic */
341 if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
342 in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
343 in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
345 mode = get_mode_from_ops(in[0], in[1]);
346 irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
348 /* In some cases it might happen that the new irn is equal the old one, for
350 * (x - 1) * y == x * y - y
351 * will be transformed back by simpler optimization
352 * We could switch simple optimizations off, but this only happens iff y
353 * is a loop-invariant expression and it is not clear if the new form
355 * So, we keep the old one.
358 DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
359 t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
371 * The walker for the reassociation.
/* @param n the node currently visited by the graph walker
 * @param env walker_t environment; its `changes` flag is OR-ed with the
 *            result of each per-opcode reassociate callback
 * NOTE(review): the fixpoint loop header and several lines (original
 * 374-377, 380, 385-389, 391, 394-396, 399-400, 403, 405-409) are
 * missing from this truncated view. */
373 static void do_reassociation(ir_node *n, void *env)
375 walker_t *wenv = env;
378 if (is_no_Block(n)) {
379 ir_node *blk = get_nodes_block(n);
381 if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
382 /* We are in a dead block, do not optimize or we may fall into an endless
383 loop. We check this here instead of requiring that all dead blocks are
384 removed, which the control-flow optimizations (cf_opt) do not guarantee yet. */
390 /* reassociation must run until a fixpoint is reached. */
392 ir_op *op = get_irn_op(n);
393 ir_mode *mode = get_irn_mode(n);
397 /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
398 if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
/* dispatch to the opcode-specific reassociation rule, if any */
401 if (op->ops.reassociate) {
402 res = op->ops.reassociate(&n);
404 wenv->changes |= res;
410 } /* do_reassociation */
413 * do the reassociation
/* Public entry point: runs the reassociation walker over the whole graph.
 * Requires a pinned, fully-built graph; needs constant folding enabled.
 * NOTE(review): lines declaring `env`, computing dominance, and the
 * changes-check guarding the state invalidation (original 416-417, 419,
 * 423, 426-431, 436, 441-443, 446, 448, 451) are missing in this dump. */
415 void optimize_reassociation(ir_graph *irg)
418 irg_loopinfo_state state;
420 assert(get_irg_phase_state(irg) != phase_building);
421 assert(get_irg_pinned(irg) != op_pin_state_floats &&
422 "Reassociation needs pinned graph to work properly");
424 /* reassociation needs constant folding */
425 if (!get_opt_reassociation() || !get_opt_constant_folding())
428 /* we use dominance to detect dead blocks */
432 * Calculate loop info, so we can identify loop-invariant
433 * code and treat it like a constant.
434 * We only need control flow loops here but can handle generic
435 * INTRA info as well.
437 state = get_irg_loopinfo_state(irg);
/* rebuild CF backedges unless valid INTRA loop info is already there */
438 if ((state & loopinfo_inter) ||
439 (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
440 construct_cf_backedges(irg);
444 /* now we have collected enough information, optimize */
445 irg_walk_graph(irg, NULL, do_reassociation, &env);
447 /* Handle graph state */
449 set_irg_outs_inconsistent(irg);
450 set_irg_loopinfo_inconsistent(irg);
452 } /* optimize_reassociation */
454 /* Sets the default reassociation operation for an ir_op_ops. */
/* @param code the opcode to install a reassociate callback for
 * @param ops the ops structure to fill in; returned for chaining
 * NOTE(review): the switch body listing the CASE(...) opcodes
 * (original lines 458-471) is missing from this truncated view. */
455 ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
457 #define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
472 } /* firm_set_default_reassoc */
474 /* initialize the reassociation by adding operations to some opcodes */
/* Registers this module's debug channel; the per-opcode callbacks are
 * installed via firm_set_default_reassoc() above. */
475 void firm_init_reassociation(void)
477 FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
478 } /* firm_init_reassociation */