3 * File name: ir/opt/reassoc.c
4 * Purpose: Reassociation
8 * Copyright: (c) 1998-2004 Universität Karlsruhe
9 * Licence: This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.
16 # include "irnode_t.h"
17 # include "irgraph_t.h"
18 # include "irmode_t.h"
20 # include "ircons_t.h"
23 # include "iropt_dbg.h"
24 # include "irflag_t.h"
26 # include "reassoc_t.h"
27 # include "firmstat.h"
/* Environment handed to the irg walker; the pass iterates until no
 * walk reports further changes (fixpoint). */
29 typedef struct _walker_t {
30 int changes; /* set, if a reassociation takes place */
/* Constant classification of a node. The values are chosen as bit
 * patterns (1 and 4) so that sums/xors of classes stay unambiguous
 * in the switch/bit tests below. */
34 NO_CONSTANT = 0, /**< node is not constant */
35 REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
36 CONST_EXPR = 4 /**< node is a constant expression in the current context,
37 use 4 here to simplify implementation of get_comm_Binop_ops() */
41 * returns whether a node is constant, ie is a constant or
/**
 * Classify a node for reassociation.
 *
 * @param n  the node to classify
 * @return NO_CONSTANT, REAL_CONSTANT (a foldable Const) or CONST_EXPR
 *         (constant in the current context, e.g. a SymConst address).
 */
44 static const_class_t get_const_class(ir_node *n)
46 ir_op *op = get_irn_op(n);
/* a SymConst is constant in the current context, but not a foldable Const */
50 if (op == op_SymConst)
57 * returns the operands of a commutative bin-op, if one operand is
58 * a constant in the current context, it is returned as the second one.
59 *
60 * Beware: Real constants must be returned with higher priority than
61 * constant expression, because they might be folded.
/**
 * @param binop  a commutative binary node
 * @param a      out: the non-constant (or lower-priority) operand
 * @param c      out: the constant operand, REAL_CONSTANT preferred
 */
63 static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
65 ir_node *op_a = get_binop_left(binop);
66 ir_node *op_b = get_binop_right(binop);
67 int class_a = get_const_class(op_a);
68 int class_b = get_const_class(op_b);
70 assert(is_op_commutative(get_irn_op(binop)));
/* class_a + 2*class_b uniquely encodes the pair of classes because the
 * class values are 0, 1 and 4 (hence the choice of 4 for CONST_EXPR). */
72 switch (class_a + 2*class_b) {
/* left operand is the (real) constant: swap so it becomes *c */
73 case REAL_CONSTANT + 2*NO_CONSTANT:
74 case REAL_CONSTANT + 2*REAL_CONSTANT:
75 case REAL_CONSTANT + 2*CONST_EXPR:
76 case CONST_EXPR + 2*NO_CONSTANT:
88 * reassociate a Sub: x - c = (-c) + x
/**
 * Rewrites a Sub with a constant right operand into an Add, so the
 * constant moves into a commutative position where later rules can
 * combine it. Returns non-zero iff the node was changed.
 */
90 static int reassoc_Sub(ir_node *n)
92 ir_node *right = get_Sub_right(n);
94 /* FIXME: Do not apply this rule for unsigned Sub's because our code
95 * generation is currently buggy :-)
/* bail out early for unsigned modes, see FIXME above */
97 if (! mode_is_signed(get_irn_mode(n)))
101 * convert x - c => (-c) + x
103 * As there is NO real Minus in Firm it makes no sense to do this
104 * for non-real constants yet.
106 if (get_const_class(right) == REAL_CONSTANT) {
107 ir_node *left = get_Sub_left(n);
108 ir_node *block = get_nodes_block(n);
109 ir_mode *mode = get_irn_mode(n);
110 dbg_info *dbg = get_irn_dbg_info(n);
/* the transformation depends on whether the left operand is constant too */
113 switch (get_const_class(left)) {
/* both operands constant: plain local optimization suffices */
115 irn = optimize_in_place(n);
124 /* already constant, nothing to do */
/* build -c as (0 - c), since Firm has no real Minus node here */
128 c = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
129 irn = new_rd_Sub(dbg, current_ir_graph, block, c, right, mode);
/* and the final Add: left + (-c) */
131 irn = new_rd_Add(dbg, current_ir_graph, block, left, irn, get_irn_mode(n));
134 printf("Applied: %s - %s => %s + (-%s)\n",
135 get_irn_opname(get_Sub_left(n)), get_irn_opname(c),
136 get_irn_opname(get_Sub_left(n)), get_irn_opname(c) );
145 /** Retrieve a mode form the operands. We need this, because
146 * Add and Sub are allowed to operate on (P, Is)
/**
 * @param op1, op2  the two operands of a binop
 * @return the mode the result should carry; a reference mode on either
 *         side wins over an integer mode (pointer arithmetic case).
 */
148 static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
152 m1 = get_irn_mode(op1);
153 if (mode_is_reference(m1))
156 m2 = get_irn_mode(op2);
157 if (mode_is_reference(m2))
166 * reassociate a commutative Binop
167 *
168 * BEWARE: this rule leads to a potential loop, if
169 * two operands are constant expressions and the third is a
170 * constant, so avoid this situation.
172 static int reassoc_commutative(ir_node *n)
174 ir_op *op = get_irn_op(n);
175 ir_node *block = get_nodes_block(n);
/* split n into a (possibly) non-constant part t1 and constant part c1 */
178 get_comm_Binop_ops(n, &t1, &c1);
/* only interesting if the non-constant operand is the same kind of binop */
180 if (get_irn_op(t1) == op) {
182 const_class_t c_c1, c_c2, c_t2;
184 get_comm_Binop_ops(t1, &t2, &c2);
186 /* do not optimize Bad nodes, will fail later */
190 c_c1 = get_const_class(c1);
191 c_c2 = get_const_class(c2);
192 c_t2 = get_const_class(t2);
/* bitwise '&' on 0/1 comparison results is intentional (branch-free);
 * the xor/and test below detects the cycle-producing class combinations */
194 if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
195 ((((c_c1 ^ c_c2 ^ c_t2) & CONST_EXPR) == 0) || ((c_c1 & c_c2 & c_t2) == CONST_EXPR)) ) {
196 /* all three are constant and either all are constant expressions or two of them are:
197 * then, applying this rule would lead into a cycle
198 *
199 * Note that if t2 is a constant so is c2, so we save one test.
204 if ((c_c1 != NO_CONSTANT) & (c_c2 != NO_CONSTANT)) {
205 /* handles rules R7, R8, R9, R10:
206 * convert c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x
208 ir_node *irn, *in[2];
209 ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
211 /* It might happen, that c1 and c2 have different modes, for instance Is and Iu.
/* unify the modes with a Conv before combining the constants */
214 if (mode_c1 != mode_c2) {
215 if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
216 /* get the bigger one */
217 if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
218 c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
219 else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
220 c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
222 /* Try to cast the real const */
223 if (c_c1 == REAL_CONSTANT)
224 c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
226 c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
/* build the inner (c1 .OP. c2) node and let the optimizer fold it */
234 mode = get_mode_from_ops(in[0], in[1]);
235 in[0] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
/* build the outer node (folded constant) .OP. x */
238 mode = get_mode_from_ops(in[0], in[1]);
239 irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
242 printf("Applied: %s .%s. (%s .%s. %s) => (%s .%s. %s) .%s. %s\n",
243 get_irn_opname(c1), get_irn_opname(n), get_irn_opname(c2), get_irn_opname(n), get_irn_opname(t2),
244 get_irn_opname(c1), get_irn_opname(n), get_irn_opname(c2), get_irn_opname(n), get_irn_opname(t2));
246 /*
247 * in some rare cases it can really happen that we get the same node back.
248 * This might happen in dead loops, where the Phi nodes are already gone away.
/* All four operations are commutative, so they share one handler. */
260 #define reassoc_Add reassoc_commutative
261 #define reassoc_And reassoc_commutative
262 #define reassoc_Or reassoc_commutative
263 #define reassoc_Eor reassoc_commutative
266 * reassociate using distributive law for Mul and Add/Sub
/**
 * Applies (t1 +/- t2) * c => (t1 * c) +/- (t2 * c) after first trying
 * the generic commutative rule. Returns non-zero iff a change was made.
 */
268 static int reassoc_Mul(ir_node *n)
270 ir_node *add_sub, *c;
/* the plain commutative rule has priority; stop if it fired */
273 if (reassoc_commutative(n))
276 get_comm_Binop_ops(n, &add_sub, &c);
277 op = get_irn_op(add_sub);
279 /* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
280 if (op == op_Add || op == op_Sub) {
281 ir_mode *mode = get_irn_mode(n);
282 ir_node *irn, *block, *t1, *t2, *in[2];
284 block = get_nodes_block(n);
285 t1 = get_binop_left(add_sub);
286 t2 = get_binop_right(add_sub);
/* distribute the multiplication over both addends */
288 in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
289 in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
/* rebuild the Add/Sub on top of the two products */
291 mode = get_mode_from_ops(in[0], in[1]);
292 irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
295 printf("Applied: (%s .%s. %s) %s %s => (%s %s %s) .%s. (%s %s %s)\n",
296 get_irn_opname(t1), get_op_name(op), get_irn_opname(t2), get_irn_opname(n), get_irn_opname(c),
297 get_irn_opname(t1), get_irn_opname(n), get_irn_opname(c),
298 get_op_name(op),
299 get_irn_opname(t2), get_irn_opname(n), get_irn_opname(c));
309 * The walker for the reassociation
/**
 * Walker callback: repeatedly applies the opcode's reassociate handler
 * to @p n until it reports no change, accumulating into the walker_t env.
 */
311 static void do_reassociation(ir_node *n, void *env)
313 walker_t *wenv = env;
318 /* reassociation must run until fixpoint */
/* re-read op/mode each round: the handler may have replaced the node */
320 ir_op *op = get_irn_op(n);
321 ir_mode *mode = get_irn_mode(n);
325 /* reassociation works only for integer or reference modes */
326 if (op->reassociate && (mode_is_int(mode) || mode_is_reference(mode))) {
327 res = op->reassociate(n);
331 /* we need a skip here, or we will see an Id in the next iteration */
341 * do the reassociation
/**
 * Public entry point: run the reassociation optimization on @p irg.
 * Requires the graph to be fully constructed and constant folding enabled.
 */
343 void optimize_reassociation(ir_graph *irg)
347 assert(get_irg_phase_state(irg) != phase_building);
349 /* reassociation needs constant folding */
350 if (!get_opt_reassociation() || !get_opt_constant_folding())
/* first walk collects information (see env setup above) */
355 irg_walk_graph(irg, NULL, do_reassociation, &env);
357 /* now we have collected enough information, optimize */
358 irg_walk_graph(irg, NULL, do_reassociation, &env);
360 /* Handle graph state */
/* out edges may be stale after rewriting nodes */
362 if (get_irg_outs_state(current_ir_graph) == outs_consistent)
363 set_irg_outs_inconsistent(current_ir_graph);
367 /* initialize the reassociation by adding operations to some opcodes */
368 void firm_init_reassociation(void)
/* helper: wire opcode op_<a> to its handler reassoc_<a> */
370 #define INIT(a) op_##a->reassociate = reassoc_##a;