2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief conv node optimisation
23 * @author Matthias Braun, Christoph Mallon
26 * Try to minimize the number of conv nodes by changing modes of operations.
27 * The typical example is the following structure:
32 * Add Is gets transformed to |
36 * TODO: * try to optimize cmp modes
37 * * decide when it is useful to move the convs through phis
41 #include "iroptimize.h"
49 #include "iredges_t.h"
53 DEBUG_ONLY(static firm_dbg_module_t *dbg);
/** Returns the smaller of the two given ints. */
static inline int imin(int a, int b) { return a < b ? a : b; }
/**
 * Returns non-zero if @p node's operation is one whose operand/result modes
 * this pass may change (dispatches on the opcode).
 * NOTE(review): the switch cases and closing braces are not visible in this
 * excerpt of the file — confirm the accepted opcode set against the full
 * source before editing.
 */
58 int is_optimizable_node(const ir_node *node)
60 switch (get_irn_opcode(node)) {
/**
 * Converts the tarval of the Const node @p cnst to @p dest_mode.
 * Callers (get_conv_costs, conv_transform) compare the result against
 * tarval_bad to detect an impossible conversion.
 */
77 static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
79 return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
/**
 * Returns non-zero iff converting from @p src_mode to @p dest_mode is a
 * "downconv": both are integer modes and the destination is not wider than
 * the source (such a conversion only discards high bits).
 * NOTE(review): the function braces and the `return` keyword are not visible
 * in this excerpt — the three condition lines below appear to form the
 * returned expression.
 */
83 int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
86 mode_is_int(src_mode) &&
87 mode_is_int(dest_mode) &&
88 get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
/**
 * Estimates the cost, in additional Conv nodes, of making @p node produce
 * its value directly in @p dest_mode.  A result <= 0 means the transform
 * pays off (see conv_opt_walker).  Recurses through operands of optimizable
 * integer operations.
 * NOTE(review): several lines of this function (braces, local declarations,
 * some early `return` values and the final return) are not visible in this
 * excerpt — do not assume the elided return values.
 */
92 int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
94 ir_mode *mode = get_irn_mode(node);
/* Already in the target mode: nothing to do. */
99 if (mode == dest_mode)
/* A Const is free if its tarval can be converted at compile time. */
102 if (is_Const(node)) {
103 /* TODO tarval module is incomplete and can't convert floats to ints */
104 return conv_const_tv(node, dest_mode) == tarval_bad ? 1 : 0;
/* A Conv whose operand is already in dest_mode can simply be bypassed. */
108 is_downconv(mode, dest_mode) &&
109 get_irn_mode(get_Conv_op(node)) == dest_mode) {
/* Multiple users: converting the producer would affect other consumers,
 * so stop recursing here. */
113 if (get_irn_n_edges(node) > 1) {
114 DB((dbg, LEVEL_3, "multi outs at %+F\n", node));
/* A downconv Conv can be folded into the conversion of its operand;
 * the -1 credits the Conv node that disappears. */
118 if (is_Conv(node) && is_downconv(mode, dest_mode)) {
119 return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
123 /* Take the minimum of the conversion costs for Phi predecessors as only one
124 * branch is actually executed at a time */
127 size_t arity = get_Phi_n_preds(node);
130 costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
131 for (i = 1; i < arity; ++i) {
132 ir_node *pred = get_Phi_pred(node, i);
133 int c = get_conv_costs(pred, dest_mode);
134 if (c < costs) costs = c;
/* Non-integer or non-optimizable operation: cannot push the Conv through. */
141 if (!mode_is_int(mode) || !is_optimizable_node(node)) {
146 // The shift count does not participate in the conv optimisation
147 arity = is_Shl(node) ? 1 : get_irn_arity(node);
/* Sum operand costs, capping each at 1 (one Conv per operand at worst). */
148 for (i = 0; i < arity; ++i) {
149 ir_node *pred = get_irn_n(node, i);
150 costs += imin(get_conv_costs(pred, dest_mode), 1);
/**
 * Creates a new Conv node converting @p node to @p dest_mode, placed in
 * @p node's block, and (presumably — the elided tail of this function)
 * returns it.
 */
156 static ir_node *place_conv(ir_node *node, ir_mode *dest_mode)
158 ir_node *block = get_nodes_block(node);
159 ir_node *conv = new_r_Conv(current_ir_graph, block, node, dest_mode);
/**
 * Rewrites @p node (recursively through its operands) so that it computes
 * its value in @p dest_mode, inserting Conv nodes only where
 * get_conv_costs judged it cheaper not to recurse.  Mirrors the case
 * analysis of get_conv_costs.
 * NOTE(review): braces, local declarations (mode/arity/i) and some early
 * returns are not visible in this excerpt.
 */
164 ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
166 ir_mode *mode = get_irn_mode(node);
/* Already in the target mode. */
170 if (mode == dest_mode)
/* Constants are converted at compile time when the tarval allows it. */
173 if (is_Const(node)) {
174 /* TODO tarval module is incomplete and can't convert floats to ints */
175 tarval *tv = conv_const_tv(node, dest_mode);
176 if (tv == tarval_bad) {
177 return place_conv(node, dest_mode);
179 return new_Const(dest_mode, tv);
/* Conv whose operand is already in dest_mode: just use the operand. */
184 is_downconv(mode, dest_mode) &&
185 get_irn_mode(get_Conv_op(node)) == dest_mode) {
186 return get_Conv_op(node);
/* Multiple users: keep the producer intact, add an explicit Conv. */
189 if (get_irn_n_edges(node) > 1) {
190 return place_conv(node, dest_mode);
/* Fold a downconv into the (recursive) conversion of its operand. */
193 if (is_Conv(node) && is_downconv(mode, dest_mode)) {
194 return conv_transform(get_Conv_op(node), dest_mode);
/* Cannot push through this operation: place an explicit Conv. */
197 if (!mode_is_int(mode) || !is_optimizable_node(node)) {
198 return place_conv(node, dest_mode);
201 // The shift count does not participate in the conv optimisation
202 arity = is_Shl(node) ? 1 : get_irn_arity(node);
/* Per operand: recurse when profitable (cost <= 0), otherwise insert a
 * Conv — the same threshold get_conv_costs used when summing. */
203 for (i = 0; i < arity; i++) {
204 ir_node *pred = get_irn_n(node, i);
205 ir_node *transformed;
206 if (get_conv_costs(pred, dest_mode) > 0) {
207 transformed = place_conv(pred, dest_mode);
209 transformed = conv_transform(pred, dest_mode);
211 set_irn_n(node, i, transformed);
/* Finally switch the operation itself to the destination mode. */
213 set_irn_mode(node, dest_mode);
217 /* TODO, backends (at least ia32) can't handle it at the moment,
218 and it's probably not more efficient on most archs */
/**
 * Attempts the Cmp-mode optimisation mentioned in the file-head TODO.
 * NOTE(review): all of this function past the local declarations is missing
 * from this excerpt — see the full source before changing it.  Per the
 * comment above, backends (at least ia32) can't handle the result yet.
 */
221 void try_optimize_cmp(ir_node *node)
223 ir_node *left = get_Cmp_left(node);
224 ir_node *right = get_Cmp_right(node);
225 ir_node *conv = NULL;
/**
 * Graph-walker callback: for each Conv node, decides via get_conv_costs
 * whether pushing the conversion into the producer pays off, and if so
 * replaces the Conv with the transformed producer.
 * NOTE(review): the guards that restrict this to Conv nodes (and the local
 * declarations of pred/mode/costs) are elided from this excerpt.
 */
234 void conv_opt_walker(ir_node *node, void *data)
236 ir_node *transformed;
245 try_optimize_cmp(node);
253 pred = get_Conv_op(node);
254 mode = get_irn_mode(node);
255 pred_mode = get_irn_mode(pred);
/* Reference (pointer) modes are never touched by this optimisation. */
257 if (mode_is_reference(mode) || mode_is_reference(pred_mode))
/* Only Phis and downconvs are worth analysing. */
260 if (!is_Phi(pred) && !is_downconv(pred_mode, mode))
263 /* - 1 for the initial conv */
264 costs = get_conv_costs(pred, mode) - 1;
265 DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
/* Positive net cost: the transform would add nodes, so skip it. */
266 if (costs > 0) return;
268 transformed = conv_transform(pred, mode);
269 if (node != transformed) {
270 exchange(node, transformed);
/**
 * Entry point of the conversion optimisation: repeatedly walks @p irg with
 * conv_opt_walker and runs local optimisations, then marks the out edges
 * inconsistent when anything changed.
 * NOTE(review): this function continues past the end of the visible excerpt
 * (loop structure, `changed` bookkeeping and the return value are elided).
 */
275 int conv_opt(ir_graph *irg)
278 FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
280 DB((dbg, LEVEL_1, "===> Performing conversion optimization on %+F\n", irg));
285 irg_walk_graph(irg, NULL, conv_opt_walker, NULL);
286 local_optimize_graph(irg);
287 invalidate |= changed;
291 set_irg_outs_inconsistent(irg);