2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief conv node optimisation
23 * @author Matthias Braun, Christoph Mallon
26 * Try to minimize the number of conv nodes by changing modes of operations.
27 * The typical example is the following structure:
32 * Add Is gets transformed to |
36 * TODO: * try to optimize cmp modes
37 * * decide when it is useful to move the convs through phis
41 #include "iroptimize.h"
49 #include "iredges_t.h"
54 DEBUG_ONLY(static firm_dbg_module_t *dbg);
/* Return the smaller of the two ints @p a and @p b. */
static inline int imin(int a, int b) { return a < b ? a : b; }
/* Predicate: may the conv optimisation change the mode of @p node?
 * Dispatches on the node's opcode.
 * NOTE(review): this chunk is garbled — the case labels listing the
 * optimizable opcodes (and the returns) fall on lines missing from
 * this view; code lines below are kept verbatim. */
59 int is_optimizable_node(const ir_node *node)
61 switch (get_irn_opcode(node)) {
78 static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
80 return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
84 int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
87 mode_is_int(src_mode) &&
88 mode_is_int(dest_mode) &&
89 get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
/**
 * Estimate the cost, in extra Conv nodes, of converting the value
 * computed by @p node into @p dest_mode by pushing the conversion into
 * its operands.  A non-positive result means the transformation does
 * not increase the number of Convs (see conv_opt_walker).
 * NOTE(review): garbled chunk — braces, returns and the declarations of
 * i/costs are on missing lines; code lines below are kept verbatim.
 */
93 int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
95 ir_mode *mode = get_irn_mode(node);
/* Already in the target mode: nothing to convert. */
100 if (mode == dest_mode)
/* A Const is free iff the tarval module can fold the conversion. */
103 if (is_Const(node)) {
104 /* TODO tarval module is incomplete and can't convert floats to ints */
105 return conv_const_tv(node, dest_mode) == tarval_bad ? 1 : 0;
/* A Conv whose operand already has dest_mode can simply be dropped. */
109 is_downconv(mode, dest_mode) &&
110 get_irn_mode(get_Conv_op(node)) == dest_mode) {
/* More than one user: rewriting this node would affect the others too. */
114 if (get_irn_n_edges(node) > 1) {
115 DB((dbg, LEVEL_3, "multi outs at %+F\n", node));
120 /* Take the minimum of the conversion costs for Phi predecessors as only one
121 * branch is actually executed at a time */
124 size_t arity = get_Phi_n_preds(node);
127 costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
128 for (i = 1; i < arity; ++i) {
129 ir_node *pred = get_Phi_pred(node, i);
130 int c = get_conv_costs(pred, dest_mode);
131 if (c < costs) costs = c;
/* Only downconvs may be pushed through ordinary operations. */
138 if (!is_downconv(mode, dest_mode)) {
/* Conv in the way: cost of its operand, minus the Conv we save. */
143 return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
146 if (!is_optimizable_node(node)) {
151 // The shift count does not participate in the conv optimisation
152 arity = is_Shl(node) ? 1 : get_irn_arity(node);
153 for (i = 0; i < arity; ++i) {
154 ir_node *pred = get_irn_n(node, i);
/* Each operand contributes at most 1 (one new Conv in front of it). */
155 costs += imin(get_conv_costs(pred, dest_mode), 1);
161 static ir_node *place_conv(ir_node *node, ir_mode *dest_mode)
163 ir_node *block = get_nodes_block(node);
164 ir_node *conv = new_r_Conv(block, node, dest_mode);
/**
 * Perform the conversion of @p node (and transitively its operands) to
 * @p dest_mode; mirrors the case analysis of get_conv_costs().
 *
 * @return the node producing the value in dest_mode.
 * NOTE(review): garbled chunk — braces, returns and the declarations of
 * i/arity are on missing lines; code lines below are kept verbatim.
 */
169 ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
171 ir_mode *mode = get_irn_mode(node);
/* Already the requested mode: use the node unchanged. */
175 if (mode == dest_mode)
178 if (is_Const(node)) {
179 /* TODO tarval module is incomplete and can't convert floats to ints */
180 tarval *tv = conv_const_tv(node, dest_mode);
181 if (tv == tarval_bad) {
182 return place_conv(node, dest_mode);
/* Folded successfully: materialise a fresh Const in dest_mode. */
184 return new_Const(tv);
/* Conv whose operand already has dest_mode: strip it. */
189 is_downconv(mode, dest_mode) &&
190 get_irn_mode(get_Conv_op(node)) == dest_mode) {
191 return get_Conv_op(node);
/* Multiple users: leave the node alone and add a Conv in front. */
194 if (get_irn_n_edges(node) > 1) {
195 return place_conv(node, dest_mode);
198 if (!is_downconv(mode, dest_mode)) {
199 return place_conv(node, dest_mode);
/* Nested Conv: transform its operand directly. */
203 return conv_transform(get_Conv_op(node), dest_mode);
206 if (!is_optimizable_node(node)) {
207 return place_conv(node, dest_mode);
/* Optimizable operation: convert each data operand, then retag node. */
210 // The shift count does not participate in the conv optimisation
211 arity = is_Shl(node) ? 1 : get_irn_arity(node);
212 for (i = 0; i < arity; i++) {
213 ir_node *pred = get_irn_n(node, i);
214 ir_node *transformed;
/* If converting the operand would cost, keep an explicit Conv;
 * otherwise recurse and transform the operand in place. */
215 if (get_conv_costs(pred, dest_mode) > 0) {
216 transformed = place_conv(pred, dest_mode);
218 transformed = conv_transform(pred, dest_mode);
220 set_irn_n(node, i, transformed);
222 set_irn_mode(node, dest_mode);
226 /* TODO, backends (at least ia32) can't handle it at the moment,
227 and it's probably not more efficient on most archs */
/* Attempt the conv optimisation on a Cmp's operands (see the TODO
 * above — currently limited by backend support).
 * NOTE(review): the remainder of the body is on lines missing from
 * this garbled chunk; code lines below are kept verbatim. */
230 void try_optimize_cmp(ir_node *node)
232 ir_node *left = get_Cmp_left(node);
233 ir_node *right = get_Cmp_right(node);
234 ir_node *conv = NULL;
/* Walker callback: for a Conv node, compute the cost of pushing the
 * conversion into its operand and perform the transformation when it
 * does not increase the number of Convs.
 * NOTE(review): garbled chunk — the guards restricting this to Conv
 * nodes and the declarations of pred/mode/pred_mode/costs are on
 * missing lines; code lines below are kept verbatim. */
243 void conv_opt_walker(ir_node *node, void *data)
245 ir_node *transformed;
254 try_optimize_cmp(node);
262 pred = get_Conv_op(node);
263 mode = get_irn_mode(node);
264 pred_mode = get_irn_mode(pred);
/* Reference (pointer) modes are left untouched. */
266 if (mode_is_reference(mode) || mode_is_reference(pred_mode))
/* Only worthwhile for downconvs; Phis are handled via the cost minimum. */
269 if (!is_Phi(pred) && !is_downconv(pred_mode, mode))
272 /* - 1 for the initial conv */
273 costs = get_conv_costs(pred, mode) - 1;
274 DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
/* Positive cost: the rewrite would add Convs — bail out. */
275 if (costs > 0) return;
277 transformed = conv_transform(pred, mode);
278 if (node != transformed) {
279 exchange(node, transformed);
/* Run the conv optimisation on @p irg: walk the graph pushing Convs
 * into operations, then clean up with local optimisations.
 * @return presumably non-zero iff the graph changed — TODO confirm;
 * the lines computing changed/invalidate and the final return are
 * missing from this garbled chunk. Code lines below kept verbatim. */
284 int conv_opt(ir_graph *irg)
287 FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
289 DB((dbg, LEVEL_1, "===> Performing conversion optimization on %+F\n", irg));
294 irg_walk_graph(irg, NULL, conv_opt_walker, NULL);
295 local_optimize_graph(irg);
296 invalidate |= changed;
/* Out edges are stale after nodes were exchanged during the walk. */
300 set_irg_outs_inconsistent(irg);
305 /* Creates an ir_graph pass for conv_opt. */
306 ir_graph_pass_t *conv_opt_pass(const char *name, int verify, int dump)
308 return def_graph_pass_ret(name ? name : "conv_opt", verify, dump, conv_opt);
309 } /* conv_opt_pass */