2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
* This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
* WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief conv node optimisation
23 * @author Matthias Braun, Christoph Mallon
26 * Try to minimize the number of conv nodes by changing modes of operations.
27 * The typical example is the following structure:
32 * Add Is gets transformed to |
36 * TODO: * try to optimize cmp modes
37 * * decide when it is useful to move the convs through phis
43 #include "iroptimize.h"
51 #include "iredges_t.h"
55 DEBUG_ONLY(static firm_dbg_module_t *dbg);
57 static INLINE int imin(int a, int b) { return a < b ? a : b; }
/**
 * Returns non-zero for node opcodes whose result mode this pass may change,
 * decided by a whitelist switch on the opcode.
 * NOTE(review): listing is truncated — the switch cases and function braces
 * are not visible here.
 */
int is_optimizable_node(const ir_node *node)
	switch (get_irn_opcode(node)) {
/**
 * Convert the tarval of the Const node @p cnst to @p dest_mode.
 * Yields tarval_bad when the tarval module cannot do the conversion
 * (callers treat that as "a real Conv node is needed").
 * NOTE(review): listing is truncated — the function braces are missing.
 */
static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
	return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
/**
 * Returns non-zero iff converting from @p src_mode to @p dest_mode narrows
 * an integer value: both modes are integer modes and the destination has
 * strictly fewer bits.
 * NOTE(review): listing is truncated — the enclosing return/braces are missing.
 */
int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
	mode_is_int(src_mode) &&
	mode_is_int(dest_mode) &&
	get_mode_size_bits(dest_mode) < get_mode_size_bits(src_mode);
/**
 * Estimate the cost, in extra Conv nodes, of converting the value computed
 * by @p node to @p dest_mode by pushing the conversion into its operands.
 * A result <= 0 means the transformation does not add Conv nodes.
 * NOTE(review): listing is truncated — several lines (returns, braces,
 * declarations) are missing from this view.
 */
int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
	ir_mode *mode = get_irn_mode(node);
	/* already in the target mode: nothing to convert */
	if (mode == dest_mode)
	if (is_Const(node)) {
		/* TODO tarval module is incomplete and can't convert floats to ints */
		/* cost 1 if the constant cannot be folded into dest_mode, else free */
		return conv_const_tv(node, dest_mode) == tarval_bad ? 1 : 0;
	/* more than one user: a conversion here cannot simply be absorbed */
	if (get_irn_n_edges(node) > 1) {
		DB((dbg, LEVEL_3, "multi outs at %+F\n", node));
	/* a Conv that narrows towards dest_mode can be merged away: saves one */
	if (is_Conv(node) && is_downconv(get_irn_mode(node), dest_mode)) {
		return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
	/* Take the minimum of the conversion costs for Phi predecessors as only one
	 * branch is actually executed at a time */
	size_t arity = get_Phi_n_preds(node);
	costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
	for (i = 1; i < arity; ++i) {
		ir_node *pred = get_Phi_pred(node, i);
		int c = get_conv_costs(pred, dest_mode);
		if (c < costs) costs = c;
	/* mode of this node cannot be changed: must keep a Conv */
	if (!is_optimizable_node(node)) {
	// The shift count does not participate in the conv optimisation
	arity = is_Shl(node) ? 1 : get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);
		/* each operand contributes at most 1 (a single new Conv) */
		costs += imin(get_conv_costs(pred, dest_mode), 1);
/**
 * Create a Conv node converting @p node to @p dest_mode, placed in the
 * same block as @p node.
 * NOTE(review): listing is truncated — the return statement and closing
 * brace are not visible here.
 */
static ir_node *place_conv(ir_node *node, ir_mode *dest_mode)
	ir_node *block = get_nodes_block(node);
	ir_node *conv = new_r_Conv(current_ir_graph, block, node, dest_mode);
/**
 * Transform @p node (and, recursively, profitable operands) so that its
 * value is produced directly in @p dest_mode, inserting Conv nodes only
 * where get_conv_costs() deemed them unavoidable.  Mirrors the case
 * structure of get_conv_costs().
 * NOTE(review): listing is truncated — several lines (returns, braces,
 * declarations, the Phi case) are missing from this view.
 */
ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
	/* already in the target mode: nothing to do */
	if (get_irn_mode(node) == dest_mode)
	if (is_Const(node)) {
		/* TODO tarval module is incomplete and can't convert floats to ints */
		tarval *tv = conv_const_tv(node, dest_mode);
		if (tv == tarval_bad) {
			/* constant not foldable: fall back to an explicit Conv */
			return place_conv(node, dest_mode);
		return new_Const(dest_mode, tv);
	/* multiple users: keep the node and convert its result */
	if (get_irn_n_edges(node) > 1) {
		return place_conv(node, dest_mode);
	/* a narrowing Conv towards dest_mode can be skipped entirely */
	if (is_Conv(node) && is_downconv(get_irn_mode(node), dest_mode)) {
		return conv_transform(get_Conv_op(node), dest_mode);
	/* mode of this node cannot be changed: convert its result */
	if (!is_optimizable_node(node)) {
		return place_conv(node, dest_mode);
	// The shift count does not participate in the conv optimisation
	arity = is_Shl(node) ? 1 : get_irn_arity(node);
	for (i = 0; i < arity; i++) {
		ir_node *pred = get_irn_n(node, i);
		ir_node *transformed;
		if (get_conv_costs(pred, dest_mode) > 0) {
			/* converting the operand tree would cost more: one Conv here */
			transformed = place_conv(pred, dest_mode);
			transformed = conv_transform(pred, dest_mode);
		set_irn_n(node, i, transformed);
	/* finally retag the node itself with the destination mode */
	set_irn_mode(node, dest_mode);
/* TODO, backends (at least ia32) can't handle it at the moment,
   and it's probably not more efficient on most archs */
/**
 * Look at a Cmp node's operands for conversion-optimisation opportunities.
 * NOTE(review): listing is truncated — the body after the declarations is
 * not visible here; per the TODO above this appears to be disabled/limited.
 */
void try_optimize_cmp(ir_node *node)
	ir_node *left = get_Cmp_left(node);
	ir_node *right = get_Cmp_right(node);
	ir_node *conv = NULL;
/**
 * Graph-walker callback: for each Conv node, compute whether pushing the
 * conversion into the operand tree saves Conv nodes; if so, transform the
 * operands and exchange the Conv with the transformed value.
 * NOTE(review): listing is truncated — declarations, early-out checks and
 * several braces are missing from this view.
 */
void conv_opt_walker(ir_node *node, void *data)
	ir_node *transformed;
	try_optimize_cmp(node);
	pred = get_Conv_op(node);
	mode = get_irn_mode(node);
	pred_mode = get_irn_mode(pred);
	/* only profitable through Phis or when the Conv narrows the value */
	if (!is_Phi(pred) && !is_downconv(pred_mode, mode))
	/* - 1 for the initial conv */
	costs = get_conv_costs(pred, mode) - 1;
	DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
	/* positive cost: transformation would add Conv nodes, skip */
	if (costs > 0) return;
	transformed = conv_transform(pred, mode);
	if (node != transformed) {
		exchange(node, transformed);
/**
 * Entry point: run the Conv-minimisation optimisation on the whole graph
 * @p irg, walking all nodes with conv_opt_walker() and running local
 * optimisations afterwards.
 * NOTE(review): listing is truncated — the fixpoint/loop structure and
 * variable declarations (invalidate, changed) are not visible here.
 */
void conv_opt(ir_graph *irg)
	FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
	DB((dbg, LEVEL_1, "===> Performing conversion optimization on %+F\n", irg));
	irg_walk_graph(irg, NULL, conv_opt_walker, NULL);
	local_optimize_graph(irg);
	invalidate |= changed;
	/* node modes changed: edge/out information must be recomputed */
	set_irg_outs_inconsistent(irg);