2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief conv node optimisation
23 * @author Matthias Braun, Christoph Mallon
26 * Try to minimize the number of conv nodes by changing modes of operations.
27 * The typical example is the following structure:
32 * Add Is gets transformed to |
36 * TODO: * try to optimize cmp modes
37 * * decide when it is useful to move the convs through phis
43 #include "iroptimize.h"
51 #include "iredges_t.h"
55 DEBUG_ONLY(static firm_dbg_module_t *dbg);
57 static INLINE int imin(int a, int b) { return a < b ? a : b; }
/**
 * Checks whether @p node's operation is one whose result mode the conv
 * optimisation is allowed to change (dispatches on the opcode; the case
 * list of optimizable opcodes is not visible in this view — TODO confirm
 * against the full file).
 */
int is_optimizable_node(const ir_node *node)
	switch (get_irn_opcode(node)) {
/**
 * Converts the tarval of the Const node @p cnst to @p dest_mode.
 * Callers compare the result against tarval_bad to detect conversions the
 * tarval module cannot perform (see the cost/transform functions below).
 */
static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
	return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
/**
 * Returns non-zero if a Conv from @p src_mode to @p dest_mode is a
 * "downconv": both modes are integer modes and the destination mode is not
 * wider than the source mode.
 */
int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
	mode_is_int(src_mode) &&
	mode_is_int(dest_mode) &&
	get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
/**
 * Estimates the cost (in Conv nodes) of computing @p node's value directly
 * in @p dest_mode. Lower is better; the caller (conv_opt_walker) subtracts
 * 1 for the Conv that would be removed and only transforms when the total
 * is <= 0, so a negative/zero result signals a profitable rewrite.
 */
int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
	ir_mode *mode = get_irn_mode(node);
	/* already in the target mode: nothing to do */
	if (mode == dest_mode)
	if (is_Const(node)) {
		/* TODO tarval module is incomplete and can't convert floats to ints */
		/* cost 1 if the constant cannot be folded into dest_mode, else free */
		return conv_const_tv(node, dest_mode) == tarval_bad ? 1 : 0;
	    is_downconv(mode, dest_mode) &&
	    get_irn_mode(get_Conv_op(node)) == dest_mode) {
	/* a node with several users cannot simply change its mode: the other
	 * users still need the old mode */
	if (get_irn_n_edges(node) > 1) {
		DB((dbg, LEVEL_3, "multi outs at %+F\n", node));
	if (is_Conv(node) && is_downconv(mode, dest_mode)) {
		/* skipping a downconv Conv saves one node */
		return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
	/* Take the minimum of the conversion costs for Phi predecessors as only one
	 * branch is actually executed at a time */
		/* NOTE(review): arity is size_t but i appears to be int elsewhere —
		 * signed/unsigned comparison below; confirm i's declared type */
		size_t arity = get_Phi_n_preds(node);
		costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
		for (i = 1; i < arity; ++i) {
			ir_node *pred = get_Phi_pred(node, i);
			int c = get_conv_costs(pred, dest_mode);
			if (c < costs) costs = c;
	if (!mode_is_int(mode) || !is_optimizable_node(node)) {
	// The shift count does not participate in the conv optimisation
	arity = is_Shl(node) ? 1 : get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);
		/* cap each operand's contribution at 1: worst case one new Conv */
		costs += imin(get_conv_costs(pred, dest_mode), 1);
/**
 * Creates a Conv node in @p node's block converting @p node to
 * @p dest_mode (presumably returns the new Conv — the return statement is
 * not visible in this view; TODO confirm).
 */
static ir_node *place_conv(ir_node *node, ir_mode *dest_mode)
	ir_node *block = get_nodes_block(node);
	ir_node *conv = new_r_Conv(current_ir_graph, block, node, dest_mode);
/**
 * Rewrites @p node so that its value is produced directly in @p dest_mode,
 * recursing into operands where get_conv_costs judged it profitable and
 * inserting explicit Conv nodes (place_conv) everywhere else. Mirrors the
 * case structure of get_conv_costs.
 */
ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
	ir_mode *mode = get_irn_mode(node);
	/* already in the target mode: nothing to do */
	if (mode == dest_mode)
	if (is_Const(node)) {
		/* TODO tarval module is incomplete and can't convert floats to ints */
		tarval *tv = conv_const_tv(node, dest_mode);
		if (tv == tarval_bad) {
			/* constant not foldable: keep it and convert explicitly */
			return place_conv(node, dest_mode);
		/* fold the conversion into a new constant */
		return new_Const(dest_mode, tv);
	    is_downconv(mode, dest_mode) &&
	    get_irn_mode(get_Conv_op(node)) == dest_mode) {
	return get_Conv_op(node);
	/* multiple users: other users still need the old mode, so convert */
	if (get_irn_n_edges(node) > 1) {
		return place_conv(node, dest_mode);
	if (is_Conv(node) && is_downconv(mode, dest_mode)) {
		/* strip the intermediate downconv and recurse */
		return conv_transform(get_Conv_op(node), dest_mode);
	if (!mode_is_int(mode) || !is_optimizable_node(node)) {
		return place_conv(node, dest_mode);
	// The shift count does not participate in the conv optimisation
	arity = is_Shl(node) ? 1 : get_irn_arity(node);
	for (i = 0; i < arity; i++) {
		ir_node *pred = get_irn_n(node, i);
		ir_node *transformed;
		if (get_conv_costs(pred, dest_mode) > 0) {
			/* too expensive to push the mode change further down */
			transformed = place_conv(pred, dest_mode);
			transformed = conv_transform(pred, dest_mode);
		set_irn_n(node, i, transformed);
	/* finally change this node's own result mode */
	set_irn_mode(node, dest_mode);
/* TODO, backends (at least ia32) can't handle it at the moment,
   and it's probably not more efficient on most archs */
/**
 * Attempts the conv optimisation on a Cmp node's operands (see the TODO
 * above — currently presumably disabled/limited; the body is truncated in
 * this view, confirm against the full file).
 */
void try_optimize_cmp(ir_node *node)
	ir_node *left = get_Cmp_left(node);
	ir_node *right = get_Cmp_right(node);
	ir_node *conv = NULL;
/**
 * Graph-walker callback: for each Conv node, checks whether pushing the
 * conversion into the Conv's operand chain removes more Conv nodes than it
 * adds (via get_conv_costs), and if so rewrites the chain with
 * conv_transform and replaces the Conv. Also tries Cmp-specific
 * optimisation via try_optimize_cmp.
 */
void conv_opt_walker(ir_node *node, void *data)
	ir_node *transformed;
	try_optimize_cmp(node);
	pred = get_Conv_op(node);
	mode = get_irn_mode(node);
	pred_mode = get_irn_mode(pred);
	/* pointer modes are not touched by this optimisation */
	if (mode_is_reference(mode) || mode_is_reference(pred_mode))
	/* only worthwhile for Phi operands or genuine downconvs */
	if (!is_Phi(pred) && !is_downconv(pred_mode, mode))
	/* - 1 for the initial conv */
	costs = get_conv_costs(pred, mode) - 1;
	DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
	/* positive net cost: the rewrite would add Conv nodes, skip it */
	if (costs > 0) return;
	transformed = conv_transform(pred, mode);
	if (node != transformed) {
		exchange(node, transformed);
/**
 * Entry point of the conv optimisation: walks @p irg applying
 * conv_opt_walker, then runs local optimisations to clean up. Marks the
 * out edges inconsistent when the graph changed (presumably returns
 * whether anything changed — the return is not visible here; TODO
 * confirm).
 */
int conv_opt(ir_graph *irg)
	FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
	DB((dbg, LEVEL_1, "===> Performing conversion optimization on %+F\n", irg));
	irg_walk_graph(irg, NULL, conv_opt_walker, NULL);
	local_optimize_graph(irg);
	invalidate |= changed;
	set_irg_outs_inconsistent(irg);