 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief conv node optimisation
23 * @author Matthias Braun, Christoph Mallon
26 * Try to minimize the number of conv nodes by changing modes of operations.
27 * The typical example is the following structure:
32 * Add Is gets transformed to |
36 * TODO: * try to optimize cmp modes
37 * * decide when it is useful to move the convs through phis
41 #include "iroptimize.h"
50 #include "iredges_t.h"
55 DEBUG_ONLY(static firm_dbg_module_t *dbg);
/** Return the smaller of two ints. */
static inline int imin(int a, int b)
{
	return b < a ? b : a;
}
/**
 * Returns true if @p node's opcode allows the conv optimisation to change
 * the node's mode (i.e. its result mode may be narrowed in place).
 * NOTE(review): the switch's case labels are missing from this view
 * (gaps in the original line numbering) — the accepted opcode set
 * cannot be determined from here.
 */
static bool is_optimizable_node(const ir_node *node)
switch (get_irn_opcode(node)) {
78 static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
80 return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
83 static int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
86 mode_is_int(src_mode) &&
87 mode_is_int(dest_mode) &&
88 get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
/**
 * Estimates the cost of converting the value computed by @p node into
 * @p dest_mode, measured (roughly) in extra Conv nodes needed.
 * Constants convertible at compile time and existing matching Convs are
 * free; presumably a result <= 0 means the transformation pays off —
 * confirm against the decision in conv_opt_walker.
 * NOTE(review): this function is fragmentary in this view (gaps in the
 * original line numbering); braces, early returns and some declarations
 * between the visible lines are missing here.
 */
static int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
ir_mode *mode = get_irn_mode(node);
/* converting a value into its own mode costs nothing */
if (mode == dest_mode)
if (is_Const(node)) {
/* TODO tarval module is incomplete and can't convert floats to ints */
/* a Const is free iff its tarval converts cleanly to dest_mode */
return conv_const_tv(node, dest_mode) == tarval_bad ? 1 : 0;
/* an existing downconv whose operand already has dest_mode is free */
is_downconv(mode, dest_mode) &&
get_irn_mode(get_Conv_op(node)) == dest_mode) {
/* multiple users: converting the producer would affect other users too */
if (get_irn_n_edges(node) > 1) {
DB((dbg, LEVEL_3, "multi outs at %+F\n", node));
/* Take the minimum of the conversion costs for Phi predecessors as only one
 * branch is actually executed at a time */
size_t arity = get_Phi_n_preds(node);
costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
for (i = 1; i < arity; ++i) {
ir_node *pred = get_Phi_pred(node, i);
int c = get_conv_costs(pred, dest_mode);
if (c < costs) costs = c;
/* only downconvs are pushed through operations */
if (!is_downconv(mode, dest_mode)) {
ir_node *pred = get_Conv_op(node);
ir_mode *pred_mode = get_irn_mode(pred);
/* conv of a conv: only foldable if all dest_mode values fit pred_mode */
if (!values_in_mode(dest_mode, pred_mode)) {
/* - 1: the inner Conv disappears when folded */
return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
if (!is_optimizable_node(node)) {
// The shift count does not participate in the conv optimisation
arity = is_Shl(node) ? 1 : get_irn_arity(node);
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
/* cap each operand's contribution at 1 (cost of one new Conv) */
costs += imin(get_conv_costs(pred, dest_mode), 1);
165 static ir_node *place_conv(ir_node *node, ir_mode *dest_mode)
167 ir_node *block = get_nodes_block(node);
168 ir_node *conv = new_r_Conv(block, node, dest_mode);
/**
 * Transforms @p node so its value is available in @p dest_mode, mirroring
 * the cost cases of get_conv_costs: folds Consts, strips redundant Convs,
 * pushes downconvs through optimizable operations (changing their mode),
 * and otherwise places an explicit Conv.
 * @return the node producing the value in dest_mode
 * NOTE(review): fragmentary in this view (gaps in the original line
 * numbering); braces, early returns and some declarations are missing.
 */
static ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
ir_mode *mode = get_irn_mode(node);
/* already in the desired mode: nothing to do */
if (mode == dest_mode)
if (is_Const(node)) {
/* TODO tarval module is incomplete and can't convert floats to ints */
tarval *tv = conv_const_tv(node, dest_mode);
if (tv == tarval_bad) {
/* not compile-time convertible: fall back to an explicit Conv */
return place_conv(node, dest_mode);
/* fold the conversion into a fresh Const */
return new_Const(tv);
/* a downconv whose operand already has dest_mode: drop the Conv */
is_downconv(mode, dest_mode) &&
get_irn_mode(get_Conv_op(node)) == dest_mode) {
return get_Conv_op(node);
/* multiple users: don't rewrite the producer, convert locally */
if (get_irn_n_edges(node) > 1) {
return place_conv(node, dest_mode);
if (!is_downconv(mode, dest_mode)) {
return place_conv(node, dest_mode);
ir_node *pred = get_Conv_op(node);
ir_mode *pred_mode = get_irn_mode(pred);
if (!values_in_mode(dest_mode, pred_mode)) {
return place_conv(node, dest_mode);
/* conv of a conv: recurse on the inner operand */
return conv_transform(get_Conv_op(node), dest_mode);
if (!is_optimizable_node(node)) {
return place_conv(node, dest_mode);
// The shift count does not participate in the conv optimisation
arity = is_Shl(node) ? 1 : get_irn_arity(node);
for (i = 0; i < arity; i++) {
ir_node *pred = get_irn_n(node, i);
ir_node *transformed;
/* convert each operand the cheaper way: explicit Conv vs. recursion */
if (get_conv_costs(pred, dest_mode) > 0) {
transformed = place_conv(pred, dest_mode);
transformed = conv_transform(pred, dest_mode);
set_irn_n(node, i, transformed);
/* finally narrow the operation itself to dest_mode */
set_irn_mode(node, dest_mode);
235 /* TODO, backends (at least ia32) can't handle it at the moment,
236 and it's probably not more efficient on most archs */
/**
 * Tries to optimise the modes of a Cmp node's operands.
 * NOTE(review): only the operand/locals setup is visible here — the
 * actual optimisation logic is missing from this view (gaps in the
 * original line numbering). Per the comment above, it is disabled
 * because backends can't handle it yet.
 */
static void try_optimize_cmp(ir_node *node)
ir_node *left = get_Cmp_left(node);
ir_node *right = get_Cmp_right(node);
ir_node *conv = NULL;
/**
 * Graph-walker callback: for each Conv node, decides via get_conv_costs
 * whether pushing the conversion into its operand tree is profitable and,
 * if so, performs the transformation and replaces the Conv.
 *
 * @param node  the node currently visited
 * @param data  points to a bool that is set when the graph changed —
 *              presumably; the write to *changed is not visible here
 * NOTE(review): fragmentary in this view — declarations of pred/mode/
 * pred_mode/costs, the is_Conv guard and several early returns are
 * missing (gaps in the original line numbering).
 */
static void conv_opt_walker(ir_node *node, void *data)
ir_node *transformed;
bool *changed = data;
try_optimize_cmp(node);
pred = get_Conv_op(node);
mode = get_irn_mode(node);
pred_mode = get_irn_mode(pred);
/* never touch pointer-mode conversions */
if (mode_is_reference(mode) || mode_is_reference(pred_mode))
/* only worth evaluating for downconvs, or Phis (min-cost over preds) */
if (!is_Phi(pred) && !is_downconv(pred_mode, mode))
/* - 1 for the initial conv */
costs = get_conv_costs(pred, mode) - 1;
DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
transformed = conv_transform(pred, mode);
if (node != transformed) {
exchange(node, transformed);
/**
 * Runs the conv optimisation on @p irg: repeatedly walks the graph with
 * conv_opt_walker and re-runs local optimisations until presumably no
 * more changes occur (the loop construct itself is not visible here).
 *
 * @param irg  the graph to optimise
 * @return non-zero if the graph was changed — TODO confirm; the return
 *         statement is missing from this view
 * NOTE(review): fragmentary — the 'changed' declaration, the fix-point
 * loop and the final return are missing (gaps in the original line
 * numbering).
 */
int conv_opt(ir_graph *irg)
bool invalidate = false;
FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
DB((dbg, LEVEL_1, "===> Performing conversion optimization on %+F\n", irg));
irg_walk_graph(irg, NULL, conv_opt_walker, &changed);
local_optimize_graph(irg);
/* remember across iterations whether anything changed at all */
invalidate |= changed;
/* out edges are stale after exchanging nodes */
set_irg_outs_inconsistent(irg);
312 /* Creates an ir_graph pass for conv_opt. */
313 ir_graph_pass_t *conv_opt_pass(const char *name)
315 ir_graph_pass_t *path = def_graph_pass_ret(name ? name : "conv_opt", conv_opt);
317 /* safe to run parallel on all irgs */
318 ir_graph_pass_set_parallel(path, 1);