/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
* TODO: * try to optimize cmp modes
* * decide when it is useful to move the convs through phis
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "iroptimize.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg);
+static inline int imin(int a, int b) { return a < b ? a : b; }
+
static
int is_optimizable_node(const ir_node *node)
{
- return
- is_Add(node) ||
- is_Sub(node) ||
- is_Mul(node) ||
- is_Phi(node);
+ switch (get_irn_opcode(node)) {
+ case iro_Add:
+ case iro_And:
+ case iro_Eor:
+ case iro_Minus:
+ case iro_Mul:
+ case iro_Not:
+ case iro_Or:
+ case iro_Phi:
+ case iro_Shl:
+ case iro_Sub:
+ return 1;
+
+ default: return 0;
+ }
}
static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
}
+static
+int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
+{
+ return
+ mode_is_int(src_mode) &&
+ mode_is_int(dest_mode) &&
+ get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
+}
+
static
int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
{
return conv_const_tv(node, dest_mode) == tarval_bad ? 1 : 0;
}
+ if (is_Conv(node) &&
+ is_downconv(mode, dest_mode) &&
+ get_irn_mode(get_Conv_op(node)) == dest_mode) {
+ return -1;
+ }
+
if (get_irn_n_edges(node) > 1) {
DB((dbg, LEVEL_3, "multi outs at %+F\n", node));
return 1;
}
+#if 0 // TODO
+ /* Take the minimum of the conversion costs for Phi predecessors as only one
+ * branch is actually executed at a time */
+ if (is_Phi(node)) {
+ size_t i;
+ size_t arity = get_Phi_n_preds(node);
+ int costs;
+
+ costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
+ for (i = 1; i < arity; ++i) {
+ ir_node *pred = get_Phi_pred(node, i);
+ int c = get_conv_costs(pred, dest_mode);
+ if (c < costs) costs = c;
+ }
+
+ return costs;
+ }
+#endif
+
+ if (!is_downconv(mode, dest_mode)) {
+ return 1;
+ }
+
if (is_Conv(node)) {
return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
}
}
costs = 0;
- arity = get_irn_arity(node);
+ // The shift count does not participate in the conv optimisation
+ arity = is_Shl(node) ? 1 : get_irn_arity(node);
for (i = 0; i < arity; ++i) {
ir_node *pred = get_irn_n(node, i);
- costs += get_conv_costs(pred, dest_mode);
+ costs += imin(get_conv_costs(pred, dest_mode), 1);
}
return costs;
static
ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
{
- size_t arity;
- size_t i;
+ ir_mode *mode = get_irn_mode(node);
+ size_t arity;
+ size_t i;
- if (get_irn_mode(node) == dest_mode)
+ if (mode == dest_mode)
return node;
if (is_Const(node)) {
if (tv == tarval_bad) {
return place_conv(node, dest_mode);
} else {
- return new_Const(dest_mode, tv);
+ return new_Const(tv);
}
}
+ if (is_Conv(node) &&
+ is_downconv(mode, dest_mode) &&
+ get_irn_mode(get_Conv_op(node)) == dest_mode) {
+ return get_Conv_op(node);
+ }
+
if (get_irn_n_edges(node) > 1) {
return place_conv(node, dest_mode);
}
+ if (!is_downconv(mode, dest_mode)) {
+ return place_conv(node, dest_mode);
+ }
+
if (is_Conv(node)) {
return conv_transform(get_Conv_op(node), dest_mode);
}
return place_conv(node, dest_mode);
}
- arity = get_irn_arity(node);
+ // The shift count does not participate in the conv optimisation
+ arity = is_Shl(node) ? 1 : get_irn_arity(node);
for (i = 0; i < arity; i++) {
ir_node *pred = get_irn_n(node, i);
- ir_node *transformed = conv_transform(pred, dest_mode);
+ ir_node *transformed;
+ if (get_conv_costs(pred, dest_mode) > 0) {
+ transformed = place_conv(pred, dest_mode);
+ } else {
+ transformed = conv_transform(pred, dest_mode);
+ }
set_irn_n(node, i, transformed);
}
set_irn_mode(node, dest_mode);
return node;
}
-static
-int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
-{
- return
- mode_is_int(src_mode) &&
- mode_is_int(dest_mode) &&
- get_mode_size_bits(dest_mode) < get_mode_size_bits(src_mode);
-}
-
-/* TODO, backends (at least ia23) can't handle it at the moment,
- and it's probably not more efficient on most
- archs */
+/* TODO, backends (at least ia32) can't handle it at the moment,
+ and it's probably not more efficient on most archs */
#if 0
static
void try_optimize_cmp(ir_node *node)
ir_mode *pred_mode;
ir_mode *mode;
int costs;
+ (void) data;
#if 0
if(is_Cmp(node)) {
mode = get_irn_mode(node);
pred_mode = get_irn_mode(pred);
+ if (mode_is_reference(mode) || mode_is_reference(pred_mode))
+ return;
+
if (!is_Phi(pred) && !is_downconv(pred_mode, mode))
return;
/* - 1 for the initial conv */
costs = get_conv_costs(pred, mode) - 1;
DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
- if (costs >= 0) return;
+ if (costs > 0) return;
transformed = conv_transform(pred, mode);
- exchange(node, transformed);
- changed = 1;
+ if (node != transformed) {
+ exchange(node, transformed);
+ changed = 1;
+ }
}
-void conv_opt(ir_graph *irg)
+int conv_opt(ir_graph *irg)
{
char invalidate = 0;
FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
if (invalidate) {
set_irg_outs_inconsistent(irg);
}
+ return invalidate;
}