/*
- * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
*
* This file is part of libFirm.
*
* TODO: * try to optimize cmp modes
* * decide when it is useful to move the convs through phis
*/
-#ifdef HAVE_CONFIG_H
#include "config.h"
-#endif
#include "iroptimize.h"
#include <assert.h>
+#include <stdbool.h>
#include "debug.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgopt.h"
#include "irnode_t.h"
+#include "iropt_t.h"
#include "iredges_t.h"
#include "irgwalk.h"
#include "irprintf.h"
+#include "irpass_t.h"
+#include "tv.h"
+#include "vrp.h"
+#include "opt_manage.h"
-DEBUG_ONLY(static firm_dbg_module_t *dbg);
+DEBUG_ONLY(static firm_dbg_module_t *dbg;)
/* Return the smaller of the two arguments. */
static inline int imin(int a, int b)
{
	if (b < a)
		return b;
	return a;
}
-static
-int is_optimizable_node(const ir_node *node)
+static bool is_optimizable_node(const ir_node *node, ir_mode *dest_mode)
{
switch (get_irn_opcode(node)) {
- case iro_Add:
- case iro_And:
- case iro_Eor:
- case iro_Minus:
- case iro_Mul:
- case iro_Not:
- case iro_Or:
- case iro_Phi:
- case iro_Shl:
- case iro_Sub:
- return 1;
+ case iro_Add:
+ case iro_And:
+ case iro_Eor:
+ case iro_Minus:
+ case iro_Mul:
+ case iro_Not:
+ case iro_Or:
+ case iro_Phi:
+ case iro_Sub:
+ return true;
+ case iro_Shl: {
+ int modulo_shift = get_mode_modulo_shift(dest_mode);
+ int old_shift = get_mode_modulo_shift(get_irn_mode(node));
+ /* bail out if modulo shift changes */
+ if (modulo_shift != old_shift)
+ return false;
+ return true;
+ }
- default: return 0;
+ default:
+ return false;
}
}
-static tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
+static ir_tarval* conv_const_tv(const ir_node* cnst, ir_mode* dest_mode)
{
return tarval_convert_to(get_Const_tarval(cnst), dest_mode);
}
-static
-int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
+static int is_downconv(ir_mode *src_mode, ir_mode *dest_mode)
{
return
mode_is_int(src_mode) &&
mode_is_int(dest_mode) &&
- get_mode_size_bits(dest_mode) < get_mode_size_bits(src_mode);
+ get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
}
-static
-int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
+static int get_conv_costs(const ir_node *node, ir_mode *dest_mode)
{
ir_mode *mode = get_irn_mode(node);
- size_t arity;
- size_t i;
+ int arity;
+ int i;
int costs;
if (mode == dest_mode)
}
if (is_Conv(node) &&
- is_downconv(get_irn_mode(node), dest_mode) &&
+ is_downconv(mode, dest_mode) &&
get_irn_mode(get_Conv_op(node)) == dest_mode) {
return -1;
}
return 1;
}
- if (is_Conv(node) && is_downconv(get_irn_mode(node), dest_mode)) {
- return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
+ if (ir_zero_when_converted(node, dest_mode)) {
+ return -1;
}
#if 0 // TODO
/* Take the minimum of the conversion costs for Phi predecessors as only one
* branch is actually executed at a time */
if (is_Phi(node)) {
- size_t i;
- size_t arity = get_Phi_n_preds(node);
+ int i;
+ int arity = get_Phi_n_preds(node);
int costs;
costs = get_conv_costs(get_Phi_pred(node, 0), dest_mode);
}
#endif
- if (!is_optimizable_node(node)) {
+ if (!is_downconv(mode, dest_mode)) {
+ return 1;
+ }
+
+ if (is_Conv(node)) {
+ ir_node *pred = get_Conv_op(node);
+ ir_mode *pred_mode = get_irn_mode(pred);
+
+ if (!values_in_mode(dest_mode, pred_mode)) {
+ return 1;
+ }
+ return get_conv_costs(get_Conv_op(node), dest_mode) - 1;
+ }
+
+ if (!is_optimizable_node(node, dest_mode)) {
return 1;
}
/* Create a Conv of node to dest_mode, placed in node's own block. */
static ir_node *place_conv(ir_node *node, ir_mode *dest_mode)
{
	return new_r_Conv(get_nodes_block(node), node, dest_mode);
}
-static
-ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
+static ir_node *conv_transform(ir_node *node, ir_mode *dest_mode)
{
- size_t arity;
- size_t i;
+ ir_mode *mode = get_irn_mode(node);
+ ir_graph *irg = get_irn_irg(node);
+ int arity;
+ int conv_arity;
+ int i;
+ ir_node *new_node;
+ ir_node **ins;
- if (get_irn_mode(node) == dest_mode)
+ if (mode == dest_mode)
return node;
if (is_Const(node)) {
/* TODO tarval module is incomplete and can't convert floats to ints */
- tarval *tv = conv_const_tv(node, dest_mode);
+ ir_tarval *tv = conv_const_tv(node, dest_mode);
if (tv == tarval_bad) {
return place_conv(node, dest_mode);
} else {
- return new_Const(dest_mode, tv);
+ return new_r_Const(irg, tv);
}
}
if (is_Conv(node) &&
- is_downconv(get_irn_mode(node), dest_mode) &&
+ is_downconv(mode, dest_mode) &&
get_irn_mode(get_Conv_op(node)) == dest_mode) {
return get_Conv_op(node);
}
return place_conv(node, dest_mode);
}
- if (is_Conv(node) && is_downconv(get_irn_mode(node), dest_mode)) {
+ if (!is_downconv(mode, dest_mode)) {
+ return place_conv(node, dest_mode);
+ }
+
+ if (is_Conv(node)) {
+ ir_node *pred = get_Conv_op(node);
+ ir_mode *pred_mode = get_irn_mode(pred);
+
+ if (!values_in_mode(dest_mode, pred_mode)) {
+ return place_conv(node, dest_mode);
+ }
return conv_transform(get_Conv_op(node), dest_mode);
}
- if (!is_optimizable_node(node)) {
+ if (!is_optimizable_node(node, dest_mode)) {
return place_conv(node, dest_mode);
}
+ // We want to create a new node with the right mode
+ arity = get_irn_arity(node);
+ ins = ALLOCAN(ir_node *, arity);
+
// The shift count does not participate in the conv optimisation
- arity = is_Shl(node) ? 1 : get_irn_arity(node);
- for (i = 0; i < arity; i++) {
+ conv_arity = is_Shl(node) ? 1 : arity;
+ for (i = 0; i < conv_arity; i++) {
ir_node *pred = get_irn_n(node, i);
ir_node *transformed;
if (get_conv_costs(pred, dest_mode) > 0) {
} else {
transformed = conv_transform(pred, dest_mode);
}
- set_irn_n(node, i, transformed);
+ ins[i] = transformed;
}
- set_irn_mode(node, dest_mode);
- return node;
-}
-/* TODO, backends (at least ia32) can't handle it at the moment,
- and it's probably not more efficient on most archs */
-#if 0
-static
-void try_optimize_cmp(ir_node *node)
-{
- ir_node *left = get_Cmp_left(node);
- ir_node *right = get_Cmp_right(node);
- ir_node *conv = NULL;
+ for (i = conv_arity; i < arity; i++) {
+ ins[i] = get_irn_n(node, i);
+ }
- if(is_downconv
-}
-#endif
+ new_node = new_ir_node(get_irn_dbg_info(node),
+ irg,
+ get_nodes_block(node),
+ get_irn_op(node),
+ dest_mode,
+ arity,
+ ins);
+ copy_node_attr(irg, node, new_node);
-static char changed;
+ return new_node;
+}
-static
-void conv_opt_walker(ir_node *node, void *data)
+static void conv_opt_walker(ir_node *node, void *data)
{
ir_node *transformed;
ir_node *pred;
ir_mode *pred_mode;
ir_mode *mode;
int costs;
- (void) data;
-
-#if 0
- if(is_Cmp(node)) {
- try_optimize_cmp(node);
- return;
- }
-#endif
+ bool *changed = (bool*)data;
if (!is_Conv(node))
return;
/* - 1 for the initial conv */
costs = get_conv_costs(pred, mode) - 1;
DB((dbg, LEVEL_2, "Costs for %+F -> %+F: %d\n", node, pred, costs));
- if (costs > 0) return;
+ if (costs > 0)
+ return;
transformed = conv_transform(pred, mode);
if (node != transformed) {
exchange(node, transformed);
- changed = 1;
+ *changed = true;
}
}
-void conv_opt(ir_graph *irg)
+static ir_graph_state_t do_deconv(ir_graph *irg)
{
- char invalidate = 0;
+ bool changed;
FIRM_DBG_REGISTER(dbg, "firm.opt.conv");
DB((dbg, LEVEL_1, "===> Performing conversion optimization on %+F\n", irg));
- edges_assure(irg);
do {
- changed = 0;
- irg_walk_graph(irg, NULL, conv_opt_walker, NULL);
+ changed = false;
+ irg_walk_graph(irg, NULL, conv_opt_walker, &changed);
local_optimize_graph(irg);
- invalidate |= changed;
} while (changed);
- if (invalidate) {
- set_irg_outs_inconsistent(irg);
- }
+ return 0;
+}
+
+static optdesc_t opt_deconv = {
+ "deconv",
+ IR_GRAPH_STATE_CONSISTENT_OUT_EDGES,
+ do_deconv,
+};
+
+int conv_opt(ir_graph *irg)
+{
+ perform_irg_optimization(irg, &opt_deconv);
+ return 1;
+}
+
+/* Creates an ir_graph pass for conv_opt. */
+ir_graph_pass_t *conv_opt_pass(const char *name)
+{
+ ir_graph_pass_t *path = def_graph_pass_ret(name ? name : "conv_opt", conv_opt);
+
+ /* safe to run parallel on all irgs */
+ ir_graph_pass_set_parallel(path, 1);
+
+ return path;
}