-/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
-* All rights reserved.
-*
-* Authors: Christian Schaefer, Goetz Lindenmaier
-*
-* iropt --- optimizations intertwined with IR construction.
-*/
-
-/* $Id$ */
+/*
+ * Project: libFIRM
+ * File name: ir/ir/iropt.c
+ * Purpose: iropt --- optimizations intertwined with IR construction.
+ * Author: Christian Schaefer
+ * Modified by: Goetz Lindenmaier
+ * Created:
+ * CVS-ID: $Id$
+ * Copyright: (c) 1998-2003 Universität Karlsruhe
+ * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
+ */
#ifdef HAVE_CONFIG_H
# include <config.h>
# include "irgmod.h"
# include "irvrfy.h"
# include "tv.h"
-# include "tune.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
res = tarval_and (ta, tb);
} else {
tarval *v;
- if ( ( ((v = computed_value (a)) != tarval_bad)
- && (v == get_mode_null(get_tarval_mode(v))) )
- || ( ((v = computed_value (b)) != tarval_bad)
- && (v == get_mode_null(get_tarval_mode(v))) )) {
+ if ( (tarval_classify ((v = computed_value (a))) == TV_CLASSIFY_NULL)
+ || (tarval_classify ((v = computed_value (b))) == TV_CLASSIFY_NULL)) {
res = v;
}
}
res = tarval_or (ta, tb);
} else {
tarval *v;
- if ( (tarval_classify ((v = computed_value (a))) == -1)
- || (tarval_classify ((v = computed_value (b))) == -1)) {
+ if ( (tarval_classify ((v = computed_value (a))) == TV_CLASSIFY_ALL_ONE)
+ || (tarval_classify ((v = computed_value (b))) == TV_CLASSIFY_ALL_ONE)) {
res = v;
}
}
break;
case iro_Not:
if ((ta != tarval_bad)) {
- res = tarval_neg (ta);
+ res = tarval_not (ta);
}
break;
case iro_Shl:
/* If this predecessors constant value is zero, the operation is
unnecessary. Remove it: */
- if (tarval_classify (tv) == 0) {
+ if (tarval_classify (tv) == TV_CLASSIFY_NULL) {
n = on; DBG_OPT_ALGSIM1;
}
} break;
case iro_Shrs:
case iro_Rot:
/* these operations are not commutative. Test only one predecessor. */
- if (tarval_classify (computed_value (b)) == 0) {
+ if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) {
n = a; DBG_OPT_ALGSIM1;
/* Test if b > #bits of a ==> return 0 / divide b by #bits
--> transform node? */
break;
case iro_Mul:
/* Mul is commutative and has again an other neutral element. */
- if (tarval_classify (computed_value (a)) == 1) {
+ if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ONE) {
n = b; DBG_OPT_ALGSIM1;
- } else if (tarval_classify (computed_value (b)) == 1) {
+ } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) {
n = a; DBG_OPT_ALGSIM1;
}
break;
case iro_Div:
/* Div is not commutative. */
- if (tarval_classify (computed_value (b)) == 1) { /* div(x, 1) == x */
+ if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
case iro_And:
if (a == b) {
n = a; /* And has it's own neutral element */
- } else if (tarval_classify (computed_value (a)) == -1) {
+ } else if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ALL_ONE) {
n = b;
- } else if (tarval_classify (computed_value (b)) == -1) {
+ } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ALL_ONE) {
n = a;
}
if (n != oldn) DBG_OPT_ALGSIM1;
break;
case iro_Conv:
- if (get_irn_mode(n) == get_irn_mode(a)) { /* No Conv necessary */
+ {
+ ir_mode *n_mode = get_irn_mode(n);
+ ir_mode *a_mode = get_irn_mode(a);
+
+ if (n_mode == a_mode) { /* No Conv necessary */
n = a; DBG_OPT_ALGSIM3;
- } else if (get_irn_mode(n) == mode_b) {
- if (get_irn_op(a) == op_Conv &&
- get_irn_mode (get_Conv_op(a)) == mode_b) {
- n = get_Conv_op(a); /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM2;
+ } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ ir_mode *b_mode;
+
+ b = get_Conv_op(a);
+ n_mode = get_irn_mode(n);
+ b_mode = get_irn_mode(b);
+
+ if (n_mode == b_mode) {
+ if (n_mode == mode_b) {
+ n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM1;
+ }
+ else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
+ if (smaller_mode(b_mode, a_mode)){
+ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ DBG_OPT_ALGSIM1;
+ }
+ }
}
}
break;
-
+ }
case iro_Phi:
{
/* Several optimizations:
return n;
} /* end equivalent_node() */
+/* do node specific optimizations of nodes predecessors. */
+static void
+optimize_preds(ir_node *n) {
+  ir_node *a = NULL, *b = NULL;  /* operands of n; b stays NULL for unops/others */
+
+  /* get the operands we will work on for simple cases. */
+  if (is_binop(n)) {
+    a = get_binop_left(n);
+    b = get_binop_right(n);
+  } else if (is_unop(n)) {
+    a = get_unop_op(n);
+  }
+
+  switch (get_irn_opcode(n)) {
+
+  case iro_Cmp:  /* Cmp is a binop, so a and b are both set above */
+    /* We don't want Cast as input to Cmp. */
+    if (get_irn_op(a) == op_Cast) {
+      a = get_Cast_op(a);  /* strips one Cast only; NOTE(review): nested Casts remain -- confirm intended */
+      set_Cmp_left(n, a);
+    }
+    if (get_irn_op(b) == op_Cast) {
+      b = get_Cast_op(b);  /* same single-Cast strip on the right operand */
+      set_Cmp_right(n, b);
+    }
+    break;
+
+  default: break;  /* all other opcodes: predecessors left untouched */
+  } /* end switch */
+}
+
/* tries several [inplace] [optimizing] transformations and returns an
equivalent node. The difference to equivalent_node is that these
transformations _do_ generate new nodes, and thus the old node must
not be freed even if the equivalent node isn't the old one. */
static ir_node *
-transform_node (ir_node *n)
-{
-
+transform_node (ir_node *n) {
ir_node *a = NULL, *b;
tarval *ta, *tb;
case iro_Mod: {
ta = computed_value(n);
if (ta != tarval_bad) {
- /* Turn Div into a tuple (mem, bad, value) */
+ /* Turn Mod into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
turn_into_tuple(n, 3);
set_Tuple_pred(n, 0, mem);
set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
- } else if ((get_irn_op(get_Cond_selector(n)) == op_Eor)
- && (get_irn_mode(get_Cond_selector(n)) == mode_b)
- && (tarval_classify(computed_value(get_Eor_right(a))) == 1)) {
+ } else if ((get_irn_op(a) == op_Eor)
+ && (get_irn_mode(a) == mode_b)
+ && (tarval_classify(computed_value(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
/* The Eor is a negate. Generate a new Cond without the negate,
simulate the negate by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
get_Eor_left(a)));
- } else if ((get_irn_op(get_Cond_selector(n)) == op_Not)
- && (get_irn_mode(get_Cond_selector(n)) == mode_b)) {
+ } else if ((get_irn_op(a) == op_Not)
+ && (get_irn_mode(a) == mode_b)) {
/* A Not before the Cond. Generate a new Cond without the Not,
simulate the Not by exchanging the results. */
set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_Block(n),
if ((get_irn_mode(n) == mode_b)
&& (get_irn_op(a) == op_Proj)
&& (get_irn_mode(a) == mode_b)
- && (tarval_classify (computed_value (b)) == 1)
+ && (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE)
&& (get_irn_op(get_Proj_pred(a)) == op_Cmp))
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_nodes_Block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a)));
else if ((get_irn_mode(n) == mode_b)
- && (tarval_classify (computed_value (b)) == 1))
+ && (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE))
/* The Eor is a Not. Replace it by a Not. */
/* ????!!!Extend to bitfield 1111111. */
n = new_r_Not(current_ir_graph, get_nodes_Block(n), a, mode_b);
/* **************** Common Subexpression Elimination **************** */
+/** The size of the hash table used, should estimate the number of nodes
+ in a graph. */
+#define N_IR_NODES 512
+
/* Compare function for two nodes in the hash table. Gets two */
/* nodes as parameters. Returns 0 if the nodes are a cse. */
static int
switch (get_irn_opcode(a)) {
case iro_Const:
- return get_irn_const_attr (a) != get_irn_const_attr (b);
+ return (get_Const_tarval(a) != get_Const_tarval(b))
+ || (get_Const_type(a) != get_Const_type(b));
case iro_Proj:
return get_irn_proj_attr (a) != get_irn_proj_attr (b);
case iro_Filter:
|| (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type);
case iro_Phi:
return get_irn_phi_attr (a) != get_irn_phi_attr (b);
+ case iro_Cast:
+ return get_Cast_type(a) != get_Cast_type(b);
default: ;
}
pset *
new_identities (void)
{
-  return new_pset (vt_cmp, TUNE_NIR_NODES);
+  return new_pset (vt_cmp, N_IR_NODES);  /* N_IR_NODES: local estimate of nodes per graph, replaces removed tune.h constant */
}
void
int i;
ir_op* op = get_irn_op(node);
+#if 1
+ /* remove garbage blocks by looking at control flow that leaves the block
+ and replacing the control flow by Bad. */
+ if (get_irn_mode(node) == mode_X) {
+ ir_node *block = get_nodes_block(node);
+ if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
+ for (i = 0; i < get_irn_arity(block); i++) {
+ if (!is_Bad(get_irn_n(block, i))) break;
+ }
+ if (i == get_irn_arity(block)) return new_Bad();
+ }
+ }
+#endif
+
/* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
blocks predecessors is dead. */
if ( op != op_Block && op != op_Phi && op != op_Tuple) {
}
}
#if 0
+ /* With this code we violate the agreement that local_optimize
+ only leaves Bads in Block, Phi and Tuple nodes. */
/* If Block has only Bads as predecessors it's garbage. */
/* If Phi has only Bads as predecessors it's garbage. */
- if (op == op_Block || op == op_Phi) {
+ if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
for (i = 0; i < get_irn_arity(node); i++) {
if (!is_Bad(get_irn_n(node, i))) break;
}
- if (i = get_irn_arity(node)) node = new_Bad();
+ if (i == get_irn_arity(node)) node = new_Bad();
}
#endif
return node;
(get_irn_op(n) == op_Block) ) /* Flags tested local. */
n = equivalent_node (n);
+ optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
+
/** common subexpression elimination **/
/* Checks whether n is already available. */
/* The block input is used to distinguish different subexpressions. Right
(get_irn_op(n) == op_Block) ) /* Flags tested local. */
n = equivalent_node (n);
+ optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
+
/** common subexpression elimination **/
/* Checks whether n is already available. */
/* The block input is used to distinguish different subexpressions. Right