# include "irnode_t.h"
# include "irgraph_t.h"
+# include "iredges_t.h"
# include "irmode_t.h"
# include "iropt_t.h"
# include "ircons_t.h"
# include "dbginfo_t.h"
# include "iropt_dbg.h"
# include "irflag_t.h"
-# include "firmstat.h"
+# include "irhooks.h"
# include "irarch.h"
# include "hashptr.h"
return get_Mux_true(n);
else if (ts == get_tarval_b_false())
return get_Mux_false(n);
+ else if(get_Mux_false(n) == get_Mux_true(n))
+ return get_Mux_true(n);
return n;
}
}
#define transform_node_Add transform_node_AddSub
-#define transform_node_Sub transform_node_AddSub
+
+/**
+ * Do the AddSub optimization, then transform Sub(0,a) into Minus(a).
+ *
+ * NOTE(review): mode_is_num() also accepts float modes; for IEEE floats
+ * 0 - x and -x differ when x == +0 (signed zero).  Confirm that float
+ * Subs are either excluded by classify_Const() or that this folding is
+ * intended for them as well.
+ */
+static ir_node *transform_node_Sub(ir_node *n)
+{
+ ir_mode *mode;
+
+ /* first apply the common Add/Sub optimization */
+ n = transform_node_AddSub(n);
+
+ mode = get_irn_mode(n);
+ if (mode_is_num(mode)) {
+ if (classify_Const(get_Sub_left(n)) == CNST_NULL) {
+ /* Sub(0, a) -> Minus(a), keeping the debug info of the Sub */
+ n = new_rd_Minus(
+ get_irn_dbg_info(n),
+ current_ir_graph,
+ get_nodes_block(n),
+ get_Sub_right(n),
+ mode);
+ }
+ }
+
+ return n;
+}
/** Do architecture dependend optimizations on Mul nodes */
static ir_node *transform_node_Mul(ir_node *n) {
* AND c1
* OR
*/
-static ir_node *transform_node_Or(ir_node *or)
+static ir_node *transform_node_Or_bf_store(ir_node *or)
{
ir_node *and, *c1;
ir_node *or_l, *c2;
set_Or_right(or, new_const);
/* check for more */
- return transform_node_Or(or);
+ return transform_node_Or_bf_store(or);
+}
+
+/**
+ * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot.
+ *
+ * Handles three shapes of the shift amounts:
+ *  - both Const with c1 + c2 == bits        -> Rot(x, c1)
+ *  - Shl amount is Sub(bits, v), Shr by v   -> Rot(x, Sub) (rot right as rot left)
+ *  - Shr amount is Sub(bits, v), Shl by v   -> Rot(x, v)
+ */
+static ir_node *transform_node_Or_Rot(ir_node *or)
+{
+ ir_mode *mode = get_irn_mode(or);
+ ir_node *shl, *shr, *block;
+ ir_node *irn, *x, *c1, *c2, *v, *sub;
+ tarval *tv1, *tv2;
+
+ if (! mode_is_int(mode))
+ return or;
+
+ shl = get_binop_left(or);
+ shr = get_binop_right(or);
+
+ /* normalize: make shl the Shl and shr the Shr operand */
+ if (get_irn_op(shl) == op_Shr) {
+ if (get_irn_op(shr) != op_Shl)
+ return or;
+
+ irn = shl;
+ shl = shr;
+ shr = irn;
+ }
+ else if (get_irn_op(shl) != op_Shl)
+ return or;
+ else if (get_irn_op(shr) != op_Shr)
+ return or;
+
+ /* both shifts must work on the same value */
+ x = get_Shl_left(shl);
+ if (x != get_Shr_left(shr))
+ return or;
+
+ c1 = get_Shl_right(shl);
+ c2 = get_Shr_right(shr);
+ if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ tv2 = get_Const_tarval(c2);
+ if (! tarval_is_long(tv2))
+ return or;
+
+ if (get_tarval_long(tv1) + get_tarval_long(tv2)
+ != get_mode_size_bits(mode))
+ return or;
+
+ /* ok, all conditions met */
+ block = get_nodes_block(or);
+
+ return new_r_Rot(current_ir_graph, block, x, c1, mode);
+ }
+ else if (get_irn_op(c1) == op_Sub) {
+ v = c2;
+ sub = c1;
+
+ if (get_Sub_right(sub) != v)
+ return or;
+
+ c1 = get_Sub_left(sub);
+ if (get_irn_op(c1) != op_Const)
+ return or;
+
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+ return or;
+
+ /* ok, all conditions met */
+ block = get_nodes_block(or);
+
+ /* a Rot right is not supported, so use a rot left */
+ return new_r_Rot(current_ir_graph, block, x, sub, mode);
+ }
+ else if (get_irn_op(c2) == op_Sub) {
+ v = c1;
+ sub = c2;
+
+ /* the Shr amount must be bits - v, i.e. the Sub must subtract
+ the Shl amount -- without this check Or(Shl(x,a), Shr(x,bits-b))
+ with a != b would be wrongly folded into a Rot */
+ if (get_Sub_right(sub) != v)
+ return or;
+
+ c1 = get_Sub_left(sub);
+ if (get_irn_op(c1) != op_Const)
+ return or;
+
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+ return or;
+
+ /* ok, all conditions met */
+ block = get_nodes_block(or);
+
+ /* a Rot Left */
+ return new_r_Rot(current_ir_graph, block, x, v, mode);
+ }
+
+ return or;
+}
+
+/**
+ * Optimize an Or: first try to combine two bitfield stores,
+ * then try to fold a Shl/Shr pair into a Rot.
+ */
+static ir_node *transform_node_Or(ir_node *or)
+{
+ or = transform_node_Or_bf_store(or);
+ or = transform_node_Or_Rot(or);
+
+ return or;
}
/* forward */
return n;
}
+/**
+ * Transform an End node: replace dead Blocks in its keep-alive list
+ * by Bad.  The End node itself is kept, no new node is constructed.
+ */
+static ir_node * transform_node_End(ir_node *n) {
+ int i, n_keepalives = get_End_n_keepalives(n);
+
+ /* Remove dead blocks in keepalive list.
+ We do not generate a new End node. */
+ for (i = 0; i < n_keepalives; ++i) {
+ ir_node *ka = get_End_keepalive(n, i);
+ if (is_Block(ka) && is_Block_dead(ka))
+ set_End_keepalive(n, i, new_Bad());
+ }
+ return n;
+}
+
/**
* Tries several [inplace] [optimizing] transformations and returns an
CASE(Cast);
CASE(Proj);
CASE(Or);
+ CASE(End);
case iro_Shr:
case iro_Shrs:
case iro_Shl:
/** Compares the attributes of two Free nodes. */
static int node_cmp_attr_Free(ir_node *a, ir_node *b)
{
- return (get_irn_free_attr(a) != get_irn_free_attr(b));
+ /* the Free attribute is an aggregate: compare it member-wise;
+ non-zero result means the attributes differ */
+ return (get_irn_free_attr(a).where != get_irn_free_attr(b).where)
+ || (get_irn_free_attr(a).type != get_irn_free_attr(b).type);
}
/** Compares the attributes of two SymConst nodes. */
and replacing the control flow by Bad. */
if (get_irn_mode(node) == mode_X) {
ir_node *block = get_nodes_block(node);
+ if (!get_Block_matured(block)) return node; /* Don't optimize nodes in immature blocks. */
if (op == op_End) return node; /* Don't optimize End, may have Bads. */
if (get_irn_op(block) == op_Block && get_Block_matured(block)) {
ir_node *
optimize_node (ir_node *n)
{
- tarval *tv;
- ir_node *oldn = n;
- opcode iro = get_irn_opcode(n);
-
- type *old_tp = get_irn_type(n);
- {
- int i, arity = get_irn_arity(n);
- for (i = 0; i < arity && !old_tp; ++i)
- old_tp = get_irn_type(get_irn_n(n, i));
- }
-
- /* Allways optimize Phi nodes: part of the construction. */
- if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
-
- /* constant expression evaluation / constant folding */
- if (get_opt_constant_folding()) {
- /* constants can not be evaluated */
- if (iro != iro_Const) {
- /* try to evaluate */
- tv = computed_value(n);
- if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
- /*
- * we MUST copy the node here temporary, because it's still needed
- * for DBG_OPT_CSTEVAL
- */
- int node_size = offsetof(ir_node, attr) + n->op->attr_size;
- oldn = alloca(node_size);
-
- memcpy(oldn, n, node_size);
- CLONE_ARR_A(ir_node *, oldn->in, n->in);
-
- /* ARG, copy the in array, we need it for statistics */
- memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
-
- /* evaluation was successful -- replace the node. */
- obstack_free (current_ir_graph->obst, n);
- n = new_Const (get_tarval_mode (tv), tv);
- if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
- set_Const_type(n, old_tp);
- DBG_OPT_CSTEVAL(oldn, n);
- return n;
- }
- }
- }
-
- /* remove unnecessary nodes */
- if (get_opt_constant_folding() ||
- (iro == iro_Phi) || /* always optimize these nodes. */
- (iro == iro_Id) ||
- (iro == iro_Proj) ||
- (iro == iro_Block) ) /* Flags tested local. */
- n = equivalent_node (n);
-
- optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
-
- /** common subexpression elimination **/
- /* Checks whether n is already available. */
- /* The block input is used to distinguish different subexpressions. Right
- now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
- subexpressions within a block. */
- if (get_opt_cse())
- n = identify_cons (current_ir_graph->value_table, n);
-
- if (n != oldn) {
- /* We found an existing, better node, so we can deallocate the old node. */
- obstack_free (current_ir_graph->obst, oldn);
-
- return n;
- }
-
- /* Some more constant expression evaluation that does not allow to
- free the node. */
- iro = get_irn_opcode(n);
- if (get_opt_constant_folding() ||
- (iro == iro_Cond) ||
- (iro == iro_Proj)) /* Flags tested local. */
- n = transform_node (n);
-
- /* Remove nodes with dead (Bad) input.
- Run always for transformation induced Bads. */
- n = gigo (n);
-
- /* Now we have a legal, useful node. Enter it in hash table for cse */
- if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
- n = identify_remember (current_ir_graph->value_table, n);
- }
-
- return n;
+ tarval *tv;
+ ir_node *oldn = n;
+ opcode iro = get_irn_opcode(n);
+
+ type *old_tp = get_irn_type(n);
+ {
+ int i, arity = get_irn_arity(n);
+ for (i = 0; i < arity && !old_tp; ++i)
+ old_tp = get_irn_type(get_irn_n(n, i));
+ }
+
+ /* Always optimize Phi nodes: part of the construction. */
+ if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
+
+ /* constant expression evaluation / constant folding */
+ if (get_opt_constant_folding()) {
+ /* constants can not be evaluated */
+ if (iro != iro_Const) {
+ /* try to evaluate */
+ tv = computed_value(n);
+ if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
+ ir_node *nw;
+
+ /*
+ * we MUST copy the node here temporary, because it's still needed
+ * for DBG_OPT_CSTEVAL
+ */
+ int node_size = offsetof(ir_node, attr) + n->op->attr_size;
+ oldn = alloca(node_size);
+
+ memcpy(oldn, n, node_size);
+ CLONE_ARR_A(ir_node *, oldn->in, n->in);
+
+ /* ARG, copy the in array, we need it for statistics */
+ memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
+
+
+ /* n is about to be freed: remove it from the edge information first */
+ edges_node_deleted(n, current_ir_graph);
+
+ /* evaluation was successful -- replace the node. */
+ obstack_free (current_ir_graph->obst, n);
+ nw = new_Const (get_tarval_mode (tv), tv);
+
+ if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
+ set_Const_type(nw, old_tp);
+ DBG_OPT_CSTEVAL(oldn, nw);
+ return nw;
+ }
+ }
+ }
+
+ /* remove unnecessary nodes */
+ if (get_opt_constant_folding() ||
+ (iro == iro_Phi) || /* always optimize these nodes. */
+ (iro == iro_Id) ||
+ (iro == iro_Proj) ||
+ (iro == iro_Block) ) /* Flags tested local. */
+ n = equivalent_node (n);
+
+ optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
+
+ /** common subexpression elimination **/
+ /* Checks whether n is already available. */
+ /* The block input is used to distinguish different subexpressions. Right
+ now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
+ subexpressions within a block. */
+ if (get_opt_cse())
+ n = identify_cons (current_ir_graph->value_table, n);
+
+ if (n != oldn) {
+ /* CSE hit: oldn is replaced by an existing node and freed below,
+ so drop its edge information first */
+ edges_node_deleted(oldn, current_ir_graph);
+
+ /* We found an existing, better node, so we can deallocate the old node. */
+ obstack_free (current_ir_graph->obst, oldn);
+
+ return n;
+ }
+
+ /* Some more constant expression evaluation that does not allow to
+ free the node. */
+ iro = get_irn_opcode(n);
+ if (get_opt_constant_folding() ||
+ (iro == iro_Cond) ||
+ (iro == iro_Proj)) /* Flags tested local. */
+ n = transform_node (n);
+
+ /* Remove nodes with dead (Bad) input.
+ Run always for transformation induced Bads. */
+ n = gigo (n);
+
+ /* Now we have a legal, useful node. Enter it in hash table for cse */
+ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
+ n = identify_remember (current_ir_graph->value_table, n);
+ }
+
+ return n;
}
/**
- * These optimizations never deallocate nodes. This can cause dead
+ * These optimizations never deallocate nodes (in place). This can cause dead
* nodes lying on the obstack. Remove these by a dead node elimination,
* i.e., a copying garbage collection.
*/