/* Copyright (C) 1998 - 2000 by Universitaet Karlsruhe
-** All rights reserved.
-**
-** Authors: Christian Schaefer, Goetz Lindenmaier
-**
-** iropt --- optimizations intertwined with IR construction.
+* All rights reserved.
+*
+* Authors: Christian Schaefer, Goetz Lindenmaier
+*
+* iropt --- optimizations intertwined with IR construction.
*/
/* $Id$ */
/* Make types visible to allow most efficient access */
# include "entity_t.h"
-/* Trivial inlineable routine for copy propagation.
- Does follow Ids, needed to optimize inlined code. */
-static inline ir_node *
+/* Trivial INLINEable routine for copy propagation.
+ Does follow Ids, needed to optimize INLINEd code. */
+static INLINE ir_node *
follow_Id (ir_node *n)
{
while (get_irn_op (n) == op_Id) n = get_Id_pred (n);
return n;
}
-static inline tarval *
+static INLINE tarval *
value_of (ir_node *n)
{
if ((n != NULL) && (get_irn_op(n) == op_Const))
- return get_Const_tarval(n);
+ return get_Const_tarval(n); /* might return tarval_bad */
else
- return NULL;
+ return tarval_bad;
}
/* if n can be computed, return the value, else NULL. Performs
tarval *res;
ir_node *a = NULL, *b = NULL; /* initialized to shut up gcc */
- tarval *ta = NULL, *tb = NULL; /* initialized to shut up gcc */
+ /* initialized to uniformly filter invalid constants */
+ tarval *ta = tarval_bad, *tb = tarval_bad;
- res = NULL;
+ res = tarval_bad;
/* get the operands we will work on for simple cases. */
if (is_binop(n)) {
case iro_SymConst:
if ((get_SymConst_kind(n) == size) &&
(get_type_state(get_SymConst_type(n))) == layout_fixed)
- res = tarval_from_long (mode_i, get_type_size(get_SymConst_type(n)));
+ res = new_tarval_from_long (get_type_size(get_SymConst_type(n)), mode_Is);
break;
case iro_Add:
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
- && (get_irn_mode(a) != mode_p)) {
+ if ((ta != tarval_bad) && (tb != tarval_bad)
+ && (get_irn_mode(a) == get_irn_mode(b))
+ && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
res = tarval_add (ta, tb);
}
break;
case iro_Sub:
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))
- && (get_irn_mode(a) != mode_p)) {
+ if ((ta != tarval_bad) && (tb != tarval_bad)
+ && (get_irn_mode(a) == get_irn_mode(b))
+ && !(get_mode_sort(get_irn_mode(a)) == irms_reference)) {
res = tarval_sub (ta, tb);
- } else if (a == b) {
- res = tarval_mode_null [get_irn_modecode (n)];
}
break;
case iro_Minus:
- if (ta && mode_is_float(get_irn_mode(a)))
+ if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
res = tarval_neg (ta);
break;
case iro_Mul:
- if (ta && tb) /* tarval_mul tests for equivalent modes itself */ {
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
res = tarval_mul (ta, tb);
} else {
/* a*0 = 0 or 0*b = 0:
calls computed_value recursive and returns the 0 with proper
mode. */
tarval *v;
- if ( (tarval_classify ((v = computed_value (a))) == 0)
- || (tarval_classify ((v = computed_value (b))) == 0)) {
+ if ( ( ((v = computed_value (a)) != tarval_bad)
+ && (v == get_mode_null(get_tarval_mode(v))) )
+ || ( ((v = computed_value (b)) != tarval_bad)
+ && (v == get_mode_null(get_tarval_mode(v))) )) {
res = v;
}
}
break;
case iro_Quot:
/* This was missing in original implementation. Why? */
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
- if (tarval_classify(tb) == 0) {res = NULL; break;}
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
res = tarval_quo(ta, tb);
}
break;
case iro_Div:
/* This was missing in original implementation. Why? */
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
- if (tarval_classify(tb) == 0) {res = NULL; break;}
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
res = tarval_div(ta, tb);
}
break;
case iro_Mod:
/* This was missing in original implementation. Why? */
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
- if (tarval_classify(tb) == 0) {res = NULL; break;}
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
res = tarval_mod(ta, tb);
}
break;
/* for iro_DivMod see iro_Proj */
case iro_Abs:
- if (ta)
+ if (ta != tarval_bad)
res = tarval_abs (ta);
break;
case iro_And:
- if (ta && tb) {
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
res = tarval_and (ta, tb);
} else {
tarval *v;
- if ( (tarval_classify ((v = computed_value (a))) == 0)
- || (tarval_classify ((v = computed_value (b))) == 0)) {
+ if ( ( ((v = computed_value (a)) != tarval_bad)
+ && (v == get_mode_null(get_tarval_mode(v))) )
+ || ( ((v = computed_value (b)) != tarval_bad)
+ && (v == get_mode_null(get_tarval_mode(v))) )) {
res = v;
}
}
break;
case iro_Or:
- if (ta && tb) {
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
res = tarval_or (ta, tb);
} else {
tarval *v;
}
}
break;
- case iro_Eor: if (ta && tb) { res = tarval_eor (ta, tb); } break;
- case iro_Not: if (ta) { res = tarval_neg (ta); } break;
- case iro_Shl: if (ta && tb) { res = tarval_shl (ta, tb); } break;
- /* tarval_shr is faulty !! */
- case iro_Shr: if (ta && tb) { res = tarval_shr (ta, tb); } break;
- case iro_Shrs:if (ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
- case iro_Rot: if (ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
- case iro_Conv:if (ta) { res = tarval_convert_to (ta, get_irn_mode (n)); }
+ case iro_Eor:
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
+ res = tarval_eor (ta, tb);
+ }
+ break;
+ case iro_Not:
+ if ((ta != tarval_bad)) {
+ res = tarval_neg (ta);
+ }
+ break;
+ case iro_Shl:
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
+ res = tarval_shl (ta, tb);
+ }
+ break;
+ case iro_Shr:
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
+ res = tarval_shr (ta, tb);
+ }
+ break;
+ case iro_Shrs:
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
+ res = tarval_shrs (ta, tb);
+ }
+ break;
+ case iro_Rot:
+ if ((ta != tarval_bad) && (tb != tarval_bad)) {
+ /*res = tarval_rot (ta, tb)*/;
+ }
+ break;
+ case iro_Conv:
+ if (ta != tarval_bad) {
+ res = tarval_convert_to (ta, get_irn_mode (n));
+ }
break;
case iro_Proj: /* iro_Cmp */
{
only 1 is used.
There are several case where we can evaluate a Cmp node:
1. The nodes compared are both the same. If we compare for
- equal, this will return true, else it will return false.
- This step relies on cse.
+ equal, greater equal, ... this will return true, else it
+ will return false. This step relies on cse.
2. The predecessors of Cmp are target values. We can evaluate
the Cmp.
3. The predecessors are Allocs or void* constants. Allocs never
if (aa == ab) { /* 1.: */
/* This is a tric with the bits used for encoding the Cmp
Proj numbers, the following statement is not the same:
- res = tarval_from_long (mode_b, (get_Proj_proj(n) == Eq)): */
- res = tarval_from_long (mode_b, (get_Proj_proj(n) & irpn_Eq));
+ res = new_tarval_from_long ((get_Proj_proj(n) == Eq), mode_b) */
+ res = new_tarval_from_long ((get_Proj_proj(n) & Eq), mode_b);
} else {
tarval *taa = computed_value (aa);
tarval *tab = computed_value (ab);
- if (taa && tab) { /* 2.: */
+ if ((taa != tarval_bad) && (tab != tarval_bad)) { /* 2.: */
/* strange checks... */
- ir_pncmp flags = tarval_comp (taa, tab);
- if (flags != irpn_False) {
- res = tarval_from_long (mode_b, get_Proj_proj(n) & flags);
+ pnc_number flags = tarval_cmp (taa, tab);
+ if (flags != False) {
+ res = new_tarval_from_long (get_Proj_proj(n) & flags, mode_b);
}
} else { /* check for 3.: */
ir_node *aaa = skip_nop(skip_Proj(aa));
ir_node *aba = skip_nop(skip_Proj(ab));
if ( ( (/* aa is ProjP and aaa is Alloc */
(get_irn_op(aa) == op_Proj)
- && (get_irn_mode(aa) == mode_p)
+ && (get_irn_mode(aa) == mode_P)
&& (get_irn_op(aaa) == op_Alloc))
&& ( (/* ab is constant void */
(get_irn_op(ab) == op_Const)
- && (get_irn_mode(ab) == mode_p)
- && (get_Const_tarval(ab) == tarval_p_void))
+ && (get_irn_mode(ab) == mode_P)
+ && (get_Const_tarval(ab) == get_mode_null(mode_P)))
|| (/* ab is other Alloc */
(get_irn_op(ab) == op_Proj)
- && (get_irn_mode(ab) == mode_p)
+ && (get_irn_mode(ab) == mode_P)
&& (get_irn_op(aba) == op_Alloc)
&& (aaa != aba))))
|| (/* aa is void and aba is Alloc */
(get_irn_op(aa) == op_Const)
- && (get_irn_mode(aa) == mode_p)
- && (get_Const_tarval(aa) == tarval_p_void)
+ && (get_irn_mode(aa) == mode_P)
+ && (get_Const_tarval(aa) == get_mode_null(mode_P))
&& (get_irn_op(ab) == op_Proj)
- && (get_irn_mode(ab) == mode_p)
+ && (get_irn_mode(ab) == mode_P)
&& (get_irn_op(aba) == op_Alloc)))
/* 3.: */
- res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
+ res = new_tarval_from_long (get_Proj_proj(n) & Ne, mode_b);
}
}
} else if (get_irn_op(a) == op_DivMod) {
ta = value_of(get_DivMod_left(a));
tb = value_of(get_DivMod_right(a));
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
- if (tarval_classify(tb) == 0) {res = NULL; break;}
+ if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ if (tb == get_mode_null(get_tarval_mode(tb))) break; /* div by zero: return tarval_bad */
if (get_Proj_proj(n)== 0) /* Div */
res = tarval_div(ta, tb);
else /* Mod */
} /* compute node */
-
+#if 0
/* returns 1 if the a and b are pointers to different locations. */
-bool
+static bool
different_identity (ir_node *a, ir_node *b)
{
- assert (get_irn_mode (a) == mode_p
- && get_irn_mode (b) == mode_p);
+ assert (get_irn_mode (a) == mode_P
+ && get_irn_mode (b) == mode_P);
if (get_irn_op (a) == op_Proj && get_irn_op(b) == op_Proj) {
ir_node *a1 = get_Proj_pred (a);
}
return 0;
}
-
+#endif
/* equivalent_node returns a node equivalent to N. It skips all nodes that
perform no actual computation, as, e.g., the Id nodes. It does not create
Remaining Phi nodes are just Ids. */
if ((get_Block_n_cfgpreds(n) == 1) &&
(get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp) &&
- (get_opt_control_flow())) {
+ (get_opt_control_flow_straightening())) {
n = get_nodes_Block(get_Block_cfgpred(n, 0)); DBG_OPT_STG;
+
} else if ((get_Block_n_cfgpreds(n) == 2) &&
- (get_opt_control_flow())) {
+ (get_opt_control_flow_weak_simplification())) {
/* Test whether Cond jumps twice to this block
@@@ we could do this also with two loops finding two preds from several ones. */
a = get_Block_cfgpred(n, 0);
if ((get_irn_op(a) == op_Proj) &&
(get_irn_op(b) == op_Proj) &&
(get_Proj_pred(a) == get_Proj_pred(b)) &&
- (get_irn_op(get_Proj_pred(a)) == op_Cond)) {
+ (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
+ (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
/* Also a single entry Block following a single exit Block. Phis have
twice the same operand and will be optimized away. */
n = get_nodes_Block(a); DBG_OPT_IFSIM;
case iro_Minus: /* --x == x */ /* ??? Is this possible or can --x raise an
out of bounds exception if min =! max? */
if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
- n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2
+ n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
}
break;
case iro_Mul:
/* Mul is commutative and has again an other neutral element. */
if (tarval_classify (computed_value (a)) == 1) {
- n = b; DBG_OPT_ALGSIM1
+ n = b; DBG_OPT_ALGSIM1;
} else if (tarval_classify (computed_value (b)) == 1) {
- n = a; DBG_OPT_ALGSIM1
+ n = a; DBG_OPT_ALGSIM1;
}
break;
case iro_Div:
set_Tuple_pred(n, 2, a);
}
break;
- /* GL: Why are they skipped? DivMod allocates new nodes --> it's
- treated in transform node.
- case iro_Mod, Quot, DivMod
- */
+ /*
+ case iro_Mod, Quot, DivMod
+ DivMod allocates new nodes --> it's treated in transform node.
+ What about Quot, DivMod?
+ */
case iro_And:
if (a == b) {
n = a; /* And has it's own neutral element */
ir_node *first_val = NULL; /* to shutup gcc */
ir_node *scnd_val = NULL; /* to shutup gcc */
+ if (!get_opt_normalize()) return n;
+
n_preds = get_Phi_n_preds(n);
block = get_nodes_Block(n);
+  /* @@@ disabled for now, but it really should always hold:
+     assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
if ((is_Bad(block)) || /* Control dead */
(block == current_ir_graph->start_block)) /* There should be no Phi nodes */
return new_Bad(); /* in the Start Block. */
}
} else if (get_irn_mode(n) == mode_X &&
is_Bad(get_nodes_Block(n))) {
- /* Remove dead control flow. */
+ /* Remove dead control flow -- early gigo. */
n = new_Bad();
}
}
switch (get_irn_opcode(n)) {
case iro_Div: {
ta = computed_value(n);
- if (ta) {
+ if (ta != tarval_bad) {
/* Turn Div into a tuple (mem, bad, value) */
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
set_Tuple_pred(n, 0, mem);
set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tv_mode(ta), ta));
+ set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
}
} break;
case iro_Mod: {
ta = computed_value(n);
- if (ta) {
+ if (ta != tarval_bad) {
/* Turn Div into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
turn_into_tuple(n, 3);
set_Tuple_pred(n, 0, mem);
set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tv_mode(ta), ta));
+ set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
}
} break;
case iro_DivMod: {
break;
if (a == b) {
- a = new_Const (mode, tarval_from_long (mode, 1));
- b = new_Const (mode, tarval_from_long (mode, 0));
+ a = new_Const (mode, get_mode_one(mode));
+ b = new_Const (mode, get_mode_null(mode));
evaluated = 1;
} else {
ta = value_of(a);
tb = value_of(b);
- if (tb) {
- if (tarval_classify(tb) == 1) {
- b = new_Const (mode, tarval_from_long (mode, 0));
+ if (tb != tarval_bad) {
+ if (tb == get_mode_one(get_tarval_mode(tb))) {
+ b = new_Const (mode, get_mode_null(mode));
evaluated = 1;
- } else if (ta) {
+ } else if (ta != tarval_bad) {
tarval *resa, *resb;
resa = tarval_div (ta, tb);
- if (!resa) break; /* Causes exception!!! Model by replacing through
- Jmp for X result!? */
+ if (resa == tarval_bad) break; /* Causes exception!!! Model by replacing through
+ Jmp for X result!? */
resb = tarval_mod (ta, tb);
- if (!resb) break; /* Causes exception! */
+ if (resb == tarval_bad) break; /* Causes exception! */
a = new_Const (mode, resa);
b = new_Const (mode, resb);
evaluated = 1;
}
- } else if (tarval_classify (ta) == 0) {
+ } else if (ta == get_mode_null(get_tarval_mode(ta))) {
b = a;
evaluated = 1;
}
a = get_Cond_selector(n);
ta = value_of(a);
- if (ta &&
+ if ((ta != tarval_bad) &&
(get_irn_mode(a) == mode_b) &&
(get_opt_unreachable_code())) {
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
turn_into_tuple(n, 2);
- if (tv_val_b(ta) == 1) /* GL: I hope this returns 1 if true */ {
+ if (ta == tarval_b_true) {
set_Tuple_pred(n, 0, new_Bad());
set_Tuple_pred(n, 1, jmp);
} else {
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));
- } else if (ta &&
- (get_irn_mode(a) == mode_I) &&
+ } else if ((ta != tarval_bad) &&
+ (get_irn_mode(a) == mode_Iu) &&
(get_Cond_kind(n) == dense) &&
(get_opt_unreachable_code())) {
/* I don't want to allow Tuples smaller than the biggest Proj.
else
set_Proj_proj(n, 0);
} else if ((get_irn_op(a) == op_Cond)
- && (get_irn_mode(get_Cond_selector(a)) == mode_I)
+ && (get_irn_mode(get_Cond_selector(a)) == mode_Iu)
&& value_of(a)
&& (get_Cond_kind(a) == dense)
&& (get_opt_unreachable_code())) {
/* The Cond is a Switch on a Constant */
- if (get_Proj_proj(n) == tv_val_CHIL(value_of(a))) {
+ if (get_Proj_proj(n) == tarval_to_long(value_of(a))) {
/* The always taken branch, reuse the existing Jmp. */
if (!get_irn_link(a)) /* well, if it exists ;-> */
set_irn_link(a, new_r_Jmp(current_ir_graph, get_nodes_Block(n)));
(get_irn_mode(a) != get_irn_mode(b))) return 1;
/* compare if a's in and b's in are equal */
- /* GL: we optimize only nodes with in arrays of fixed sizes.
- if (get_irn_arity (a) != -2) {
- ins = get_irn_arity (a);
- if (ins != get_irn_arity (b)) return 1;
- ain = get_irn_in (a);
- bin = get_irn_in (b);
- }
- */
if (get_irn_arity (a) != get_irn_arity(b))
return 1;
return get_irn_const_attr (a) != get_irn_const_attr (b);
case iro_Proj:
return get_irn_proj_attr (a) != get_irn_proj_attr (b);
+ case iro_Filter:
+ return get_Filter_proj(a) != get_Filter_proj(b);
case iro_Alloc:
return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
|| (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
|| (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)
|| (get_irn_sel_attr(a).ent->owner != get_irn_sel_attr(b).ent->owner)
|| (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
- || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type)
- || (get_irn_sel_attr(a).ltyp != get_irn_sel_attr(b).ltyp);
+ || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type);
case iro_Phi:
return get_irn_phi_attr (a) != get_irn_phi_attr (b);
default: ;
/* Return the canonical node computing the same value as n.
Looks up the node in a hash table. */
-static inline ir_node *
+static INLINE ir_node *
identify (pset *value_table, ir_node *n)
{
ir_node *o = NULL;
/* During construction we set the pinned flag in the graph right when the
optimizatin is performed. The flag turning on procedure global cse could
be changed between two allocations. This way we are safe. */
-static inline ir_node *
+static INLINE ir_node *
identify_cons (pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify(value_table, n);
/* garbage in, garbage out. If a node has a dead input, i.e., the
Bad node is input to the node, return the Bad node. */
-static inline ir_node *
+static INLINE ir_node *
gigo (ir_node *node)
{
int i;
It can only be called if it is guaranteed that no other nodes
reference this one, i.e., right after construction of a node. */
ir_node *
-optimize (ir_node *n)
+optimize_node (ir_node *n)
{
tarval *tv;
ir_node *old_n = n;
if (get_irn_op(n) != op_Const) {
/* try to evaluate */
tv = computed_value (n);
- if ((get_irn_mode(n) != mode_T) && (tv != NULL)) {
+ if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was succesful -- replace the node. */
obstack_free (current_ir_graph->obst, n);
- return new_Const (get_tv_mode (tv), tv);
+ return new_Const (get_tarval_mode (tv), tv);
}
}
}
if (get_irn_op(n) != op_Const) {
/* try to evaluate */
tv = computed_value (n);
- if ((get_irn_mode(n) != mode_T) && (tv != NULL)) {
+ if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was succesful -- replace the node. */
- n = new_Const (get_tv_mode (tv), tv);
+ n = new_Const (get_tarval_mode (tv), tv);
__dbg_info_merge_pair(n, old_n, dbg_const_eval);
return n;
}