# include "config.h"
#endif
-#ifdef HAVE_STRING_H
#include <string.h>
-#endif
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irarch.h"
#include "hashptr.h"
#include "archop.h"
-#include "opt_polymorphy.h"
#include "opt_confirms.h"
+#include "opt_polymorphy.h"
#include "irtools.h"
#include "xmalloc.h"
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
- set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Div_res, a);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
}
return n;
} /* equivalent_node_Div */
/* Div is not commutative. */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
- /* Turn Quot into a tuple (mem, bad, a) */
+ /* Turn Quot into a tuple (mem, jmp, bad, a) */
ir_node *mem = get_Quot_mem(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Quot_max);
- set_Tuple_pred(n, pn_Quot_M, mem);
- set_Tuple_pred(n, pn_Quot_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Quot_res, a);
+ set_Tuple_pred(n, pn_Quot_M, mem);
+ set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Quot_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Quot_res, a);
}
return n;
} /* equivalent_node_Quot */
* Optimize a / 1 = a.
*/
static ir_node *equivalent_node_DivMod(ir_node *n) {
- ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
/* Div is not commutative. */
if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
- /* Turn DivMod into a tuple (mem, bad, a, 0) */
+ /* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */
+ ir_node *a = get_DivMod_left(n);
- ir_node *mem = get_Div_mem(n);
+ /* n is a DivMod node: use the matching accessor, not the Div one */
+ ir_node *mem = get_DivMod_mem(n);
- ir_mode *mode = get_irn_mode(b);
+ ir_node *blk = get_irn_n(n, -1);
+ ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
- set_Tuple_pred(n, pn_DivMod_M, mem);
- set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_DivMod_res_div, a);
- set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_res_mod, new_Const(mode, get_mode_null(mode)));
}
return n;
} /* equivalent_node_DivMod */
* Optimize Proj(Tuple) and gigo() for ProjX in Bad block,
* ProjX(Load) and ProjX(Store).
*/
-static ir_node *equivalent_node_Proj(ir_node *n) {
- ir_node *oldn = n;
- ir_node *a = get_Proj_pred(n);
+static ir_node *equivalent_node_Proj(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *a = get_Proj_pred(proj);
- if ( get_irn_op(a) == op_Tuple) {
+ if (get_irn_op(a) == op_Tuple) {
/* Remove the Tuple/Proj combination. */
- if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) {
- n = get_Tuple_pred(a, get_Proj_proj(n));
- DBG_OPT_TUPLE(oldn, a, n);
+ /* valid Proj numbers are 0 .. n_preds-1, so the bound check must be strict */
+ if (get_Proj_proj(proj) < get_Tuple_n_preds(a)) {
+ proj = get_Tuple_pred(a, get_Proj_proj(proj));
+ DBG_OPT_TUPLE(oldn, a, proj);
} else {
- assert(0); /* This should not happen! */
- n = new_Bad();
+ /* This should not happen! */
+ assert(! "found a Proj with higher number than Tuple predecessors");
+ proj = new_Bad();
}
- } else if (get_irn_mode(n) == mode_X) {
- if (is_Block_dead(get_nodes_block(skip_Proj(n)))) {
+ } else if (get_irn_mode(proj) == mode_X) {
+ if (is_Block_dead(get_nodes_block(skip_Proj(proj)))) {
/* Remove dead control flow -- early gigo(). */
- n = new_Bad();
+ proj = new_Bad();
} else if (get_opt_ldst_only_null_ptr_exceptions()) {
ir_op *op = get_irn_op(a);
- if (op == op_Load || op == op_Store) {
+ if (op == op_Load) {
+ /* get the Load address */
+ ir_node *addr = get_Load_ptr(a);
+ ir_node *blk = get_irn_n(a, -1);
+ ir_node *confirm;
+
+ if (value_not_null(addr, &confirm)) {
+ if (confirm == NULL) {
+ /* this node may float if it did not depend on a Confirm */
+ set_irn_pinned(a, op_pin_state_floats);
+ }
+ if (get_Proj_proj(proj) == pn_Load_X_except) {
+ DBG_OPT_EXC_REM(proj);
+ return new_Bad();
+ } else
+ return new_r_Jmp(current_ir_graph, blk);
+ }
+ } else if (op == op_Store) {
/* get the load/store address */
- ir_node *addr = get_irn_n(a, 1);
+ ir_node *addr = get_Store_ptr(a);
+ ir_node *blk = get_irn_n(a, -1);
ir_node *confirm;
if (value_not_null(addr, &confirm)) {
/* this node may float if it did not depend on a Confirm */
set_irn_pinned(a, op_pin_state_floats);
}
- DBG_OPT_EXC_REM(n);
- return new_Bad();
+ if (get_Proj_proj(proj) == pn_Store_X_except) {
+ DBG_OPT_EXC_REM(proj);
+ return new_Bad();
+ } else
+ return new_r_Jmp(current_ir_graph, blk);
}
}
}
}
- return n;
+ return proj;
} /* equivalent_node_Proj */
/**
ir_node *b = get_CopyB_src(n);
if (a == b) {
- /* Turn CopyB into a tuple (mem, bad, bad) */
+ /* Turn CopyB into a tuple (mem, jmp, bad, bad) */
ir_node *mem = get_CopyB_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_CopyB_max);
- set_Tuple_pred(n, pn_CopyB_M, mem);
- set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
+ set_Tuple_pred(n, pn_CopyB_M, mem);
+ set_Tuple_pred(n, pn_CopyB_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
}
return n;
} /* equivalent_node_CopyB */
/* By definition lower < upper, so if idx == lower -->
lower <= idx && idx < upper */
if (idx == lower) {
- /* Turn Bound into a tuple (mem, bad, idx) */
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
ret_tuple = 1;
} else {
ir_node *pred = skip_Proj(idx);
}
}
if (ret_tuple) {
- /* Turn Bound into a tuple (mem, bad, idx) */
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
ir_node *mem = get_Bound_mem(n);
+ ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Bound_max);
- set_Tuple_pred(n, pn_Bound_M, mem);
- set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_Bound_res, idx);
+ set_Tuple_pred(n, pn_Bound_M, mem);
+ set_Tuple_pred(n, pn_Bound_X_regular, new_r_Jmp(current_ir_graph, blk)); /* no exception */
+ set_Tuple_pred(n, pn_Bound_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Bound_res, idx);
}
return n;
} /* equivalent_node_Bound */
value = arch_dep_replace_div_by_const(n);
if (value != n) {
- /* Turn Div into a tuple (mem, bad, value) */
+ /* Turn Div into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Div_mem(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
- set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_except, new_Bad());
- set_Tuple_pred(n, pn_Div_res, value);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, value);
}
return n;
} /* transform_node_Div */
value = arch_dep_replace_mod_by_const(n);
if (value != n) {
- /* Turn Mod into a tuple (mem, bad, value) */
+ /* Turn Mod into a tuple (mem, jmp, bad, value) */
ir_node *mem = get_Mod_mem(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Mod_max);
- set_Tuple_pred(n, pn_Mod_M, mem);
- set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
- set_Tuple_pred(n, pn_Mod_res, value);
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
} /* transform_node_Mod */
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_DivMod_max);
- set_Tuple_pred(n, pn_DivMod_M, mem);
- set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
set_Tuple_pred(n, pn_DivMod_res_mod, b);
}
* Removes the exceptions and routes the memory to the NoMem node.
*/
static ir_node *transform_node_Proj_Div(ir_node *proj) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *b = get_Div_right(n);
- ir_node *confirm;
+ ir_node *div = get_Proj_pred(proj);
+ ir_node *b = get_Div_right(div);
+ ir_node *confirm, *res, *new_mem;
long proj_nr;
if (value_not_zero(b, &confirm)) {
/* div(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_Div_X_except) {
+ switch (proj_nr) {
+ case pn_Div_X_regular:
+ return new_r_Jmp(current_ir_graph, get_irn_n(div, -1));
+
+ case pn_Div_X_except:
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
return new_Bad();
- } else if (proj_nr == pn_Div_M) {
- ir_node *res = get_Div_mem(n);
- ir_node *new_mem = get_irg_no_mem(current_ir_graph);
+
+ case pn_Div_M:
+ res = get_Div_mem(div);
+ new_mem = get_irg_no_mem(current_ir_graph);
if (confirm) {
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(n, op_pin_state_floats);
+ set_irn_pinned(div, op_pin_state_floats);
/* this is a Div without exception, we can remove the memory edge */
- set_Div_mem(n, new_mem);
+ set_Div_mem(div, new_mem);
return res;
}
}
* Removes the exceptions and routes the memory to the NoMem node.
*/
static ir_node *transform_node_Proj_Mod(ir_node *proj) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *b = get_Mod_right(n);
- ir_node *confirm;
+ ir_node *mod = get_Proj_pred(proj);
+ ir_node *b = get_Mod_right(mod);
+ ir_node *confirm, *res, *new_mem;
long proj_nr;
if (value_not_zero(b, &confirm)) {
/* mod(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_Mod_X_except) {
+ switch (proj_nr) {
+
+ case pn_Mod_X_regular:
+ return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1));
+
+ case pn_Mod_X_except:
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
return new_Bad();
- } else if (proj_nr == pn_Mod_M) {
- ir_node *res = get_Mod_mem(n);
- ir_node *new_mem = get_irg_no_mem(current_ir_graph);
+
+ case pn_Mod_M:
+ res = get_Mod_mem(mod);
+ new_mem = get_irg_no_mem(current_ir_graph);
if (confirm) {
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(n, op_pin_state_floats);
+ set_irn_pinned(mod, op_pin_state_floats);
/* this is a Mod without exception, we can remove the memory edge */
- set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
- return res;
- } else if (proj_nr == pn_Mod_res && get_Mod_left(n) == b) {
- /* a % a = 0 if a != 0 */
- ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(mode, get_mode_null(mode));
-
- DBG_OPT_CSTEVAL(n, res);
+ set_Mod_mem(mod, get_irg_no_mem(current_ir_graph));
return res;
+ case pn_Mod_res:
+ if (get_Mod_left(mod) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(mod, res);
+ return res;
+ }
}
}
return proj;
* Removes the exceptions and routes the memory to the NoMem node.
*/
static ir_node *transform_node_Proj_DivMod(ir_node *proj) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *b = get_DivMod_right(n);
- ir_node *confirm;
+ ir_node *divmod = get_Proj_pred(proj);
+ ir_node *b = get_DivMod_right(divmod);
+ ir_node *confirm, *res, *new_mem;
long proj_nr;
if (value_not_zero(b, &confirm)) {
/* DivMod(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_DivMod_X_except) {
+ switch (proj_nr) {
+
+ case pn_DivMod_X_regular:
+ return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
+
+ case pn_DivMod_X_except:
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
return new_Bad();
- } else if (proj_nr == pn_DivMod_M) {
- ir_node *res = get_DivMod_mem(n);
- ir_node *new_mem = get_irg_no_mem(current_ir_graph);
+
+ case pn_DivMod_M:
+ res = get_DivMod_mem(divmod);
+ new_mem = get_irg_no_mem(current_ir_graph);
if (confirm) {
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(n, op_pin_state_floats);
+ set_irn_pinned(divmod, op_pin_state_floats);
/* this is a DivMod without exception, we can remove the memory edge */
- set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
+ set_DivMod_mem(divmod, get_irg_no_mem(current_ir_graph));
return res;
- } else if (proj_nr == pn_DivMod_res_mod && get_DivMod_left(n) == b) {
- /* a % a = 0 if a != 0 */
- ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(mode, get_mode_null(mode));
- DBG_OPT_CSTEVAL(n, res);
- return res;
+ case pn_DivMod_res_mod:
+ if (get_DivMod_left(divmod) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(divmod, res);
+ return res;
+ }
}
}
return proj;
* in keep alive list. We do not generate a new End node.
*/
static ir_node *transform_node_End(ir_node *n) {
- int i, n_keepalives = get_End_n_keepalives(n);
+ int i, j, n_keepalives = get_End_n_keepalives(n);
+ ir_node **in;
- for (i = 0; i < n_keepalives; ++i) {
+ /* collect the surviving keep-alive edges into a compacted array */
+ NEW_ARR_A(ir_node *, in, n_keepalives);
+
+ for (i = j = 0; i < n_keepalives; ++i) {
ir_node *ka = get_End_keepalive(n, i);
if (is_Block(ka)) {
- if (is_Block_dead(ka)) {
- set_End_keepalive(n, i, new_Bad());
+ if (! is_Block_dead(ka)) {
+ /* keep only live blocks, dead ones are simply dropped */
+ in[j++] = ka;
}
- } else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka)))
- set_End_keepalive(n, i, new_Bad());
+ continue;
+ } else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
+ /* pinned node inside a dead block: drop the keep-alive edge */
+ continue;
+ }
+ /* FIXME: beabi needs to keep a Proj(M) */
+ if (is_Phi(ka) || is_irn_keep(ka) || is_Proj(ka))
+ in[j++] = ka;
}
+ /* rebuild the keep-alive list only if something was dropped */
+ if (j != n_keepalives)
+ set_End_keepalives(n, j, in);
return n;
} /* transform_node_End */
/** Compares the attributes of two Phi nodes. */
static int node_cmp_attr_Phi(ir_node *a, ir_node *b) {
- return get_irn_phi_attr (a) != get_irn_phi_attr (b);
+ /* returns 0 if the attributes are equal, non-zero otherwise */
+ /* we can only enter this function if both nodes have the same number of inputs,
+ hence it is enough to check if one of them is a Phi0 */
+ if (is_Phi0(a)) {
+ /* check the Phi0 attribute */
+ return get_irn_phi0_attr(a) != get_irn_phi0_attr(b);
+ }
+ /* a regular Phi carries no extra attributes: equal arity implies equal attributes */
+ return 0;
} /* node_cmp_attr_Phi */
/** Compares the attributes of two Conv nodes. */
return (get_Confirm_cmp(a) != get_Confirm_cmp(b));
} /* node_cmp_attr_Confirm */
+/** Compares the attributes of two ASM nodes. */
+/** Compares the attributes of two ASM nodes.
+ * Returns 0 if the attributes are considered equal, non-zero otherwise. */
+static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
+ int i, n;
+ const ir_asm_constraint *ca;
+ const ir_asm_constraint *cb;
+ ident **cla, **clb;
+
+ /* note: no semicolon after the condition -- a stray one here would make
+ the return unconditional and defeat CSE of ASM nodes entirely */
+ if (get_ASM_text(a) != get_ASM_text(b))
+ return 1;
+
+ /* Should we really check the constraints here? Should be better, but is strange. */
+ n = get_ASM_n_input_constraints(a);
+ if (n != get_ASM_n_input_constraints(b))
+ return 1; /* differing counts mean different attributes, not equal */
+
+ ca = get_ASM_input_constraints(a);
+ cb = get_ASM_input_constraints(b);
+ for (i = 0; i < n; ++i) {
+ if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+ return 1;
+ }
+
+ n = get_ASM_n_output_constraints(a);
+ if (n != get_ASM_n_output_constraints(b))
+ return 1;
+
+ ca = get_ASM_output_constraints(a);
+ cb = get_ASM_output_constraints(b);
+ for (i = 0; i < n; ++i) {
+ if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+ return 1;
+ }
+
+ n = get_ASM_n_clobbers(a);
+ if (n != get_ASM_n_clobbers(b))
+ return 1;
+
+ cla = get_ASM_clobbers(a);
+ clb = get_ASM_clobbers(b);
+ for (i = 0; i < n; ++i) {
+ if (cla[i] != clb[i])
+ return 1;
+ }
+ return 0;
+} /* node_cmp_attr_ASM */
+
/**
* Set the default node attribute compare operation for an ir_op_ops.
*
CASE(Load);
CASE(Store);
CASE(Confirm);
+ CASE(ASM);
default:
/* leave NULL */;
}
if (is_op_commutative(get_irn_op(n))) {
ir_node *l = get_binop_left(n);
ir_node *r = get_binop_right(n);
-
- /* for commutative operators perform a OP b == b OP a */
- if (l > r) {
+ int l_idx = get_irn_idx(l);
+ int r_idx = get_irn_idx(r);
+
+ /* For commutative operators perform a OP b == b OP a but keep
+ constants on the RIGHT side. This helps greatly in some optimizations.
+ Moreover we use the idx number to make the form deterministic. */
+ if (is_irn_constlike(l))
+ l_idx = -l_idx;
+ if (is_irn_constlike(r))
+ r_idx = -r_idx;
+ if (l_idx < r_idx) {
set_binop_left(n, r);
set_binop_right(n, l);
}
}
/* lookup or insert in hash table with given hash key. */
- o = pset_insert (value_table, n, ir_node_hash (n));
+ o = pset_insert(value_table, n, ir_node_hash(n));
if (o != n) {
DBG_OPT_CSE(n, o);