+/**
+ * Transform Mul(a,-1) into Minus(a).
+ * Fold multiplications with Minus operands, e.g. (-a) * (-b) into a * b.
+ * Do constant evaluation of Phi nodes.
+ * Do architecture-dependent optimizations on Mul nodes.
+ */
+static ir_node *transform_node_Mul(ir_node *n) {
+ ir_node *c, *oldn = n;
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *a = get_Mul_left(n);
+ ir_node *b = get_Mul_right(n);
+
+ if (is_Bad(a) || is_Bad(b))
+ return n;
+
+ if (mode != get_irn_mode(a))
+ return transform_node_Mul2n(n, mode);
+
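+ /* Constant-fold Mul(Phi, Const), Mul(Const, Phi) and Mul(Phi, Phi) where
+ * the Phi arguments are all constants; on success the macro returns the
+ * folded node early. */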
+ HANDLE_BINOP_PHI(tarval_mul, a, b, c, mode);
+
+ if (mode_is_signed(mode)) {
+ ir_node *r = NULL;
+
+ if (value_of(a) == get_mode_minus_one(mode))
+ r = b;
+ else if (value_of(b) == get_mode_minus_one(mode))
+ r = a;
+ if (r) {
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), r, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ }
+ if (is_Minus(a)) {
+ if (is_Const(b)) { /* (-a) * const -> a * -const */
+ ir_node *cnst = const_negate(b);
+ if (cnst != NULL) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), cnst, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ } else if (is_Minus(b)) { /* (-a) * (-b) -> a * b */
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), get_Minus_op(b), mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_MINUS);
+ return n;
+ } else if (is_Sub(b)) { /* (-a) * (b - c) -> a * (c - b) */
+ ir_node *sub_l = get_Sub_left(b);
+ ir_node *sub_r = get_Sub_right(b);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_node *new_b = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, irg, block, get_Minus_op(a), new_b, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
+ return n;
+ }
+ } else if (is_Minus(b)) {
+ if (is_Sub(a)) { /* (a - b) * (-c) -> (b - a) * c */
+ ir_node *sub_l = get_Sub_left(a);
+ ir_node *sub_r = get_Sub_right(a);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_node *new_a = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, irg, block, new_a, get_Minus_op(b), mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
+ return n;
+ }
+ }
+ if (get_mode_arithmetic(mode) == irma_ieee754) {
+ if (is_Const(a)) {
+ tarval *tv = get_Const_tarval(a);
+ /* tv == 2.0 (exponent 1, zero mantissa, non-negative): 2.0 * b -> b + b */
+ if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv) && !tarval_is_negative(tv)) {
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), b, b, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+ return n;
+ }
+ }
+ else if (is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+ /* tv == 2.0 (exponent 1, zero mantissa, non-negative): a * 2.0 -> a + a */
+ if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv) && !tarval_is_negative(tv)) {
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, a, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+ return n;
+ }
+ }
+ }
+ return arch_dep_replace_mul_with_shifts(n);
+} /* transform_node_Mul */
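+
+/*
+ * Illustration only, compiled out: a minimal sketch of the algebraic
+ * identities behind transform_node_Mul(), checked on plain C values.
+ * Assumes two's-complement signed ints and IEEE-754 doubles; the function
+ * name is hypothetical and nothing here is used by the pass.
+ */
+#if 0
+#include <assert.h>
+
+static void mul_rewrite_examples(void) {
+ int a = 7, b = 3, c = 5;
+ double x = 1.5;
+
+ assert(a * -1 == -a); /* Mul(a, -1) -> Minus(a) */
+ assert(-a * 4 == a * -4); /* (-a) * const -> a * -const */
+ assert(-a * -b == a * b); /* (-a) * (-b) -> a * b */
+ assert(-a * (b - c) == a * (c - b)); /* (-a) * (b - c) -> a * (c - b) */
+ assert((a - b) * -c == (b - a) * c); /* (a - b) * (-c) -> (b - a) * c */
+ assert(2.0 * x == x + x); /* 2.0 * x -> x + x (ieee754) */
+}
+#endif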
+
+/**
+ * Transform a Div node.
+ */
+static ir_node *transform_node_Div(ir_node *n) {
+ ir_mode *mode = get_Div_resmode(n);
+ ir_node *a = get_Div_left(n);
+ ir_node *b = get_Div_right(n);
+ ir_node *value;
+ tarval *tv;
+
+ if (is_Const(b) && is_const_Phi(a)) {
+ /* check for Div(Phi, Const) */
+ value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
+ if (value) {
+ DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+ goto make_tuple;
+ }
+ }
+ else if (is_Const(a) && is_const_Phi(b)) {
+ /* check for Div(Const, Phi) */
+ value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
+ if (value) {
+ DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+ goto make_tuple;
+ }
+ }
+ else if (is_const_Phi(a) && is_const_Phi(b)) {
+ /* check for Div(Phi, Phi) */
+ value = apply_binop_on_2_phis(a, b, tarval_div, mode);
+ if (value) {
+ DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+ goto make_tuple;
+ }
+ }
+
+ value = n;
+ tv = value_of(n);
+ if (tv != tarval_bad) {
+ value = new_Const(get_tarval_mode(tv), tv);
+
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ } else {
+ ir_node *dummy;
+
+ if (a == b && value_not_zero(a, &dummy)) {
+ /* BEWARE: we can optimize a/a to 1 only if this cannot cause an exception */
+ value = new_Const(mode, get_mode_one(mode));
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ } else {
+ if (mode_is_signed(mode) && is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+
+ if (tv == get_mode_minus_one(mode)) {
+ /* a / -1 = -a */
+ value = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ }
+ }
+ /* Try architecture dependent optimization */
+ value = arch_dep_replace_div_by_const(n);
+ }
+ }
+
+ if (value != n) {
+ ir_node *mem, *blk;
+
+make_tuple:
+ /* Turn Div into a tuple (mem, jmp, bad, value) */
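+ /* The replacement result no longer depends on a trapping Div, so the
+ * exception branch becomes dead (Bad); Proj users of the old Div now
+ * resolve to these tuple entries. */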
+ mem = get_Div_mem(n);
+ blk = get_irn_n(n, -1);
+
+ /* skip a potential Pin */
+ if (is_Pin(mem))
+ mem = get_Pin_op(mem);
+ turn_into_tuple(n, pn_Div_max);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, value);
+ }
+ return n;
+} /* transform_node_Div */
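+
+/*
+ * Illustration only, compiled out: a minimal sketch of the Div rewrites
+ * above on plain C ints. a/a -> 1 is only legal for a divisor proven
+ * non-zero, which the pass establishes via value_not_zero(). The function
+ * name is hypothetical.
+ */
+#if 0
+#include <assert.h>
+
+static void div_rewrite_examples(void) {
+ int a = 42;
+
+ assert(a / a == 1); /* Div(a, a) -> 1, divisor known non-zero */
+ assert(a / -1 == -a); /* Div(a, -1) -> Minus(a), signed modes */
+ assert(12 / 4 == 3); /* Div(Const, Const) folds via value_of() */
+}
+#endif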
+
+/**
+ * Transform a Mod node.
+ */
+static ir_node *transform_node_Mod(ir_node *n) {
+ ir_mode *mode = get_Mod_resmode(n);
+ ir_node *a = get_Mod_left(n);
+ ir_node *b = get_Mod_right(n);
+ ir_node *value;
+ tarval *tv;
+
+ if (is_Const(b) && is_const_Phi(a)) {
+ /* check for Mod(Phi, Const) */
+ value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+ if (value) {
+ DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+ goto make_tuple;
+ }
+ }
+ else if (is_Const(a) && is_const_Phi(b)) {
+ /* check for Mod(Const, Phi) */
+ value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+ if (value) {
+ DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+ goto make_tuple;
+ }
+ }
+ else if (is_const_Phi(a) && is_const_Phi(b)) {
+ /* check for Mod(Phi, Phi) */
+ value = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+ if (value) {
+ DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+ goto make_tuple;
+ }
+ }
+
+ value = n;
+ tv = value_of(n);
+ if (tv != tarval_bad) {
+ value = new_Const(get_tarval_mode(tv), tv);
+
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ } else {
+ ir_node *dummy;
+
+ if (a == b && value_not_zero(a, &dummy)) {
+ /* BEWARE: we can optimize a%a to 0 only if this cannot cause an exception */
+ value = new_Const(mode, get_mode_null(mode));
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ } else {
+ if (mode_is_signed(mode) && is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+
+ if (tv == get_mode_minus_one(mode)) {
+ /* a % -1 = 0 */
+ value = new_Const(mode, get_mode_null(mode));
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ }
+ }
+ /* Try architecture dependent optimization */
+ value = arch_dep_replace_mod_by_const(n);
+ }
+ }
+
+ if (value != n) {
+ ir_node *mem, *blk;
+
+make_tuple:
+ /* Turn Mod into a tuple (mem, jmp, bad, value) */
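+ /* The replacement result no longer depends on a trapping Mod, so the
+ * exception branch becomes dead (Bad); Proj users of the old Mod now
+ * resolve to these tuple entries. */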
+ mem = get_Mod_mem(n);
+ blk = get_irn_n(n, -1);
+
+ /* skip a potential Pin */
+ if (is_Pin(mem))
+ mem = get_Pin_op(mem);
+ turn_into_tuple(n, pn_Mod_max);
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, value);
+ }
+ return n;
+} /* transform_node_Mod */
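+
+/*
+ * Illustration only, compiled out: a minimal sketch of the Mod rewrites
+ * above on plain C ints (C99 truncating division). The function name is
+ * hypothetical.
+ */
+#if 0
+#include <assert.h>
+
+static void mod_rewrite_examples(void) {
+ int a = 42;
+
+ assert(a % a == 0); /* Mod(a, a) -> 0, divisor known non-zero */
+ assert(a % -1 == 0); /* Mod(a, -1) -> 0, signed modes */
+}
+#endif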