-void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn)
-{
- *div = *mod = NULL;
-
- /* If the architecture dependent optimizations were not initialized
- or this optimization was not enabled. */
- if (params == NULL ||
- ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
- return;
-
- if (get_irn_opcode(irn) == iro_DivMod) {
- ir_node *c = get_DivMod_right(irn);
- ir_node *block, *left;
- ir_mode *mode;
- tarval *tv, *ntv;
- dbg_info *dbg;
- int n, bits;
- int k, n_flag;
-
- if (get_irn_op(c) != op_Const)
- return;
-
- left = get_DivMod_left(irn);
- mode = get_irn_mode(left);
- block = get_irn_n(irn, -1);
- dbg = get_irn_dbg_info(irn);
- tv = get_Const_tarval(c);
-
- bits = get_mode_size_bits(mode);
- n = (bits + 7) / 8;
-
- k = -1;
- if (mode_is_signed(mode)) {
- /* for signed divisions, the algorithm works for a / -2^k by negating the result */
- ntv = tarval_neg(tv);
- n_flag = 1;
- k = tv_ld2(ntv, n);
- }
-
- if (k < 0) {
- n_flag = 0;
- k = tv_ld2(tv, n);
- }
-
- if (k >= 0) { /* division by 2^k or -2^k */
- if (mode_is_signed(mode)) {
- ir_node *k_node, *c_k;
- ir_node *curr = left;
-
- if (k != 1) {
- k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
- curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
- }
-
- k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
- curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
-
- curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
-
- c_k = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
-
- *div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode);
-
- if (n_flag) { /* negate the div result */
- ir_node *k_node;
-
- k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
- *div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode);
- }
-
- k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
- curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
-
- *mod = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
- }
- else { /* unsigned case */
- ir_node *k_node;
-
- k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
- *div = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
-
- k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
- *mod = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
- }
- }
- else {
- /* other constant */
- if (allow_Mulh(mode)) {
- ir_node *t;
-
- *div = replace_div_by_mulh(irn, tv);
-
- t = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode);
-
- /* t = arch_dep_mul_to_shift(t); */
-
- *mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode);
- }
- }
- }
-
- if (*div)
- hook_arch_dep_replace_division_by_const(irn);
+/**
+ * Replace a DivMod by a constant with Shifts and Add/Subs (division by
+ * (+/-)2^k) or a Mulh-based sequence (other constants).
+ *
+ * @param div  out: the replacement node for the Div result, NULL if none
+ * @param mod  out: the replacement node for the Mod result, NULL if none
+ * @param irn  the DivMod node to examine
+ */
+void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn) {
+  *div = *mod = NULL;
+
+  /* If the architecture dependent optimizations were not initialized
+     or this optimization was not enabled. */
+  if (params == NULL ||
+      ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
+    return;
+
+  if (is_DivMod(irn)) {
+    ir_node *c = get_DivMod_right(irn);
+    ir_node *block, *left;
+    ir_mode *mode;
+    tarval *tv, *ntv;
+    dbg_info *dbg;
+    int n, bits;
+    int k, n_flag;
+
+    /* only a constant divisor can be strength-reduced */
+    if (! is_Const(c))
+      return;
+
+    tv = get_Const_tarval(c);
+
+    /* check for division by zero */
+    if (tarval_is_null(tv))
+      return;
+
+    left  = get_DivMod_left(irn);
+    mode  = get_irn_mode(left);
+    block = get_irn_n(irn, -1);
+    dbg   = get_irn_dbg_info(irn);
+
+    bits = get_mode_size_bits(mode);
+    n    = (bits + 7) / 8;
+
+    k = -1;
+    if (mode_is_signed(mode)) {
+      /* for signed divisions, the algorithm works for a / -2^k by negating the result */
+      ntv = tarval_neg(tv);
+      n_flag = 1;
+      k = tv_ld2(ntv, n);
+    }
+
+    if (k < 0) {
+      n_flag = 0;
+      k = tv_ld2(tv, n);
+    }
+
+    if (k >= 0) { /* division by 2^k or -2^k */
+      if (mode_is_signed(mode)) {
+        ir_node *k_node, *c_k;
+        ir_node *curr = left;
+
+        /* Shrs by k - 1 is the identity for k == 1, so skip it then. */
+        if (k != 1) {
+          k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
+          curr   = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
+        }
+
+        /* extract the sign as a 0/1 rounding correction for negative dividends */
+        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
+        curr   = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
+
+        curr   = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
+
+        c_k    = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
+
+        *div   = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode);
+
+        if (n_flag) { /* negate the div result */
+          ir_node *k_node;
+
+          k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
+          *div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode);
+        }
+
+        /* mask off the low k bits of the corrected dividend.  The -(2^k) mask
+           is computed in unsigned arithmetic: (-1) << k would be undefined
+           behavior in C (left shift of a negative value). */
+        k_node = new_r_Const_long(current_ir_graph, block, mode, (long)(~0ul << k));
+        curr   = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
+
+        *mod   = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
+      } else { /* unsigned case */
+        ir_node *k_node;
+
+        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
+        *div   = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
+
+        /* low-k-bits mask 2^k - 1, computed in unsigned long so k >= 31
+           does not overflow the (signed int) literal 1 */
+        k_node = new_r_Const_long(current_ir_graph, block, mode, (long)((1ul << k) - 1));
+        *mod   = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
+      }
+    } else {
+      /* other constant: d = a / c via high multiply, then m = a - d * c */
+      if (allow_Mulh(mode)) {
+        ir_node *t;
+
+        *div = replace_div_by_mulh(irn, tv);
+
+        t = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode);
+
+        /* t = arch_dep_mul_to_shift(t); */
+
+        *mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode);
+      }
+    }
+  }
+
+  /* *div and *mod are replaced together, so one hook call suffices */
+  if (*div)
+    hook_arch_dep_replace_division_by_const(irn);