/**
 * @author Sebastian Hack
 * @brief  Machine dependent firm optimizations.
 */
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>

#include "irgraph_t.h"
#include "dbginfo_t.h"
#include "iropt_dbg.h"
#include "irreflect.h"
/* when we need verifying */
#ifdef NDEBUG
# define IRN_VRFY_IRG(res, irg)
#else
# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
#endif
/** The parameters obtained from the factory in arch_dep_init(...). */
static const arch_dep_params_t *params = NULL;

/** The bit mask specifying which optimizations to apply. */
static arch_dep_opts_t opts;

/* we need this new pseudo op */
static ir_op *op_Mulh = NULL;
/**
 * construct a Mulh: Mulh(a,b) = (a * b) >> w, where w is the width in bits of a, b
 */
static ir_node *new_rd_Mulh(dbg_info *db, ir_graph *irg, ir_node *block,
                            ir_node *op1, ir_node *op2, ir_mode *mode)
{
  ir_node *in[2] = { op1, op2 };
  ir_node *res = new_ir_node(db, irg, block, op_Mulh, mode, 2, in);
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
  return res;
}

ir_op *get_op_Mulh(void) { return op_Mulh; }
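/* Illustration (not from the original source): for a 32-bit mode, Mulh
 * computes the high word of the double-width product. In plain C:
 *
 *   int32_t mulhs32(int32_t a, int32_t b) {
 *     return (int32_t)(((int64_t)a * (int64_t)b) >> 32);    // signed high word
 *   }
 *   uint32_t mulhu32(uint32_t a, uint32_t b) {
 *     return (uint32_t)(((uint64_t)a * (uint64_t)b) >> 32); // unsigned high word
 *   }
 */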
void arch_dep_init(arch_dep_params_factory_t factory)
{
  opts = arch_dep_none;

  if (factory != NULL)
    params = factory();

  if (! op_Mulh) {
    rflct_sig_t *sig;
    int mulh_opc = get_next_ir_opcode();

    /* create the Mulh operation */
    op_Mulh = new_ir_op(mulh_opc, "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0, NULL);
    sig = rflct_signature_allocate(1, 3);
    rflct_signature_set_arg(sig, 0, 0, "Res", RFLCT_MC(Int), 0, 0);
    rflct_signature_set_arg(sig, 1, 0, "Block", RFLCT_MC(BB), 0, 0);
    rflct_signature_set_arg(sig, 1, 1, "Op 0", RFLCT_MC(Int), 0, 0);
    rflct_signature_set_arg(sig, 1, 2, "Op 1", RFLCT_MC(Int), 0, 0);

    rflct_new_opcode(mulh_opc, "Mulh", false);
    rflct_opcode_add_signature(mulh_opc, sig);
  }
}
void arch_dep_set_opts(arch_dep_opts_t the_opts) {
  opts = the_opts;
}
/* Check whether a mode allows a Mulh instruction. */
static int allow_Mulh(ir_mode *mode)
{
  if (get_mode_size_bits(mode) > params->max_bits_for_mulh)
    return 0;
  return (mode_is_signed(mode) && params->allow_mulhs) || (!mode_is_signed(mode) && params->allow_mulhu);
}
ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn)
{
  ir_node *res = irn;
  ir_mode *mode = get_irn_mode(irn);

  /* If the architecture dependent optimizations were not initialized
     or this optimization is not enabled, leave the Mul untouched. */
  if (params == NULL || (opts & arch_dep_mul_to_shift) == 0)
    return irn;

  if (get_irn_op(irn) == op_Mul && mode_is_int(mode)) {
    ir_node *block   = get_irn_n(irn, -1);
    ir_node *left    = get_binop_left(irn);
    ir_node *right   = get_binop_right(irn);
    tarval *tv       = NULL;
    ir_node *operand = NULL;

    /* Check if one operand is a constant. */
    if (get_irn_opcode(left) == iro_Const) {
      tv = get_Const_tarval(left);
      operand = right;
    } else if (get_irn_opcode(right) == iro_Const) {
      tv = get_Const_tarval(right);
      operand = left;
    }
    if (tv != NULL) {
      int maximum_shifts       = params->maximum_shifts;
      int also_use_subs        = params->also_use_subs;
      int highest_shift_amount = params->highest_shift_amount;

      char *bitstr = get_tarval_bitpattern(tv);
      char *p;
      int i, last = 0, counter = 0, curr_bit = 0, compr_len = 0;
      int singleton, end_of_group = 0;
      char compr[MAX_BITSTR];

      int shift_with_sub[MAX_BITSTR]    = { 0 };
      int shift_without_sub[MAX_BITSTR] = { 0 };
      int shift_with_sub_pos    = 0;
      int shift_without_sub_pos = 0;
      long val = get_tarval_long(tv);
      fprintf(stderr, "Found mul with %ld(%lx) = ", val, val);
      for (p = bitstr; *p != '\0'; p++)
        fprintf(stderr, "%c", *p);
      fprintf(stderr, "\n");
      /* Run-length encode the bit pattern into compr[]. */
      for (p = bitstr; *p != '\0'; p++) {
        int bit = *p != '0';
        if (bit != last) {
          /* The last bit was 1 and we are now at 0, OR
           * the last was 0 and we are now at 1: close the current run. */
          compr[compr_len++] = counter;
          counter = 1;
          last = bit;
        } else
          counter++;
      }
      compr[compr_len++] = counter;
      {
        const char *prefix = "";
        for (i = 0; i < compr_len; i++, prefix = ",")
          fprintf(stderr, "%s%d", prefix, compr[i]);
        fprintf(stderr, "\n");
      }
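      /* Sketch of the compression (illustrative; assumes the bit string is
       * scanned from the least significant bit): the multiplier 10 has the
       * pattern ...1010, so compr becomes { 1, 1, 1, 1 }: one 0, one 1,
       * one 0, one 1. compr[0] always holds the length of the leading run
       * of zeros (possibly 0), so the group scan below starts at i = 1. */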
      // Go over all recorded one groups.
      curr_bit = compr[0];  /* skip the leading zeros */

      for (i = 1; i < compr_len; i = end_of_group + 2) {
        int j, k, zeros_in_group = 0, ones_in_group;

        ones_in_group = compr[i];

        // Scan for singular 0s in a sequence.
        for (j = i + 1; j < compr_len && compr[j] == 1; j += 2) {
          zeros_in_group++;
          ones_in_group += (j + 1 < compr_len ? compr[j + 1] : 0);
        }
        end_of_group = j - 1;

        if (zeros_in_group >= ones_in_group - 1)
          end_of_group = i;

        fprintf(stderr, " i:%d, eg:%d\n", i, end_of_group);

        singleton = compr[i] == 1 && i == end_of_group;
        for (j = i; j <= end_of_group; j += 2) {
          int curr_ones = compr[j];
          int biased_curr_bit = curr_bit + 1;

          fprintf(stderr, " j:%d, ones:%d\n", j, curr_ones);

          // If this ones group is a singleton group (it has no
          // singular zeros inside), a single shift suffices.
          if (singleton)
            shift_with_sub[shift_with_sub_pos++] = biased_curr_bit;
          else if (j == i)
            shift_with_sub[shift_with_sub_pos++] = -biased_curr_bit;

          for (k = 0; k < curr_ones; k++)
            shift_without_sub[shift_without_sub_pos++] = biased_curr_bit + k;

          curr_bit += curr_ones;
          biased_curr_bit = curr_bit + 1;

          if (!singleton && j == end_of_group)
            shift_with_sub[shift_with_sub_pos++] = biased_curr_bit;
          else if (j != end_of_group)
            shift_with_sub[shift_with_sub_pos++] = -biased_curr_bit;

          curr_bit += compr[j + 1];
        }
      }
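      /* Worked example (illustrative): for the pattern 111 (multiplier 7)
       * the sub variant records { -1, +4 } in the biased encoding, i.e.
       *
       *   x * 7  ==  (x << 3) - (x << 0)
       *
       * while the add-only variant records three entries { 1, 2, 3 }:
       *
       *   x * 7  ==  (x << 0) + (x << 1) + (x << 2)
       */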
      int *shifts = shift_with_sub;
      int n = shift_with_sub_pos;
      int highest_shift_wide = 0;
      int highest_shift_seq = 0;

      /* If we may not use subs, or we can achieve the same with adds,
         prefer the representation without subs. */
      if (!also_use_subs || shift_with_sub_pos >= shift_without_sub_pos) {
        shifts = shift_without_sub;
        n = shift_without_sub_pos;
      }

      /* If the number of needed shifts exceeds the given maximum,
         keep the Mul and exit. */
      if (n > maximum_shifts) {
        fprintf(stderr, "Only allowed %d shifts, but %d are needed\n",
                maximum_shifts, n);
        return irn;
      }
      /* Compute the highest shift needed for both, the
         sequential and wide representations. */
      last = 0;
      for (i = 0; i < n; i++) {
        int curr = abs(shifts[i]);
        int curr_seq = curr - last;

        highest_shift_wide = curr > highest_shift_wide ? curr
          : highest_shift_wide;
        highest_shift_seq = curr_seq > highest_shift_seq ? curr_seq
          : highest_shift_seq;

        last = curr;
      }

      /* If the highest shift amount is greater than the given limit,
         keep the Mul and exit. */
      if (highest_shift_seq > highest_shift_amount) {
        fprintf(stderr, "Shift argument %d exceeds maximum %d\n",
                highest_shift_seq, highest_shift_amount);
        return irn;
      }
      /* If we have subs, we cannot do sequential. */
      if (1 /* also_use_subs */) {
        ir_node *curr = NULL;

        for (i = 0; i < n; i++) {
          int curr_shift = shifts[i];
          int sub = curr_shift < 0;
          int amount = abs(curr_shift) - 1;
          ir_node *aux = operand;

          assert(amount >= 0 && "What is a negative shift??");

          if (amount != 0) {
            ir_node *cnst = new_r_Const_long(current_ir_graph, block, mode_Iu, amount);
            aux = new_r_Shl(current_ir_graph, block, operand, cnst, mode);
          }
          if (curr == NULL)  /* first summand: negate it if it carries a sub */
            curr = sub ? new_r_Minus(current_ir_graph, block, aux, mode) : aux;
          else if (sub)
            curr = new_r_Sub(current_ir_graph, block, curr, aux, mode);
          else
            curr = new_r_Add(current_ir_graph, block, curr, aux, mode);
        }
        res = curr;
      }

      /* print the found shift sequence */
      {
        const char *prefix = "";
        for (i = 0; i < n; ++i) {
          fprintf(stderr, "%s%d", prefix, shifts[i]);
          prefix = ", ";
        }
        fprintf(stderr, "\n");
      }
    }
  }

  if (res != irn)
    hook_arch_dep_replace_mul_with_shifts(irn);

  return res;
}
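/* End-to-end example (illustrative): with the default parameters the Mul
 * in "x * 10" is rewritten to the graph
 *
 *   Add(Shl(x, 1), Shl(x, 3))      // (x << 1) + (x << 3) == x * 10
 *
 * i.e. two shifts and one add instead of one multiplication. */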
/** Calculate the ld2 (log2) of a tarval if the tarval is 2^n, else return -1. */
static int tv_ld2(tarval *tv, int bits)
{
  int i, j, k = 0, num;

  for (num = i = 0; i < bits; ++i) {
    unsigned char v = get_tarval_sub_bits(tv, i);
    for (j = 0; j < 8; ++j)
      if ((1 << j) & v) { ++num; k = 8 * i + j; }
  }
  return num == 1 ? k : -1;
}
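/* Examples (illustrative): a tarval holding 8 yields 3, since 8 == 2^3 has
 * exactly one bit set; a tarval holding 12 (binary 1100) yields -1, since
 * two bits are set. */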
/* for shorter lines */
#define ABS(a)    tarval_abs(a)
#define NEG(a)    tarval_neg(a)
#define NOT(a)    tarval_not(a)
#define SHL(a, b) tarval_shl(a, b)
#define SHR(a, b) tarval_shr(a, b)
#define ADD(a, b) tarval_add(a, b)
#define SUB(a, b) tarval_sub(a, b)
#define MUL(a, b) tarval_mul(a, b)
#define DIV(a, b) tarval_div(a, b)
#define MOD(a, b) tarval_mod(a, b)
#define CMP(a, b) tarval_cmp(a, b)
#define CNV(a, m) tarval_convert_to(a, m)
#define ONE(m)    get_mode_one(m)
#define ZERO(m)   get_mode_null(m)
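/* With these macros the Hacker's Delight formulas below transcribe almost
 * literally: "q1 = 2*q1" becomes q1 = ADD(q1, q1), "r1 >= anc" becomes
 * CMP(r1, anc) & pn_Cmp_Ge, and so on. */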
struct ms {
  tarval *M;    /**< magic number */
  int s;        /**< shift amount */
  int need_add; /**< an additional add is needed */
  int need_sub; /**< an additional sub is needed */
};
/**
 * Signed division by constant d: calculate the magic multiplier M and the shift amount s.
 *
 * see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler
 */
static struct ms magic(tarval *d)
{
  ir_mode *mode   = get_tarval_mode(d);
  ir_mode *u_mode = find_unsigned_mode(mode);
  int bits        = get_mode_size_bits(u_mode);
  int p;
  tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */
  pn_Cmp d_cmp, M_cmp;
  tarval *bits_minus_1, *two_bits_1;
  struct ms mag;

  tarval_int_overflow_mode_t rem = tarval_get_integer_overflow_mode();

  /* we need wrap-around arithmetic to work correctly */
  tarval_set_integer_overflow_mode(TV_OVERFLOW_WRAP);

  /* 2^(bits-1) as an unsigned tarval */
  bits_minus_1 = new_tarval_from_long(bits - 1, u_mode);
  two_bits_1   = SHL(get_mode_one(u_mode), bits_minus_1);

  ad  = CNV(ABS(d), u_mode);
  t   = ADD(two_bits_1, SHR(CNV(d, u_mode), bits_minus_1));
  anc = SUB(SUB(t, ONE(u_mode)), MOD(t, ad));   /* Absolute value of nc */
  p   = bits - 1;                               /* Init: p */
  q1  = DIV(two_bits_1, anc);                   /* Init: q1 = 2^p/|nc| */
  r1  = SUB(two_bits_1, MUL(q1, anc));          /* Init: r1 = rem(2^p, |nc|) */
  q2  = DIV(two_bits_1, ad);                    /* Init: q2 = 2^p/|d| */
  r2  = SUB(two_bits_1, MUL(q2, ad));           /* Init: r2 = rem(2^p, |d|) */

  do {
    ++p;
    q1 = ADD(q1, q1);                           /* Update q1 = 2^p/|nc| */
    r1 = ADD(r1, r1);                           /* Update r1 = rem(2^p, |nc|) */

    if (CMP(r1, anc) & pn_Cmp_Ge) {
      q1 = ADD(q1, ONE(u_mode));
      r1 = SUB(r1, anc);
    }

    q2 = ADD(q2, q2);                           /* Update q2 = 2^p/|d| */
    r2 = ADD(r2, r2);                           /* Update r2 = rem(2^p, |d|) */

    if (CMP(r2, ad) & pn_Cmp_Ge) {
      q2 = ADD(q2, ONE(u_mode));
      r2 = SUB(r2, ad);
    }

    delta = SUB(ad, r2);
  } while (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(u_mode)) & pn_Cmp_Eq));

  d_cmp = CMP(d, ZERO(mode));

  if (d_cmp & pn_Cmp_Ge)
    mag.M = ADD(CNV(q2, mode), ONE(mode));
  else
    mag.M = SUB(ZERO(mode), ADD(CNV(q2, mode), ONE(mode)));

  M_cmp = CMP(mag.M, ZERO(mode));

  mag.s = p - bits;

  /* need an add if d > 0 && M < 0 */
  mag.need_add = d_cmp & pn_Cmp_Gt && M_cmp & pn_Cmp_Lt;

  /* need a sub if d < 0 && M > 0 */
  mag.need_sub = d_cmp & pn_Cmp_Lt && M_cmp & pn_Cmp_Gt;

  tarval_set_integer_overflow_mode(rem);

  return mag;
}
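/* Example (from Hacker's Delight, illustrative): for d = 7 in a 32-bit
 * mode, magic() yields M = 0x92492493 (negative) and s = 2. Since d > 0
 * and M < 0, need_add is set, so n / 7 becomes
 *
 *   q = Mulh(n, M);
 *   q = q + n;           // the extra add
 *   q = q >>s 2;         // arithmetic shift by s
 *   q = q + (q >>u 31);  // add the sign bit to round toward zero
 */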
struct mu {
  tarval *M;    /**< magic add constant */
  int s;        /**< shift amount */
  int need_add; /**< add indicator */
};
/**
 * Unsigned division by constant d: calculate the magic multiplier M and the shift amount s.
 *
 * see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned)
 */
static struct mu magicu(tarval *d)
{
  ir_mode *mode = get_tarval_mode(d);
  int bits      = get_mode_size_bits(mode);
  int p;
  tarval *nc, *delta, *q1, *r1, *q2, *r2;
  tarval *bits_minus_1, *two_bits_1, *seven_ff;
  struct mu magu;

  tarval_int_overflow_mode_t rem = tarval_get_integer_overflow_mode();

  /* we need wrap-around arithmetic to work correctly */
  tarval_set_integer_overflow_mode(TV_OVERFLOW_WRAP);

  bits_minus_1 = new_tarval_from_long(bits - 1, mode);
  two_bits_1   = SHL(get_mode_one(mode), bits_minus_1);  /* 2^(bits-1) */
  seven_ff     = SUB(two_bits_1, ONE(mode));             /* 2^(bits-1) - 1 */

  magu.need_add = 0;                      /* initialize the add indicator */
  nc = SUB(NEG(ONE(mode)), MOD(NEG(d), d));
  p  = bits - 1;                          /* Init: p */
  q1 = DIV(two_bits_1, nc);               /* Init: q1 = 2^p/nc */
  r1 = SUB(two_bits_1, MUL(q1, nc));      /* Init: r1 = rem(2^p, nc) */
  q2 = DIV(seven_ff, d);                  /* Init: q2 = (2^p - 1)/d */
  r2 = SUB(seven_ff, MUL(q2, d));         /* Init: r2 = rem(2^p - 1, d) */

  do {
    ++p;

    if (CMP(r1, SUB(nc, r1)) & pn_Cmp_Ge) {
      q1 = ADD(ADD(q1, q1), ONE(mode));
      r1 = SUB(ADD(r1, r1), nc);
    }
    else {
      q1 = ADD(q1, q1);
      r1 = ADD(r1, r1);
    }

    if (CMP(ADD(r2, ONE(mode)), SUB(d, r2)) & pn_Cmp_Ge) {
      if (CMP(q2, seven_ff) & pn_Cmp_Ge)
        magu.need_add = 1;

      q2 = ADD(ADD(q2, q2), ONE(mode));
      r2 = SUB(ADD(ADD(r2, r2), ONE(mode)), d);
    }
    else {
      if (CMP(q2, two_bits_1) & pn_Cmp_Ge)
        magu.need_add = 1;

      q2 = ADD(q2, q2);
      r2 = ADD(ADD(r2, r2), ONE(mode));
    }

    delta = SUB(SUB(d, ONE(mode)), r2);
  } while (p < 2*bits &&
           (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(mode)) & pn_Cmp_Eq)));

  magu.M = ADD(q2, ONE(mode));            /* Magic number */
  magu.s = p - bits;                      /* and shift amount */

  tarval_set_integer_overflow_mode(rem);

  return magu;
}
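/* Example (from Hacker's Delight, illustrative): for d = 7 in a 32-bit
 * mode, magicu() yields M = 0x24924925, s = 3 and need_add = 1, so the
 * overflow-avoiding scheme below computes n / 7 as
 *
 *   q = Mulhu(n, M);
 *   t = (n - q) >>u 1;
 *   q = (t + q) >>u 2;   // shift by s - 1
 */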
/**
 * Build the Mulh replacement code for n / tv.
 *
 * Note that 'div' might be a Mod or DivMod operation as well.
 */
static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv)
{
  dbg_info *dbg  = get_irn_dbg_info(div);
  ir_node *n     = get_binop_left(div);
  ir_node *block = get_irn_n(div, -1);
  ir_mode *mode  = get_irn_mode(n);
  int bits       = get_mode_size_bits(mode);
  ir_node *q, *t, *c;

  /* Beware: do not transform bad code */
  if (is_Bad(n) || is_Bad(block))
    return NULL;

  if (mode_is_signed(mode)) {
    struct ms mag = magic(tv);

    /* generate the Mulh instruction */
    c = new_r_Const(current_ir_graph, block, mode, mag.M);
    q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);

    /* do we need an Add or Sub? */
    if (mag.need_add)
      q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
    else if (mag.need_sub)
      q = new_rd_Sub(dbg, current_ir_graph, block, q, n, mode);

    /* do we need the shift? */
    if (mag.s > 0) {
      c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s);
      q = new_rd_Shrs(dbg, current_ir_graph, block, q, c, mode);
    }

    /* correct the result by adding the sign bit */
    c = new_r_Const_long(current_ir_graph, block, mode_Iu, bits-1);
    t = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
    q = new_rd_Add(dbg, current_ir_graph, block, q, t, mode);
  }
  else {
    struct mu mag = magicu(tv);

    /* generate the Mulh instruction */
    c = new_r_Const(current_ir_graph, block, mode, mag.M);
    q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);

    if (mag.need_add) {
      if (mag.s > 0) {
        /* use the GM scheme */
        t = new_rd_Sub(dbg, current_ir_graph, block, n, q, mode);

        c = new_r_Const(current_ir_graph, block, mode_Iu, get_mode_one(mode_Iu));
        t = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);

        t = new_rd_Add(dbg, current_ir_graph, block, t, q, mode);

        c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s-1);
        q = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
      }
      else {
        /* use the default scheme */
        q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
      }
    }
    else if (mag.s > 0) { /* default scheme, shift needed */
      c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s);
      q = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
    }
  }
  return q;
}
ir_node *arch_dep_replace_div_by_const(ir_node *irn)
{
  ir_node *res = irn;

  /* If the architecture dependent optimizations were not initialized
     or this optimization is not enabled, leave the Div untouched. */
  if (params == NULL || (opts & arch_dep_div_by_const) == 0)
    return irn;

  if (get_irn_opcode(irn) == iro_Div) {
    ir_node *c = get_Div_right(irn);
    ir_node *block, *left;
    ir_mode *mode;
    tarval *tv, *ntv;
    dbg_info *dbg;
    int n, bits, k, n_flag = 0;

    if (get_irn_op(c) != op_Const)
      return irn;

    left  = get_Div_left(irn);
    mode  = get_irn_mode(left);
    block = get_irn_n(irn, -1);
    dbg   = get_irn_dbg_info(irn);
    tv    = get_Const_tarval(c);

    bits = get_mode_size_bits(mode);
    n    = (bits + 7) / 8;

    k = -1;
    if (mode_is_signed(mode)) {
      /* for signed divisions, the algorithm works for a / -2^k by negating the result */
      ntv = tarval_neg(tv);
      k = tv_ld2(ntv, n);
      if (k >= 0)
        n_flag = 1;
    }
    if (k < 0)
      k = tv_ld2(tv, n);

    if (k >= 0) { /* division by 2^k or -2^k */
      if (mode_is_signed(mode)) {
        ir_node *k_node;
        ir_node *curr = left;

        if (k != 1) {
          k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
          curr   = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
        }

        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
        curr   = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
        curr   = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);

        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
        res    = new_rd_Shrs(dbg, current_ir_graph, block, curr, k_node, mode);

        if (n_flag) { /* negate the result */
          k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
          res    = new_rd_Sub(dbg, current_ir_graph, block, k_node, res, mode);
        }
      }
      else { /* unsigned case */
        ir_node *k_node;

        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
        res    = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
      }
    }
    else if (allow_Mulh(mode)) { /* other constant */
      res = replace_div_by_mulh(irn, tv);
    }
  }

  if (res != irn)
    hook_arch_dep_replace_division_by_const(irn);

  return res;
}
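/* Worked example (illustrative): for a signed 32-bit division n / 4
 * (k = 2) the code above builds
 *
 *   t = n >>s 1;          // k - 1
 *   t = t >>u 30;         // bits - k: t == 3 for negative n, else 0
 *   q = (n + t) >>s 2;    // rounds toward zero: -7 / 4 == -1, not -2
 */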
ir_node *arch_dep_replace_mod_by_const(ir_node *irn)
{
  ir_node *res = irn;

  /* If the architecture dependent optimizations were not initialized
     or this optimization is not enabled, leave the Mod untouched. */
  if (params == NULL || (opts & arch_dep_mod_by_const) == 0)
    return irn;

  if (get_irn_opcode(irn) == iro_Mod) {
    ir_node *c = get_Mod_right(irn);
    ir_node *block, *left;
    ir_mode *mode;
    tarval *tv, *ntv;
    dbg_info *dbg;
    int n, bits, k;

    if (get_irn_op(c) != op_Const)
      return irn;

    left  = get_Mod_left(irn);
    mode  = get_irn_mode(left);
    block = get_irn_n(irn, -1);
    dbg   = get_irn_dbg_info(irn);
    tv    = get_Const_tarval(c);

    bits = get_mode_size_bits(mode);
    n    = (bits + 7) / 8;

    k = -1;
    if (mode_is_signed(mode)) {
      /* for signed modulo, a % -2^k behaves like a % 2^k, so try the negated divisor too */
      ntv = tarval_neg(tv);
      k = tv_ld2(ntv, n);
    }
    if (k < 0)
      k = tv_ld2(tv, n);

    if (k >= 0) {
      /* division by 2^k or -2^k:
       * we use "modulus" semantics here, so x % y == x % -y; hence there is
       * no difference between the cases 2^k and -2^k. */
      if (mode_is_signed(mode)) {
        ir_node *k_node;
        ir_node *curr = left;

        if (k != 1) {
          k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
          curr   = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
        }

        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
        curr   = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
        curr   = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);

        k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
        curr   = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);

        res    = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
      }
      else { /* unsigned case */
        ir_node *k_node;

        k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
        res    = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
      }
    }
    else if (allow_Mulh(mode)) { /* other constant */
      res = replace_div_by_mulh(irn, tv);
      res = new_rd_Mul(dbg, current_ir_graph, block, res, c, mode);

      /* res = arch_dep_mul_to_shift(res); */

      res = new_rd_Sub(dbg, current_ir_graph, block, left, res, mode);
    }
  }

  if (res != irn)
    hook_arch_dep_replace_division_by_const(irn);

  return res;
}
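/* Worked example (illustrative): for a signed 32-bit n % 4 (k = 2) the
 * adjusted value is masked instead of shifted:
 *
 *   t   = (n >>s 1) >>u 30;   // 3 for negative n, else 0
 *   c   = (n + t) & ~3;       // (-1) << k masks the low bits
 *   res = n - c;              // -7 % 4 == -3
 */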
void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn)
{
  *div = *mod = NULL;

  /* If the architecture dependent optimizations were not initialized
     or these optimizations are not enabled, do nothing. */
  if (params == NULL ||
      ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
    return;

  if (get_irn_opcode(irn) == iro_DivMod) {
    ir_node *c = get_DivMod_right(irn);
    ir_node *block, *left;
    ir_mode *mode;
    tarval *tv, *ntv;
    dbg_info *dbg;
    int n, bits, k, n_flag = 0;

    if (get_irn_op(c) != op_Const)
      return;

    left  = get_DivMod_left(irn);
    mode  = get_irn_mode(left);
    block = get_irn_n(irn, -1);
    dbg   = get_irn_dbg_info(irn);
    tv    = get_Const_tarval(c);

    bits = get_mode_size_bits(mode);
    n    = (bits + 7) / 8;

    k = -1;
    if (mode_is_signed(mode)) {
      /* for signed divisions, the algorithm works for a / -2^k by negating the result */
      ntv = tarval_neg(tv);
      k = tv_ld2(ntv, n);
      if (k >= 0)
        n_flag = 1;
    }
    if (k < 0)
      k = tv_ld2(tv, n);

    if (k >= 0) { /* division by 2^k or -2^k */
      if (mode_is_signed(mode)) {
        ir_node *k_node, *c_k;
        ir_node *curr = left;

        if (k != 1) {
          k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
          curr   = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
        }

        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
        curr   = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
        curr   = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);

        c_k  = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
        *div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode);

        if (n_flag) { /* negate the div result */
          k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
          *div   = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode);
        }

        k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
        curr   = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
        *mod   = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
      }
      else { /* unsigned case */
        ir_node *k_node;

        k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
        *div   = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);

        k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
        *mod   = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
      }
    }
    else if (allow_Mulh(mode)) { /* other constant */
      ir_node *t;

      *div = replace_div_by_mulh(irn, tv);
      t    = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode);

      /* t = arch_dep_mul_to_shift(t); */
      *mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode);
    }
  }

  if (*div)
    hook_arch_dep_replace_division_by_const(irn);
}
static const arch_dep_params_t default_params = {
  1,  /* also use subs */
  4,  /* maximum shifts */
  31, /* maximum shift amount */
  /* ... */
  32  /* Mulh allowed up to 32 bit */
};

const arch_dep_params_t *arch_dep_default_factory(void) {
  return &default_params;
}
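/* Typical use (illustrative, not from the original source): a backend can
 * install the defaults and enable all three replacements:
 *
 *   arch_dep_init(arch_dep_default_factory);
 *   arch_dep_set_opts(arch_dep_mul_to_shift | arch_dep_div_by_const | arch_dep_mod_by_const);
 */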