4 * @author Sebastian Hack
5 * @brief Machine dependent firm optimizations.
13 #include "irgraph_t.h"
20 #include "dbginfo_t.h"
21 #include "iropt_dbg.h"
/* when we need verifying */
/* NOTE(review): these appear to be the two arms of an #ifdef (release vs.
   debug build) whose conditional lines are not visible in this chunk: the
   first expands to nothing, the second runs the IR node verifier. */
# define IRN_VRFY_IRG(res, irg)
# define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
/** The parameters obtained from the factory in arch_dep_init(...). */
static const arch_dep_params_t *params = NULL;
/** The bit mask selecting which machine dependent optimizations to apply. */
static arch_dep_opts_t opts;
/* The Mulh pseudo op needed for div/mod strength reduction; NULL until created. */
static ir_op *op_Mulh = NULL;
 * construct a Mulh: Mulh(a,b) = (a * b) >> w, w is the width in bits of a, b
 * NOTE(review): the return-type line, local declarations (res, in[]) and the
 * return statement are not visible in this chunk.
new_rd_Mulh (dbg_info *db, ir_graph *irg, ir_node *block,
       ir_node *op1, ir_node *op2, ir_mode *mode)
  /* create the Mulh pseudo op on demand (presumably guarded by a NULL check
     on an elided line -- confirm against the full file) */
  op_Mulh = new_ir_op(get_next_ir_opcode(), "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0);
  res = new_ir_node(db, irg, block, op_Mulh, mode, 2, in);
  /* run local optimizations and verify the freshly built node */
  res = optimize_node(res);
  IRN_VRFY_IRG(res, irg);
70 ir_op *get_op_Mulh(void) { return op_Mulh; }
/**
 * Initialize the machine dependent optimizations using the given
 * parameter factory.
 */
void arch_dep_init(arch_dep_params_factory_t factory)
  /* the Mulh pseudo op is only needed when div/mod strength reduction
     is enabled */
  if (params && (opts & (arch_dep_div_by_const|arch_dep_mod_by_const))) {
    /* create the Mulh operation */
    op_Mulh = new_ir_op(get_next_ir_opcode(), "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0);
87 void arch_dep_set_opts(arch_dep_opts_t the_opts) {
/**
 * Replace a Mul by a constant with a cheaper sequence of Shl and Add/Sub
 * nodes, guided by the cost limits in 'params'.
 *
 * NOTE(review): many lines of this function (early returns, some loop
 * headers, closing braces, the DEB() debug guards) are not visible in
 * this chunk; comments below describe only what the visible code shows.
 */
ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn)
  ir_mode *mode = get_irn_mode(irn);
  /* If the architecture dependent optimizations were not initialized
     or this optimization was not enabled. */
  if(params == NULL || (opts & arch_dep_mul_to_shift) == 0)
  /* only integer Mul nodes are candidates */
  if(get_irn_opcode(irn) == iro_Mul && mode_is_int(mode)) {
    ir_node *block = get_nodes_block(irn);
    ir_node *left = get_binop_left(irn);
    ir_node *right = get_binop_right(irn);
    ir_node *operand = NULL;
    /* Look, if one operand is a constant. */
    if(get_irn_opcode(left) == iro_Const) {
      tv = get_Const_tarval(left);
    } else if(get_irn_opcode(right) == iro_Const) {
      tv = get_Const_tarval(right);
      /* architecture-specific cost limits from the factory parameters */
      int maximum_shifts = params->maximum_shifts;
      int also_use_subs = params->also_use_subs;
      int highest_shift_amount = params->highest_shift_amount;
      /* textual bit pattern of the constant */
      char *bitstr = get_tarval_bitpattern(tv);
      /* run-length compressed form of the bit pattern */
      char compr[MAX_BITSTR];
      /* recorded shift amounts for the two schemes; in the sub scheme a
         negative entry means "subtract this shifted operand" */
      int shift_with_sub[MAX_BITSTR] = { 0 };
      int shift_without_sub[MAX_BITSTR] = { 0 };
      int shift_with_sub_pos = 0;
      int shift_without_sub_pos = 0;
      /* debug dump of the constant and its bit pattern */
      long val = get_tarval_long(tv);
      fprintf(stderr, "Found mul with %ld(%lx) = ", val, val);
      for(p = bitstr; *p != '\0'; p++)
      /* run-length encode the bit string into compr[] */
      for(p = bitstr; *p != '\0'; p++) {
        /* The last was 1 we are now at 0 OR
         * The last was 0 and we are now at 1 */
        compr[compr_len++] = counter;
      /* flush the final run */
      compr[compr_len++] = counter;
        /* debug dump of the run-length encoding */
        const char *prefix = "";
        for(i = 0; i < compr_len; i++, prefix = ",")
          fprintf(stderr, "%s%d", prefix, compr[i]);
      // Go over all recorded one groups.
      for(i = 1; i < compr_len; i = end_of_group + 2) {
        int j, zeros_in_group, ones_in_group;
        ones_in_group = compr[i];
        // Scan for singular 0s in a sequence
        for(j = i + 1; j < compr_len && compr[j] == 1; j += 2) {
          ones_in_group += (j + 1 < compr_len ? compr[j + 1] : 0);
        end_of_group = j - 1;
        /* the sub trick only pays off with enough ones per embedded zero */
        if(zeros_in_group >= ones_in_group - 1)
        fprintf(stderr, " i:%d, eg:%d\n", i, end_of_group);
        /* singleton: a lone 1 bit, no sub trick possible */
        singleton = compr[i] == 1 && i == end_of_group;
        for(j = i; j <= end_of_group; j += 2) {
          int curr_ones = compr[j];
          /* shift amounts are stored biased by +1 so the sign can encode
             add vs. sub even for shift amount 0 */
          int biased_curr_bit = curr_bit + 1;
          fprintf(stderr, " j:%d, ones:%d\n", j, curr_ones);
          // If this ones group is a singleton group (it has no
          // singleton zeros inside
          shift_with_sub[shift_with_sub_pos++] = biased_curr_bit;
          shift_with_sub[shift_with_sub_pos++] = -biased_curr_bit;
          /* without subs, every single 1 bit needs its own shift+add */
          for(k = 0; k < curr_ones; k++)
            shift_without_sub[shift_without_sub_pos++] = biased_curr_bit + k;
          curr_bit += curr_ones;
          biased_curr_bit = curr_bit + 1;
          if(!singleton && j == end_of_group)
            shift_with_sub[shift_with_sub_pos++] = biased_curr_bit;
          else if(j != end_of_group)
            shift_with_sub[shift_with_sub_pos++] = -biased_curr_bit;
          /* skip the zero run following this ones run */
          curr_bit += compr[j + 1];
      /* choose the cheaper of the two recorded shift sequences */
      int *shifts = shift_with_sub;
      int n = shift_with_sub_pos;
      int highest_shift_wide = 0;
      int highest_shift_seq = 0;
      /* If we may not use subs, or we can achieve the same with adds,
      if(!also_use_subs || shift_with_sub_pos >= shift_without_sub_pos) {
        shifts = shift_without_sub;
        n = shift_without_sub_pos;
      /* If the number of needed shifts exceeds the given maximum,
         use the Mul and exit. */
      if(n > maximum_shifts) {
        fprintf(stderr, "Only allowed %d shifts, but %d are needed\n",
      /* Compute the highest shift needed for both, the
         sequential and wide representations. */
      for(i = 0; i < n; i++) {
        int curr = abs(shifts[i]);
        int curr_seq = curr - last;
        highest_shift_wide = curr > highest_shift_wide ? curr
          : highest_shift_wide;
        highest_shift_seq = curr_seq > highest_shift_seq ? curr_seq
      /* If the highest shift amount is greater than the given limit,
      if(highest_shift_seq > highest_shift_amount) {
        fprintf(stderr, "Shift argument %d exceeds maximum %d\n",
          highest_shift_seq, highest_shift_amount);
      /* If we have subs, we cannot do sequential. */
      if(1 /* also_use_subs */) {
        /* build the wide representation: each shifted operand is added
           to or subtracted from the running result 'curr' */
        ir_node *curr = NULL;
          int curr_shift = shifts[i];
          int sub = curr_shift < 0;          /* negative entry => subtract */
          int amount = abs(curr_shift) - 1;  /* undo the +1 bias */
          ir_node *aux = operand;
          assert(amount >= 0 && "What is a negative shift??");
          /* shift the operand when the amount is non-zero */
            tarval *shift_amount = new_tarval_from_long(amount, mode_Iu);
            ir_node *cnst = new_r_Const(current_ir_graph, block, mode_Iu, shift_amount);
            aux = new_r_Shl(current_ir_graph, block, operand, cnst, mode);
            curr = new_r_Sub(current_ir_graph, block, curr, aux, mode);
            curr = new_r_Add(current_ir_graph, block, curr, aux, mode);
        /* debug dump of the chosen shift sequence */
        const char *prefix = "";
        for(i = 0; i < n; i++) {
          fprintf(stderr, "%s%d", prefix, shifts[i]);
        fprintf(stderr, "\n");
    /* statistics hook: a Mul was replaced */
    stat_arch_dep_replace_mul_with_shifts(irn);
 * calculates the ld2 (binary logarithm) of a tarval if the tarval is 2^n, else returns -1.
static int tv_ld2(tarval *tv, int bits)
  /* scan the value byte by byte for set bits */
  for (num = i = 0; i < bits; ++i) {
    unsigned char v = get_tarval_sub_bits(tv, i);
    /* examine each bit of this byte */
    for (j = 0; j < 8; ++j)
/* for shorter lines */
/* NOTE(review): thin aliases for the tarval arithmetic API; each argument is
   used exactly once inside a single call, so the usual multiple-evaluation
   and parenthesization hazards of function-like macros do not apply. */
#define ABS(a) tarval_abs(a)
#define NEG(a) tarval_neg(a)
#define NOT(a) tarval_not(a)
#define SHL(a, b) tarval_shl(a, b)
#define SHR(a, b) tarval_shr(a, b)
#define ADD(a, b) tarval_add(a, b)
#define SUB(a, b) tarval_sub(a, b)
#define MUL(a, b) tarval_mul(a, b)
#define DIV(a, b) tarval_div(a, b)
#define MOD(a, b) tarval_mod(a, b)
#define CMP(a, b) tarval_cmp(a, b)
#define CNV(a, m) tarval_convert_to(a, m)
#define ONE(m) get_mode_one(m)
#define ZERO(m) get_mode_null(m)
  tarval *M;    /**< magic multiplier M (see Hacker's Delight 10-6) */
  int s;        /**< shift amount */
  int need_add; /**< an additional add is needed */
  int need_sub; /**< an additional sub is needed */
 * Signed division by constant d: calculate the Magic multiplier M and the shift amount s
 * see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler
static struct ms magic(tarval *d)
  ir_mode *mode = get_tarval_mode(d);
  ir_mode *u_mode = find_unsigned_mode(mode);
  int bits = get_mode_size_bits(u_mode);
  /* intermediates of the magic-number iteration; all unsigned */
  tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */
  pnc_number d_cmp, M_cmp;
  /* 2^(bits-1): the sign-bit value of the unsigned mode */
  tarval *bits_minus_1 = new_tarval_from_long(bits - 1, u_mode);
  tarval *two_bits_1 = SHL(get_mode_one(u_mode), bits_minus_1);
  ad = CNV(ABS(d), u_mode); /* ad = |d| as unsigned */
  t = ADD(two_bits_1, SHR(CNV(d, u_mode), bits_minus_1));
  anc = SUB(SUB(t, ONE(u_mode)), MOD(t, ad)); /* Absolute value of nc */
  p = bits - 1; /* Init: p */
  q1 = DIV(two_bits_1, anc); /* Init: q1 = 2^p/|nc| */
  r1 = SUB(two_bits_1, MUL(q1, anc)); /* Init: r1 = rem(2^p, |nc|) */
  q2 = DIV(two_bits_1, ad); /* Init: q2 = 2^p/|d| */
  r2 = SUB(two_bits_1, MUL(q2, ad)); /* Init: r2 = rem(2^p, |d|) */
  /* iterate: raise p until the magic multiplier is precise enough */
    q1 = ADD(q1, q1); /* Update q1 = 2^p/|nc| */
    r1 = ADD(r1, r1); /* Update r1 = rem(2^p, |nc|) */
    if (CMP(r1, anc) & Ge) {
      q1 = ADD(q1, ONE(u_mode));
    q2 = ADD(q2, q2); /* Update q2 = 2^p/|d| */
    r2 = ADD(r2, r2); /* Update r2 = rem(2^p, |d|) */
    if (CMP(r2, ad) & Ge) {
      q2 = ADD(q2, ONE(u_mode));
  } while (CMP(q1, delta) & Lt || (CMP(q1, delta) & Eq && CMP(r1, ZERO(u_mode)) & Eq));
  d_cmp = CMP(d, ZERO(mode));
  /* M = q2 + 1; negated for negative divisors */
    mag.M = ADD(CNV(q2, mode), ONE(mode));
    mag.M = SUB(ZERO(mode), ADD(CNV(q2, mode), ONE(mode)));
  M_cmp = CMP(mag.M, ZERO(mode));
  /* need an add if d > 0 && M < 0 */
  mag.need_add = d_cmp & Gt && M_cmp & Lt;
  /* need a sub if d < 0 && M > 0 */
  mag.need_sub = d_cmp & Lt && M_cmp & Gt;
  tarval *M;    /**< magic multiplier (see Hacker's Delight 10-10) */
  int s;        /**< shift amount */
  int need_add; /**< add indicator (overflow in the magic number) */
 * Unsigned division by constant d: calculate the Magic multiplier M and the shift amount s
 * see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned)
static struct mu magicu(tarval *d)
  ir_mode *mode = get_tarval_mode(d);
  int bits = get_mode_size_bits(mode);
  /* intermediates of the magic-number iteration */
  tarval *nc, *delta, *q1, *r1, *q2, *r2;
  /* frequently used constants: 2^(bits-1) and 2^(bits-1) - 1 (0x7f...ff) */
  tarval *bits_minus_1 = new_tarval_from_long(bits - 1, mode);
  tarval *two_bits_1 = SHL(get_mode_one(mode), bits_minus_1);
  tarval *seven_ff = SUB(two_bits_1, ONE(mode));
  magu.need_add = 0; /* initialize the add indicator */
  nc = SUB(NEG(ONE(mode)), MOD(NEG(d), d));
  p = bits - 1; /* Init: p */
  q1 = DIV(two_bits_1, nc); /* Init: q1 = 2^p/nc */
  r1 = SUB(two_bits_1, MUL(q1, nc)); /* Init: r1 = rem(2^p, nc) */
  q2 = DIV(seven_ff, d); /* Init: q2 = (2^p - 1)/d */
  r2 = SUB(seven_ff, MUL(q2, d)); /* Init: r2 = rem(2^p - 1, d) */
  /* iterate: raise p until the magic multiplier is precise enough */
    if (CMP(r1, SUB(nc, r1)) & Ge) {
      q1 = ADD(ADD(q1, q1), ONE(mode));
      r1 = SUB(ADD(r1, r1), nc);
    if (CMP(ADD(r2, ONE(mode)), SUB(d, r2)) & Ge) {
      /* q2 would overflow: remember that an extra add is needed */
      if (CMP(q2, seven_ff) & Ge)
      q2 = ADD(ADD(q2, q2), ONE(mode));
      r2 = SUB(ADD(ADD(r2, r2), ONE(mode)), d);
    if (CMP(q2, two_bits_1) & Ge)
      r2 = ADD(ADD(r2, r2), ONE(mode));
    delta = SUB(SUB(d, ONE(mode)), r2);
  } while (p < 2*bits &&
    (CMP(q1, delta) & Lt || (CMP(q1, delta) & Eq && CMP(r1, ZERO(mode)) & Eq)));
  magu.M = ADD(q2, ONE(mode)); /* Magic number */
  magu.s = p - bits; /* and shift amount */
 * build the Mulh replacement code for n / tv
 * Note that 'div' might be a mod or DivMod operation as well
static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv)
  dbg_info *dbg = get_irn_dbg_info(div);
  ir_node *n = get_binop_left(div);
  ir_node *block = get_nodes_block(div);
  ir_mode *mode = get_irn_mode(n);
  int bits = get_mode_size_bits(mode);
  if (mode_is_signed(mode)) {
    /* signed case: use the magic number from magic() */
    struct ms mag = magic(tv);
    /* generate the Mulh instruction */
    c = new_r_Const(current_ir_graph, block, mode, mag.M);
    q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);
    /* do we need an Add or Sub */
      q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
    else if (mag.need_sub)
      q = new_rd_Sub(dbg, current_ir_graph, block, q, n, mode);
    /* Do we need the shift */
      c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(mag.s, mode_Iu));
      q = new_rd_Shrs(dbg, current_ir_graph, block, q, c, mode);
    /* add the (shifted-down) sign bit of q -- the standard correction so
       the result rounds toward zero for negative dividends */
    c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits-1, mode_Iu));
    t = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
    q = new_rd_Add(dbg, current_ir_graph, block, q, t, mode);
    /* unsigned case: use the magic number from magicu() */
    struct mu mag = magicu(tv);
    /* generate the Mulh instruction */
    c = new_r_Const(current_ir_graph, block, mode, mag.M);
    q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);
      /* use the GM scheme */
      t = new_rd_Sub(dbg, current_ir_graph, block, n, q, mode);
      c = new_r_Const(current_ir_graph, block, mode_Iu, get_mode_one(mode_Iu));
      t = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
      t = new_rd_Add(dbg, current_ir_graph, block, t, q, mode);
      c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(mag.s-1, mode_Iu));
      q = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
      /* use the default scheme */
      q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
    else if (mag.s > 0) { /* default scheme, shift needed */
      c = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(mag.s, mode_Iu));
      q = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
/**
 * Replace a Div by a constant with cheaper operations:
 * shifts for power-of-two divisors, Mulh (magic numbers) otherwise.
 */
ir_node *arch_dep_replace_div_by_const(ir_node *irn)
  /* If the architecture dependent optimizations were not initialized
     or this optimization was not enabled. */
  if (params == NULL || (opts & arch_dep_div_by_const) == 0)
  if (get_irn_opcode(irn) == iro_Div) {
    ir_node *c = get_Div_right(irn);
    ir_node *block, *left;
    /* only a constant right operand can be strength-reduced */
    if (get_irn_op(c) != op_Const)
    left = get_Div_left(irn);
    mode = get_irn_mode(left);
    block = get_nodes_block(irn);
    dbg = get_irn_dbg_info(irn);
    tv = get_Const_tarval(c);
    bits = get_mode_size_bits(mode);
    /* test for a power-of-two divisor */
    if (mode_is_signed(mode)) {
      /* for signed divisions, the algorithm works for a / -2^k by negating the result */
      ntv = tarval_neg(tv);
    if (k >= 0) { /* division by 2^k or -2^k */
      if (mode_is_signed(mode)) {
        ir_node *curr = left;
        /* bias negative dividends (left + 2^k - 1) so the arithmetic
           shift below rounds toward zero (Hacker's Delight 10-1) */
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k - 1, mode_Iu));
        curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits - k, mode_Iu));
        curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
        curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu));
        res = new_rd_Shrs(dbg, current_ir_graph, block, curr, k_node, mode);
        if (n_flag) { /* negate the result */
          /* res = 0 - res */
          k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
          res = new_rd_Sub(dbg, current_ir_graph, block, k_node, res, mode);
      else { /* unsigned case */
        /* a plain logical shift suffices */
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu));
        res = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
    /* not a power of two: use the magic-number Mulh scheme if the target allows it */
    if ((mode_is_signed(mode) && params->allow_mulhs) ||
        (!mode_is_signed(mode) && params->allow_mulhu))
      res = replace_div_by_mulh(irn, tv);
    /* statistics hook: a Div was replaced */
    stat_arch_dep_replace_div_by_const(irn);
/**
 * Replace a Mod by a constant with cheaper operations:
 * masking for power-of-two divisors, Mulh (magic numbers) otherwise.
 */
ir_node *arch_dep_replace_mod_by_const(ir_node *irn)
  /* If the architecture dependent optimizations were not initialized
     or this optimization was not enabled. */
  if (params == NULL || (opts & arch_dep_mod_by_const) == 0)
  if (get_irn_opcode(irn) == iro_Mod) {
    ir_node *c = get_Mod_right(irn);
    ir_node *block, *left;
    /* only a constant right operand can be strength-reduced */
    if (get_irn_op(c) != op_Const)
    left = get_Mod_left(irn);
    mode = get_irn_mode(left);
    block = get_nodes_block(irn);
    dbg = get_irn_dbg_info(irn);
    tv = get_Const_tarval(c);
    bits = get_mode_size_bits(mode);
    /* test for a power-of-two divisor */
    if (mode_is_signed(mode)) {
      /* for signed divisions, the algorithm works for a / -2^k by negating the result */
      ntv = tarval_neg(tv);
    /* division by 2^k or -2^k:
     * we use "modulus" here, so x % y == x % -y; that is why there is no difference between the cases 2^k and -2^k
    if (mode_is_signed(mode)) {
      ir_node *curr = left;
      /* bias negative dividends, mask off the low k bits, then subtract */
      k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k - 1, mode_Iu));
      curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
      k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits - k, mode_Iu));
      curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
      curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
      /* NOTE(review): (-1) << k left-shifts a negative value, which is
         undefined behavior in ISO C; ~((1L << k) - 1) would be safe. */
      k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((-1) << k, mode));
      curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
      res = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
    else { /* unsigned case */
      /* simply mask off the low k bits */
      k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((1 << k) - 1, mode));
      res = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
    /* not a power of two: res = left - (left / tv) * tv, quotient via Mulh */
    if ((mode_is_signed(mode) && params->allow_mulhs) ||
        (!mode_is_signed(mode) && params->allow_mulhu)) {
      res = replace_div_by_mulh(irn, tv);
      res = new_rd_Mul(dbg, current_ir_graph, block, res, c, mode);
      /* res = arch_dep_mul_to_shift(res); */
      res = new_rd_Sub(dbg, current_ir_graph, block, left, res, mode);
    /* statistics hook: a Mod was replaced */
    stat_arch_dep_replace_mod_by_const(irn);
/**
 * Replace a DivMod by a constant, producing both the quotient (*div) and
 * the remainder (*mod) with cheaper operations.  Requires BOTH the
 * div-by-const and mod-by-const optimizations to be enabled.
 */
void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn)
  /* If the architecture dependent optimizations were not initialized
     or this optimization was not enabled. */
  if (params == NULL ||
      ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
  if (get_irn_opcode(irn) == iro_DivMod) {
    ir_node *c = get_DivMod_right(irn);
    ir_node *block, *left;
    /* only a constant right operand can be strength-reduced */
    if (get_irn_op(c) != op_Const)
    left = get_DivMod_left(irn);
    mode = get_irn_mode(left);
    block = get_nodes_block(irn);
    dbg = get_irn_dbg_info(irn);
    tv = get_Const_tarval(c);
    bits = get_mode_size_bits(mode);
    /* test for a power-of-two divisor */
    if (mode_is_signed(mode)) {
      /* for signed divisions, the algorithm works for a / -2^k by negating the result */
      ntv = tarval_neg(tv);
    if (k >= 0) { /* division by 2^k or -2^k */
      if (mode_is_signed(mode)) {
        ir_node *k_node, *c_k;
        ir_node *curr = left;
        /* bias negative dividends so the arithmetic shift rounds toward zero */
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k - 1, mode_Iu));
        curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(bits - k, mode_Iu));
        curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
        curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
        c_k = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu));
        *div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode);
        if (n_flag) { /* negate the div result */
          /* *div = 0 - *div */
          k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
          *div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode);
        /* NOTE(review): (-1) << k left-shifts a negative value, which is
           undefined behavior in ISO C; ~((1L << k) - 1) would be safe. */
        k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((-1) << k, mode));
        curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
        *mod = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
      else { /* unsigned case */
        /* quotient by logical shift, remainder by masking */
        k_node = new_r_Const(current_ir_graph, block, mode_Iu, new_tarval_from_long(k, mode_Iu));
        *div = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
        k_node = new_r_Const(current_ir_graph, block, mode, new_tarval_from_long((1 << k) - 1, mode));
        *mod = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
    /* not a power of two: compute div via Mulh, then mod = left - div*c */
    if ((mode_is_signed(mode) && params->allow_mulhs) ||
        (!mode_is_signed(mode) && params->allow_mulhu)) {
      *div = replace_div_by_mulh(irn, tv);
      t = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode);
      /* t = arch_dep_mul_to_shift(t); */
      *mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode);
    /* statistics hook: a DivMod was replaced */
    stat_arch_dep_replace_DivMod_by_const(irn);
/** Conservative default parameters; further fields of the initializer are
    not visible in this chunk. */
static const arch_dep_params_t default_params = {
  1, /* also use subs */
  4, /* maximum shifts */
  31 /* maximum shift amount */
/** Return the default parameters for the machine dependent optimizations. */
const arch_dep_params_t *arch_dep_default_factory(void) {
  return &default_params;