2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Machine dependent Firm optimizations.
24 * @author Sebastian Hack, Michael Beck
27 * Implements "Strength Reduction of Multiplications by Integer Constants" by Youfeng Wu.
28 * Implements Division and Modulo by Consts from "Hackers Delight",
36 #include "irgraph_t.h"
43 #include "dbginfo_t.h"
44 #include "iropt_dbg.h"
56 /* when we need verifying */
/* NOTE(review): the two IRN_VRFY_IRG definitions below are presumably
 * selected by a surrounding #if/#else (verification builds get the real
 * check, other builds a no-op) — confirm against the full file. */
58 # define IRN_VRFY_IRG(res, irg)
60 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
63 /** The params got from the factory in arch_dep_init(...); NULL until the module is initialized. */
64 static const ir_settings_arch_dep_t *params = NULL;
66 /** The bit mask, which optimizations to apply (arch_dep_opts_t flags). */
67 static arch_dep_opts_t opts;
/** Initializes the architecture-dependent optimization module.
 *  NOTE(review): presumably queries the given factory for the architecture
 *  parameters and stores them in the file-local 'params' — TODO confirm. */
69 void arch_dep_init(arch_dep_params_factory_t factory)
/** Selects which architecture-dependent optimizations are enabled.
 *  NOTE(review): presumably stores the mask in the file-local 'opts' — confirm. */
77 void arch_dep_set_opts(arch_dep_opts_t the_opts)
82 /** Checks whether a mode allows a Mulh instruction: the mode must be
 * narrow enough (<= params->max_bits_for_mulh) and the matching
 * signed/unsigned high-multiply must be enabled in the parameters. */
83 static int allow_Mulh(ir_mode *mode)
85 if (get_mode_size_bits(mode) > params->max_bits_for_mulh) /* too wide for Mulh */
87 return (mode_is_signed(mode) && params->allow_mulhs) || (!mode_is_signed(mode) && params->allow_mulhu);
93 typedef struct instruction instruction;
/** A node of the abstract strength-reduction instruction tree; actual
 *  ir_nodes are only created from it after the tree's costs have been
 *  evaluated (see evaluate_insn / build_graph). */
95 insn_kind kind; /**< the instruction kind */
96 instruction *in[2]; /**< the ins */
97 unsigned shift_count; /**< shift count for LEA and SHIFT */
98 ir_node *irn; /**< the generated node for this instruction if any. */
99 int costs; /**< the costs for this instruction */
103 * The environment for the strength reduction of multiplications.
 * All temporary memory (instructions, condensed forms) lives on 'obst',
 * which is freed again in do_decomposition().
105 typedef struct _mul_env {
106 struct obstack obst; /**< an obstack for local space. */
107 ir_mode *mode; /**< the mode of the multiplication constant */
108 unsigned bits; /**< number of bits in the mode */
109 unsigned max_S; /**< the maximum LEA shift value. */
110 instruction *root; /**< the root of the instruction tree */
111 ir_node *op; /**< the operand that is multiplied */
112 ir_node *blk; /**< the block where the new graph is built */
113 dbg_info *dbg; /**< the debug info for the new graph. */
114 ir_mode *shf_mode; /**< the (unsigned) mode for the shift constants */
115 int fail; /**< set to 1 if the instruction sequence fails the constraints */
116 int n_shift; /**< maximum number of allowed shift instructions (checked in evaluate_insn) */
118 evaluate_costs_func evaluate; /**< the evaluate callback */
122 * Some kind of default evaluator. Return the cost of
 * one instruction of the given kind.  NOTE(review): 'tv' is presumably
 * only consulted for the MUL kind (the constant being multiplied) — confirm.
125 static int default_evaluate(insn_kind kind, tarval *tv)
135 * emit a LEA (or an Add) instruction
 * for the value a + (b << shift); shift == 0 degenerates to a plain ADD
 * (see the kind selection below and the LEA case of build_graph).
137 static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, unsigned shift)
139 instruction *res = OALLOC(&env->obst, instruction);
140 res->kind = shift > 0 ? LEA : ADD;
143 res->shift_count = shift;
150 * emit a SHIFT (or an Add or a Zero) instruction
 * for the value a << shift.
152 static instruction *emit_SHIFT(mul_env *env, instruction *a, unsigned shift)
154 instruction *res = OALLOC(&env->obst, instruction);
155 if (shift == env->bits) {
156 /* a 2^bits with bits resolution is a zero */
160 res->shift_count = 0;
161 } else if (shift != 1) {
/* a genuine shift */
165 res->shift_count = shift;
/* NOTE(review): shift == 1 appears to be emitted as an Add (a + a) —
 * confirm the kind set for this branch. */
170 res->shift_count = 0;
178 * emit a SUB instruction
 * for the value a - b.
180 static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b)
182 instruction *res = OALLOC(&env->obst, instruction);
186 res->shift_count = 0;
193 * emit the ROOT instruction
 * representing the multiplied operand root_op itself (the tree's leaf).
195 static instruction *emit_ROOT(mul_env *env, ir_node *root_op)
197 instruction *res = OALLOC(&env->obst, instruction);
201 res->shift_count = 0;
209 * Returns the condensed representation of the tarval tv
 * (obstack-allocated, one entry per set bit; *pr receives the entry count).
 * NOTE(review): from the uses in decompose_* the encoding appears to be
 * R[0] = position of the lowest set bit and R[i] = distance to the next
 * set bit — confirm against Wu's paper.
211 static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr)
213 ir_mode *mode = get_tarval_mode(tv);
214 int bits = get_mode_size_bits(mode);
215 char *bitstr = get_tarval_bitpattern(tv);
217 unsigned char *R = obstack_alloc(&env->obst, bits);
/* walk the textual bit pattern and record every set bit */
220 for (i = 0; bitstr[i] != '\0'; ++i) {
221 if (bitstr[i] == '1') {
234 * Calculate the gain when using the generalized complementary technique
 * (replacing a dense run of ones by one subtraction).
 * NOTE(review): judging from the use in decompose_mul, the return value is
 * the prefix length of R with maximal gain (0 meaning "no gain") — confirm.
236 static int calculate_gain(unsigned char *R, int r)
242 /* the gain for r == 1 */
244 for (i = 2; i < r; ++i) {
245 /* calculate the gain for r from the gain for r-1 */
246 gain += 2 - R[i - 1];
248 if (gain > max_gain) {
257 * Calculates the condensed complement of a given (R,r) tuple
 * over its first 'gain' entries; *prs receives the length of the result.
259 static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs)
261 unsigned char *value = obstack_alloc(&env->obst, env->bits);
265 memset(value, 0, env->bits);
/* expand the condensed form into a plain bit array */
268 for (i = 0; i < gain; ++i) {
273 /* negate and propagate 1 */
275 for (i = 0; i <= j; ++i) {
276 unsigned char v = !value[i];
282 /* condense it again */
285 for (i = 0; i <= j; ++i) {
298 * creates a tarval from a condensed representation:
 * shift a running power of two by the stored bit distances and sum up
 * the resulting powers.
300 static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r)
306 tv = get_mode_one(env->mode);
308 for (i = 0; i < r; ++i) {
311 tarval *t = new_tarval_from_long(j, mode_Iu);
312 tv = tarval_shl(tv, t);
314 res = res ? tarval_add(res, tv) : tv;
320 static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N);
323 * handle simple cases with up-to 2 bits set
 * (a single SHIFT, or a SHIFT combined with a LEA/ADD).
325 static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N)
327 instruction *ins, *ins2;
/* one set bit: a plain shift of the root operand */
331 return emit_SHIFT(env, env->root, R[0]);
336 if (R[1] <= env->max_S) {
/* second bit close enough: fold its distance into the LEA scale */
337 ins = emit_LEA(env, ins, ins, R[1]);
339 ins = emit_SHIFT(env, ins, R[0]);
344 ins = emit_SHIFT(env, ins, R[0]);
/* bits too far apart for one LEA scale: two shifts plus an add */
347 ins2 = emit_SHIFT(env, env->root, R[0] + R[1]);
348 return emit_LEA(env, ins, ins2, 0);
353 * Main decompose driver.
 * Handles the simple (<= 2 bit) cases, optionally applies the complementary
 * (SUB-based) technique when calculate_gain finds a profitable run of ones,
 * tries to split off a (2^i + 1) factor so one LEA can be reused, and
 * finally falls back to basic_decompose_mul.
355 static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N)
361 return decompose_simple_cases(env, R, r, N);
363 if (params->also_use_subs) {
364 gain = calculate_gain(R, r);
366 instruction *instr1, *instr2;
367 unsigned char *R1, *R2;
370 R1 = complement_condensed(env, R, r, gain, &r1);
372 R2 = obstack_alloc(&env->obst, r2);
375 for (i = 0; i < gain; ++i) {
382 /* Two identical bits: normalize */
387 for (i = gain + 1; i < r; ++i) {
391 instr1 = decompose_mul(env, R1, r1, NULL);
392 instr2 = decompose_mul(env, R2, r2, NULL);
/* value = (upper part) - (complement of the lower run of ones) */
393 return emit_SUB(env, instr2, instr1);
/* try to split off a (2^i + 1) factor: N = q * (2^i + 1) becomes q + (q << i) */
398 N = condensed_to_value(env, R, r);
400 for (i = env->max_S; i > 0; --i) {
401 tarval *div_res, *mod_res;
402 tarval *tv = new_tarval_from_long((1 << i) + 1, env->mode);
404 div_res = tarval_divmod(N, tv, &mod_res);
405 if (mod_res == get_mode_null(env->mode)) {
409 Rs = value_to_condensed(env, div_res, &rs);
411 instruction *N1 = decompose_mul(env, Rs, rs, div_res);
412 return emit_LEA(env, N1, N1, i);
416 return basic_decompose_mul(env, R, r, N);
/* classic max macro — evaluates each argument twice; do not pass
 * expressions with side effects */
419 #define IMAX(a,b) ((a) > (b) ? (a) : (b))
422 * basic decomposition routine
 * (the case analysis of Wu's algorithm, peeling one LEA/SHIFT per step).
424 static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N)
429 if (R[0] == 0) { /* Case 1 */
/* NOTE(review): R[1] > IMAX(env->max_S, R[1]) can never be true (x is
 * never greater than max(y, x)), so t is always 0 here — this looks like
 * a typo for a clamp such as min(env->max_S, R[1]); verify against the
 * Wu paper before changing. */
430 t = R[1] > IMAX(env->max_S, R[1]);
432 Ns = decompose_mul(env, &R[1], r - 1, N);
433 return emit_LEA(env, env->root, Ns, t);
434 } else if (R[0] <= env->max_S) { /* Case 2 */
437 Ns = decompose_mul(env, &R[1], r - 1, N);
438 return emit_LEA(env, Ns, env->root, t);
/* Case 3: lowest bit position exceeds the LEA scale — emit a real shift */
442 Ns = decompose_mul(env, R, r, N);
443 return emit_SHIFT(env, Ns, t);
448 * Recursively builds the Firm graph from the instruction tree, caching
 * each created node in inst->irn.
450 * @param env the environment
451 * @param inst the instruction
453 static ir_node *build_graph(mul_env *env, instruction *inst)
460 switch (inst->kind) {
/* LEA: l + (r << shift_count) */
462 l = build_graph(env, inst->in[0]);
463 r = build_graph(env, inst->in[1]);
464 c = new_Const_long(env->shf_mode, inst->shift_count);
465 r = new_rd_Shl(env->dbg, env->blk, r, c, env->mode);
466 return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode);
/* SHIFT: l << shift_count */
468 l = build_graph(env, inst->in[0]);
469 c = new_Const_long(env->shf_mode, inst->shift_count);
470 return inst->irn = new_rd_Shl(env->dbg, env->blk, l, c, env->mode);
/* SUB: l - r */
472 l = build_graph(env, inst->in[0]);
473 r = build_graph(env, inst->in[1]);
474 return inst->irn = new_rd_Sub(env->dbg, env->blk, l, r, env->mode);
/* ADD: l + r */
476 l = build_graph(env, inst->in[0]);
477 r = build_graph(env, inst->in[1]);
478 return inst->irn = new_rd_Add(env->dbg, env->blk, l, r, env->mode);
/* ZERO: the constant 0 */
480 return inst->irn = new_Const(get_mode_null(env->mode));
482 panic("Unsupported instruction kind");
487 * Calculate the costs for the given instruction sequence.
488 * Note that additional costs due to higher register pressure are NOT evaluated yet
 * Results are memoized in inst->costs.  NOTE(review): violating the shift
 * constraints below presumably sets env->fail — confirm.
490 static int evaluate_insn(mul_env *env, instruction *inst)
494 if (inst->costs >= 0) {
495 /* was already evaluated */
499 switch (inst->kind) {
/* binary kinds: cost of both operands plus this instruction */
503 costs = evaluate_insn(env, inst->in[0]);
504 costs += evaluate_insn(env, inst->in[1]);
505 costs += env->evaluate(inst->kind, NULL);
/* SHIFT: enforce the architecture's shift constraints */
509 if (inst->shift_count > params->highest_shift_amount)
511 if (env->n_shift <= 0)
515 costs = evaluate_insn(env, inst->in[0]);
516 costs += env->evaluate(inst->kind, NULL);
/* leaf kinds: just the instruction's own cost */
520 inst->costs = costs = env->evaluate(inst->kind, NULL);
526 panic("Unsupported instruction kind");
530 * Evaluate the replacement instructions and build a new graph
531 * if faster than the Mul.
532 * Returns the root of the new graph then or irn otherwise.
534 * @param irn the Mul operation
535 * @param operand the multiplication operand
536 * @param tv the multiplication constant
538 * @return the new graph
540 static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv)
549 obstack_init(&env.obst);
550 env.mode = get_tarval_mode(tv);
551 env.bits = (unsigned)get_mode_size_bits(env.mode);
553 env.root = emit_ROOT(&env, operand);
555 env.n_shift = params->maximum_shifts;
556 env.evaluate = params->evaluate != NULL ? params->evaluate : default_evaluate;
/* build the abstract instruction tree for the constant */
558 R = value_to_condensed(&env, tv, &r);
559 inst = decompose_mul(&env, R, r, tv);
561 /* the paper suggests 70% here */
562 mul_costs = (env.evaluate(MUL, tv) * 7 + 5) / 10;
563 if (evaluate_insn(&env, inst) <= mul_costs && !env.fail) {
/* cheaper than the Mul: materialize the tree as real nodes */
565 env.blk = get_nodes_block(irn);
566 env.dbg = get_irn_dbg_info(irn);
567 env.shf_mode = find_unsigned_mode(env.mode);
568 if (env.shf_mode == NULL)
569 env.shf_mode = mode_Iu;
571 res = build_graph(&env, inst);
/* all temporary data lived on the obstack; release it in one go */
573 obstack_free(&env.obst, NULL);
577 /* Replace Muls with Shifts and Add/Subs. */
578 ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn)
582 ir_mode *mode = get_irn_mode(irn);
589 /* If the architecture dependent optimizations were not initialized
590 or this optimization was not enabled. */
591 if (params == NULL || (opts & arch_dep_mul_to_shift) == 0)
/* only integer Muls with a constant operand can be strength-reduced */
594 if (!is_Mul(irn) || !mode_is_int(mode))
597 /* we should never do the reverse transformations again
/* mark the graph so later phases know arch-dep lowering happened */
599 irg = get_irn_irg(irn);
600 set_irg_state(irg, IR_GRAPH_STATE_ARCH_DEP);
602 left = get_binop_left(irn);
603 right = get_binop_right(irn);
607 /* Look, if one operand is a constant. */
608 if (is_Const(left)) {
609 tv = get_Const_tarval(left);
611 } else if (is_Const(right)) {
612 tv = get_Const_tarval(right);
617 res = do_decomposition(irn, operand, tv);
/* notify the statistics/debug hook about the replacement */
620 hook_arch_dep_replace_mul_with_shifts(irn);
629 * Calculates the ld2 (log2) of a tarval if the tarval is 2^n, else returns -1.
 * NOTE(review): 'bits' is used as the loop bound while get_tarval_sub_bits()
 * indexes bytes — verify the unit (bits vs. bytes) expected by callers.
631 static int tv_ld2(tarval *tv, int bits)
/* scan byte-wise, counting set bits and remembering the position */
635 for (num = i = 0; i < bits; ++i) {
636 unsigned char v = get_tarval_sub_bits(tv, i);
641 for (j = 0; j < 8; ++j)
654 /* for shorter lines: thin wrappers around the tarval arithmetic API */
655 #define ABS(a) tarval_abs(a)
656 #define NEG(a) tarval_neg(a)
657 #define NOT(a) tarval_not(a)
658 #define SHL(a, b) tarval_shl(a, b)
659 #define SHR(a, b) tarval_shr(a, b)
660 #define ADD(a, b) tarval_add(a, b)
661 #define SUB(a, b) tarval_sub(a, b, NULL)
662 #define MUL(a, b) tarval_mul(a, b)
663 #define DIV(a, b) tarval_div(a, b)
664 #define MOD(a, b) tarval_mod(a, b)
/* CMP yields a pn_Cmp relation bit set, tested with '&' below */
665 #define CMP(a, b) tarval_cmp(a, b)
666 #define CNV(a, m) tarval_convert_to(a, m)
667 #define ONE(m) get_mode_one(m)
668 #define ZERO(m) get_mode_null(m)
670 /** The result of the magic() function (signed magic-number division). */
672 tarval *M; /**< magic number */
673 int s; /**< shift amount */
674 int need_add; /**< an additional add is needed */
675 int need_sub; /**< an additional sub is needed */
679 * Signed division by constant d: calculate the Magic multiplier M and the shift amount s
681 * see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler
 * All intermediate arithmetic is done in the matching unsigned mode with
 * wrap-around overflow semantics; the previous overflow mode is restored
 * before returning.
683 static struct ms magic(tarval *d)
685 ir_mode *mode = get_tarval_mode(d);
686 ir_mode *u_mode = find_unsigned_mode(mode);
687 int bits = get_mode_size_bits(u_mode);
689 tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */
692 tarval *bits_minus_1, *two_bits_1;
696 tarval_int_overflow_mode_t rem = tarval_get_integer_overflow_mode();
698 /* we need overflow mode to work correctly */
699 tarval_set_integer_overflow_mode(TV_OVERFLOW_WRAP);
/* two_bits_1 = 2^(bits-1), the sign-bit value of the mode */
702 bits_minus_1 = new_tarval_from_long(bits - 1, u_mode);
703 two_bits_1 = SHL(get_mode_one(u_mode), bits_minus_1);
705 ad = CNV(ABS(d), u_mode);
706 t = ADD(two_bits_1, SHR(CNV(d, u_mode), bits_minus_1));
707 anc = SUB(SUB(t, ONE(u_mode)), MOD(t, ad)); /* Absolute value of nc */
708 p = bits - 1; /* Init: p */
709 q1 = DIV(two_bits_1, anc); /* Init: q1 = 2^p/|nc| */
710 r1 = SUB(two_bits_1, MUL(q1, anc)); /* Init: r1 = rem(2^p, |nc|) */
711 q2 = DIV(two_bits_1, ad); /* Init: q2 = 2^p/|d| */
712 r2 = SUB(two_bits_1, MUL(q2, ad)); /* Init: r2 = rem(2^p, |d|) */
/* iterate: increase p until the magic condition holds (Hacker's Delight 10-6) */
716 q1 = ADD(q1, q1); /* Update q1 = 2^p/|nc| */
717 r1 = ADD(r1, r1); /* Update r1 = rem(2^p, |nc|) */
719 if (CMP(r1, anc) & pn_Cmp_Ge) {
720 q1 = ADD(q1, ONE(u_mode));
724 q2 = ADD(q2, q2); /* Update q2 = 2^p/|d| */
725 r2 = ADD(r2, r2); /* Update r2 = rem(2^p, |d|) */
727 if (CMP(r2, ad) & pn_Cmp_Ge) {
728 q2 = ADD(q2, ONE(u_mode));
733 } while (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(u_mode)) & pn_Cmp_Eq));
735 d_cmp = CMP(d, ZERO(mode));
/* the magic number is negated for a negative divisor */
737 if (d_cmp & pn_Cmp_Ge)
738 mag.M = ADD(CNV(q2, mode), ONE(mode));
740 mag.M = SUB(ZERO(mode), ADD(CNV(q2, mode), ONE(mode)));
742 M_cmp = CMP(mag.M, ZERO(mode));
746 /* need an add if d > 0 && M < 0 */
747 mag.need_add = d_cmp & pn_Cmp_Gt && M_cmp & pn_Cmp_Lt;
749 /* need a sub if d < 0 && M > 0 */
750 mag.need_sub = d_cmp & pn_Cmp_Lt && M_cmp & pn_Cmp_Gt;
/* restore the caller's overflow mode */
752 tarval_set_integer_overflow_mode(rem);
757 /** The result of the magicu() function (unsigned magic-number division). */
759 tarval *M; /**< magic add constant */
760 int s; /**< shift amount */
761 int need_add; /**< add indicator */
765 * Unsigned division by constant d: calculate the Magic multiplier M and the shift amount s
767 * see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned)
 * Works with wrap-around overflow semantics; the caller's overflow mode is
 * restored before returning.  need_add signals the extra add/shift scheme.
769 static struct mu magicu(tarval *d)
771 ir_mode *mode = get_tarval_mode(d);
772 int bits = get_mode_size_bits(mode);
774 tarval *nc, *delta, *q1, *r1, *q2, *r2;
775 tarval *bits_minus_1, *two_bits_1, *seven_ff;
779 tarval_int_overflow_mode_t rem = tarval_get_integer_overflow_mode();
781 /* we need overflow mode to work correctly */
782 tarval_set_integer_overflow_mode(TV_OVERFLOW_WRAP);
/* two_bits_1 = 2^(bits-1); seven_ff = 2^(bits-1) - 1 (e.g. 0x7fffffff) */
784 bits_minus_1 = new_tarval_from_long(bits - 1, mode);
785 two_bits_1 = SHL(get_mode_one(mode), bits_minus_1);
786 seven_ff = SUB(two_bits_1, ONE(mode));
788 magu.need_add = 0; /* initialize the add indicator */
789 nc = SUB(NEG(ONE(mode)), MOD(NEG(d), d));
790 p = bits - 1; /* Init: p */
791 q1 = DIV(two_bits_1, nc); /* Init: q1 = 2^p/nc */
792 r1 = SUB(two_bits_1, MUL(q1, nc)); /* Init: r1 = rem(2^p, nc) */
793 q2 = DIV(seven_ff, d); /* Init: q2 = (2^p - 1)/d */
794 r2 = SUB(seven_ff, MUL(q2, d)); /* Init: r2 = rem(2^p - 1, d) */
/* iterate: increase p until the magic condition holds (Hacker's Delight 10-10) */
798 if (CMP(r1, SUB(nc, r1)) & pn_Cmp_Ge) {
799 q1 = ADD(ADD(q1, q1), ONE(mode));
800 r1 = SUB(ADD(r1, r1), nc);
807 if (CMP(ADD(r2, ONE(mode)), SUB(d, r2)) & pn_Cmp_Ge) {
808 if (CMP(q2, seven_ff) & pn_Cmp_Ge)
811 q2 = ADD(ADD(q2, q2), ONE(mode));
812 r2 = SUB(ADD(ADD(r2, r2), ONE(mode)), d);
815 if (CMP(q2, two_bits_1) & pn_Cmp_Ge)
819 r2 = ADD(ADD(r2, r2), ONE(mode));
821 delta = SUB(SUB(d, ONE(mode)), r2);
822 } while (p < 2*bits &&
823 (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(mode)) & pn_Cmp_Eq)));
825 magu.M = ADD(q2, ONE(mode)); /* Magic number */
826 magu.s = p - bits; /* and shift amount */
/* restore the caller's overflow mode */
828 tarval_set_integer_overflow_mode(rem);
834 * Build the Mulh replacement code for n / tv.
836 * Note that 'div' might be a mod or DivMod operation as well
 * (only its left operand, block and debug info are used here).
838 static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv)
840 dbg_info *dbg = get_irn_dbg_info(div);
841 ir_node *n = get_binop_left(div);
842 ir_node *block = get_irn_n(div, -1);
843 ir_mode *mode = get_irn_mode(n);
844 int bits = get_mode_size_bits(mode);
847 /* Beware: do not transform bad code */
848 if (is_Bad(n) || is_Bad(block))
851 if (mode_is_signed(mode)) {
852 struct ms mag = magic(tv);
854 /* generate the Mulh instruction */
855 c = new_Const(mag.M);
856 q = new_rd_Mulh(dbg, block, n, c, mode);
858 /* do we need an Add or Sub */
860 q = new_rd_Add(dbg, block, q, n, mode);
861 else if (mag.need_sub)
862 q = new_rd_Sub(dbg, block, q, n, mode);
864 /* Do we need the shift */
866 c = new_Const_long(mode_Iu, mag.s);
867 q = new_rd_Shrs(dbg, block, q, c, mode);
/* correct for negative dividends: add the sign bit of q to q */
871 c = new_Const_long(mode_Iu, bits - 1);
872 t = new_rd_Shr(dbg, block, q, c, mode);
874 q = new_rd_Add(dbg, block, q, t, mode);
876 struct mu mag = magicu(tv);
879 /* generate the Mulh instruction */
880 c = new_Const(mag.M);
881 q = new_rd_Mulh(dbg, block, n, c, mode);
885 /* use the GM scheme */
/* q = (((n - q) >> 1) + q) >> (s - 1) */
886 t = new_rd_Sub(dbg, block, n, q, mode);
888 c = new_Const(get_mode_one(mode_Iu));
889 t = new_rd_Shr(dbg, block, t, c, mode);
891 t = new_rd_Add(dbg, block, t, q, mode);
893 c = new_Const_long(mode_Iu, mag.s - 1);
894 q = new_rd_Shr(dbg, block, t, c, mode);
896 /* use the default scheme */
897 q = new_rd_Add(dbg, block, q, n, mode);
899 } else if (mag.s > 0) { /* default scheme, shift needed */
900 c = new_Const_long(mode_Iu, mag.s);
901 q = new_rd_Shr(dbg, block, q, c, mode);
907 /* Replace Divs with Shifts and Add/Subs and Mulh. */
908 ir_node *arch_dep_replace_div_by_const(ir_node *irn)
912 /* If the architecture dependent optimizations were not initialized
913 or this optimization was not enabled. */
914 if (params == NULL || (opts & arch_dep_div_by_const) == 0)
918 ir_node *c = get_Div_right(irn);
919 ir_node *block, *left;
930 tv = get_Const_tarval(c);
932 /* check for division by zero */
933 if (tarval_is_null(tv))
936 left = get_Div_left(irn);
937 mode = get_irn_mode(left);
938 block = get_irn_n(irn, -1);
939 dbg = get_irn_dbg_info(irn);
941 bits = get_mode_size_bits(mode);
945 if (mode_is_signed(mode)) {
946 /* for signed divisions, the algorithm works for a / -2^k by negating the result */
947 ntv = tarval_neg(tv);
957 if (k >= 0) { /* division by 2^k or -2^k */
958 if (mode_is_signed(mode)) {
960 ir_node *curr = left;
962 /* create the correction code for signed values only if there might be a remainder */
963 if (! get_Div_no_remainder(irn)) {
/* (left >>s (k-1)) >>u (bits-k) yields 2^k - 1 for negative left,
 * 0 otherwise; adding it makes truncation round towards zero */
965 k_node = new_Const_long(mode_Iu, k - 1);
966 curr = new_rd_Shrs(dbg, block, left, k_node, mode);
969 k_node = new_Const_long(mode_Iu, bits - k);
970 curr = new_rd_Shr(dbg, block, curr, k_node, mode);
972 curr = new_rd_Add(dbg, block, left, curr, mode);
977 k_node = new_Const_long(mode_Iu, k);
978 res = new_rd_Shrs(dbg, block, curr, k_node, mode);
980 if (n_flag) { /* negate the result */
983 k_node = new_Const(get_mode_null(mode))
984 res = new_rd_Sub(dbg, block, k_node, res, mode);
986 } else { /* unsigned case */
989 k_node = new_Const_long(mode_Iu, k);
990 res = new_rd_Shr(dbg, block, left, k_node, mode);
/* not a power of two: fall back to the magic-number Mulh scheme */
994 if (allow_Mulh(mode))
995 res = replace_div_by_mulh(irn, tv);
1000 hook_arch_dep_replace_division_by_const(irn);
1005 /* Replace Mods with Shifts and Add/Subs and Mulh. */
1006 ir_node *arch_dep_replace_mod_by_const(ir_node *irn)
1010 /* If the architecture dependent optimizations were not initialized
1011 or this optimization was not enabled. */
1012 if (params == NULL || (opts & arch_dep_mod_by_const) == 0)
1016 ir_node *c = get_Mod_right(irn);
1017 ir_node *block, *left;
1027 tv = get_Const_tarval(c);
1029 /* check for division by zero */
1030 if (tarval_is_null(tv))
1033 left = get_Mod_left(irn);
1034 mode = get_irn_mode(left);
1035 block = get_irn_n(irn, -1);
1036 dbg = get_irn_dbg_info(irn);
1037 bits = get_mode_size_bits(mode);
1041 if (mode_is_signed(mode)) {
1042 /* for signed divisions, the algorithm works for a / -2^k by negating the result */
1043 ntv = tarval_neg(tv);
1052 /* division by 2^k or -2^k:
1053 * we use "modulus" here, so x % y == x % -y that's why is no difference between the case 2^k and -2^k
1055 if (mode_is_signed(mode)) {
1057 ir_node *curr = left;
/* sign correction as in the Div case: add 2^k - 1 for negative left */
1060 k_node = new_Const_long(mode_Iu, k - 1);
1061 curr = new_rd_Shrs(dbg, block, left, k_node, mode);
1064 k_node = new_Const_long(mode_Iu, bits - k);
1065 curr = new_rd_Shr(dbg, block, curr, k_node, mode);
1067 curr = new_rd_Add(dbg, block, left, curr, mode);
/* NOTE(review): (-1) << k left-shifts a negative value, which is
 * undefined behavior in C; consider (long)(~0UL << k) instead. */
1069 k_node = new_Const_long(mode, (-1) << k);
1070 curr = new_rd_And(dbg, block, curr, k_node, mode);
1072 res = new_rd_Sub(dbg, block, left, curr, mode);
1073 } else { /* unsigned case */
/* NOTE(review): (1 << k) overflows for k >= 31 with 32-bit int —
 * verify the possible range of k here. */
1076 k_node = new_Const_long(mode, (1 << k) - 1);
1077 res = new_rd_And(dbg, block, left, k_node, mode);
1080 /* other constant */
1081 if (allow_Mulh(mode)) {
/* x % c == x - (x / c) * c, with the division done via Mulh */
1082 res = replace_div_by_mulh(irn, tv);
1084 res = new_rd_Mul(dbg, block, res, c, mode);
1086 /* res = arch_dep_mul_to_shift(res); */
1088 res = new_rd_Sub(dbg, block, left, res, mode);
1094 hook_arch_dep_replace_division_by_const(irn);
1099 /* Replace DivMods with Shifts and Add/Subs and Mulh. */
1100 void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn)
1104 /* If the architecture dependent optimizations were not initialized
1105 or this optimization was not enabled. */
/* both the div and the mod optimization must be enabled here */
1106 if (params == NULL ||
1107 ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
1110 if (is_DivMod(irn)) {
1111 ir_node *c = get_DivMod_right(irn);
1112 ir_node *block, *left;
1123 tv = get_Const_tarval(c);
1125 /* check for division by zero */
1126 if (tarval_is_null(tv))
1129 left = get_DivMod_left(irn);
1130 mode = get_irn_mode(left);
1131 block = get_irn_n(irn, -1);
1132 dbg = get_irn_dbg_info(irn);
1134 bits = get_mode_size_bits(mode);
1138 if (mode_is_signed(mode)) {
1139 /* for signed divisions, the algorithm works for a / -2^k by negating the result */
1140 ntv = tarval_neg(tv);
1150 if (k >= 0) { /* division by 2^k or -2^k */
1151 if (mode_is_signed(mode)) {
1152 ir_node *k_node, *c_k;
1153 ir_node *curr = left;
/* shared sign correction, reused for both the div and the mod result */
1156 k_node = new_Const_long(mode_Iu, k - 1);
1157 curr = new_rd_Shrs(dbg, block, left, k_node, mode);
1160 k_node = new_Const_long(mode_Iu, bits - k);
1161 curr = new_rd_Shr(dbg, block, curr, k_node, mode);
1163 curr = new_rd_Add(dbg, block, left, curr, mode);
1165 c_k = new_Const_long(mode_Iu, k);
1167 *div = new_rd_Shrs(dbg, block, curr, c_k, mode);
1169 if (n_flag) { /* negate the div result */
1172 k_node = new_Const(get_mode_null(mode));
1173 *div = new_rd_Sub(dbg, block, k_node, *div, mode);
/* NOTE(review): (-1) << k left-shifts a negative value — undefined
 * behavior in C; consider (long)(~0UL << k) instead. */
1176 k_node = new_Const_long(mode, (-1) << k);
1177 curr = new_rd_And(dbg, block, curr, k_node, mode);
1179 *mod = new_rd_Sub(dbg, block, left, curr, mode);
1180 } else { /* unsigned case */
1183 k_node = new_Const_long(mode_Iu, k);
1184 *div = new_rd_Shr(dbg, block, left, k_node, mode);
1186 k_node = new_Const_long(mode, (1 << k) - 1);
1187 *mod = new_rd_And(dbg, block, left, k_node, mode);
1190 /* other constant */
1191 if (allow_Mulh(mode)) {
/* div via Mulh, then mod = left - div * c */
1194 *div = replace_div_by_mulh(irn, tv);
1196 t = new_rd_Mul(dbg, block, *div, c, mode);
1198 /* t = arch_dep_mul_to_shift(t); */
1200 *mod = new_rd_Sub(dbg, block, left, t, mode);
1206 hook_arch_dep_replace_division_by_const(irn);
/** Conservative default settings used when no factory is supplied
 *  (no Mulh, LEA-style decomposition only). */
1210 static const ir_settings_arch_dep_t default_params = {
1211 1, /* also use subs */
1212 4, /* maximum shifts */
1213 31, /* maximum shift amount */
1214 default_evaluate, /* default evaluator */
1216 0, /* allow Mulhs */
1217 0, /* allow Mulus */
1218 32 /* Mulh allowed up to 32 bit */
1221 /* A default parameter factory for testing purposes; returns the static
 * default_params (the caller must not free or modify it). */
1222 const ir_settings_arch_dep_t *arch_dep_default_factory(void)
1224 return &default_params;