2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Machine dependent Firm optimizations.
24 * @author Sebastian Hack, Michael Beck
27 * Implements "Strength Reduction of Multiplications by Integer Constants" by Youfeng Wu.
28 * Implements Division and Modulo by Consts from "Hacker's Delight",
41 #include "irgraph_t.h"
48 #include "dbginfo_t.h"
49 #include "iropt_dbg.h"
59 /* when we need verifying */
61 # define IRN_VRFY_IRG(res, irg)
63 # define IRN_VRFY_IRG(res, irg) irn_vrfy_irg(res, irg)
66 /** The params got from the factory in arch_dep_init(...). */
67 static const ir_settings_arch_dep_t *params = NULL;
69 /** The bit mask, which optimizations to apply. */
70 static arch_dep_opts_t opts;
72 /* we need this new pseudo op */
73 static ir_op *op_Mulh = NULL;
76 * construct a Mulh: Mulh(a,b) = (a * b) >> w, w is the with in bits of a, b
79 new_rd_Mulh (dbg_info *db, ir_graph *irg, ir_node *block,
80 ir_node *op1, ir_node *op2, ir_mode *mode) {
86 res = new_ir_node(db, irg, block, op_Mulh, mode, 2, in);
87 res = optimize_node(res);
88 IRN_VRFY_IRG(res, irg);
92 ir_op *get_op_Mulh(void) { return op_Mulh; }
94 void arch_dep_init(arch_dep_params_factory_t factory) {
101 int mulh_opc = get_next_ir_opcode();
103 /* create the Mulh operation */
104 op_Mulh = new_ir_op(mulh_opc, "Mulh", op_pin_state_floats, irop_flag_commutative, oparity_binary, 0, 0, NULL);
108 void arch_dep_set_opts(arch_dep_opts_t the_opts) {
112 /** check, whether a mode allows a Mulh instruction. */
113 static int allow_Mulh(ir_mode *mode) {
114 if (get_mode_size_bits(mode) > params->max_bits_for_mulh)
116 return (mode_is_signed(mode) && params->allow_mulhs) || (!mode_is_signed(mode) && params->allow_mulhu);
122 typedef struct instruction instruction;
124 insn_kind kind; /**< the instruction kind */
125 instruction *in[2]; /**< the ins */
126 int shift_count; /**< shift count for LEA and SHIFT */
127 ir_node *irn; /**< the generated node for this instruction if any. */
128 int costs; /**< the costs for this instruction */
132 * The environment for the strength reduction of multiplications.
134 typedef struct _mul_env {
135 struct obstack obst; /**< an obstack for local space. */
136 ir_mode *mode; /**< the mode of the multiplication constant */
137 int bits; /**< number of bits in the mode */
138 unsigned max_S; /**< the maximum LEA shift value. */
139 instruction *root; /**< the root of the instruction tree */
140 ir_node *op; /**< the operand that is multiplied */
141 ir_node *blk; /**< the block where the new graph is built */
142 dbg_info *dbg; /**< the debug info for the new graph. */
143 ir_mode *shf_mode; /**< the (unsigned) mode for the shift constants */
144 int fail; /**< set to 1 if the instruction sequence fails the constraints */
145 int n_shift; /**< maximum number of allowed shift instructions */
147 evaluate_costs_func evaluate; /**< the evaluate callback */
151 * Some kind of default evaluator.
153 static int default_evaluate(insn_kind kind, tarval *tv) {
160 * emit a LEA (or an Add) instruction
162 static instruction *emit_LEA(mul_env *env, instruction *a, instruction *b, int shift) {
163 instruction *res = obstack_alloc(&env->obst, sizeof(*res));
164 res->kind = shift > 0 ? LEA : ADD;
167 res->shift_count = shift;
174 * emit a SHIFT (or an Add) instruction
176 static instruction *emit_SHIFT(mul_env *env, instruction *a, int shift) {
177 instruction *res = obstack_alloc(&env->obst, sizeof(*res));
182 res->shift_count = shift;
187 res->shift_count = 0;
195 * emit a SUB instruction
197 static instruction *emit_SUB(mul_env *env, instruction *a, instruction *b) {
198 instruction *res = obstack_alloc(&env->obst, sizeof(*res));
202 res->shift_count = 0;
209 * emit the ROOT instruction
211 static instruction *emit_ROOT(mul_env *env, ir_node *root_op) {
212 instruction *res = obstack_alloc(&env->obst, sizeof(*res));
216 res->shift_count = 0;
224 * Returns the condensed representation of the tarval tv
226 static unsigned char *value_to_condensed(mul_env *env, tarval *tv, int *pr) {
227 ir_mode *mode = get_tarval_mode(tv);
228 int bits = get_mode_size_bits(mode);
229 char *bitstr = get_tarval_bitpattern(tv);
231 unsigned char *R = obstack_alloc(&env->obst, bits);
234 for (i = 0; bitstr[i] != '\0'; ++i) {
235 if (bitstr[i] == '1') {
248 * Calculate the gain when using the generalized complementary technique
250 static int calculate_gain(unsigned char *R, int r) {
255 /* the gain for r == 1 */
257 for (i = 2; i < r; ++i) {
258 /* calculate the gain for r from the gain for r-1 */
259 gain += 2 - R[i - 1];
261 if (gain > max_gain) {
272 * Calculates the condensed complement of a given (R,r) tuple
274 static unsigned char *complement_condensed(mul_env *env, unsigned char *R, int r, int gain, int *prs) {
275 unsigned char *value = obstack_alloc(&env->obst, env->bits);
279 memset(value, 0, env->bits);
282 for (i = 0; i < gain; ++i) {
287 /* negate and propagate 1 */
289 for (i = 0; i <= j; ++i) {
290 unsigned char v = !value[i];
296 /* condense it again */
299 for (i = 0; i <= j; ++i) {
312 * creates a tarval from a condensed representation.
314 static tarval *condensed_to_value(mul_env *env, unsigned char *R, int r) {
319 tv = get_mode_one(env->mode);
321 for (i = 0; i < r; ++i) {
324 tarval *t = new_tarval_from_long(j, mode_Iu);
325 tv = tarval_shl(tv, t);
327 res = res ? tarval_add(res, tv) : tv;
333 static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N);
336 * handle simple cases with up-to 2 bits set
338 static instruction *decompose_simple_cases(mul_env *env, unsigned char *R, int r, tarval *N) {
339 instruction *ins, *ins2;
342 return emit_SHIFT(env, env->root, R[0]);
348 ins = emit_SHIFT(env, ins, R[0]);
350 if (R[1] <= env->max_S)
351 return emit_LEA(env, ins, ins, R[1]);
353 ins2 = emit_SHIFT(env, env->root, R[0] + R[1]);
354 return emit_LEA(env, ins, ins2, 0);
359 * Main decompose driver.
361 static instruction *decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) {
366 return decompose_simple_cases(env, R, r, N);
368 if (params->also_use_subs) {
369 gain = calculate_gain(R, r);
371 instruction *instr1, *instr2;
372 unsigned char *R1, *R2;
375 R1 = complement_condensed(env, R, r, gain, &r1);
377 R2 = obstack_alloc(&env->obst, r2);
380 for (i = 0; i < gain; ++i) {
385 for (i = gain; i < r; ++i) {
389 instr1 = decompose_mul(env, R1, r1, NULL);
390 instr2 = decompose_mul(env, R2, r2, NULL);
391 return emit_SUB(env, instr2, instr1);
396 N = condensed_to_value(env, R, r);
398 for (i = env->max_S; i > 0; --i) {
399 tarval *div_res, *mod_res;
400 tarval *tv = new_tarval_from_long((1 << i) + 1, env->mode);
402 div_res = tarval_divmod(N, tv, &mod_res);
403 if (mod_res == get_mode_null(env->mode)) {
407 Rs = value_to_condensed(env, div_res, &rs);
409 instruction *N1 = decompose_mul(env, Rs, rs, div_res);
410 return emit_LEA(env, N1, N1, i);
414 return basic_decompose_mul(env, R, r, N);
417 #define IMAX(a,b) ((a) > (b) ? (a) : (b))
420 * basic decomposition routine
422 static instruction *basic_decompose_mul(mul_env *env, unsigned char *R, int r, tarval *N) {
426 if (R[0] == 0) { /* Case 1 */
427 t = R[1] > IMAX(env->max_S, R[1]);
429 Ns = decompose_mul(env, &R[1], r - 1, N);
430 return emit_LEA(env, env->root, Ns, t);
431 } else if (R[0] <= env->max_S) { /* Case 2 */
434 Ns = decompose_mul(env, &R[1], r - 1, N);
435 return emit_LEA(env, Ns, env->root, t);
439 Ns = decompose_mul(env, R, r, N);
440 return emit_SHIFT(env, Ns, t);
445 * recursive build the graph form the instructions.
447 * @param env the environment
448 * @param inst the instruction
450 static ir_node *build_graph(mul_env *env, instruction *inst) {
456 switch (inst->kind) {
458 l = build_graph(env, inst->in[0]);
459 r = build_graph(env, inst->in[1]);
460 c = new_r_Const(current_ir_graph, env->blk, env->shf_mode, new_tarval_from_long(inst->shift_count, env->shf_mode));
461 r = new_rd_Shl(env->dbg, current_ir_graph, env->blk, r, c, env->mode);
462 return inst->irn = new_rd_Add(env->dbg, current_ir_graph, env->blk, l, r, env->mode);
464 l = build_graph(env, inst->in[0]);
465 c = new_r_Const(current_ir_graph, env->blk, env->shf_mode, new_tarval_from_long(inst->shift_count, env->shf_mode));
466 return inst->irn = new_rd_Shl(env->dbg, current_ir_graph, env->blk, l, c, env->mode);
468 l = build_graph(env, inst->in[0]);
469 r = build_graph(env, inst->in[1]);
470 return inst->irn = new_rd_Sub(env->dbg, current_ir_graph, env->blk, l, r, env->mode);
472 l = build_graph(env, inst->in[0]);
473 r = build_graph(env, inst->in[1]);
474 return inst->irn = new_rd_Add(env->dbg, current_ir_graph, env->blk, l, r, env->mode);
482 * Calculate the costs for the given instruction sequence.
483 * Note that additional costs due to higher register pressure are NOT evaluated yet
485 static int evaluate_insn(mul_env *env, instruction *inst) {
488 if (inst->costs >= 0) {
489 /* was already evaluated */
493 switch (inst->kind) {
497 costs = evaluate_insn(env, inst->in[0]);
498 costs += evaluate_insn(env, inst->in[1]);
499 costs += env->evaluate(inst->kind, NULL);
503 if (inst->shift_count > params->highest_shift_amount)
505 if (env->n_shift <= 0)
509 costs = evaluate_insn(env, inst->in[0]);
510 costs += env->evaluate(inst->kind, NULL);
520 * Evaluate the replacement instructions and build a new graph
521 * if faster than the Mul.
522 * returns the root of the new graph then or irn otherwise.
524 * @param irn the Mul operation
525 * @param operand the multiplication operand
526 * @param tv the multiplication constant
528 * @return the new graph
530 static ir_node *do_decomposition(ir_node *irn, ir_node *operand, tarval *tv) {
538 obstack_init(&env.obst);
539 env.mode = get_tarval_mode(tv);
540 env.bits = get_mode_size_bits(env.mode);
542 env.root = emit_ROOT(&env, operand);
544 env.n_shift = params->maximum_shifts;
545 env.evaluate = params->evaluate != NULL ? params->evaluate : default_evaluate;
547 R = value_to_condensed(&env, tv, &r);
548 inst = decompose_mul(&env, R, r, tv);
550 /* the paper suggests 70% here */
551 mul_costs = (env.evaluate(MUL, tv) * 7) / 10;
552 if (evaluate_insn(&env, inst) <= mul_costs && !env.fail) {
554 env.blk = get_nodes_block(irn);
555 env.dbg = get_irn_dbg_info(irn);
556 env.shf_mode = find_unsigned_mode(env.mode);
557 if (env.shf_mode == NULL)
558 env.shf_mode = mode_Iu;
560 res = build_graph(&env, inst);
562 obstack_free(&env.obst, NULL);
566 /* Replace Muls with Shifts and Add/Subs. */
567 ir_node *arch_dep_replace_mul_with_shifts(ir_node *irn) {
569 ir_mode *mode = get_irn_mode(irn);
571 /* If the architecture dependent optimizations were not initialized
572 or this optimization was not enabled. */
573 if (params == NULL || (opts & arch_dep_mul_to_shift) == 0)
576 if (is_Mul(irn) && mode_is_int(mode)) {
577 ir_node *block = get_nodes_block(irn);
578 ir_node *left = get_binop_left(irn);
579 ir_node *right = get_binop_right(irn);
581 ir_node *operand = NULL;
583 /* Look, if one operand is a constant. */
584 if (is_Const(left)) {
585 tv = get_Const_tarval(left);
587 } else if (is_Const(right)) {
588 tv = get_Const_tarval(right);
593 res = do_decomposition(irn, operand, tv);
596 hook_arch_dep_replace_mul_with_shifts(irn);
606 * calculated the ld2 of a tarval if tarval is 2^n, else returns -1.
608 static int tv_ld2(tarval *tv, int bits) {
611 for (num = i = 0; i < bits; ++i) {
612 unsigned char v = get_tarval_sub_bits(tv, i);
617 for (j = 0; j < 8; ++j)
630 /* for shorter lines */
631 #define ABS(a) tarval_abs(a)
632 #define NEG(a) tarval_neg(a)
633 #define NOT(a) tarval_not(a)
634 #define SHL(a, b) tarval_shl(a, b)
635 #define SHR(a, b) tarval_shr(a, b)
636 #define ADD(a, b) tarval_add(a, b)
637 #define SUB(a, b) tarval_sub(a, b)
638 #define MUL(a, b) tarval_mul(a, b)
639 #define DIV(a, b) tarval_div(a, b)
640 #define MOD(a, b) tarval_mod(a, b)
641 #define CMP(a, b) tarval_cmp(a, b)
642 #define CNV(a, m) tarval_convert_to(a, m)
643 #define ONE(m) get_mode_one(m)
644 #define ZERO(m) get_mode_null(m)
646 /** The result of a the magic() function. */
648 tarval *M; /**< magic number */
649 int s; /**< shift amount */
650 int need_add; /**< an additional add is needed */
651 int need_sub; /**< an additional sub is needed */
655 * Signed division by constant d: calculate the Magic multiplier M and the shift amount s
657 * see Hacker's Delight: 10-6 Integer Division by Constants: Incorporation into a Compiler
659 static struct ms magic(tarval *d) {
660 ir_mode *mode = get_tarval_mode(d);
661 ir_mode *u_mode = find_unsigned_mode(mode);
662 int bits = get_mode_size_bits(u_mode);
664 tarval *ad, *anc, *delta, *q1, *r1, *q2, *r2, *t; /* unsigned */
667 tarval *bits_minus_1, *two_bits_1;
671 tarval_int_overflow_mode_t rem = tarval_get_integer_overflow_mode();
673 /* we need overflow mode to work correctly */
674 tarval_set_integer_overflow_mode(TV_OVERFLOW_WRAP);
677 bits_minus_1 = new_tarval_from_long(bits - 1, u_mode);
678 two_bits_1 = SHL(get_mode_one(u_mode), bits_minus_1);
680 ad = CNV(ABS(d), u_mode);
681 t = ADD(two_bits_1, SHR(CNV(d, u_mode), bits_minus_1));
682 anc = SUB(SUB(t, ONE(u_mode)), MOD(t, ad)); /* Absolute value of nc */
683 p = bits - 1; /* Init: p */
684 q1 = DIV(two_bits_1, anc); /* Init: q1 = 2^p/|nc| */
685 r1 = SUB(two_bits_1, MUL(q1, anc)); /* Init: r1 = rem(2^p, |nc|) */
686 q2 = DIV(two_bits_1, ad); /* Init: q2 = 2^p/|d| */
687 r2 = SUB(two_bits_1, MUL(q2, ad)); /* Init: r2 = rem(2^p, |d|) */
691 q1 = ADD(q1, q1); /* Update q1 = 2^p/|nc| */
692 r1 = ADD(r1, r1); /* Update r1 = rem(2^p, |nc|) */
694 if (CMP(r1, anc) & pn_Cmp_Ge) {
695 q1 = ADD(q1, ONE(u_mode));
699 q2 = ADD(q2, q2); /* Update q2 = 2^p/|d| */
700 r2 = ADD(r2, r2); /* Update r2 = rem(2^p, |d|) */
702 if (CMP(r2, ad) & pn_Cmp_Ge) {
703 q2 = ADD(q2, ONE(u_mode));
708 } while (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(u_mode)) & pn_Cmp_Eq));
710 d_cmp = CMP(d, ZERO(mode));
712 if (d_cmp & pn_Cmp_Ge)
713 mag.M = ADD(CNV(q2, mode), ONE(mode));
715 mag.M = SUB(ZERO(mode), ADD(CNV(q2, mode), ONE(mode)));
717 M_cmp = CMP(mag.M, ZERO(mode));
721 /* need an add if d > 0 && M < 0 */
722 mag.need_add = d_cmp & pn_Cmp_Gt && M_cmp & pn_Cmp_Lt;
724 /* need a sub if d < 0 && M > 0 */
725 mag.need_sub = d_cmp & pn_Cmp_Lt && M_cmp & pn_Cmp_Gt;
727 tarval_set_integer_overflow_mode(rem);
732 /** The result of the magicu() function. */
734 tarval *M; /**< magic add constant */
735 int s; /**< shift amount */
736 int need_add; /**< add indicator */
740 * Unsigned division by constant d: calculate the Magic multiplier M and the shift amount s
742 * see Hacker's Delight: 10-10 Integer Division by Constants: Incorporation into a Compiler (Unsigned)
744 static struct mu magicu(tarval *d) {
745 ir_mode *mode = get_tarval_mode(d);
746 int bits = get_mode_size_bits(mode);
748 tarval *nc, *delta, *q1, *r1, *q2, *r2;
749 tarval *bits_minus_1, *two_bits_1, *seven_ff;
753 tarval_int_overflow_mode_t rem = tarval_get_integer_overflow_mode();
755 /* we need overflow mode to work correctly */
756 tarval_set_integer_overflow_mode(TV_OVERFLOW_WRAP);
758 bits_minus_1 = new_tarval_from_long(bits - 1, mode);
759 two_bits_1 = SHL(get_mode_one(mode), bits_minus_1);
760 seven_ff = SUB(two_bits_1, ONE(mode));
762 magu.need_add = 0; /* initialize the add indicator */
763 nc = SUB(NEG(ONE(mode)), MOD(NEG(d), d));
764 p = bits - 1; /* Init: p */
765 q1 = DIV(two_bits_1, nc); /* Init: q1 = 2^p/nc */
766 r1 = SUB(two_bits_1, MUL(q1, nc)); /* Init: r1 = rem(2^p, nc) */
767 q2 = DIV(seven_ff, d); /* Init: q2 = (2^p - 1)/d */
768 r2 = SUB(seven_ff, MUL(q2, d)); /* Init: r2 = rem(2^p - 1, d) */
772 if (CMP(r1, SUB(nc, r1)) & pn_Cmp_Ge) {
773 q1 = ADD(ADD(q1, q1), ONE(mode));
774 r1 = SUB(ADD(r1, r1), nc);
781 if (CMP(ADD(r2, ONE(mode)), SUB(d, r2)) & pn_Cmp_Ge) {
782 if (CMP(q2, seven_ff) & pn_Cmp_Ge)
785 q2 = ADD(ADD(q2, q2), ONE(mode));
786 r2 = SUB(ADD(ADD(r2, r2), ONE(mode)), d);
789 if (CMP(q2, two_bits_1) & pn_Cmp_Ge)
793 r2 = ADD(ADD(r2, r2), ONE(mode));
795 delta = SUB(SUB(d, ONE(mode)), r2);
796 } while (p < 2*bits &&
797 (CMP(q1, delta) & pn_Cmp_Lt || (CMP(q1, delta) & pn_Cmp_Eq && CMP(r1, ZERO(mode)) & pn_Cmp_Eq)));
799 magu.M = ADD(q2, ONE(mode)); /* Magic number */
800 magu.s = p - bits; /* and shift amount */
802 tarval_set_integer_overflow_mode(rem);
808 * Build the Mulh replacement code for n / tv.
810 * Note that 'div' might be a mod or DivMod operation as well
812 static ir_node *replace_div_by_mulh(ir_node *div, tarval *tv) {
813 dbg_info *dbg = get_irn_dbg_info(div);
814 ir_node *n = get_binop_left(div);
815 ir_node *block = get_irn_n(div, -1);
816 ir_mode *mode = get_irn_mode(n);
817 int bits = get_mode_size_bits(mode);
820 /* Beware: do not transform bad code */
821 if (is_Bad(n) || is_Bad(block))
824 if (mode_is_signed(mode)) {
825 struct ms mag = magic(tv);
827 /* generate the Mulh instruction */
828 c = new_r_Const(current_ir_graph, block, mode, mag.M);
829 q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);
831 /* do we need an Add or Sub */
833 q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
834 else if (mag.need_sub)
835 q = new_rd_Sub(dbg, current_ir_graph, block, q, n, mode);
837 /* Do we need the shift */
839 c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s);
840 q = new_rd_Shrs(dbg, current_ir_graph, block, q, c, mode);
844 c = new_r_Const_long(current_ir_graph, block, mode_Iu, bits-1);
845 t = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
847 q = new_rd_Add(dbg, current_ir_graph, block, q, t, mode);
849 struct mu mag = magicu(tv);
852 /* generate the Mulh instruction */
853 c = new_r_Const(current_ir_graph, block, mode, mag.M);
854 q = new_rd_Mulh(dbg, current_ir_graph, block, n, c, mode);
858 /* use the GM scheme */
859 t = new_rd_Sub(dbg, current_ir_graph, block, n, q, mode);
861 c = new_r_Const(current_ir_graph, block, mode_Iu, get_mode_one(mode_Iu));
862 t = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
864 t = new_rd_Add(dbg, current_ir_graph, block, t, q, mode);
866 c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s-1);
867 q = new_rd_Shr(dbg, current_ir_graph, block, t, c, mode);
869 /* use the default scheme */
870 q = new_rd_Add(dbg, current_ir_graph, block, q, n, mode);
872 } else if (mag.s > 0) { /* default scheme, shift needed */
873 c = new_r_Const_long(current_ir_graph, block, mode_Iu, mag.s);
874 q = new_rd_Shr(dbg, current_ir_graph, block, q, c, mode);
880 /* Replace Divs with Shifts and Add/Subs and Mulh. */
881 ir_node *arch_dep_replace_div_by_const(ir_node *irn) {
884 /* If the architecture dependent optimizations were not initialized
885 or this optimization was not enabled. */
886 if (params == NULL || (opts & arch_dep_div_by_const) == 0)
889 if (get_irn_opcode(irn) == iro_Div) {
890 ir_node *c = get_Div_right(irn);
891 ir_node *block, *left;
898 if (get_irn_op(c) != op_Const)
901 tv = get_Const_tarval(c);
903 /* check for division by zero */
904 if (classify_tarval(tv) == TV_CLASSIFY_NULL)
907 left = get_Div_left(irn);
908 mode = get_irn_mode(left);
909 block = get_irn_n(irn, -1);
910 dbg = get_irn_dbg_info(irn);
912 bits = get_mode_size_bits(mode);
916 if (mode_is_signed(mode)) {
917 /* for signed divisions, the algorithm works for a / -2^k by negating the result */
918 ntv = tarval_neg(tv);
928 if (k >= 0) { /* division by 2^k or -2^k */
929 if (mode_is_signed(mode)) {
931 ir_node *curr = left;
934 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
935 curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
938 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
939 curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
941 curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
943 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
944 res = new_rd_Shrs(dbg, current_ir_graph, block, curr, k_node, mode);
946 if (n_flag) { /* negate the result */
949 k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
950 res = new_rd_Sub(dbg, current_ir_graph, block, k_node, res, mode);
952 } else { /* unsigned case */
955 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
956 res = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
960 if (allow_Mulh(mode))
961 res = replace_div_by_mulh(irn, tv);
966 hook_arch_dep_replace_division_by_const(irn);
971 /* Replace Mods with Shifts and Add/Subs and Mulh. */
972 ir_node *arch_dep_replace_mod_by_const(ir_node *irn) {
975 /* If the architecture dependent optimizations were not initialized
976 or this optimization was not enabled. */
977 if (params == NULL || (opts & arch_dep_mod_by_const) == 0)
980 if (get_irn_opcode(irn) == iro_Mod) {
981 ir_node *c = get_Mod_right(irn);
982 ir_node *block, *left;
989 if (get_irn_op(c) != op_Const)
992 tv = get_Const_tarval(c);
994 /* check for division by zero */
995 if (classify_tarval(tv) == TV_CLASSIFY_NULL)
998 left = get_Mod_left(irn);
999 mode = get_irn_mode(left);
1000 block = get_irn_n(irn, -1);
1001 dbg = get_irn_dbg_info(irn);
1002 bits = get_mode_size_bits(mode);
1006 if (mode_is_signed(mode)) {
1007 /* for signed divisions, the algorithm works for a / -2^k by negating the result */
1008 ntv = tarval_neg(tv);
1017 /* division by 2^k or -2^k:
1018 * we use "modulus" here, so x % y == x % -y that's why is no difference between the case 2^k and -2^k
1020 if (mode_is_signed(mode)) {
1022 ir_node *curr = left;
1025 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
1026 curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
1029 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
1030 curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
1032 curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
1034 k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
1035 curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
1037 res = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
1038 } else { /* unsigned case */
1041 k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
1042 res = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
1045 /* other constant */
1046 if (allow_Mulh(mode)) {
1047 res = replace_div_by_mulh(irn, tv);
1049 res = new_rd_Mul(dbg, current_ir_graph, block, res, c, mode);
1051 /* res = arch_dep_mul_to_shift(res); */
1053 res = new_rd_Sub(dbg, current_ir_graph, block, left, res, mode);
1059 hook_arch_dep_replace_division_by_const(irn);
1064 /* Replace DivMods with Shifts and Add/Subs and Mulh. */
1065 void arch_dep_replace_divmod_by_const(ir_node **div, ir_node **mod, ir_node *irn) {
1068 /* If the architecture dependent optimizations were not initialized
1069 or this optimization was not enabled. */
1070 if (params == NULL ||
1071 ((opts & (arch_dep_div_by_const|arch_dep_mod_by_const)) != (arch_dep_div_by_const|arch_dep_mod_by_const)))
1074 if (get_irn_opcode(irn) == iro_DivMod) {
1075 ir_node *c = get_DivMod_right(irn);
1076 ir_node *block, *left;
1083 if (get_irn_op(c) != op_Const)
1086 tv = get_Const_tarval(c);
1088 /* check for division by zero */
1089 if (classify_tarval(tv) == TV_CLASSIFY_NULL)
1092 left = get_DivMod_left(irn);
1093 mode = get_irn_mode(left);
1094 block = get_irn_n(irn, -1);
1095 dbg = get_irn_dbg_info(irn);
1097 bits = get_mode_size_bits(mode);
1101 if (mode_is_signed(mode)) {
1102 /* for signed divisions, the algorithm works for a / -2^k by negating the result */
1103 ntv = tarval_neg(tv);
1113 if (k >= 0) { /* division by 2^k or -2^k */
1114 if (mode_is_signed(mode)) {
1115 ir_node *k_node, *c_k;
1116 ir_node *curr = left;
1119 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k - 1);
1120 curr = new_rd_Shrs(dbg, current_ir_graph, block, left, k_node, mode);
1123 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, bits - k);
1124 curr = new_rd_Shr(dbg, current_ir_graph, block, curr, k_node, mode);
1126 curr = new_rd_Add(dbg, current_ir_graph, block, left, curr, mode);
1128 c_k = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
1130 *div = new_rd_Shrs(dbg, current_ir_graph, block, curr, c_k, mode);
1132 if (n_flag) { /* negate the div result */
1135 k_node = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode));
1136 *div = new_rd_Sub(dbg, current_ir_graph, block, k_node, *div, mode);
1139 k_node = new_r_Const_long(current_ir_graph, block, mode, (-1) << k);
1140 curr = new_rd_And(dbg, current_ir_graph, block, curr, k_node, mode);
1142 *mod = new_rd_Sub(dbg, current_ir_graph, block, left, curr, mode);
1143 } else { /* unsigned case */
1146 k_node = new_r_Const_long(current_ir_graph, block, mode_Iu, k);
1147 *div = new_rd_Shr(dbg, current_ir_graph, block, left, k_node, mode);
1149 k_node = new_r_Const_long(current_ir_graph, block, mode, (1 << k) - 1);
1150 *mod = new_rd_And(dbg, current_ir_graph, block, left, k_node, mode);
1153 /* other constant */
1154 if (allow_Mulh(mode)) {
1157 *div = replace_div_by_mulh(irn, tv);
1159 t = new_rd_Mul(dbg, current_ir_graph, block, *div, c, mode);
1161 /* t = arch_dep_mul_to_shift(t); */
1163 *mod = new_rd_Sub(dbg, current_ir_graph, block, left, t, mode);
1169 hook_arch_dep_replace_division_by_const(irn);
1173 static const ir_settings_arch_dep_t default_params = {
1174 1, /* also use subs */
1175 4, /* maximum shifts */
1176 31, /* maximum shift amount */
1178 0, /* allow Mulhs */
1179 0, /* allow Mulus */
1180 32 /* Mulh allowed up to 32 bit */
1183 /* A default parameter factory for testing purposes. */
1184 const ir_settings_arch_dep_t *arch_dep_default_factory(void) {
1185 return &default_params;