4 #if LDBL_MANT_DIG==64 && LDBL_MAX_EXP==16384
/*
 * Error-free two-sum: stores x+y as a rounded high part (*hi) plus the
 * exact rounding error (*lo), so that *hi + *lo == x + y exactly.
 * NOTE(review): body not visible in this view -- presumably the classic
 * fast two-sum, which is only exact under the stated precondition that
 * x's exponent is >= y's exponent and rounding is to nearest.
 */
15 /* exact add, assumes exponent_x >= exponent_y */
16 static void add(long double *hi, long double *lo, long double x, long double y)
/*
 * Error-free product (Dekker/Veltkamp): stores x*y as the rounded product
 * (*hi) plus the exact rounding error (*lo), so *hi + *lo == x*y exactly.
 * Requires round-to-nearest; the caller guarantees no over/underflow in
 * the partial products, as the original comment states.
 */
26 /* exact mul, assumes no over/underflow */
27 static void mul(long double *hi, long double *lo, long double x, long double y)
/* 1 + 2^32: splitting constant for the 64-bit ld80 mantissa; multiplying
 * by c and subtracting splits an operand into two 32-bit halves whose
 * pairwise products are exact. (Splitting steps not visible here.) */
29 static const long double c = 1.0 + 0x1p32L;
30 long double cx, xh, xl, cy, yh, yl;
/* Reconstruct the exact error of the rounded product from the four
 * half-products; each term is exact, so *lo == x*y - *hi. Assumes
 * *hi = x*y was computed above (line not visible) -- TODO confirm. */
39 *lo = (xh*yh - *hi) + xh*yl + xl*yh + xl*yl;
/*
 * Anti-double-rounding fixup: given hi (the rounded ld80 result) and lo
 * (the discarded lower part), nudge hi's low bits so that a later
 * rounding of hi to double (53-bit) precision yields the correctly
 * rounded value of hi+lo.  Only the sticky information from lo matters.
 */
43 assume (long double)(hi+lo) == hi
44 return an adjusted hi so that rounding it to double (or less) precision is correct
46 static long double adjust(long double hi, long double lo)
/* If any of the 10 low mantissa bits of hi is set, rounding hi to double
 * cannot hit a tie/boundary case, so hi already rounds correctly --
 * presumably returns hi unchanged here (following lines not visible). */
53 if (uhi.bits.m & 0x3ff)
/* lo has the same sign as hi: presumably bump the mantissa up by one so
 * the low bit acts as a sticky bit pointing in the right direction;
 * opposite signs take the decrement path below -- TODO confirm. */
56 if (uhi.bits.s == ulo.bits.s)
60 /* handle underflow and take care of ld80 implicit msb */
/* (uint64_t)-1/2 == 0x7fffffffffffffff: the decrement borrowed past the
 * ld80 explicit leading mantissa bit, so mantissa/exponent must be
 * renormalized (renormalization lines not visible). */
61 if (uhi.bits.m == (uint64_t)-1/2) {
/*
 * add() followed by adjust(): returns x+y as a long double whose value
 * rounds correctly when subsequently narrowed to double precision.
 * Assumes round-to-nearest is in effect (callers set it).
 */
69 /* adjusted add so the result is correct when rounded to double (or less) precision */
70 static long double dadd(long double x, long double y)
/*
 * mul() followed by adjust(): returns x*y as a long double whose value
 * rounds correctly when subsequently narrowed to double precision.
 * Assumes round-to-nearest is in effect (callers set it).
 */
76 /* adjusted mul so the result is correct when rounded to double (or less) precision */
77 static long double dmul(long double x, long double y)
/* Return the (biased or unbiased -- body not visible, TODO confirm)
 * binary exponent of x, used below to decide the add() argument order. */
83 static int getexp(long double x)
/*
 * fma(x, y, z) = x*y + z with a single rounding, implemented via exact
 * ld80 multiply/add pairs (mul/add above) plus the adjust() fixup that
 * prevents ld80->double double rounding.  Honors the caller's rounding
 * mode: the exact steps run under FE_TONEAREST and the mode is restored
 * before the final operation.
 */
90 double fma(double x, double y, double z)
/* Tell the compiler the FP environment is inspected/modified here. */
92 #pragma STDC FENV_ACCESS ON
93 long double hi, lo1, lo2, xy;
96 /* handle +-inf,nan */
/* Let the hardware produce the IEEE-mandated result (presumably via
 * x*y + z -- following line not visible) for inf/nan operands. */
97 if (!isfinite(x) || !isfinite(y))
/* A zero factor: x*y+z cannot suffer double rounding, so presumably
 * evaluated directly (result line not visible). This also gets the
 * sign of a zero result right. */
102 if (x == 0.0 || y == 0.0)
/* Save the caller's rounding mode; restored before the final add. */
104 round = fegetround();
106 if (round == FE_TONEAREST)
111 /* exact mul and add require nearest rounding */
112 /* spurious inexact exceptions may be raised */
113 fesetround(FE_TONEAREST);
/* xy + lo1 == x*y exactly. */
114 mul(&xy, &lo1, x, y);
/* z dominates: feed the larger-exponent operand first, as add() requires. */
118 add(&hi, &lo2, z, xy);
/* z overlaps xy's lower bits (within 12 bits of exponent): still needs
 * an exact add, with xy first since it now has the larger exponent. */
119 } else if (ez > exy - 12) {
120 add(&hi, &lo2, xy, z);
123 /* make sure that the sign of 0 is correct */
/* Complete cancellation case: recompute in the caller's rounding mode
 * (presumably restored just above -- line not visible) so a zero result
 * carries the correct sign. */
124 return (xy + z) + lo1;
129 the 12 extra bits (1guard, 11round+sticky) are needed so with
131 elo <= ehi - 11, and we use the last 10 bits in adjust so
133 gives correct result when rounded to double
/* Restore mode first (line not visible), then: under nearest rounding
 * use the adjusted adds so the narrowing to double rounds correctly. */
139 if (round == FE_TONEAREST)
140 return dadd(hi, dadd(lo1, lo2));
/* Directed rounding: double rounding is harmless, plain adds suffice. */
141 return hi + (lo1 + lo2);
144 /* origin: FreeBSD /usr/src/lib/msun/src/s_fma.c */
146 * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
147 * All rights reserved.
149 * Redistribution and use in source and binary forms, with or without
150 * modification, are permitted provided that the following conditions
152 * 1. Redistributions of source code must retain the above copyright
153 * notice, this list of conditions and the following disclaimer.
154 * 2. Redistributions in binary form must reproduce the above copyright
155 * notice, this list of conditions and the following disclaimer in the
156 * documentation and/or other materials provided with the distribution.
158 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
159 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
160 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
161 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
162 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
163 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
164 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
165 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
166 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
167 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
172 * A struct dd represents a floating-point number with twice the precision
173 * of a double. We maintain the invariant that "hi" stores the 53 high-order
174 * bits of the result.
182 * Compute a+b exactly, returning the exact result in a struct dd. We assume
183 * that both a and b are finite, but make no assumptions about their relative
/* Branch-free two-sum (Knuth): works for any magnitude ordering of a, b. */
186 static inline struct dd dd_add(double a, double b)
/* Exact error term of the rounded sum; s is presumably ret.hi - a
 * (assignment not visible here -- TODO confirm), giving
 * ret.hi + ret.lo == a + b exactly. */
193 ret.lo = (a - (ret.hi - s)) + (b - s);
198 * Compute a+b, with a small tweak: The least significant bit of the
199 * result is adjusted into a sticky bit summarizing all the bits that
200 * were lost to rounding. This adjustment negates the effects of double
201 * rounding when the result is added to another number with a higher
202 * exponent. For an explanation of round and sticky bits, see any reference
203 * on FPU design, e.g.,
205 * J. Coonen. An Implementation Guide to a Proposed Standard for
206 * Floating-Point Arithmetic. Computer, vol. 13, no. 1, Jan 1980.
208 static inline double add_adjusted(double a, double b)
211 uint64_t hibits, lobits;
/* Work on the raw IEEE-754 bit pattern of the rounded sum. */
215 EXTRACT_WORD64(hibits, sum.hi);
/* Only adjust when the low bit is clear: if it is already set, the
 * sticky information cannot change any later rounding decision. */
216 if ((hibits & 1) == 0) {
217 /* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
218 EXTRACT_WORD64(lobits, sum.lo);
/* Compare sign bits via xor of the top bits: same sign -> +1 (set the
 * sticky bit), opposite sign -> step the magnitude down by one ulp. */
219 hibits += 1 - ((hibits ^ lobits) >> 62);
220 INSERT_WORD64(sum.hi, hibits);
227 * Compute ldexp(a+b, scale) with a single rounding error. It is assumed
228 * that the result will be subnormal, and care is taken to ensure that
229 * double rounding does not occur.
231 static inline double add_and_denormalize(double a, double b, int scale)
234 uint64_t hibits, lobits;
240 * If we are losing at least two bits of accuracy to denormalization,
241 * then the first lost bit becomes a round bit, and we adjust the
242 * lowest bit of sum.hi to make it a sticky bit summarizing all the
243 * bits in sum.lo. With the sticky bit adjusted, the hardware will
244 * break any ties in the correct direction.
246 * If we are losing only one bit to denormalization, however, we must
247 * break the ties manually.
250 EXTRACT_WORD64(hibits, sum.hi);
/* bits lost = number of mantissa bits shifted out when sum.hi is scaled
 * into the subnormal range: derived from sum.hi's biased exponent
 * (bits 62..52) and the requested scale. */
251 bits_lost = -((int)(hibits >> 52) & 0x7ff) - scale + 1;
/* NOTE(review): `!=` binds tighter than `^` here, so this reads
 * (bits_lost != 1) ^ lowbit -- intentional in the upstream FreeBSD
 * code, not a precedence bug: adjust unless exactly one bit is lost
 * and the low bit already breaks the tie. */
252 if (bits_lost != 1 ^ (int)(hibits & 1)) {
253 /* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
254 EXTRACT_WORD64(lobits, sum.lo);
/* Same sign -> +1, opposite sign -> -1, selected branch-free from the
 * xor of the sign bits (the & 2 maps "signs differ" to subtracting 1). */
255 hibits += 1 - (((hibits ^ lobits) >> 62) & 2);
256 INSERT_WORD64(sum.hi, hibits);
/* Single final rounding happens here, in the target (subnormal) range. */
259 return scalbn(sum.hi, scale);
263 * Compute a*b exactly, returning the exact result in a struct dd. We assume
264 * that both a and b are normalized, so no underflow or overflow will occur.
265 * The current rounding mode must be round-to-nearest.
267 static inline struct dd dd_mul(double a, double b)
/* 2^27 + 1: Veltkamp splitting constant for the 53-bit double mantissa;
 * splits each operand into two ~26-bit halves with exact half-products. */
269 static const double split = 0x1p27 + 1.0;
271 double ha, hb, la, lb, p, q;
/* Cross terms of the half-products; p is presumably ha*hb (assignment
 * not visible here -- TODO confirm). */
284 q = ha * lb + la * hb;
/* Exact error of the rounded product ret.hi, reconstructed from the
 * exact partial products, so ret.hi + ret.lo == a*b exactly. */
287 ret.lo = p - ret.hi + q + la * lb;
292 * Fused multiply-add: Compute x * y + z with a single rounding error.
294 * We use scaling to avoid overflow/underflow, along with the
295 * canonical precision-doubling technique adapted from:
297 * Dekker, T. A Floating-Point Technique for Extending the
298 * Available Precision. Numer. Math. 18, 224-242 (1971).
300 * This algorithm is sensitive to the rounding precision. FPUs such
301 * as the i387 must be set in double-precision mode if variables are
302 * to be stored in FP registers in order to avoid incorrect results.
303 * This is the default on FreeBSD, but not on many other systems.
305 * Hardware instructions should be used on architectures that support it,
306 * since this implementation will likely be several times slower.
308 double fma(double x, double y, double z)
/* Tell the compiler the FP environment is inspected/modified here. */
310 #pragma STDC FENV_ACCESS ON
311 double xs, ys, zs, adj;
318 * Handle special cases. The order of operations and the particular
319 * return values here are crucial in handling special cases involving
320 * infinities, NaNs, overflows, and signed zeroes correctly.
/* inf/nan factor: presumably returns x*y + z directly (return line not
 * visible) so the hardware produces the IEEE-mandated result. */
322 if (!isfinite(x) || !isfinite(y))
/* Zero factor: no double rounding possible; also preserves the sign of
 * a zero result. */
326 if (x == 0.0 || y == 0.0)
/* Save the caller's rounding mode; the core runs under FE_TONEAREST. */
334 oround = fegetround();
/* Exponent distance between x*y (~ex+ey) and z: drives the scaling and
 * the fast paths below.  ex/ey/ez presumably come from frexp-style
 * decompositions above (lines not visible). */
335 spread = ex + ey - ez;
338 * If x * y and z are many orders of magnitude apart, the scaling
339 * will overflow, so we handle these cases specially. Rounding
340 * modes other than FE_TONEAREST are painful.
/* z completely dominates x*y: result is z perturbed by at most one ulp,
 * chosen per rounding mode in the switch below. */
342 if (spread < -DBL_MANT_DIG) {
/* x*y is nonzero here, so the exact result is inexact. */
344 feraiseexcept(FE_INEXACT);
348 feraiseexcept(FE_UNDERFLOW);
351 default: /* FE_TONEAREST */
/* Sign of x*y (via xor of the operand sign tests) vs sign of z decides
 * whether the infinitesimal x*y pulls z toward or away from zero. */
355 if (x > 0.0 ^ y < 0.0 ^ z < 0.0)
358 return (nextafter(z, 0));
/* FE_TOWARDZERO / directed-rounding arms: step z one ulp in the
 * direction the discarded x*y term would push it. */
362 if (x > 0.0 ^ y < 0.0)
365 return (nextafter(z, -INFINITY));
369 if (x > 0.0 ^ y < 0.0)
370 return (nextafter(z, INFINITY));
/* z is close enough in magnitude to participate: bring it to the scaled
 * frame of x*y. */
376 if (spread <= DBL_MANT_DIG * 2)
377 zs = scalbn(zs, -spread);
/* z is far below x*y: clamp it to a tiny sticky value of the right sign
 * so it still influences rounding without over-scaling. */
379 zs = copysign(DBL_MIN, zs);
/* Exact dd_mul/dd_add steps below require round-to-nearest. */
381 fesetround(FE_TONEAREST);
384 * Basic approach for round-to-nearest:
386 * (xy.hi, xy.lo) = x * y (exact)
387 * (r.hi, r.lo) = xy.hi + z (exact)
388 * adj = xy.lo + r.lo (inexact; low bit is sticky)
389 * result = r.hi + adj (correctly rounded)
/* Exact sum of the high product word and the (scaled) addend. */
392 r = dd_add(xy.hi, zs);
398 * When the addends cancel to 0, ensure that the result has
402 volatile double vzs = zs; /* XXX gcc CSE bug workaround */
/* Cancellation path: recompute in the caller's rounding mode
 * (presumably restored just above -- line not visible) so the sign of
 * zero and the residual xy.lo scale back correctly. */
403 return xy.hi + vzs + scalbn(xy.lo, spread);
406 if (oround != FE_TONEAREST) {
408 * There is no need to worry about double rounding in directed
/* Directed rounding: adj presumably r.lo + xy.lo (line not visible);
 * a single scalbn of the plain sum is correctly rounded. */
413 return scalbn(r.hi + adj, spread);
/* Nearest rounding: fold xy.lo into r.lo with the sticky-bit tweak so
 * the final add cannot double-round. */
416 adj = add_adjusted(r.lo, xy.lo);
/* Normal result: one scalbn after one rounding is exact. */
417 if (spread + ilogb(r.hi) > -1023)
418 return scalbn(r.hi + adj, spread);
/* Subnormal result: needs the careful denormalization path. */
420 return add_and_denormalize(r.hi, adj, spread);