X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=src%2Fmath%2Ffma.c;h=17f1cdcc39bc2a7671b99dbeb7d107ee7973d505;hb=8224bdbbedc60a5d637d95ddd344501f667f0294;hp=a9de2cb37e0f70d73312edeadec8fbd6a21c9992;hpb=b1cbd70743f0e0e0295e92c3e38d7599114db8c6;p=musl diff --git a/src/math/fma.c b/src/math/fma.c index a9de2cb3..17f1cdcc 100644 --- a/src/math/fma.c +++ b/src/math/fma.c @@ -23,10 +23,6 @@ static void add(long double *hi, long double *lo, long double x, long double y) *lo = y - r; } -/* -TODO(nsz): probably simpler mul is enough if we assume x and y are doubles -so last 11bits are all zeros, no subnormals etc -*/ /* exact mul, assumes no over/underflow */ static void mul(long double *hi, long double *lo, long double x, long double y) { @@ -45,7 +41,7 @@ static void mul(long double *hi, long double *lo, long double x, long double y) /* assume (long double)(hi+lo) == hi -return an adjusted hi so that rounding it to double is correct +return an adjusted hi so that rounding it to double (or less) precision is correct */ static long double adjust(long double hi, long double lo) { @@ -59,17 +55,25 @@ static long double adjust(long double hi, long double lo) ulo.x = lo; if (uhi.bits.s == ulo.bits.s) uhi.bits.m++; - else + else { uhi.bits.m--; + /* handle underflow and take care of ld80 implicit msb */ + if (uhi.bits.m == (uint64_t)-1/2) { + uhi.bits.m *= 2; + uhi.bits.e--; + } + } return uhi.x; } +/* adjusted add so the result is correct when rounded to double (or less) precision */ static long double dadd(long double x, long double y) { add(&x, &y, x, y); return adjust(x, y); } +/* adjusted mul so the result is correct when rounded to double (or less) precision */ static long double dmul(long double x, long double y) { mul(&x, &y, x, y); @@ -85,6 +89,7 @@ static int getexp(long double x) double fma(double x, double y, double z) { + #pragma STDC FENV_ACCESS ON long double hi, lo1, lo2, xy; int round, ez, exy; @@ -115,7 +120,7 @@ double fma(double x, double y, double z) add(&hi, &lo2, xy, z); if (hi == 0) { fesetround(round); - /* TODO: verify that the sign of 0 is always correct */ + /* make sure that the sign of 0 is correct */ return (xy + z) + lo1; } } else { @@ -251,7 +256,7 @@ static inline double add_and_denormalize(double a, double b, int scale) INSERT_WORD64(sum.hi, hibits); } } - return (ldexp(sum.hi, scale)); + return scalbn(sum.hi, scale); } /* @@ -302,6 +307,7 @@ static inline struct dd dd_mul(double a, double b) */ double fma(double x, double y, double z) { + #pragma STDC FENV_ACCESS ON double xs, ys, zs, adj; struct dd xy, r; int oround; @@ -368,7 +374,7 @@ double fma(double x, double y, double z) } } if (spread <= DBL_MANT_DIG * 2) - zs = ldexp(zs, -spread); + zs = scalbn(zs, -spread); else zs = copysign(DBL_MIN, zs); @@ -394,7 +400,7 @@ double fma(double x, double y, double z) */ fesetround(oround); volatile double vzs = zs; /* XXX gcc CSE bug workaround */ - return (xy.hi + vzs + ldexp(xy.lo, spread)); + return xy.hi + vzs + scalbn(xy.lo, spread); } if (oround != FE_TONEAREST) { @@ -404,13 +410,13 @@ double fma(double x, double y, double z) */ fesetround(oround); adj = r.lo + xy.lo; - return (ldexp(r.hi + adj, spread)); + return scalbn(r.hi + adj, spread); } adj = add_adjusted(r.lo, xy.lo); if (spread + ilogb(r.hi) > -1023) - return (ldexp(r.hi + adj, spread)); + return scalbn(r.hi + adj, spread); else - return (add_and_denormalize(r.hi, adj, spread)); + return add_and_denormalize(r.hi, adj, spread); } #endif
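
The main functional change in this patch is in adjust(): when lo has the
opposite sign of hi, the significand of hi is decremented by one, and on an
x86-style 80-bit long double (ld80) that decrement can clear the explicit top
bit of the significand when hi is a power of two, leaving a non-normalized
encoding. The added branch renormalizes by doubling the significand and
decrementing the exponent, which keeps the value unchanged since
m * 2^e == (2*m) * 2^(e-1). Below is a minimal sketch of that renormalization;
it is not part of the patch, and the union layout is illustrative (assuming a
little-endian x86 ld80 long double), not copied from fma.c.

#include <stdint.h>
#include <stdio.h>

/* illustrative ld80 view: 64-bit explicit significand, 15-bit exponent, sign */
union ld80 {
	long double x;
	struct {
		uint64_t m;
		uint16_t e : 15;
		uint16_t s : 1;
	} bits;
};

int main(void)
{
	union ld80 u;
	u.x = 1.0L;               /* m = 0x8000000000000000, a power of two */
	u.bits.m--;               /* nudge one ulp downward, as adjust() does */
	if (u.bits.m == (uint64_t)-1/2) {   /* explicit top bit was cleared */
		u.bits.m *= 2;    /* renormalize: m*2^e == (2m)*2^(e-1) */
		u.bits.e--;
	}
	printf("%d\n", u.x == 1.0L - 0x1p-63L);  /* 1: same value as intended */
	printf("%d\n", (int)(u.bits.m >> 63));   /* 1: encoding is normalized */
	return 0;
}

The remaining changes are smaller: #pragma STDC FENV_ACCESS ON tells the
compiler that the code depends on the dynamic rounding mode set via
fesetround(), and the ldexp() calls are replaced by scalbn(), which is
equivalent for binary doubles.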