/*
assume (long double)(hi+lo) == hi
-return an adjusted hi so that rounding it to double is correct
+return an adjusted hi so that rounding it to double (or less) precision is correct
*/
static long double adjust(long double hi, long double lo)
{
	ulo.x = lo;
	if (uhi.bits.s == ulo.bits.s)
		uhi.bits.m++;
-	else
+	else {
		uhi.bits.m--;
+		/* handle underflow and take care of ld80 implicit msb */
+		if (uhi.bits.m == (uint64_t)-1/2) {
+			uhi.bits.m *= 2;
+			uhi.bits.e--;
+		}
+	}
	return uhi.x;
}
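For reference, the bit-field accesses in adjust() assume a little-endian x87 80-bit layout along the lines of the sketch below. The field names m, e and s are taken from the code above; the actual union definition lives earlier in fma.c and may differ in detail. With this layout, decrementing m past 2^63 drops the explicit leading bit, which is what the added renormalization branch (m *= 2, e--) repairs.

/* sketch of the ld80 layout assumed above; not part of this patch */
#include <stdint.h>

union ld80 {
	long double x;
	struct {
		uint64_t m;      /* 64-bit significand with explicit leading bit */
		uint16_t e : 15; /* biased exponent */
		uint16_t s : 1;  /* sign */
		uint16_t pad;
	} bits;
};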
+/* adjusted add so the result is correct when rounded to double (or less) precision */
static long double dadd(long double x, long double y)
{
	add(&x, &y, x, y);
	return adjust(x, y);
}
+/* adjusted mul so the result is correct when rounded to double (or less) precision */
static long double dmul(long double x, long double y)
{
	mul(&x, &y, x, y);
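dadd() and dmul() wrap exact helpers add() and mul() that are defined earlier in the file and only called here. A minimal sketch of their likely shape is given below (Fast2Sum for the addition, a Dekker-style split for the product); it assumes round-to-nearest, no overflow or underflow, and, for add(), that x has the larger exponent. It is an illustration, not the file's actual code.

/* sketch of the exact add/mul helpers called above; illustrative only */
static void add(long double *hi, long double *lo, long double x, long double y)
{
	long double r;

	r = x + y;   /* assumes the exponent of x is >= the exponent of y */
	*hi = r;
	r -= x;
	*lo = y - r; /* exact rounding error of the addition */
}

static void mul(long double *hi, long double *lo, long double x, long double y)
{
	/* split constant for a 64-bit significand: 2^32 + 1 */
	static const long double c = 1.0 + 0x1p32L;
	long double cx, xh, xl, cy, yh, yl;

	cx = c*x;
	xh = (x - cx) + cx; /* high half of x */
	xl = x - xh;
	cy = c*y;
	yh = (y - cy) + cy;
	yl = y - yh;
	*hi = x*y;
	*lo = (((xh*yh - *hi) + xh*yl) + xl*yh) + xl*yl; /* exact error of the product */
}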
			INSERT_WORD64(sum.hi, hibits);
		}
	}
-	return (ldexp(sum.hi, scale));
+	return scalbn(sum.hi, scale);
}
/*
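The hunks below are in the FreeBSD-derived double-precision path of fma(), which carries intermediate results as hi/lo pairs (sum, xy, r) and rescales them by a precomputed exponent spread. For orientation, a minimal sketch of the double-double representation and the two-sum primitive behind those pairs is shown here; the real struct dd and dd_add() are defined earlier in the file and are not part of this patch.

/* sketch of the double-double representation used below; illustrative only */
struct dd {
	double hi;
	double lo; /* hi + lo is the exact value, |lo| <= ulp(hi)/2 */
};

static inline struct dd dd_add(double a, double b)
{
	struct dd ret;
	double s;

	ret.hi = a + b;
	s = ret.hi - a;
	ret.lo = (a - (ret.hi - s)) + (b - s); /* exact error of the addition */
	return ret;
}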
		}
	}
	if (spread <= DBL_MANT_DIG * 2)
-		zs = ldexp(zs, -spread);
+		zs = scalbn(zs, -spread);
	else
		zs = copysign(DBL_MIN, zs);
		 */
		fesetround(oround);
		volatile double vzs = zs; /* XXX gcc CSE bug workaround */
-		return (xy.hi + vzs + ldexp(xy.lo, spread));
+		return xy.hi + vzs + scalbn(xy.lo, spread);
	}
	if (oround != FE_TONEAREST) {
		 */
		fesetround(oround);
		adj = r.lo + xy.lo;
-		return (ldexp(r.hi + adj, spread));
+		return scalbn(r.hi + adj, spread);
	}
	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogb(r.hi) > -1023)
-		return (ldexp(r.hi + adj, spread));
+		return scalbn(r.hi + adj, spread);
	else
-		return (add_and_denormalize(r.hi, adj, spread));
+		return add_and_denormalize(r.hi, adj, spread);
}
#endif
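A quick standalone check of the behaviour these changes are concerned with: fma() must round x*y+z only once and must honour the current rounding mode. This is a hypothetical test, not part of the patch; the naive-subtraction line assumes double (not x87 extended) evaluation, and the expected values assume IEEE-754 binary64. Link with -lm.

#include <fenv.h>
#include <math.h>
#include <stdio.h>

#pragma STDC FENV_ACCESS ON

int main(void)
{
	double a = 1.0 + 0x1p-30;
	double p = a * a; /* rounded product; the exact value is 1 + 2^-29 + 2^-60 */

	printf("%a\n", fma(a, a, -p)); /* expect 0x1p-60: the bit lost when a*a was rounded */
	printf("%a\n", a * a - p);     /* expect 0x0p+0: the residual is lost without fma */

	fesetround(FE_UPWARD);         /* exact 1 + 2^-80 must round up to the next double */
	printf("%a\n", fma(1.0, 1.0, 0x1p-80)); /* expect 0x1.0000000000001p+0 */
	fesetround(FE_TONEAREST);
	printf("%a\n", fma(1.0, 1.0, 0x1p-80)); /* expect 0x1p+0 */
	return 0;
}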