/* fmal.c — 7.5 KB (non-code viewer chrome and line-number gutter removed) */
  1. /* origin: FreeBSD /usr/src/lib/msun/src/s_fmal.c */
  2. /*-
  3. * Copyright (c) 2005-2011 David Schultz <[email protected]>
  4. * All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions
  8. * are met:
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * 2. Redistributions in binary form must reproduce the above copyright
  12. * notice, this list of conditions and the following disclaimer in the
  13. * documentation and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  16. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  18. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  21. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  22. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  23. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  24. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  25. * SUCH DAMAGE.
  26. */
  27. #include "libm.h"
  28. #if LDBL_MANT_DIG == 53 && LDBL_MAX_EXP == 1024
  29. long double fmal(long double x, long double y, long double z)
  30. {
  31. return fma(x, y, z);
  32. }
  33. #elif (LDBL_MANT_DIG == 64 || LDBL_MANT_DIG == 113) && LDBL_MAX_EXP == 16384
  34. #include <fenv.h>
  35. /*
  36. * A struct dd represents a floating-point number with twice the precision
  37. * of a long double. We maintain the invariant that "hi" stores the high-order
  38. * bits of the result.
  39. */
/*
 * A struct dd represents a floating-point number with twice the precision
 * of a long double, as the unevaluated sum hi + lo. We maintain the
 * invariant that "hi" stores the high-order bits of the result.
 */
struct dd {
	long double hi; /* rounded result */
	long double lo; /* rounding error of hi; |lo| <= 0.5 ulp(hi) */
};
  44. /*
  45. * Compute a+b exactly, returning the exact result in a struct dd. We assume
  46. * that both a and b are finite, but make no assumptions about their relative
  47. * magnitudes.
  48. */
  49. static inline struct dd dd_add(long double a, long double b)
  50. {
  51. struct dd ret;
  52. long double s;
  53. ret.hi = a + b;
  54. s = ret.hi - a;
  55. ret.lo = (a - (ret.hi - s)) + (b - s);
  56. return (ret);
  57. }
  58. /*
  59. * Compute a+b, with a small tweak: The least significant bit of the
  60. * result is adjusted into a sticky bit summarizing all the bits that
  61. * were lost to rounding. This adjustment negates the effects of double
  62. * rounding when the result is added to another number with a higher
  63. * exponent. For an explanation of round and sticky bits, see any reference
  64. * on FPU design, e.g.,
  65. *
  66. * J. Coonen. An Implementation Guide to a Proposed Standard for
  67. * Floating-Point Arithmetic. Computer, vol. 13, no. 1, Jan 1980.
  68. */
  69. static inline long double add_adjusted(long double a, long double b)
  70. {
  71. struct dd sum;
  72. union IEEEl2bits u;
  73. sum = dd_add(a, b);
  74. if (sum.lo != 0) {
  75. u.e = sum.hi;
  76. if ((u.bits.manl & 1) == 0)
  77. sum.hi = nextafterl(sum.hi, INFINITY * sum.lo);
  78. }
  79. return (sum.hi);
  80. }
  81. /*
  82. * Compute ldexp(a+b, scale) with a single rounding error. It is assumed
  83. * that the result will be subnormal, and care is taken to ensure that
  84. * double rounding does not occur.
  85. */
  86. static inline long double add_and_denormalize(long double a, long double b, int scale)
  87. {
  88. struct dd sum;
  89. int bits_lost;
  90. union IEEEl2bits u;
  91. sum = dd_add(a, b);
  92. /*
  93. * If we are losing at least two bits of accuracy to denormalization,
  94. * then the first lost bit becomes a round bit, and we adjust the
  95. * lowest bit of sum.hi to make it a sticky bit summarizing all the
  96. * bits in sum.lo. With the sticky bit adjusted, the hardware will
  97. * break any ties in the correct direction.
  98. *
  99. * If we are losing only one bit to denormalization, however, we must
  100. * break the ties manually.
  101. */
  102. if (sum.lo != 0) {
  103. u.e = sum.hi;
  104. bits_lost = -u.bits.exp - scale + 1;
  105. if (bits_lost != 1 ^ (int)(u.bits.manl & 1))
  106. sum.hi = nextafterl(sum.hi, INFINITY * sum.lo);
  107. }
  108. return scalbnl(sum.hi, scale);
  109. }
  110. /*
  111. * Compute a*b exactly, returning the exact result in a struct dd. We assume
  112. * that both a and b are normalized, so no underflow or overflow will occur.
  113. * The current rounding mode must be round-to-nearest.
  114. */
  115. static inline struct dd dd_mul(long double a, long double b)
  116. {
  117. #if LDBL_MANT_DIG == 64
  118. static const long double split = 0x1p32L + 1.0;
  119. #elif LDBL_MANT_DIG == 113
  120. static const long double split = 0x1p57L + 1.0;
  121. #endif
  122. struct dd ret;
  123. long double ha, hb, la, lb, p, q;
  124. p = a * split;
  125. ha = a - p;
  126. ha += p;
  127. la = a - ha;
  128. p = b * split;
  129. hb = b - p;
  130. hb += p;
  131. lb = b - hb;
  132. p = ha * hb;
  133. q = ha * lb + la * hb;
  134. ret.hi = p + q;
  135. ret.lo = p - ret.hi + q + la * lb;
  136. return (ret);
  137. }
  138. /*
  139. * Fused multiply-add: Compute x * y + z with a single rounding error.
  140. *
  141. * We use scaling to avoid overflow/underflow, along with the
  142. * canonical precision-doubling technique adapted from:
  143. *
  144. * Dekker, T. A Floating-Point Technique for Extending the
  145. * Available Precision. Numer. Math. 18, 224-242 (1971).
  146. */
  147. long double fmal(long double x, long double y, long double z)
  148. {
  149. long double xs, ys, zs, adj;
  150. struct dd xy, r;
  151. int oround;
  152. int ex, ey, ez;
  153. int spread;
  154. /*
  155. * Handle special cases. The order of operations and the particular
  156. * return values here are crucial in handling special cases involving
  157. * infinities, NaNs, overflows, and signed zeroes correctly.
  158. */
  159. if (!isfinite(x) || !isfinite(y))
  160. return (x * y + z);
  161. if (!isfinite(z))
  162. return (z);
  163. if (x == 0.0 || y == 0.0)
  164. return (x * y + z);
  165. if (z == 0.0)
  166. return (x * y);
  167. xs = frexpl(x, &ex);
  168. ys = frexpl(y, &ey);
  169. zs = frexpl(z, &ez);
  170. oround = fegetround();
  171. spread = ex + ey - ez;
  172. /*
  173. * If x * y and z are many orders of magnitude apart, the scaling
  174. * will overflow, so we handle these cases specially. Rounding
  175. * modes other than FE_TONEAREST are painful.
  176. */
  177. if (spread < -LDBL_MANT_DIG) {
  178. #ifdef FE_INEXACT
  179. feraiseexcept(FE_INEXACT);
  180. #endif
  181. #ifdef FE_UNDERFLOW
  182. if (!isnormal(z))
  183. feraiseexcept(FE_UNDERFLOW);
  184. #endif
  185. switch (oround) {
  186. default: /* FE_TONEAREST */
  187. return (z);
  188. #ifdef FE_TOWARDZERO
  189. case FE_TOWARDZERO:
  190. if (x > 0.0 ^ y < 0.0 ^ z < 0.0)
  191. return (z);
  192. else
  193. return (nextafterl(z, 0));
  194. #endif
  195. #ifdef FE_DOWNWARD
  196. case FE_DOWNWARD:
  197. if (x > 0.0 ^ y < 0.0)
  198. return (z);
  199. else
  200. return (nextafterl(z, -INFINITY));
  201. #endif
  202. #ifdef FE_UPWARD
  203. case FE_UPWARD:
  204. if (x > 0.0 ^ y < 0.0)
  205. return (nextafterl(z, INFINITY));
  206. else
  207. return (z);
  208. #endif
  209. }
  210. }
  211. if (spread <= LDBL_MANT_DIG * 2)
  212. zs = scalbnl(zs, -spread);
  213. else
  214. zs = copysignl(LDBL_MIN, zs);
  215. fesetround(FE_TONEAREST);
  216. /*
  217. * Basic approach for round-to-nearest:
  218. *
  219. * (xy.hi, xy.lo) = x * y (exact)
  220. * (r.hi, r.lo) = xy.hi + z (exact)
  221. * adj = xy.lo + r.lo (inexact; low bit is sticky)
  222. * result = r.hi + adj (correctly rounded)
  223. */
  224. xy = dd_mul(xs, ys);
  225. r = dd_add(xy.hi, zs);
  226. spread = ex + ey;
  227. if (r.hi == 0.0) {
  228. /*
  229. * When the addends cancel to 0, ensure that the result has
  230. * the correct sign.
  231. */
  232. fesetround(oround);
  233. volatile long double vzs = zs; /* XXX gcc CSE bug workaround */
  234. return xy.hi + vzs + scalbnl(xy.lo, spread);
  235. }
  236. if (oround != FE_TONEAREST) {
  237. /*
  238. * There is no need to worry about double rounding in directed
  239. * rounding modes.
  240. */
  241. fesetround(oround);
  242. adj = r.lo + xy.lo;
  243. return scalbnl(r.hi + adj, spread);
  244. }
  245. adj = add_adjusted(r.lo, xy.lo);
  246. if (spread + ilogbl(r.hi) > -16383)
  247. return scalbnl(r.hi + adj, spread);
  248. else
  249. return add_and_denormalize(r.hi, adj, spread);
  250. }
  251. #endif