summary refs log tree commit diff
path: root/src/math/fma.c
diff options
context:
space:
mode:
author nsz <nsz@port70.net> 2012-03-19 22:57:58 +0100
committer nsz <nsz@port70.net> 2012-03-19 22:57:58 +0100
commit 2786c7d21611b9fa3b2fe356542cf213e7dd0ba4 (patch)
tree b3954e9cec7580f5dc851491d3b60d808aae4259 /src/math/fma.c
parent 01fdfd491b5d83b72099fbae14c4a71ed8e0b945 (diff)
download musl-2786c7d21611b9fa3b2fe356542cf213e7dd0ba4.tar.gz
use scalbn or *2.0 instead of ldexp, fix fmal
Some code assumed ldexp(x, 1) is faster than 2.0*x, but ldexp is a wrapper around scalbn which uses multiplications inside, so this optimization is wrong. This commit also fixes fmal which accidentally used ldexp instead of ldexpl, losing precision. There are various additional changes from the work-in-progress const cleanups.
Diffstat (limited to 'src/math/fma.c')
-rw-r--r--src/math/fma.c12
1 file changed, 6 insertions, 6 deletions
diff --git a/src/math/fma.c b/src/math/fma.c
index 87d450c7..5fb95406 100644
--- a/src/math/fma.c
+++ b/src/math/fma.c
@@ -247,7 +247,7 @@ static inline double add_and_denormalize(double a, double b, int scale)
INSERT_WORD64(sum.hi, hibits);
}
}
- return (ldexp(sum.hi, scale));
+ return scalbn(sum.hi, scale);
}
/*
@@ -364,7 +364,7 @@ double fma(double x, double y, double z)
}
}
if (spread <= DBL_MANT_DIG * 2)
- zs = ldexp(zs, -spread);
+ zs = scalbn(zs, -spread);
else
zs = copysign(DBL_MIN, zs);
@@ -390,7 +390,7 @@ double fma(double x, double y, double z)
*/
fesetround(oround);
volatile double vzs = zs; /* XXX gcc CSE bug workaround */
- return (xy.hi + vzs + ldexp(xy.lo, spread));
+ return xy.hi + vzs + scalbn(xy.lo, spread);
}
if (oround != FE_TONEAREST) {
@@ -400,13 +400,13 @@ double fma(double x, double y, double z)
*/
fesetround(oround);
adj = r.lo + xy.lo;
- return (ldexp(r.hi + adj, spread));
+ return scalbn(r.hi + adj, spread);
}
adj = add_adjusted(r.lo, xy.lo);
if (spread + ilogb(r.hi) > -1023)
- return (ldexp(r.hi + adj, spread));
+ return scalbn(r.hi + adj, spread);
else
- return (add_and_denormalize(r.hi, adj, spread));
+ return add_and_denormalize(r.hi, adj, spread);
}
#endif