/* origin: FreeBSD /usr/src/lib/msun/src/e_log2.c */
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunSoft, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/*
* Return the base 2 logarithm of x. See log.c and __log1p.h for most
* comments.
*
* This reduces x to {k, 1+f} exactly as in e_log.c, then calls the kernel,
* then does the combining and scaling steps
 * log2(x) = (f - 0.5*f*f + __log1p(f)) / ln2 + k
* in not-quite-routine extra precision.
*/
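/*
 * Illustrative worked example (added, not from the original source):
 * for x = 10 the reduction gives k = 3 and a significand of 1.25, which
 * is already below sqrt(2), so f = 0.25.  The parenthesized sum equals
 * log(1.25) ~= 0.223144, and
 *   log2(10) ~= 0.223144/ln2 + 3 ~= 0.321928 + 3 = 3.321928.
 */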
#include "libm.h"
#include "__log1p.h"
static const double
two54   = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
ivln2hi = 1.44269504072144627571e+00, /* 0x3ff71547, 0x65200000 */
ivln2lo = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */

double log2(double x)
{
	double f,hfsq,hi,lo,r,val_hi,val_lo,w,y;
	int32_t i,k,hx;
	uint32_t lx;

	EXTRACT_WORDS(hx, lx, x);
	k = 0;
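	/*
	 * Signed compare: this branch catches +-0 (return -inf and raise
	 * divide-by-zero), negative x (return NaN and raise invalid), and
	 * positive subnormals, which are scaled up by 2^54 so a normal
	 * exponent can be extracted below.
	 */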
	if (hx < 0x00100000) {  /* x < 2**-1022 */
		if (((hx&0x7fffffff)|lx) == 0)
			return -two54/0.0;  /* log(+-0)=-inf */
		if (hx < 0)
			return (x-x)/0.0;   /* log(-#) = NaN */
		/* subnormal number, scale up x */
		k -= 54;
		x *= two54;
		GET_HIGH_WORD(hx, x);
	}
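	/* +inf and NaN propagate via x+x; log2(1) is exactly +0 */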
	if (hx >= 0x7ff00000)
		return x+x;
	if (hx == 0x3ff00000 && lx == 0)
		return 0.0;  /* log(1) = +0 */

	k += (hx>>20) - 1023;
	hx &= 0x000fffff;
	i = (hx+0x95f64) & 0x100000;
	SET_HIGH_WORD(x, hx|(i^0x3ff00000));  /* normalize x or x/2 */
	k += i>>20;
	y = (double)k;
	f = x - 1.0;
	hfsq = 0.5*f*f;
	r = __log1p(f);
	/*
	 * f-hfsq must (for args near 1) be evaluated in extra precision
	 * to avoid a large cancellation when x is near sqrt(2) or 1/sqrt(2).
	 * This is fairly efficient since f-hfsq only depends on f, so can
	 * be evaluated in parallel with R. Not combining hfsq with R also
	 * keeps R small (though not as small as a true `lo' term would be),
	 * so that extra precision is not needed for terms involving R.
	 *
	 * Compiler bugs involving extra precision used to break Dekker's
	 * theorem for splitting f-hfsq as hi+lo, unless double_t was used
	 * or the multi-precision calculations were avoided when double_t
	 * has extra precision. These problems are now automatically
	 * avoided as a side effect of the optimization of combining the
	 * Dekker splitting step with the clear-low-bits step.
	 *
	 * y must (for args near sqrt(2) and 1/sqrt(2)) be added in extra
	 * precision to avoid a very large cancellation when x is very near
	 * these values. Unlike the above cancellations, this problem is
	 * specific to base 2. It is strange that adding +-1 is so much
	 * harder than adding +-ln2 or +-log10_2.
	 *
	 * This uses Dekker's theorem to normalize y+val_hi, so the
	 * compiler bugs are back in some configurations, sigh. And I
	 * don't want to use double_t to avoid them, since that gives a
	 * pessimization and the support for avoiding the pessimization
	 * is not yet available.
	 *
	 * The multi-precision calculations for the multiplications are
	 * routine.
	 */
	hi = f - hfsq;
	SET_LOW_WORD(hi, 0);
	lo = (f - hi) - hfsq + r;
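	/*
	 * hi has its low word cleared and ivln2hi has trailing zero bits,
	 * so hi*ivln2hi is exact; val_lo collects the remaining cross
	 * terms of (hi+lo)*(ivln2hi+ivln2lo).
	 */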
	val_hi = hi*ivln2hi;
	val_lo = (lo+hi)*ivln2lo + lo*ivln2hi;

	/* spadd(val_hi, val_lo, y), except for not using double_t: */
	w = y + val_hi;
	val_lo += (y - w) + val_hi;
	val_hi = w;

	return val_lo + val_hi;
}
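
#if 0
/*
 * Minimal usage sketch (illustrative, not part of the original source):
 * a tiny harness that exercises the special cases and a few ordinary
 * arguments.  It assumes a hosted environment with <stdio.h>.
 */
#include <stdio.h>

int main(void)
{
	const double xs[] = { 0.0, -1.0, 1.0, 1.25, 2.0, 10.0, 1e-310 };
	for (size_t i = 0; i < sizeof xs / sizeof xs[0]; i++)
		printf("log2(%g) = %.17g\n", xs[i], log2(xs[i]));
	return 0;
}
#endif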