/*
 * IBM Accurate Mathematical Library
 * Copyright (c) International Business Machines Corp., 2001
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/
#include "endian.h"
#include "dla.h"
#include "mpa.h"
#include "MathLib.h"
static double tanMp(double);
void __mptan(double, mp_no *, int);

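/* Outline of the strategy used below: tan(x) is computed in up to three
   stages of increasing precision, in the multi-stage style of Ziv's
   correctly rounded library.  Stage one is a fast evaluation in plain
   double arithmetic, accepted only when a rounding test shows that the
   approximation error cannot change the rounded result.  Stage two
   repeats the computation in double-double ("double length") arithmetic
   using the dla.h macros EADD, EMULV, ADD2, SUB2, MUL2 and DIV2, again
   guarded by a rounding test.  Stage three falls back to the multiple
   precision routine tanMp().  The u*.d constants appearing in the
   rounding tests are precomputed error bounds for the corresponding
   stages (see utan.h).  */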
double tan(double x) {
#include "utan.h"
#include "utan.tbl"

  int ux,i,n;
  double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
         t,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
  int p;
  number num,v;
  mp_no mpa,mpt1,mpt2;
#if 0
  mp_no mpy;
#endif

  int branred(double, double *, double *);
  int mpranred(double, mp_no *, int);

  /* x=+-INF, x=NaN */
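  /* The high-word test below catches every x whose biased exponent field
     is all ones, i.e. +-Inf and NaN.  For such x, x - x yields a NaN
     (and raises the invalid exception for infinite arguments).  */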
  num.d = x;  ux = num.i[HIGH_HALF];
  if ((ux&0x7ff00000)==0x7ff00000) return x-x;

  w = (x<ZERO) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
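  /* Here tan(x) = x + x**3/3 + ..., and for abs(x) <= 1.259e-8 the cubic
     term contributes less than half an ulp of x, so x itself is already
     the correctly rounded result.  */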
  if (w<=g1.d) return x;

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w<=g2.d) {

    /* First stage */
    x2 = x*x;
    t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
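    /* Rounding test: y is formed twice, once with the error bound u1
       subtracted from the correction term and once with it added.  If
       both roundings agree, the true value of tan(x) lies in an interval
       that rounds to the same double, so y can be returned; otherwise
       fall through to the more accurate second stage.  */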
    if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2)) return y;

    /* Second stage */
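    /* The polynomial is re-evaluated in double-double arithmetic: the
       high-order coefficients are folded into c1 in ordinary arithmetic,
       EMULV forms the exact product x*x as the pair (x2, xx2), and the
       ADD2/MUL2 macros carry the Horner recurrence through the low-order
       coefficient pairs (a*.d, aa*.d) in roughly twice the working
       precision.  */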
    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
         x2*a27.d))))));
    EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(x ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(x ,zero.d,c2,cc2,c1,cc1,t1,t2)
    if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1)) return y;
    return tanMp(x);
  }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w<=g3.d) {
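    /* Table driven evaluation.  Entry i of xfg holds a breakpoint
       xi = xfg[i][0] close to w, together with fi = tan(xi),
       gi = cot(xi) = 1/fi, and the low part ffi of tan(xi).  Writing
       w = xi + z with small z and t = tan(z) (approximated by the short
       polynomial pz below), the addition formula gives

         tan(xi+z) = (fi + t)/(1 - fi*t) = fi + t*(gi + fi)/(gi - t),

       which is the expression used for fi + t2.  */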

    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*w));
    z = w-xfg[i][0].d;  z2 = z*z;  s = (x<ZERO) ? MONE : ONE;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;  t2 = pz*(gi+fi)/(gi-pz);
    if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d)) return (s*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    if ((y=fi+(t2-(t4=fi*ua3.d+t3*ub3.d)))==fi+(t2+t4)) return (s*y);

    /* Second stage */
    ffi = xfg[i][3].d;
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zero.d,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)

    if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3)) return (s*y);
    return tanMp(x);
  }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w<=g4.d) {
    /* Range reduction by algorithm i */
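    /* hpinv is 2/pi and toint is a rounding constant chosen so that
       xn = (x*hpinv + toint) - toint is x*2/pi rounded to the nearest
       integer; n is its parity, read from the low half of t.  pi/2 is
       used in the split form mp1 + mp2 + mp3, where mp1 and mp2 have
       enough trailing zero bits for the products xn*mp1 and xn*mp2 to be
       exact, so the reduced argument a + da ~= x - xn*pi/2 is obtained
       with little cancellation error.  */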
    t = (x*hpinv.d + toint.d);
    xn = t - toint.d;
    v.d = t;
    t1 = (x - xn*mp1.d) - xn*mp2.d;
    n = v.i[LOW_HALF] & 0x00000001;
    da = xn*mp3.d;
    a = t1-da;
    da = (t1-a)-da;
    if (a<ZERO) {ya=-a;  yya=-da;  sy=MONE;}
    else        {ya= a;  yya= da;  sy= ONE;}

    /* (IV),(V) The case 0.787 < abs(x) <= 25,  abs(y) <= 1e-7 */
    if (ya<=gy1.d) return tanMp(x);

    /* (VI) The case 0.787 < abs(x) <= 25,  1e-7 < abs(y) <= 0.0608 */
    if (ya<=gy2.d) {
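      /* After the reduction, x = xn*pi/2 + (a + da).  For even xn,
         tan(x) = tan(a+da); for odd xn, tan(x) = -cot(a+da) = -1/tan(a+da),
         which is why the n branches below return a reciprocal (computed
         with DIV2) and a negated result.  */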
      a2 = a*a;
      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
      if (n) {
        /* First stage -cot */
        EADD(a,t2,b,db)
        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c)) return (-y); }
      else {
        /* First stage tan */
        if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a)) return y; }
      /* Second stage */
      /* Range reduction by algorithm ii */
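      /* Algorithm ii repeats the reduction with a longer split of pi/2
         (mp1, mp2, pp3, pp4 instead of three parts), so the reduced
         argument (a, da) is accurate enough for the double-double second
         stage.  */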
      t = (x*hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn*mp1.d) - xn*mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn*pp3.d;
      t = t1-da;
      da = (t1-t)-da;
      t1 = xn*pp4.d;
      a = t - t1;
      da = ((t-a)-t1)+da;

      /* Second stage */
      EADD(a,da,t1,t2)   a=t1;  da=t2;
      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
           x2*a27.d))))));
      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

      if (n) {
        /* Second stage -cot */
        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2)) return (-y); }
      else {
        /* Second stage tan */
        if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1)) return y; }
      return tanMp(x);
    }

    /* (VII) The case 0.787 < abs(x) <= 25,  0.0608 < abs(y) <= 0.787 */

    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*ya));
    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;
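    /* As in case (III), fi = tan(xi) and gi = cot(xi) for the table
       breakpoint xi, and pz approximates tan(z) for the offset z.  The
       tan branch uses
         tan(xi+z) = fi + pz*(gi+fi)/(gi-pz),
       and the -cot branch uses the companion identity
         cot(xi+z) = gi - pz*(fi+gi)/(fi+pz),
       with the sign sy of the reduced argument applied at the end.  */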

    if (n) {
      /* -cot */
      t2 = pz*(fi+gi)/(fi+pz);
      if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d)) return (-sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=gi-(t2-(t4=gi*ua10.d+t3*ub10.d)))==gi-(t2+t4)) return (-sy*y); }
    else {
      /* tan */
      t2 = pz*(gi+fi)/(gi-pz);
      if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d)) return (sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=fi+(t2-(t4=fi*ua9.d+t3*ub9.d)))==fi+(t2+t4)) return (sy*y); }

    /* Second stage */
    ffi = xfg[i][3].d;
    EADD(z0,yya,z,zz)
    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

    if (n) {
      /* -cot */
      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3)) return (-sy*y); }
    else {
      /* tan */
      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3)) return (sy*y); }

    return tanMp(x);
  }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w<=g5.d) {
    /* Range reduction by algorithm ii */
    t = (x*hpinv.d + toint.d);
    xn = t - toint.d;
    v.d = t;
    t1 = (x - xn*mp1.d) - xn*mp2.d;
    n = v.i[LOW_HALF] & 0x00000001;
    da = xn*pp3.d;
    t = t1-da;
    da = (t1-t)-da;
    t1 = xn*pp4.d;
    a = t - t1;
    da = ((t-a)-t1)+da;
    EADD(a,da,t1,t2)   a=t1;  da=t2;
    if (a<ZERO) {ya=-a;  yya=-da;  sy=MONE;}
    else        {ya= a;  yya= da;  sy= ONE;}

    /* (+++) The case 25 < abs(x) <= 1e8,  abs(y) <= 1e-7 */
    if (ya<=gy1.d) return tanMp(x);

    /* (VIII) The case 25 < abs(x) <= 1e8,  1e-7 < abs(y) <= 0.0608 */
    if (ya<=gy2.d) {
      a2 = a*a;
      t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
      if (n) {
        /* First stage -cot */
        EADD(a,t2,b,db)
        DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c)) return (-y); }
      else {
        /* First stage tan */
        if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a)) return y; }

      /* Second stage */
      MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
      c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
           x2*a27.d))))));
      ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
      MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
      MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
      ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

      if (n) {
        /* Second stage -cot */
        DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
        if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2)) return (-y); }
      else {
        /* Second stage tan */
        if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1)) return (y); }
      return tanMp(x);
    }

    /* (IX) The case 25 < abs(x) <= 1e8,  0.0608 < abs(y) <= 0.787 */
    /* First stage */
    i = ((int) (mfftnhf.d+TWO8*ya));
    z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
    pz = z+z*z2*(e0.d+z2*e1.d);
    fi = xfg[i][1].d;  gi = xfg[i][2].d;

    if (n) {
      /* -cot */
      t2 = pz*(fi+gi)/(fi+pz);
      if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d)) return (-sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=gi-(t2-(t4=gi*ua18.d+t3*ub18.d)))==gi-(t2+t4)) return (-sy*y); }
    else {
      /* tan */
      t2 = pz*(gi+fi)/(gi-pz);
      if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d)) return (sy*y);
      t3 = (t2<ZERO) ? -t2 : t2;
      if ((y=fi+(t2-(t4=fi*ua17.d+t3*ub17.d)))==fi+(t2+t4)) return (sy*y); }

    /* Second stage */
    ffi = xfg[i][3].d;
    EADD(z0,yya,z,zz)
    MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
    ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

    ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
    MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
    SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

    if (n) {
      /* -cot */
      DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3)) return (-sy*y); }
    else {
      /* tan */
      DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3)) return (sy*y); }
    return tanMp(x);
  }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
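  /* branred() (defined in branred.c) reduces very large arguments modulo
     pi/2 with enough extra precision for the worst cases in this range:
     it returns the integer multiple of pi/2 (only its parity is needed
     here) and stores the reduced argument in the pair (a, da).  */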
  n = (branred(x,&a,&da)) & 0x00000001;
  EADD(a,da,t1,t2)   a=t1;  da=t2;
  if (a<ZERO) {ya=-a;  yya=-da;  sy=MONE;}
  else        {ya= a;  yya= da;  sy= ONE;}

  /* (+++) The case 1e8 < abs(x) < 2**1024,  abs(y) <= 1e-7 */
  if (ya<=gy1.d) return tanMp(x);

  /* (X) The case 1e8 < abs(x) < 2**1024,  1e-7 < abs(y) <= 0.0608 */
  if (ya<=gy2.d) {
    a2 = a*a;
    t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
    if (n) {
      /* First stage -cot */
      EADD(a,t2,b,db)
      DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c)) return (-y); }
    else {
      /* First stage tan */
      if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a)) return y; }

    /* Second stage */
    /* Reduction by algorithm iv */
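    /* For the second stage the reduction is redone in multiple
       precision: mpranred() produces the reduced argument as an mp_no,
       which is then split into a leading double a and a tail da by
       converting to double, converting back, and subtracting.  */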
    p = 10;    n = (mpranred(x,&mpa,p)) & 0x00000001;
    __mp_dbl(&mpa,&a,p);        dbl_mp(a,&mpt1,p);
    sub(&mpa,&mpt1,&mpt2,p);    __mp_dbl(&mpt2,&da,p);

    MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
    c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
         x2*a27.d))))));
    ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
    MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
    MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
    ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)

    if (n) {
      /* Second stage -cot */
      DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
      if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2)) return (-y); }
    else {
      /* Second stage tan */
      if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1)) return y; }
    return tanMp(x);
  }

  /* (XI) The case 1e8 < abs(x) < 2**1024,  0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d+TWO8*ya));
  z = (z0=(ya-xfg[i][0].d))+yya;  z2 = z*z;
  pz = z+z*z2*(e0.d+z2*e1.d);
  fi = xfg[i][1].d;  gi = xfg[i][2].d;

  if (n) {
    /* -cot */
    t2 = pz*(fi+gi)/(fi+pz);
    if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d)) return (-sy*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    if ((y=gi-(t2-(t4=gi*ua26.d+t3*ub26.d)))==gi-(t2+t4)) return (-sy*y); }
  else {
    /* tan */
    t2 = pz*(gi+fi)/(gi-pz);
    if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d)) return (sy*y);
    t3 = (t2<ZERO) ? -t2 : t2;
    if ((y=fi+(t2-(t4=fi*ua25.d+t3*ub25.d)))==fi+(t2+t4)) return (sy*y); }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD(z0,yya,z,zz)
  MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
  c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
  ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
  ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
  MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
  MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
  ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)

  ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
  MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
  SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)

  if (n) {
    /* -cot */
    DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
    if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3)) return (-sy*y); }
  else {
    /* tan */
    DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
    if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3)) return (sy*y); }
  return tanMp(x);
}

/* multiple precision stage                                       */
/* Convert x to a multiple precision number, compute tan(x) by    */
/* the __mptan() routine, and convert the result back to double.  */
static double tanMp(double x)
{
  int p;
  double y;
  mp_no mpy;
  p = 32;
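  /* 32 multiprecision digits (the mpa.c radix is 2**24, so roughly 768
     bits of working precision), which is ample for the conversion back
     to double below.  */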
  __mptan(x, &mpy, p);
  __mp_dbl(&mpy,&y,p);
  return y;
}

#ifdef NO_LONG_DOUBLE
weak_alias (tan, tanl)
#endif