/* sysdeps/ieee754/dbl-64/s_tan.c (glibc) */
1 /*
2 * IBM Accurate Mathematical Library
3 * written by International Business Machines Corp.
4 * Copyright (C) 2001, 2009, 2011 Free Software Foundation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU Lesser General Public License as published by
8 * the Free Software Foundation; either version 2.1 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 /*********************************************************************/
20 /* MODULE_NAME: utan.c */
21 /* */
22 /* FUNCTIONS: utan */
23 /* tanMp */
24 /* */
25 /* FILES NEEDED:dla.h endian.h mpa.h mydefs.h utan.h */
26 /* branred.c sincos32.c mptan.c */
27 /* utan.tbl */
28 /* */
29 /* An ultimate tan routine. Given an IEEE double machine number x */
30 /* it computes the correctly rounded (to nearest) value of tan(x). */
31 /* Assumption: Machine arithmetic operations are performed in */
32 /* round to nearest mode of IEEE 754 standard. */
33 /* */
34 /*********************************************************************/
35
36 #include <errno.h>
37 #include "endian.h"
38 #include <dla.h>
39 #include "mpa.h"
40 #include "MathLib.h"
41 #include <math.h>
42 #include <math_private.h>
43 #include <fenv.h>
44
45 #ifndef SECTION
46 # define SECTION
47 #endif
48
/* Final fallback: compute tan(x) in multiple precision (defined below).  */
static double tanMp(double);
/* Multi-precision tangent from mptan.c; stores the result in its mp_no
   output argument at the given precision.  */
void __mptan(double, mp_no *, int);
51
52 double
53 SECTION
54 tan(double x) {
55 #include "utan.h"
56 #include "utan.tbl"
57
58 int ux,i,n;
59 double a,da,a2,b,db,c,dc,c1,cc1,c2,cc2,c3,cc3,fi,ffi,gi,pz,s,sy,
60 t,t1,t2,t3,t4,t7,t8,t9,t10,w,x2,xn,xx2,y,ya,yya,z0,z,zz,z2,zz2;
61 #ifndef DLA_FMS
62 double t5,t6;
63 #endif
64 int p;
65 number num,v;
66 mp_no mpa,mpt1,mpt2;
67 #if 0
68 mp_no mpy;
69 #endif
70
71 fenv_t env;
72 double retval;
73
74 int __branred(double, double *, double *);
75 int __mpranred(double, mp_no *, int);
76
77 libc_feholdexcept_setround (&env, FE_TONEAREST);
78
79 /* x=+-INF, x=NaN */
80 num.d = x; ux = num.i[HIGH_HALF];
81 if ((ux&0x7ff00000)==0x7ff00000) {
82 if ((ux&0x7fffffff)==0x7ff00000)
83 __set_errno (EDOM);
84 retval = x-x;
85 goto ret;
86 }
87
88 w=(x<ZERO) ? -x : x;
89
90 /* (I) The case abs(x) <= 1.259e-8 */
91 if (w<=g1.d) { retval = x; goto ret; }
92
93 /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
94 if (w<=g2.d) {
95
96 /* First stage */
97 x2 = x*x;
98 t2 = x*x2*(d3.d+x2*(d5.d+x2*(d7.d+x2*(d9.d+x2*d11.d))));
99 if ((y=x+(t2-u1.d*t2)) == x+(t2+u1.d*t2)) { retval = y; goto ret; }
100
101 /* Second stage */
102 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
103 x2*a27.d))))));
104 EMULV(x,x,x2,xx2,t1,t2,t3,t4,t5)
105 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
106 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
107 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
108 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
109 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
110 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
111 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
112 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
113 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
114 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
115 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
116 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
117 MUL2(x ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
118 ADD2(x ,zero.d,c2,cc2,c1,cc1,t1,t2)
119 if ((y=c1+(cc1-u2.d*c1)) == c1+(cc1+u2.d*c1)) { retval = y; goto ret; }
120 retval = tanMp(x);
121 goto ret;
122 }
123
124 /* (III) The case 0.0608 < abs(x) <= 0.787 */
125 if (w<=g3.d) {
126
127 /* First stage */
128 i = ((int) (mfftnhf.d+TWO8*w));
129 z = w-xfg[i][0].d; z2 = z*z; s = (x<ZERO) ? MONE : ONE;
130 pz = z+z*z2*(e0.d+z2*e1.d);
131 fi = xfg[i][1].d; gi = xfg[i][2].d; t2 = pz*(gi+fi)/(gi-pz);
132 if ((y=fi+(t2-fi*u3.d))==fi+(t2+fi*u3.d)) { retval = (s*y); goto ret; }
133 t3 = (t2<ZERO) ? -t2 : t2;
134 t4 = fi*ua3.d+t3*ub3.d;
135 if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (s*y); goto ret; }
136
137 /* Second stage */
138 ffi = xfg[i][3].d;
139 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
140 EMULV(z,z,z2,zz2,t1,t2,t3,t4,t5)
141 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
142 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
143 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
144 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
145 MUL2(z ,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
146 ADD2(z ,zero.d,c2,cc2,c1,cc1,t1,t2)
147
148 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
149 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
150 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
151 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
152
153 if ((y=c3+(cc3-u4.d*c3))==c3+(cc3+u4.d*c3)) { retval = (s*y); goto ret; }
154 retval = tanMp(x);
155 goto ret;
156 }
157
158 /* (---) The case 0.787 < abs(x) <= 25 */
159 if (w<=g4.d) {
160 /* Range reduction by algorithm i */
161 t = (x*hpinv.d + toint.d);
162 xn = t - toint.d;
163 v.d = t;
164 t1 = (x - xn*mp1.d) - xn*mp2.d;
165 n =v.i[LOW_HALF] & 0x00000001;
166 da = xn*mp3.d;
167 a=t1-da;
168 da = (t1-a)-da;
169 if (a<ZERO) {ya=-a; yya=-da; sy=MONE;}
170 else {ya= a; yya= da; sy= ONE;}
171
172 /* (IV),(V) The case 0.787 < abs(x) <= 25, abs(y) <= 1e-7 */
173 if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
174
175 /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
176 if (ya<=gy2.d) {
177 a2 = a*a;
178 t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
179 if (n) {
180 /* First stage -cot */
181 EADD(a,t2,b,db)
182 DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
183 if ((y=c+(dc-u6.d*c))==c+(dc+u6.d*c)) { retval = (-y); goto ret; } }
184 else {
185 /* First stage tan */
186 if ((y=a+(t2-u5.d*a))==a+(t2+u5.d*a)) { retval = y; goto ret; } }
187 /* Second stage */
188 /* Range reduction by algorithm ii */
189 t = (x*hpinv.d + toint.d);
190 xn = t - toint.d;
191 v.d = t;
192 t1 = (x - xn*mp1.d) - xn*mp2.d;
193 n =v.i[LOW_HALF] & 0x00000001;
194 da = xn*pp3.d;
195 t=t1-da;
196 da = (t1-t)-da;
197 t1 = xn*pp4.d;
198 a = t - t1;
199 da = ((t-a)-t1)+da;
200
201 /* Second stage */
202 EADD(a,da,t1,t2) a=t1; da=t2;
203 MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
204 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
205 x2*a27.d))))));
206 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
207 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
208 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
209 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
210 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
211 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
212 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
213 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
214 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
215 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
216 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
217 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
218 MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
219 ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
220
221 if (n) {
222 /* Second stage -cot */
223 DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
224 if ((y=c2+(cc2-u8.d*c2)) == c2+(cc2+u8.d*c2)) { retval = (-y); goto ret; } }
225 else {
226 /* Second stage tan */
227 if ((y=c1+(cc1-u7.d*c1)) == c1+(cc1+u7.d*c1)) { retval = y; goto ret; } }
228 retval = tanMp(x);
229 goto ret;
230 }
231
232 /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */
233
234 /* First stage */
235 i = ((int) (mfftnhf.d+TWO8*ya));
236 z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
237 pz = z+z*z2*(e0.d+z2*e1.d);
238 fi = xfg[i][1].d; gi = xfg[i][2].d;
239
240 if (n) {
241 /* -cot */
242 t2 = pz*(fi+gi)/(fi+pz);
243 if ((y=gi-(t2-gi*u10.d))==gi-(t2+gi*u10.d)) { retval = (-sy*y); goto ret; }
244 t3 = (t2<ZERO) ? -t2 : t2;
245 t4 = gi*ua10.d+t3*ub10.d;
246 if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
247 else {
248 /* tan */
249 t2 = pz*(gi+fi)/(gi-pz);
250 if ((y=fi+(t2-fi*u9.d))==fi+(t2+fi*u9.d)) { retval = (sy*y); goto ret; }
251 t3 = (t2<ZERO) ? -t2 : t2;
252 t4 = fi*ua9.d+t3*ub9.d;
253 if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
254
255 /* Second stage */
256 ffi = xfg[i][3].d;
257 EADD(z0,yya,z,zz)
258 MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
259 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
260 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
261 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
262 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
263 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
264 MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
265 ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
266
267 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
268 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
269 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
270
271 if (n) {
272 /* -cot */
273 DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
274 if ((y=c3+(cc3-u12.d*c3))==c3+(cc3+u12.d*c3)) { retval = (-sy*y); goto ret; } }
275 else {
276 /* tan */
277 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
278 if ((y=c3+(cc3-u11.d*c3))==c3+(cc3+u11.d*c3)) { retval = (sy*y); goto ret; } }
279
280 retval = tanMp(x);
281 goto ret;
282 }
283
284 /* (---) The case 25 < abs(x) <= 1e8 */
285 if (w<=g5.d) {
286 /* Range reduction by algorithm ii */
287 t = (x*hpinv.d + toint.d);
288 xn = t - toint.d;
289 v.d = t;
290 t1 = (x - xn*mp1.d) - xn*mp2.d;
291 n =v.i[LOW_HALF] & 0x00000001;
292 da = xn*pp3.d;
293 t=t1-da;
294 da = (t1-t)-da;
295 t1 = xn*pp4.d;
296 a = t - t1;
297 da = ((t-a)-t1)+da;
298 EADD(a,da,t1,t2) a=t1; da=t2;
299 if (a<ZERO) {ya=-a; yya=-da; sy=MONE;}
300 else {ya= a; yya= da; sy= ONE;}
301
302 /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
303 if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
304
305 /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
306 if (ya<=gy2.d) {
307 a2 = a*a;
308 t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
309 if (n) {
310 /* First stage -cot */
311 EADD(a,t2,b,db)
312 DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
313 if ((y=c+(dc-u14.d*c))==c+(dc+u14.d*c)) { retval = (-y); goto ret; } }
314 else {
315 /* First stage tan */
316 if ((y=a+(t2-u13.d*a))==a+(t2+u13.d*a)) { retval = y; goto ret; } }
317
318 /* Second stage */
319 MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
320 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
321 x2*a27.d))))));
322 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
323 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
324 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
325 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
326 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
327 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
328 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
329 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
330 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
331 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
332 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
333 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
334 MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
335 ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
336
337 if (n) {
338 /* Second stage -cot */
339 DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
340 if ((y=c2+(cc2-u16.d*c2)) == c2+(cc2+u16.d*c2)) { retval = (-y); goto ret; } }
341 else {
342 /* Second stage tan */
343 if ((y=c1+(cc1-u15.d*c1)) == c1+(cc1+u15.d*c1)) { retval = (y); goto ret; } }
344 retval = tanMp(x);
345 goto ret;
346 }
347
348 /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
349 /* First stage */
350 i = ((int) (mfftnhf.d+TWO8*ya));
351 z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
352 pz = z+z*z2*(e0.d+z2*e1.d);
353 fi = xfg[i][1].d; gi = xfg[i][2].d;
354
355 if (n) {
356 /* -cot */
357 t2 = pz*(fi+gi)/(fi+pz);
358 if ((y=gi-(t2-gi*u18.d))==gi-(t2+gi*u18.d)) { retval = (-sy*y); goto ret; }
359 t3 = (t2<ZERO) ? -t2 : t2;
360 t4 = gi*ua18.d+t3*ub18.d;
361 if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
362 else {
363 /* tan */
364 t2 = pz*(gi+fi)/(gi-pz);
365 if ((y=fi+(t2-fi*u17.d))==fi+(t2+fi*u17.d)) { retval = (sy*y); goto ret; }
366 t3 = (t2<ZERO) ? -t2 : t2;
367 t4 = fi*ua17.d+t3*ub17.d;
368 if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
369
370 /* Second stage */
371 ffi = xfg[i][3].d;
372 EADD(z0,yya,z,zz)
373 MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
374 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
375 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
376 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
377 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
378 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
379 MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
380 ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
381
382 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
383 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
384 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
385
386 if (n) {
387 /* -cot */
388 DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
389 if ((y=c3+(cc3-u20.d*c3))==c3+(cc3+u20.d*c3)) { retval = (-sy*y); goto ret; } }
390 else {
391 /* tan */
392 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
393 if ((y=c3+(cc3-u19.d*c3))==c3+(cc3+u19.d*c3)) { retval = (sy*y); goto ret; } }
394 retval = tanMp(x);
395 goto ret;
396 }
397
398 /* (---) The case 1e8 < abs(x) < 2**1024 */
399 /* Range reduction by algorithm iii */
400 n = (__branred(x,&a,&da)) & 0x00000001;
401 EADD(a,da,t1,t2) a=t1; da=t2;
402 if (a<ZERO) {ya=-a; yya=-da; sy=MONE;}
403 else {ya= a; yya= da; sy= ONE;}
404
405 /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
406 if (ya<=gy1.d) { retval = tanMp(x); goto ret; }
407
408 /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
409 if (ya<=gy2.d) {
410 a2 = a*a;
411 t2 = da+a*a2*(d3.d+a2*(d5.d+a2*(d7.d+a2*(d9.d+a2*d11.d))));
412 if (n) {
413 /* First stage -cot */
414 EADD(a,t2,b,db)
415 DIV2(one.d,zero.d,b,db,c,dc,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
416 if ((y=c+(dc-u22.d*c))==c+(dc+u22.d*c)) { retval = (-y); goto ret; } }
417 else {
418 /* First stage tan */
419 if ((y=a+(t2-u21.d*a))==a+(t2+u21.d*a)) { retval = y; goto ret; } }
420
421 /* Second stage */
422 /* Reduction by algorithm iv */
423 p=10; n = (__mpranred(x,&mpa,p)) & 0x00000001;
424 __mp_dbl(&mpa,&a,p); __dbl_mp(a,&mpt1,p);
425 __sub(&mpa,&mpt1,&mpt2,p); __mp_dbl(&mpt2,&da,p);
426
427 MUL2(a,da,a,da,x2,xx2,t1,t2,t3,t4,t5,t6,t7,t8)
428 c1 = x2*(a15.d+x2*(a17.d+x2*(a19.d+x2*(a21.d+x2*(a23.d+x2*(a25.d+
429 x2*a27.d))))));
430 ADD2(a13.d,aa13.d,c1,zero.d,c2,cc2,t1,t2)
431 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
432 ADD2(a11.d,aa11.d,c1,cc1,c2,cc2,t1,t2)
433 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
434 ADD2(a9.d ,aa9.d ,c1,cc1,c2,cc2,t1,t2)
435 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
436 ADD2(a7.d ,aa7.d ,c1,cc1,c2,cc2,t1,t2)
437 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
438 ADD2(a5.d ,aa5.d ,c1,cc1,c2,cc2,t1,t2)
439 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
440 ADD2(a3.d ,aa3.d ,c1,cc1,c2,cc2,t1,t2)
441 MUL2(x2,xx2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
442 MUL2(a ,da ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
443 ADD2(a ,da ,c2,cc2,c1,cc1,t1,t2)
444
445 if (n) {
446 /* Second stage -cot */
447 DIV2(one.d,zero.d,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
448 if ((y=c2+(cc2-u24.d*c2)) == c2+(cc2+u24.d*c2)) { retval = (-y); goto ret; } }
449 else {
450 /* Second stage tan */
451 if ((y=c1+(cc1-u23.d*c1)) == c1+(cc1+u23.d*c1)) { retval = y; goto ret; } }
452 retval = tanMp(x);
453 goto ret;
454 }
455
456 /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
457 /* First stage */
458 i = ((int) (mfftnhf.d+TWO8*ya));
459 z = (z0=(ya-xfg[i][0].d))+yya; z2 = z*z;
460 pz = z+z*z2*(e0.d+z2*e1.d);
461 fi = xfg[i][1].d; gi = xfg[i][2].d;
462
463 if (n) {
464 /* -cot */
465 t2 = pz*(fi+gi)/(fi+pz);
466 if ((y=gi-(t2-gi*u26.d))==gi-(t2+gi*u26.d)) { retval = (-sy*y); goto ret; }
467 t3 = (t2<ZERO) ? -t2 : t2;
468 t4 = gi*ua26.d+t3*ub26.d;
469 if ((y=gi-(t2-t4))==gi-(t2+t4)) { retval = (-sy*y); goto ret; } }
470 else {
471 /* tan */
472 t2 = pz*(gi+fi)/(gi-pz);
473 if ((y=fi+(t2-fi*u25.d))==fi+(t2+fi*u25.d)) { retval = (sy*y); goto ret; }
474 t3 = (t2<ZERO) ? -t2 : t2;
475 t4 = fi*ua25.d+t3*ub25.d;
476 if ((y=fi+(t2-t4))==fi+(t2+t4)) { retval = (sy*y); goto ret; } }
477
478 /* Second stage */
479 ffi = xfg[i][3].d;
480 EADD(z0,yya,z,zz)
481 MUL2(z,zz,z,zz,z2,zz2,t1,t2,t3,t4,t5,t6,t7,t8)
482 c1 = z2*(a7.d+z2*(a9.d+z2*a11.d));
483 ADD2(a5.d,aa5.d,c1,zero.d,c2,cc2,t1,t2)
484 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
485 ADD2(a3.d,aa3.d,c1,cc1,c2,cc2,t1,t2)
486 MUL2(z2,zz2,c2,cc2,c1,cc1,t1,t2,t3,t4,t5,t6,t7,t8)
487 MUL2(z ,zz ,c1,cc1,c2,cc2,t1,t2,t3,t4,t5,t6,t7,t8)
488 ADD2(z ,zz ,c2,cc2,c1,cc1,t1,t2)
489
490 ADD2(fi ,ffi,c1,cc1,c2,cc2,t1,t2)
491 MUL2(fi ,ffi,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8)
492 SUB2(one.d,zero.d,c3,cc3,c1,cc1,t1,t2)
493
494 if (n) {
495 /* -cot */
496 DIV2(c1,cc1,c2,cc2,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
497 if ((y=c3+(cc3-u28.d*c3))==c3+(cc3+u28.d*c3)) { retval = (-sy*y); goto ret; } }
498 else {
499 /* tan */
500 DIV2(c2,cc2,c1,cc1,c3,cc3,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10)
501 if ((y=c3+(cc3-u27.d*c3))==c3+(cc3+u27.d*c3)) { retval = (sy*y); goto ret; } }
502 retval = tanMp(x);
503 goto ret;
504
505 ret:
506 libc_feupdateenv (&env);
507 return retval;
508 }
509
510 /* multiple precision stage */
511 /* Convert x to multi precision number,compute tan(x) by mptan() routine */
512 /* and converts result back to double */
513 static double
514 SECTION
515 tanMp(double x)
516 {
517 int p;
518 double y;
519 mp_no mpy;
520 p=32;
521 __mptan(x, &mpy, p);
522 __mp_dbl(&mpy,&y,p);
523 return y;
524 }
525
526 #ifdef NO_LONG_DOUBLE
527 weak_alias (tan, tanl)
528 #endif
/* End of s_tan.c */