/*
 * IBM Accurate Mathematical Library
 * written by International Business Machines Corp.
 * Copyright (C) 2001-2018 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
/*********************************************************************/
/* MODULE_NAME: utan.c                                               */
/*                                                                   */
/* FUNCTIONS: utan                                                   */
/*            tanMp                                                  */
/*                                                                   */
/* FILES NEEDED: dla.h endian.h mpa.h mydefs.h utan.h                */
/*               branred.c sincos32.c mptan.c                        */
/*               utan.tbl                                            */
/*                                                                   */
/* An ultimate tan routine. Given an IEEE double machine number x    */
/* it computes the correctly rounded (to nearest) value of tan(x).   */
/* Assumption: Machine arithmetic operations are performed in        */
/* round to nearest mode of IEEE 754 standard.                       */
/*                                                                   */
/*********************************************************************/

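/* The implementation is staged.  Each input range is handled first in
   plain double arithmetic, then, when the Ziv-style rounding test on
   that result fails, in double-double arithmetic, and as a last resort
   by the multi-precision fallback tanMp.  */
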
#include <errno.h>
#include <float.h>
#include "endian.h"
#include <dla.h>
#include "mpa.h"
#include "MathLib.h"
#include <math.h>
#include <math_private.h>
#include <math-underflow.h>
#include <libm-alias-double.h>
#include <fenv.h>
#include <stap-probe.h>

#ifndef SECTION
# define SECTION
#endif

static double tanMp (double);
void __mptan (double, mp_no *, int);

double
SECTION
__tan (double x)
{
#include "utan.h"
#include "utan.tbl"

  int ux, i, n;
  double a, da, a2, b, db, c, dc, c1, cc1, c2, cc2, c3, cc3, fi, ffi, gi, pz,
    s, sy, t, t1, t2, t3, t4, t7, t8, t9, t10, w, x2, xn, xx2, y, ya,
    yya, z0, z, zz, z2, zz2;
#ifndef DLA_FMS
  double t5, t6;
#endif
  int p;
  number num, v;
  mp_no mpa, mpt1, mpt2;

  double retval;

  int __branred (double, double *, double *);
  int __mpranred (double, mp_no *, int);

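  /* The double-double arithmetic below requires round-to-nearest with
     53-bit precision; the macro saves the caller's floating-point state
     here and restores it on return.  */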
  SET_RESTORE_ROUND_53BIT (FE_TONEAREST);

  /* x=+-INF, x=NaN */
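  /* An all-ones exponent field in the high word marks Inf or NaN.
     tan(+-Inf) is a domain error, so errno is set to EDOM; in both
     cases x - x produces the NaN to return.  */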
  num.d = x;
  ux = num.i[HIGH_HALF];
  if ((ux & 0x7ff00000) == 0x7ff00000)
    {
      if ((ux & 0x7fffffff) == 0x7ff00000)
	__set_errno (EDOM);
      retval = x - x;
      goto ret;
    }

  w = (x < 0.0) ? -x : x;

  /* (I) The case abs(x) <= 1.259e-8 */
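  /* Here tan(x) rounds to x itself: the cubic term of the Taylor series
     is below half an ulp.  The check raises the underflow exception when
     the result is subnormal.  */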
  if (w <= g1.d)
    {
      math_check_force_underflow_nonneg (w);
      retval = x;
      goto ret;
    }

  /* (II) The case 1.259e-8 < abs(x) <= 0.0608 */
  if (w <= g2.d)
    {
      /* First stage */
      x2 = x * x;

      t2 = d9.d + x2 * d11.d;
      t2 = d7.d + x2 * t2;
      t2 = d5.d + x2 * t2;
      t2 = d3.d + x2 * t2;
      t2 *= x * x2;

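      /* Ziv's rounding test: if adding and subtracting the error bound
         u1 * t2 leaves the rounded result unchanged, y is known to be
         the correctly rounded tan(x).  */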
      if ((y = x + (t2 - u1.d * t2)) == x + (t2 + u1.d * t2))
        {
          retval = y;
          goto ret;
        }

      /* Second stage */
      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

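      /* From here the polynomial is re-evaluated in double-double
         arithmetic (dla.h): EMULV forms an exact product, while ADD2 and
         MUL2 propagate value/correction pairs such as (c1, cc1).  */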
      EMULV (x, x, x2, xx2, t1, t2, t3, t4, t5);
      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (x, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (x, 0.0, c2, cc2, c1, cc1, t1, t2);
      if ((y = c1 + (cc1 - u2.d * c1)) == c1 + (cc1 + u2.d * c1))
        {
          retval = y;
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (III) The case 0.0608 < abs(x) <= 0.787 */
  if (w <= g3.d)
    {
      /* First stage */
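      /* Write w = xi + z with xi = xfg[i][0], an entry of a table with
         spacing 2^-8.  With fi = tan(xi), gi = 1/tan(xi) tabulated and
         pz approximating tan(z) by a short polynomial, the addition
         formula gives
           tan(xi + z) = fi + pz * (gi + fi) / (gi - pz).  */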
      i = ((int) (mfftnhf.d + TWO8 * w));
      z = w - xfg[i][0].d;
      z2 = z * z;
      s = (x < 0.0) ? -1 : 1;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u3.d)) == fi + (t2 + fi * u3.d))
        {
          retval = (s * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua3.d + t3 * ub3.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (s * y);
          goto ret;
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      EMULV (z, z, z2, zz2, t1, t2, t3, t4, t5);
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, 0.0, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);

      if ((y = c3 + (cc3 - u4.d * c3)) == c3 + (cc3 + u4.d * c3))
        {
          retval = (s * y);
          goto ret;
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 0.787 < abs(x) <= 25 */
  if (w <= g4.d)
    {
      /* Range reduction by algorithm i */
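      /* The shifter toint forces rounding of x / (pi/2) to an integer xn
         whose low bits can be read from v.i[LOW_HALF]; subtracting xn
         times pi/2, split across mp1, mp2, mp3 for exactness, leaves the
         reduced argument as the pair a + da.  n is the parity of xn and
         selects tan versus -cot below.  */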
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * mp3.d;
      a = t1 - da;
      da = (t1 - a) - da;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (IV),(V) The case 0.787 < abs(x) <= 25,  abs(y) <= 1e-7 */
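      /* The reduced argument is so close to a multiple of pi/2 that
         massive cancellation prevents an accurate double result; use the
         multi-precision fallback directly.  */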
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VI) The case 0.787 < abs(x) <= 25, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

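          /* For odd n the reduced argument sits an odd multiple of pi/2
             away from x, so tan(x) = -cot (a + da) = -1 / tan (a + da);
             EADD and DIV2 below form that reciprocal in double-double
             arithmetic.  */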
          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u6.d * c)) == c + (dc + u6.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u5.d * a)) == a + (t2 + u5.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }
          /* Second stage */
          /* Range reduction by algorithm ii */
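          /* Algorithm ii redoes the reduction with pp3 and pp4, a finer
             splitting of pi/2, so the reduced argument a + da is accurate
             enough for the double-double second stage.  */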
          t = (x * hpinv.d + toint.d);
          xn = t - toint.d;
          v.d = t;
          t1 = (x - xn * mp1.d) - xn * mp2.d;
          n = v.i[LOW_HALF] & 0x00000001;
          da = xn * pp3.d;
          t = t1 - da;
          da = (t1 - t) - da;
          t1 = xn * pp4.d;
          a = t - t1;
          da = ((t - a) - t1) + da;

          /* Second stage */
          EADD (a, da, t1, t2);
          a = t1;
          da = t2;
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u8.d * c2)) == c2 + (cc2 + u8.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u7.d * c1)) == c1 + (cc1 + u7.d * c1))
                {
                  retval = y;
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (VII) The case 0.787 < abs(x) <= 25, 0.0608 < abs(y) <= 0.787 */

      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

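      /* With t2 = pz * (fi + gi) / (fi + pz), the identity
         gi - t2 = (1 - fi * tan z) / (fi + tan z) = cot (xi + z)
         supplies the -cot branch for odd n; sy restores the sign lost
         in taking ya = abs (a).  */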
      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u10.d)) == gi - (t2 + gi * u10.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua10.d + t3 * ub10.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u9.d)) == fi + (t2 + fi * u9.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua9.d + t3 * ub9.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u12.d * c3)) == c3 + (cc3 + u12.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u11.d * c3)) == c3 + (cc3 + u11.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 25 < abs(x) <= 1e8 */
  if (w <= g5.d)
    {
      /* Range reduction by algorithm ii */
      t = (x * hpinv.d + toint.d);
      xn = t - toint.d;
      v.d = t;
      t1 = (x - xn * mp1.d) - xn * mp2.d;
      n = v.i[LOW_HALF] & 0x00000001;
      da = xn * pp3.d;
      t = t1 - da;
      da = (t1 - t) - da;
      t1 = xn * pp4.d;
      a = t - t1;
      da = ((t - a) - t1) + da;
      EADD (a, da, t1, t2);
      a = t1;
      da = t2;
      if (a < 0.0)
        {
          ya = -a;
          yya = -da;
          sy = -1;
        }
      else
        {
          ya = a;
          yya = da;
          sy = 1;
        }

      /* (+++) The case 25 < abs(x) <= 1e8, abs(y) <= 1e-7 */
      if (ya <= gy1.d)
        {
          retval = tanMp (x);
          goto ret;
        }

      /* (VIII) The case 25 < abs(x) <= 1e8, 1e-7 < abs(y) <= 0.0608 */
      if (ya <= gy2.d)
        {
          a2 = a * a;
          t2 = d9.d + a2 * d11.d;
          t2 = d7.d + a2 * t2;
          t2 = d5.d + a2 * t2;
          t2 = d3.d + a2 * t2;
          t2 = da + a * a2 * t2;

          if (n)
            {
              /* First stage -cot */
              EADD (a, t2, b, db);
              DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8,
                    t9, t10);
              if ((y = c + (dc - u14.d * c)) == c + (dc + u14.d * c))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* First stage tan */
              if ((y = a + (t2 - u13.d * a)) == a + (t2 + u13.d * a))
                {
                  retval = y;
                  goto ret;
                }
            }

          /* Second stage */
          MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);
          c1 = a25.d + x2 * a27.d;
          c1 = a23.d + x2 * c1;
          c1 = a21.d + x2 * c1;
          c1 = a19.d + x2 * c1;
          c1 = a17.d + x2 * c1;
          c1 = a15.d + x2 * c1;
          c1 *= x2;

          ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
          MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
          MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
          ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

          if (n)
            {
              /* Second stage -cot */
              DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7,
                    t8, t9, t10);
              if ((y = c2 + (cc2 - u16.d * c2)) == c2 + (cc2 + u16.d * c2))
                {
                  retval = (-y);
                  goto ret;
                }
            }
          else
            {
              /* Second stage tan */
              if ((y = c1 + (cc1 - u15.d * c1)) == c1 + (cc1 + u15.d * c1))
                {
                  retval = (y);
                  goto ret;
                }
            }
          retval = tanMp (x);
          goto ret;
        }

      /* (IX) The case 25 < abs(x) <= 1e8, 0.0608 < abs(y) <= 0.787 */
      /* First stage */
      i = ((int) (mfftnhf.d + TWO8 * ya));
      z = (z0 = (ya - xfg[i][0].d)) + yya;
      z2 = z * z;
      pz = z + z * z2 * (e0.d + z2 * e1.d);
      fi = xfg[i][1].d;
      gi = xfg[i][2].d;

      if (n)
        {
          /* -cot */
          t2 = pz * (fi + gi) / (fi + pz);
          if ((y = gi - (t2 - gi * u18.d)) == gi - (t2 + gi * u18.d))
            {
              retval = (-sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = gi * ua18.d + t3 * ub18.d;
          if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          t2 = pz * (gi + fi) / (gi - pz);
          if ((y = fi + (t2 - fi * u17.d)) == fi + (t2 + fi * u17.d))
            {
              retval = (sy * y);
              goto ret;
            }
          t3 = (t2 < 0.0) ? -t2 : t2;
          t4 = fi * ua17.d + t3 * ub17.d;
          if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
            {
              retval = (sy * y);
              goto ret;
            }
        }

      /* Second stage */
      ffi = xfg[i][3].d;
      EADD (z0, yya, z, zz);
      MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
      c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
      ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

      ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
      MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
      SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

      if (n)
        {
          /* -cot */
          DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u20.d * c3)) == c3 + (cc3 + u20.d * c3))
            {
              retval = (-sy * y);
              goto ret;
            }
        }
      else
        {
          /* tan */
          DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c3 + (cc3 - u19.d * c3)) == c3 + (cc3 + u19.d * c3))
            {
              retval = (sy * y);
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (---) The case 1e8 < abs(x) < 2**1024 */
  /* Range reduction by algorithm iii */
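  /* __branred (from branred.c, listed above) reduces even the largest
     doubles modulo pi/2 against an extended-precision representation of
     2/pi, storing the reduced argument in a + da and returning an
     integer whose low bit gives the parity n.  */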
  n = (__branred (x, &a, &da)) & 0x00000001;
  EADD (a, da, t1, t2);
  a = t1;
  da = t2;
  if (a < 0.0)
    {
      ya = -a;
      yya = -da;
      sy = -1;
    }
  else
    {
      ya = a;
      yya = da;
      sy = 1;
    }

  /* (+++) The case 1e8 < abs(x) < 2**1024, abs(y) <= 1e-7 */
  if (ya <= gy1.d)
    {
      retval = tanMp (x);
      goto ret;
    }

  /* (X) The case 1e8 < abs(x) < 2**1024, 1e-7 < abs(y) <= 0.0608 */
  if (ya <= gy2.d)
    {
      a2 = a * a;
      t2 = d9.d + a2 * d11.d;
      t2 = d7.d + a2 * t2;
      t2 = d5.d + a2 * t2;
      t2 = d3.d + a2 * t2;
      t2 = da + a * a2 * t2;
      if (n)
        {
          /* First stage -cot */
          EADD (a, t2, b, db);
          DIV2 (1.0, 0.0, b, db, c, dc, t1, t2, t3, t4, t5, t6, t7, t8, t9,
                t10);
          if ((y = c + (dc - u22.d * c)) == c + (dc + u22.d * c))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* First stage tan */
          if ((y = a + (t2 - u21.d * a)) == a + (t2 + u21.d * a))
            {
              retval = y;
              goto ret;
            }
        }

      /* Second stage */
      /* Reduction by algorithm iv */
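      /* __mpranred reduces x in multi-precision (p = 10 digits of the mp
         format); the result is rounded to a double a, and the rounding
         error is recovered into da by subtracting in multi-precision.  */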
      p = 10;
      n = (__mpranred (x, &mpa, p)) & 0x00000001;
      __mp_dbl (&mpa, &a, p);
      __dbl_mp (a, &mpt1, p);
      __sub (&mpa, &mpt1, &mpt2, p);
      __mp_dbl (&mpt2, &da, p);

      MUL2 (a, da, a, da, x2, xx2, t1, t2, t3, t4, t5, t6, t7, t8);

      c1 = a25.d + x2 * a27.d;
      c1 = a23.d + x2 * c1;
      c1 = a21.d + x2 * c1;
      c1 = a19.d + x2 * c1;
      c1 = a17.d + x2 * c1;
      c1 = a15.d + x2 * c1;
      c1 *= x2;

      ADD2 (a13.d, aa13.d, c1, 0.0, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a11.d, aa11.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a9.d, aa9.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a7.d, aa7.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a5.d, aa5.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
      MUL2 (x2, xx2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
      MUL2 (a, da, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
      ADD2 (a, da, c2, cc2, c1, cc1, t1, t2);

      if (n)
        {
          /* Second stage -cot */
          DIV2 (1.0, 0.0, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8,
                t9, t10);
          if ((y = c2 + (cc2 - u24.d * c2)) == c2 + (cc2 + u24.d * c2))
            {
              retval = (-y);
              goto ret;
            }
        }
      else
        {
          /* Second stage tan */
          if ((y = c1 + (cc1 - u23.d * c1)) == c1 + (cc1 + u23.d * c1))
            {
              retval = y;
              goto ret;
            }
        }
      retval = tanMp (x);
      goto ret;
    }

  /* (XI) The case 1e8 < abs(x) < 2**1024, 0.0608 < abs(y) <= 0.787 */
  /* First stage */
  i = ((int) (mfftnhf.d + TWO8 * ya));
  z = (z0 = (ya - xfg[i][0].d)) + yya;
  z2 = z * z;
  pz = z + z * z2 * (e0.d + z2 * e1.d);
  fi = xfg[i][1].d;
  gi = xfg[i][2].d;

  if (n)
    {
      /* -cot */
      t2 = pz * (fi + gi) / (fi + pz);
      if ((y = gi - (t2 - gi * u26.d)) == gi - (t2 + gi * u26.d))
        {
          retval = (-sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = gi * ua26.d + t3 * ub26.d;
      if ((y = gi - (t2 - t4)) == gi - (t2 + t4))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      t2 = pz * (gi + fi) / (gi - pz);
      if ((y = fi + (t2 - fi * u25.d)) == fi + (t2 + fi * u25.d))
        {
          retval = (sy * y);
          goto ret;
        }
      t3 = (t2 < 0.0) ? -t2 : t2;
      t4 = fi * ua25.d + t3 * ub25.d;
      if ((y = fi + (t2 - t4)) == fi + (t2 + t4))
        {
          retval = (sy * y);
          goto ret;
        }
    }

  /* Second stage */
  ffi = xfg[i][3].d;
  EADD (z0, yya, z, zz);
  MUL2 (z, zz, z, zz, z2, zz2, t1, t2, t3, t4, t5, t6, t7, t8);
  c1 = z2 * (a7.d + z2 * (a9.d + z2 * a11.d));
  ADD2 (a5.d, aa5.d, c1, 0.0, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (a3.d, aa3.d, c1, cc1, c2, cc2, t1, t2);
  MUL2 (z2, zz2, c2, cc2, c1, cc1, t1, t2, t3, t4, t5, t6, t7, t8);
  MUL2 (z, zz, c1, cc1, c2, cc2, t1, t2, t3, t4, t5, t6, t7, t8);
  ADD2 (z, zz, c2, cc2, c1, cc1, t1, t2);

  ADD2 (fi, ffi, c1, cc1, c2, cc2, t1, t2);
  MUL2 (fi, ffi, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8);
  SUB2 (1.0, 0.0, c3, cc3, c1, cc1, t1, t2);

  if (n)
    {
      /* -cot */
      DIV2 (c1, cc1, c2, cc2, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u28.d * c3)) == c3 + (cc3 + u28.d * c3))
        {
          retval = (-sy * y);
          goto ret;
        }
    }
  else
    {
      /* tan */
      DIV2 (c2, cc2, c1, cc1, c3, cc3, t1, t2, t3, t4, t5, t6, t7, t8, t9,
            t10);
      if ((y = c3 + (cc3 - u27.d * c3)) == c3 + (cc3 + u27.d * c3))
        {
          retval = (sy * y);
          goto ret;
        }
    }
  retval = tanMp (x);
  goto ret;

ret:
  return retval;
}

/* Multiple precision stage.  Convert x to a multiple precision number,
   compute tan(x) by the __mptan() routine and convert the result back
   to double.  */
static double
SECTION
tanMp (double x)
{
  int p;
  double y;
  mp_no mpy;
  p = 32;
  __mptan (x, &mpy, p);
  __mp_dbl (&mpy, &y, p);
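  /* SystemTap probe: records that the slow multi-precision path ran.  */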
  LIBC_PROBE (slowtan, 2, &x, &y);
  return y;
}

#ifndef __tan
libm_alias_double (__tan, tan)
#endif