libmvec-supported-funcs = acos \
asin \
atan \
+ atan2 \
cos \
exp \
exp10 \
_ZGVnN2v_atan;
_ZGVsMxv_atanf;
_ZGVsMxv_atan;
+ _ZGVnN4vv_atan2f;
+ _ZGVnN2vv_atan2;
+ _ZGVsMxvv_atan2f;
+ _ZGVsMxvv_atan2;
_ZGVnN4v_exp10f;
_ZGVnN2v_exp10;
_ZGVsMxv_exp10f;
--- /dev/null
+/* Double-precision AdvSIMD atan2
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "v_math.h"
+#include "poly_advsimd_f64.h"
+
+static const struct data
+{
+ float64x2_t pi_over_2;
+ float64x2_t poly[20];
+} data = {
+ /* Coefficients of polynomial P such that atan(x)~x+x^3*P(x^2) on
+ the interval [2**-1022, 1.0]. */
+ .poly = { V2 (-0x1.5555555555555p-2), V2 (0x1.99999999996c1p-3),
+ V2 (-0x1.2492492478f88p-3), V2 (0x1.c71c71bc3951cp-4),
+ V2 (-0x1.745d160a7e368p-4), V2 (0x1.3b139b6a88ba1p-4),
+ V2 (-0x1.11100ee084227p-4), V2 (0x1.e1d0f9696f63bp-5),
+ V2 (-0x1.aebfe7b418581p-5), V2 (0x1.842dbe9b0d916p-5),
+ V2 (-0x1.5d30140ae5e99p-5), V2 (0x1.338e31eb2fbbcp-5),
+ V2 (-0x1.00e6eece7de8p-5), V2 (0x1.860897b29e5efp-6),
+ V2 (-0x1.0051381722a59p-6), V2 (0x1.14e9dc19a4a4ep-7),
+ V2 (-0x1.d0062b42fe3bfp-9), V2 (0x1.17739e210171ap-10),
+ V2 (-0x1.ab24da7be7402p-13), V2 (0x1.358851160a528p-16), },
+ .pi_over_2 = V2 (0x1.921fb54442d18p+0),
+};
+
+#define SignMask v_u64 (0x8000000000000000)
+
+/* Special cases i.e. 0, infinity, NaN (fall back to scalar calls). */
+static float64x2_t VPCS_ATTR NOINLINE
+special_case (float64x2_t y, float64x2_t x, float64x2_t ret, uint64x2_t cmp)
+{
+ return v_call2_f64 (atan2, y, x, ret, cmp);
+}
+
+/* Returns 1 if input is the bit representation of 0, infinity or nan. */
+static inline uint64x2_t
+zeroinfnan (uint64x2_t i)
+{
+ /* (2 * i - 1) >= (2 * asuint64 (INFINITY) - 1). */
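+ /* E.g. for i = asuint64 (-0.0) = 0x8000000000000000, i + i wraps to 0
+ and the subtraction wraps to UINT64_MAX, above the bound; for
+ i = asuint64 (INFINITY), 2 * i - 1 hits the bound exactly; any finite
+ non-zero input stays below it. */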
+ return vcgeq_u64 (vsubq_u64 (vaddq_u64 (i, i), v_u64 (1)),
+ v_u64 (2 * asuint64 (INFINITY) - 1));
+}
+
+/* Fast implementation of vector atan2.
+ Maximum observed error is 2.8 ulps:
+ _ZGVnN2vv_atan2 (0x1.9651a429a859ap+5, 0x1.953075f4ee26p+5)
+ got 0x1.92d628ab678ccp-1
+ want 0x1.92d628ab678cfp-1. */
+float64x2_t VPCS_ATTR V_NAME_D2 (atan2) (float64x2_t y, float64x2_t x)
+{
+ const struct data *data_ptr = ptr_barrier (&data);
+
+ uint64x2_t ix = vreinterpretq_u64_f64 (x);
+ uint64x2_t iy = vreinterpretq_u64_f64 (y);
+
+ uint64x2_t special_cases = vorrq_u64 (zeroinfnan (ix), zeroinfnan (iy));
+
+ uint64x2_t sign_x = vandq_u64 (ix, SignMask);
+ uint64x2_t sign_y = vandq_u64 (iy, SignMask);
+ uint64x2_t sign_xy = veorq_u64 (sign_x, sign_y);
+
+ float64x2_t ax = vabsq_f64 (x);
+ float64x2_t ay = vabsq_f64 (y);
+
+ uint64x2_t pred_xlt0 = vcltzq_f64 (x);
+ uint64x2_t pred_aygtax = vcgtq_f64 (ay, ax);
+
+ /* Set up z for call to atan. */
+ float64x2_t n = vbslq_f64 (pred_aygtax, vnegq_f64 (ax), ay);
+ float64x2_t d = vbslq_f64 (pred_aygtax, ay, ax);
+ float64x2_t z = vdivq_f64 (n, d);
+
+ /* Work out the correct shift. */
+ float64x2_t shift = vreinterpretq_f64_u64 (
+ vandq_u64 (pred_xlt0, vreinterpretq_u64_f64 (v_f64 (-2.0))));
+ shift = vbslq_f64 (pred_aygtax, vaddq_f64 (shift, v_f64 (1.0)), shift);
+ shift = vmulq_f64 (shift, data_ptr->pi_over_2);
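+ /* shift is now one of {0, 1, -2, -1} times pi/2: 0 for x > 0 with
+ ax >= ay, 1 for x > 0 with ay > ax, -2 for x < 0 with ax >= ay, and
+ -1 for x < 0 with ay > ax. Combined with z computed from |x| and |y|
+ and the final XOR with sign_xy, this selects the correct quadrant. */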
+
+ /* Calculate the polynomial approximation.
+ Use split Estrin scheme for P(z^2) with deg(P)=19. Use split instead of
+ full scheme to avoid underflow in x^16.
+ The order 19 polynomial P approximates
+ (atan(sqrt(x))-sqrt(x))/x^(3/2). */
+ float64x2_t z2 = vmulq_f64 (z, z);
+ float64x2_t x2 = vmulq_f64 (z2, z2);
+ float64x2_t x4 = vmulq_f64 (x2, x2);
+ float64x2_t x8 = vmulq_f64 (x4, x4);
+ float64x2_t ret
+ = vfmaq_f64 (v_estrin_7_f64 (z2, x2, x4, data_ptr->poly),
+ v_estrin_11_f64 (z2, x2, x4, x8, data_ptr->poly + 8), x8);
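+ /* In the comments above, x denotes P's argument z^2, so x2, x4 and x8
+ hold z^4, z^8 and z^16; the fma combines the two halves as
+ P_low (z^2) + z^16 * P_high (z^2). */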
+
+ /* Finalize. y = shift + z + z^3 * P(z^2). */
+ ret = vfmaq_f64 (z, ret, vmulq_f64 (z2, z));
+ ret = vaddq_f64 (ret, shift);
+
+ /* Account for the sign of x and y. */
+ ret = vreinterpretq_f64_u64 (
+ veorq_u64 (vreinterpretq_u64_f64 (ret), sign_xy));
+
+ if (__glibc_unlikely (v_any_u64 (special_cases)))
+ return special_case (y, x, ret, special_cases);
+
+ return ret;
+}
--- /dev/null
+/* Double-precision SVE atan2
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "sv_math.h"
+#include "poly_sve_f64.h"
+
+static const struct data
+{
+ float64_t poly[20];
+ float64_t pi_over_2;
+} data = {
+ /* Coefficients of polynomial P such that atan(x)~x+x^3*P(x^2) on
+ [2**-1022, 1.0]. */
+ .poly = { -0x1.5555555555555p-2, 0x1.99999999996c1p-3, -0x1.2492492478f88p-3,
+ 0x1.c71c71bc3951cp-4, -0x1.745d160a7e368p-4, 0x1.3b139b6a88ba1p-4,
+ -0x1.11100ee084227p-4, 0x1.e1d0f9696f63bp-5, -0x1.aebfe7b418581p-5,
+ 0x1.842dbe9b0d916p-5, -0x1.5d30140ae5e99p-5, 0x1.338e31eb2fbbcp-5,
+ -0x1.00e6eece7de8p-5, 0x1.860897b29e5efp-6, -0x1.0051381722a59p-6,
+ 0x1.14e9dc19a4a4ep-7, -0x1.d0062b42fe3bfp-9, 0x1.17739e210171ap-10,
+ -0x1.ab24da7be7402p-13, 0x1.358851160a528p-16, },
+ .pi_over_2 = 0x1.921fb54442d18p+0,
+};
+
+/* Useful constants. */
+#define SignMask sv_u64 (0x8000000000000000)
+
+/* Special cases i.e. 0, infinity, nan (fall back to scalar calls). */
+static svfloat64_t NOINLINE
+special_case (svfloat64_t y, svfloat64_t x, svfloat64_t ret,
+ const svbool_t cmp)
+{
+ return sv_call2_f64 (atan2, y, x, ret, cmp);
+}
+
+/* Returns a predicate indicating true if the input is the bit representation
+ of 0, infinity or nan. */
+static inline svbool_t
+zeroinfnan (svuint64_t i, const svbool_t pg)
+{
+ return svcmpge (pg, svsub_x (pg, svlsl_x (pg, i, 1), 1),
+ sv_u64 (2 * asuint64 (INFINITY) - 1));
+}
+
+/* Fast implementation of SVE atan2. Errors are greatest when y and
+ x are reasonably close together. The greatest observed error is 2.28 ULP:
+ _ZGVsMxvv_atan2 (-0x1.5915b1498e82fp+732, 0x1.54d11ef838826p+732)
+ got -0x1.954f42f1fa841p-1 want -0x1.954f42f1fa843p-1. */
+svfloat64_t SV_NAME_D2 (atan2) (svfloat64_t y, svfloat64_t x, const svbool_t pg)
+{
+ const struct data *data_ptr = ptr_barrier (&data);
+
+ svuint64_t ix = svreinterpret_u64 (x);
+ svuint64_t iy = svreinterpret_u64 (y);
+
+ svbool_t cmp_x = zeroinfnan (ix, pg);
+ svbool_t cmp_y = zeroinfnan (iy, pg);
+ svbool_t cmp_xy = svorr_z (pg, cmp_x, cmp_y);
+
+ svuint64_t sign_x = svand_x (pg, ix, SignMask);
+ svuint64_t sign_y = svand_x (pg, iy, SignMask);
+ svuint64_t sign_xy = sveor_x (pg, sign_x, sign_y);
+
+ svfloat64_t ax = svabs_x (pg, x);
+ svfloat64_t ay = svabs_x (pg, y);
+
+ svbool_t pred_xlt0 = svcmplt (pg, x, 0.0);
+ svbool_t pred_aygtax = svcmpgt (pg, ay, ax);
+
+ /* Set up z for call to atan. */
+ svfloat64_t n = svsel (pred_aygtax, svneg_x (pg, ax), ay);
+ svfloat64_t d = svsel (pred_aygtax, ay, ax);
+ svfloat64_t z = svdiv_x (pg, n, d);
+
+ /* Work out the correct shift. */
+ svfloat64_t shift = svsel (pred_xlt0, sv_f64 (-2.0), sv_f64 (0.0));
+ shift = svsel (pred_aygtax, svadd_x (pg, shift, 1.0), shift);
+ shift = svmul_x (pg, shift, data_ptr->pi_over_2);
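+ /* Unlike the AdvSIMD version, SVE comparisons produce predicates rather
+ than all-ones vector masks, so the shift is built with svsel instead of
+ a bitwise AND. */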
+
+ /* Use split Estrin scheme for P(z^2) with deg(P)=19. */
+ svfloat64_t z2 = svmul_x (pg, z, z);
+ svfloat64_t x2 = svmul_x (pg, z2, z2);
+ svfloat64_t x4 = svmul_x (pg, x2, x2);
+ svfloat64_t x8 = svmul_x (pg, x4, x4);
+
+ svfloat64_t ret = svmla_x (
+ pg, sv_estrin_7_f64_x (pg, z2, x2, x4, data_ptr->poly),
+ sv_estrin_11_f64_x (pg, z2, x2, x4, x8, data_ptr->poly + 8), x8);
+
+ /* y = shift + z + z^3 * P(z^2). */
+ svfloat64_t z3 = svmul_x (pg, z2, z);
+ ret = svmla_x (pg, z, z3, ret);
+
+ ret = svadd_m (pg, ret, shift);
+
+ /* Account for the sign of x and y. */
+ ret = svreinterpret_f64 (sveor_x (pg, svreinterpret_u64 (ret), sign_xy));
+
+ if (__glibc_unlikely (svptest_any (pg, cmp_xy)))
+ return special_case (y, x, ret, cmp_xy);
+
+ return ret;
+}
--- /dev/null
+/* Single-precision AdvSIMD atan2
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "v_math.h"
+#include "poly_advsimd_f32.h"
+
+static const struct data
+{
+ float32x4_t poly[8];
+ float32x4_t pi_over_2;
+} data = {
+ /* Coefficients of polynomial P such that atan(x)~x+x^3*P(x^2) on
+ [2**-128, 1.0].
+ Generated using fpminimax between FLT_MIN and 1. */
+ .poly = { V4 (-0x1.55555p-2f), V4 (0x1.99935ep-3f), V4 (-0x1.24051ep-3f),
+ V4 (0x1.bd7368p-4f), V4 (-0x1.491f0ep-4f), V4 (0x1.93a2c0p-5f),
+ V4 (-0x1.4c3c60p-6f), V4 (0x1.01fd88p-8f) },
+ .pi_over_2 = V4 (0x1.921fb6p+0f),
+};
+
+#define SignMask v_u32 (0x80000000)
+
+/* Special cases i.e. 0, infinity and nan (fall back to scalar calls). */
+static float32x4_t VPCS_ATTR NOINLINE
+special_case (float32x4_t y, float32x4_t x, float32x4_t ret, uint32x4_t cmp)
+{
+ return v_call2_f32 (atan2f, y, x, ret, cmp);
+}
+
+/* Returns 1 if input is the bit representation of 0, infinity or nan. */
+static inline uint32x4_t
+zeroinfnan (uint32x4_t i)
+{
+ /* 2 * i - 1 >= 2 * 0x7f800000lu - 1. */
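+ /* (0x7f800000 is the bit pattern of INFINITY in binary32.) */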
+ return vcgeq_u32 (vsubq_u32 (vmulq_n_u32 (i, 2), v_u32 (1)),
+ v_u32 (2 * 0x7f800000lu - 1));
+}
+
+/* Fast implementation of vector atan2f. Maximum observed error is
+ 2.95 ULP in [0x1.9300d6p+6 0x1.93c0c6p+6] x [0x1.8c2dbp+6 0x1.8cea6p+6]:
+ _ZGVnN4vv_atan2f (0x1.93836cp+6, 0x1.8cae1p+6) got 0x1.967f06p-1
+ want 0x1.967f00p-1. */
+float32x4_t VPCS_ATTR V_NAME_F2 (atan2) (float32x4_t y, float32x4_t x)
+{
+ const struct data *data_ptr = ptr_barrier (&data);
+
+ uint32x4_t ix = vreinterpretq_u32_f32 (x);
+ uint32x4_t iy = vreinterpretq_u32_f32 (y);
+
+ uint32x4_t special_cases = vorrq_u32 (zeroinfnan (ix), zeroinfnan (iy));
+
+ uint32x4_t sign_x = vandq_u32 (ix, SignMask);
+ uint32x4_t sign_y = vandq_u32 (iy, SignMask);
+ uint32x4_t sign_xy = veorq_u32 (sign_x, sign_y);
+
+ float32x4_t ax = vabsq_f32 (x);
+ float32x4_t ay = vabsq_f32 (y);
+
+ uint32x4_t pred_xlt0 = vcltzq_f32 (x);
+ uint32x4_t pred_aygtax = vcgtq_f32 (ay, ax);
+
+ /* Set up z for call to atanf. */
+ float32x4_t n = vbslq_f32 (pred_aygtax, vnegq_f32 (ax), ay);
+ float32x4_t d = vbslq_f32 (pred_aygtax, ay, ax);
+ float32x4_t z = vdivq_f32 (n, d);
+
+ /* Work out the correct shift. */
+ float32x4_t shift = vreinterpretq_f32_u32 (
+ vandq_u32 (pred_xlt0, vreinterpretq_u32_f32 (v_f32 (-2.0f))));
+ shift = vbslq_f32 (pred_aygtax, vaddq_f32 (shift, v_f32 (1.0f)), shift);
+ shift = vmulq_f32 (shift, data_ptr->pi_over_2);
+
+ /* Calculate the polynomial approximation.
+ Use 2-level Estrin scheme for P(z^2) with deg(P)=7. However,
+ a standard implementation using z8 creates spurious underflow
+ in the very last fma (when z^8 is small enough).
+ Therefore, we split the last fma into a mul and an fma.
+ Horner and single-level Estrin have higher errors that exceed
+ threshold. */
+ float32x4_t z2 = vmulq_f32 (z, z);
+ float32x4_t z4 = vmulq_f32 (z2, z2);
+
+ float32x4_t ret = vfmaq_f32 (
+ v_pairwise_poly_3_f32 (z2, z4, data_ptr->poly), z4,
+ vmulq_f32 (z4, v_pairwise_poly_3_f32 (z2, z4, data_ptr->poly + 4)));
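+ /* vfmaq_f32 (a, b, c) computes a + b * c, so this evaluates
+ P_low (z^2) + z^4 * (z^4 * P_high (z^2)); z^8 is never formed as a
+ standalone intermediate, which is the split described above. */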
+
+ /* y = shift + z + z^3 * P(z^2). */
+ ret = vaddq_f32 (vfmaq_f32 (z, ret, vmulq_f32 (z2, z)), shift);
+
+ /* Account for the sign of x and y. */
+ ret = vreinterpretq_f32_u32 (
+ veorq_u32 (vreinterpretq_u32_f32 (ret), sign_xy));
+
+ if (__glibc_unlikely (v_any_u32 (special_cases)))
+ return special_case (y, x, ret, special_cases);
+
+ return ret;
+}
--- /dev/null
+/* Single-precision SVE atan2
+
+ Copyright (C) 2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include "sv_math.h"
+#include "poly_sve_f32.h"
+
+static const struct data
+{
+ float32_t poly[8];
+ float32_t pi_over_2;
+} data = {
+ /* Coefficients of polynomial P such that atan(x)~x+x^3*P(x^2) on
+ [2**-128, 1.0]. */
+ .poly = { -0x1.55555p-2f, 0x1.99935ep-3f, -0x1.24051ep-3f, 0x1.bd7368p-4f,
+ -0x1.491f0ep-4f, 0x1.93a2c0p-5f, -0x1.4c3c60p-6f, 0x1.01fd88p-8f },
+ .pi_over_2 = 0x1.921fb6p+0f,
+};
+
+#define SignMask sv_u32 (0x80000000)
+
+/* Special cases i.e. 0, infinity, nan (fall back to scalar calls). */
+static inline svfloat32_t
+special_case (svfloat32_t y, svfloat32_t x, svfloat32_t ret,
+ const svbool_t cmp)
+{
+ return sv_call2_f32 (atan2f, y, x, ret, cmp);
+}
+
+/* Returns a predicate indicating true if the input is the bit representation
+ of 0, infinity or nan. */
+static inline svbool_t
+zeroinfnan (svuint32_t i, const svbool_t pg)
+{
+ return svcmpge (pg, svsub_x (pg, svlsl_x (pg, i, 1), 1),
+ sv_u32 (2 * 0x7f800000lu - 1));
+}
+
+/* Fast implementation of SVE atan2f based on atan(x) ~ shift + z + z^3 *
+ P(z^2) with reduction to [0,1] using z=1/x and shift = pi/2. Maximum
+ observed error is 2.95 ULP:
+ _ZGVsMxvv_atan2f (0x1.93836cp+6, 0x1.8cae1p+6) got 0x1.967f06p-1
+ want 0x1.967f00p-1. */
+svfloat32_t SV_NAME_F2 (atan2) (svfloat32_t y, svfloat32_t x, const svbool_t pg)
+{
+ const struct data *data_ptr = ptr_barrier (&data);
+
+ svuint32_t ix = svreinterpret_u32 (x);
+ svuint32_t iy = svreinterpret_u32 (y);
+
+ svbool_t cmp_x = zeroinfnan (ix, pg);
+ svbool_t cmp_y = zeroinfnan (iy, pg);
+ svbool_t cmp_xy = svorr_z (pg, cmp_x, cmp_y);
+
+ svuint32_t sign_x = svand_x (pg, ix, SignMask);
+ svuint32_t sign_y = svand_x (pg, iy, SignMask);
+ svuint32_t sign_xy = sveor_x (pg, sign_x, sign_y);
+
+ svfloat32_t ax = svabs_x (pg, x);
+ svfloat32_t ay = svabs_x (pg, y);
+
+ svbool_t pred_xlt0 = svcmplt (pg, x, 0.0);
+ svbool_t pred_aygtax = svcmpgt (pg, ay, ax);
+
+ /* Set up z for call to atan. */
+ svfloat32_t n = svsel (pred_aygtax, svneg_x (pg, ax), ay);
+ svfloat32_t d = svsel (pred_aygtax, ay, ax);
+ svfloat32_t z = svdiv_x (pg, n, d);
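+ /* When |y| > |x| this uses atan2(y, x) = pi/2 - atan(x/y) via
+ z = -|x| / |y|, otherwise z = |y| / |x|; the signs are restored by the
+ XOR with sign_xy at the end. */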
+
+ /* Work out the correct shift. */
+ svfloat32_t shift = svsel (pred_xlt0, sv_f32 (-2.0), sv_f32 (0.0));
+ shift = svsel (pred_aygtax, svadd_x (pg, shift, 1.0), shift);
+ shift = svmul_x (pg, shift, sv_f32 (data_ptr->pi_over_2));
+
+ /* Use pure Estrin scheme for P(z^2) with deg(P)=7. */
+ svfloat32_t z2 = svmul_x (pg, z, z);
+ svfloat32_t z4 = svmul_x (pg, z2, z2);
+ svfloat32_t z8 = svmul_x (pg, z4, z4);
+
+ svfloat32_t ret = sv_estrin_7_f32_x (pg, z2, z4, z8, data_ptr->poly);
+
+ /* ret = shift + z + z^3 * P(z^2). */
+ svfloat32_t z3 = svmul_x (pg, z2, z);
+ ret = svmla_x (pg, z, z3, ret);
+
+ ret = svadd_m (pg, ret, shift);
+
+ /* Account for the sign of x and y. */
+ ret = svreinterpret_f32 (sveor_x (pg, svreinterpret_u32 (ret), sign_xy));
+
+ if (__glibc_unlikely (svptest_any (pg, cmp_xy)))
+ return special_case (y, x, ret, cmp_xy);
+
+ return ret;
+}
# define __vpcs __attribute__ ((__aarch64_vector_pcs__))
+__vpcs __f32x4_t _ZGVnN4vv_atan2f (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_sinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanf (__f32x4_t);
+__vpcs __f64x2_t _ZGVnN2vv_atan2 (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atan (__f64x2_t);
#ifdef __SVE_VEC_MATH_SUPPORTED
+__sv_f32_t _ZGVsMxvv_atan2f (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_sinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanf (__sv_f32_t, __sv_bool_t);
+__sv_f64_t _ZGVsMxvv_atan2 (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atan (__sv_f64_t, __sv_bool_t);
VPCS_VECTOR_WRAPPER (acos_advsimd, _ZGVnN2v_acos)
VPCS_VECTOR_WRAPPER (asin_advsimd, _ZGVnN2v_asin)
VPCS_VECTOR_WRAPPER (atan_advsimd, _ZGVnN2v_atan)
+VPCS_VECTOR_WRAPPER_ff (atan2_advsimd, _ZGVnN2vv_atan2)
VPCS_VECTOR_WRAPPER (cos_advsimd, _ZGVnN2v_cos)
VPCS_VECTOR_WRAPPER (exp_advsimd, _ZGVnN2v_exp)
VPCS_VECTOR_WRAPPER (exp10_advsimd, _ZGVnN2v_exp10)
return svlastb_f64 (svptrue_b64 (), mr); \
}
+#define SVE_VECTOR_WRAPPER_ff(scalar_func, vector_func) \
+ extern VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE, svbool_t); \
+ FLOAT scalar_func (FLOAT x, FLOAT y) \
+ { \
+ VEC_TYPE mx = svdup_n_f64 (x); \
+ VEC_TYPE my = svdup_n_f64 (y); \
+ VEC_TYPE mr = vector_func (mx, my, svptrue_b64 ()); \
+ return svlastb_f64 (svptrue_b64 (), mr); \
+ }
+
SVE_VECTOR_WRAPPER (acos_sve, _ZGVsMxv_acos)
SVE_VECTOR_WRAPPER (asin_sve, _ZGVsMxv_asin)
SVE_VECTOR_WRAPPER (atan_sve, _ZGVsMxv_atan)
+SVE_VECTOR_WRAPPER_ff (atan2_sve, _ZGVsMxvv_atan2)
SVE_VECTOR_WRAPPER (cos_sve, _ZGVsMxv_cos)
SVE_VECTOR_WRAPPER (exp_sve, _ZGVsMxv_exp)
SVE_VECTOR_WRAPPER (exp10_sve, _ZGVsMxv_exp10)
VPCS_VECTOR_WRAPPER (acosf_advsimd, _ZGVnN4v_acosf)
VPCS_VECTOR_WRAPPER (asinf_advsimd, _ZGVnN4v_asinf)
VPCS_VECTOR_WRAPPER (atanf_advsimd, _ZGVnN4v_atanf)
+VPCS_VECTOR_WRAPPER_ff (atan2f_advsimd, _ZGVnN4vv_atan2f)
VPCS_VECTOR_WRAPPER (cosf_advsimd, _ZGVnN4v_cosf)
VPCS_VECTOR_WRAPPER (expf_advsimd, _ZGVnN4v_expf)
VPCS_VECTOR_WRAPPER (exp10f_advsimd, _ZGVnN4v_exp10f)
return svlastb_f32 (svptrue_b32 (), mr); \
}
+#define SVE_VECTOR_WRAPPER_ff(scalar_func, vector_func) \
+ extern VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE, svbool_t); \
+ FLOAT scalar_func (FLOAT x, FLOAT y) \
+ { \
+ VEC_TYPE mx = svdup_n_f32 (x); \
+ VEC_TYPE my = svdup_n_f32 (y); \
+ VEC_TYPE mr = vector_func (mx, my, svptrue_b32 ()); \
+ return svlastb_f32 (svptrue_b32 (), mr); \
+ }
+
SVE_VECTOR_WRAPPER (acosf_sve, _ZGVsMxv_acosf)
SVE_VECTOR_WRAPPER (asinf_sve, _ZGVsMxv_asinf)
SVE_VECTOR_WRAPPER (atanf_sve, _ZGVsMxv_atanf)
+SVE_VECTOR_WRAPPER_ff (atan2f_sve, _ZGVsMxvv_atan2f)
SVE_VECTOR_WRAPPER (cosf_sve, _ZGVsMxv_cosf)
SVE_VECTOR_WRAPPER (expf_sve, _ZGVsMxv_expf)
SVE_VECTOR_WRAPPER (exp10f_sve, _ZGVsMxv_exp10f)
TEST_VEC_LOOP (mr, VEC_LEN); \
return ((FLOAT) mr[0]); \
}
+
+#define VPCS_VECTOR_WRAPPER_ff(scalar_func, vector_func) \
+ extern __attribute__ ((aarch64_vector_pcs)) \
+ VEC_TYPE vector_func (VEC_TYPE, VEC_TYPE); \
+ FLOAT scalar_func (FLOAT x, FLOAT y) \
+ { \
+ int i; \
+ VEC_TYPE mx, my; \
+ INIT_VEC_LOOP (mx, x, VEC_LEN); \
+ INIT_VEC_LOOP (my, y, VEC_LEN); \
+ VEC_TYPE mr = vector_func (mx, my); \
+ TEST_VEC_LOOP (mr, VEC_LEN); \
+ return ((FLOAT) mr[0]); \
+ }
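+
+/* For example, VPCS_VECTOR_WRAPPER_ff (atan2_advsimd, _ZGVnN2vv_atan2)
+ defines a scalar atan2_advsimd (double, double) that broadcasts both
+ arguments with INIT_VEC_LOOP, calls the vector routine, checks every
+ lane with TEST_VEC_LOOP and returns lane 0. */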
__ptr; \
})
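+
+/* Return the bit representation of a double as a uint64_t; the union
+ avoids the strict-aliasing pitfalls of a pointer cast. */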
+static inline uint64_t
+asuint64 (double f)
+{
+ union
+ {
+ double f;
+ uint64_t i;
+ } u = { f };
+ return u.i;
+}
+
#define V_LOG_POLY_ORDER 6
#define V_LOG_TABLE_BITS 7
extern const struct v_log_data
float: 1
ldouble: 2
+Function: "atan2_advsimd":
+double: 1
+float: 2
+
Function: "atan2_downward":
double: 1
float: 2
ldouble: 2
+Function: "atan2_sve":
+double: 1
+float: 2
+
Function: "atan2_towardzero":
double: 1
float: 2
GLIBC_2.39 _ZGVnN2v_log10 F
GLIBC_2.39 _ZGVnN2v_log2 F
GLIBC_2.39 _ZGVnN2v_tan F
+GLIBC_2.39 _ZGVnN2vv_atan2 F
GLIBC_2.39 _ZGVnN4v_acosf F
GLIBC_2.39 _ZGVnN4v_asinf F
GLIBC_2.39 _ZGVnN4v_atanf F
GLIBC_2.39 _ZGVnN4v_log10f F
GLIBC_2.39 _ZGVnN4v_log2f F
GLIBC_2.39 _ZGVnN4v_tanf F
+GLIBC_2.39 _ZGVnN4vv_atan2f F
GLIBC_2.39 _ZGVsMxv_acos F
GLIBC_2.39 _ZGVsMxv_acosf F
GLIBC_2.39 _ZGVsMxv_asin F
GLIBC_2.39 _ZGVsMxv_log2f F
GLIBC_2.39 _ZGVsMxv_tan F
GLIBC_2.39 _ZGVsMxv_tanf F
+GLIBC_2.39 _ZGVsMxvv_atan2 F
+GLIBC_2.39 _ZGVsMxvv_atan2f F