[PATCH 11/11] math: Use tanf from CORE-MATH

Paul Zimmermann Paul.Zimmermann@inria.fr
Tue Nov 12 13:17:10 GMT 2024


Hi Adhemerval,

here are some typos and possible improvements (this is not a formal
review, since I am in the "Signed-off-by" fields):

> +  (src/binary32/tan/tanf.cc in CORE-MATH)

should be src/binary32/tan/tanf.c

> +  - The code was adapted to use glibc code style and internal
> +    functions to handle errno, overflow, and underflow.  It was changed
> +    to use an internal wrapper for 128 bit unsigned integer operation

maybe "operations"?

> +/* Limited support for internal 128 bit integer, used on some math
> +   implementations.  It uses compiler builtin type if supported, otherwise
> +   it is emulate.  Only unsigned and some operations are currently supported:

emulate -> emulated

> +
> +   - u128_t:         the 128 bit unsigned type.
> +   - u128_high:      return the high part of the number.
> +   - u128_low:       return the low part of the number.
> +   - u128_from_u64:  createa 128 bit number from a 64 bit one.

create a

> +   - u128_mul:       multiple two 128 bit numbers.

multiply

> +   - u128_add:       add two 128 bit numbers.
> +   - u128_lshift:    left shift a number.
> +   - u128_rshift:    right shift a number.
> + */
> +
> +#ifdef __SIZEOF_INT128__
> +typedef unsigned __int128 u128;
> +# define u128_high(__x)         (uint64_t)((__x) >> 64)
> +# define u128_low(__x)          (uint64_t)(__x)
> +# define u128_from_u64(__x)     (u128)(__x)
> +# define u128_mul(__x, __y)     (__x) * (__y)
> +# define u128_add(__x, __y)     (__x) + (__y)
> +# define u128_lshift(__x, __y)  (__x) << (__y)
> +# define u128_rshift(__x, __y)  (__x) >> (__y)
> +#else
> +typedef struct
> +{
> +  uint64_t low;
> +  uint64_t high;
> +} u128;
> +
> +# define u128_high(__x)         (__x).high
> +# define u128_low(__x)          (__x).low
> +
> +# define u128_from_u64(__x)     (u128){.low = (__x), .high = 0}
> +
> +# define TOPBIT                 (UINT64_C(1) << 63)
> +# define MASK32                 (UINT64_C(0xffffffff))
> +
> +static u128 u128_add (u128 x, u128 y)
> +{
> +  uint64_t lower = (x.low & ~TOPBIT) + (y.low & ~TOPBIT);
> +  bool carry = (lower >> 63) + (x.low >> 63) + (y.low >> 63) > 1;
> +  return (u128) { .high = x.high + y.high + carry, .low = x.low + y.low };

why not simply the following?

    bool carry = x.low + y.low < x.low;
    return (u128) { ... };
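
spelled out (untested sketch, keeping the return expression from your
patch), that would be:

    static u128 u128_add (u128 x, u128 y)
    {
      /* Carry out of the low words: the unsigned addition wraps
         around, so the sum is smaller than x.low iff it overflowed.  */
      bool carry = x.low + y.low < x.low;
      return (u128) { .high = x.high + y.high + carry,
                      .low = x.low + y.low };
    }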

> +}
> +
> +static u128 u128_lshift (u128 x, unsigned int n)
> +{
> +  switch (n)
> +    {
> +    case 0:         return x;
> +    case 1 ... 63:  return (u128) { .high = (x.high << n) | (x.low >> (64 - n)),
> +				    .low = x.low << n };
> +    case 64 ...127: return (u128) { .high = x.low << (n - 64), .low = 0};
> +    default:        return (u128) { .high = 0, .low = 0 };
> +    }
> +}
> +
> +static u128 u128_rshift (u128 x, unsigned int n)
> +{
> +  switch (n)
> +    {
> +    case 0:         return x;
> +    case 1 ... 63:  return (u128) { .high = x.high >> n,
> +				    .low = (x.high << (64 - n)) | (x.low >> n) };
> +    case 64 ...127: return (u128) { .high = 0, .low = x.high >> (n - 64) };
> +    default:        return (u128) { .high = 0, .low = 0 };
> +    }
> +}
> +
> +static u128 u128_mul (u128 x, u128 y)
> +{
> +  if (x.high == 0 && y.high == 0)
> +    {
> +      uint64_t x0 = x.low & MASK32;
> +      uint64_t x1 = x.low >> 32;
> +      uint64_t y0 = y.low & MASK32;
> +      uint64_t y1 = y.low >> 32;
> +      u128 x0y0 = { .high = 0, .low = x0 * y0 };
> +      u128 x0y1 = { .high = 0, .low = x0 * y1 };
> +      u128 x1y0 = { .high = 0, .low = x1 * y0 };
> +      u128 x1y1 = { .high = 0, .low = x1 * y1 };
> +      /* x0y0 + ((x0y1 + x1y0) << 32) + (x1y1 << 64)  */
> +      return u128_add (u128_add (x0y0, u128_lshift (u128_add (x0y1,
> +							      x1y0),
> +						    32)),
> +		       u128_lshift (x1y1, 64));

maybe you can form x1y1 = { .high = x1 * y1, .low = 0 } to avoid the left
shift of x1y1
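
for instance (just a sketch, not tested, with the other partial
products kept as in your patch):

      /* x1 * y1 already belongs in the high 64 bits of the result, so
         store it there directly instead of shifting it left by 64.  */
      u128 x1y1 = { .high = x1 * y1, .low = 0 };
      /* x0y0 + ((x0y1 + x1y0) << 32) + x1y1  */
      return u128_add (u128_add (x0y0,
                                 u128_lshift (u128_add (x0y1, x1y0), 32)),
                       x1y1);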

Paul

