/*
 * Single-precision vector atan(x) function.
 *
 * Copyright (c) 2021-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"
#include "poly_advsimd_f32.h"

static const struct data
{
  float32x4_t poly[8];
  float32x4_t pi_over_2;
} data = {
  /* Coefficients of polynomial P such that atan(x)~x+x^3*P(x^2) on
     [2**-128, 1.0].
     Generated using fpminimax between FLT_MIN and 1.  */
  .poly = { V4 (-0x1.55555p-2f), V4 (0x1.99935ep-3f), V4 (-0x1.24051ep-3f),
            V4 (0x1.bd7368p-4f), V4 (-0x1.491f0ep-4f), V4 (0x1.93a2c0p-5f),
            V4 (-0x1.4c3c60p-6f), V4 (0x1.01fd88p-8f) },
  .pi_over_2 = V4 (0x1.921fb6p+0f),
};
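
/* Equivalently, the fitted approximation is
     atan(x) ~ x + c0*x^3 + c1*x^5 + ... + c7*x^17
   on [0, 1], where ci denotes poly[i] above.  */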

#define SignMask v_u32 (0x80000000)

#define P(i) d->poly[i]

#define TinyBound 0x30800000 /* asuint(0x1p-30).  */
#define BigBound 0x4e800000  /* asuint(0x1p30).  */

#if WANT_SIMD_EXCEPT
static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t special)
{
  return v_call_f32 (atanf, x, y, special);
}
#endif

/* Fast implementation of vector atanf based on
   atan(x) ~ shift + z + z^3 * P(z^2) with reduction to [0,1]
   using z=-1/x and shift = pi/2. Maximum observed error is 2.9ulps:
   _ZGVnN4v_atanf (0x1.0468f6p+0) got 0x1.967f06p-1 want 0x1.967fp-1.  */
float32x4_t VPCS_ATTR V_NAME_F1 (atan) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* Small cases, infs and nans are supported by our approximation technique,
     but do not set fenv flags correctly. Only trigger special case if we need
     fenv.  */
  uint32x4_t ix = vreinterpretq_u32_f32 (x);
  uint32x4_t sign = vandq_u32 (ix, SignMask);

#if WANT_SIMD_EXCEPT
  uint32x4_t ia = vandq_u32 (ix, v_u32 (0x7ff00000));
  uint32x4_t special = vcgtq_u32 (vsubq_u32 (ia, v_u32 (TinyBound)),
                                  v_u32 (BigBound - TinyBound));
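  /* A single unsigned compare flags lanes with roughly |x| < 0x1p-30 or
     |x| > 0x1p30, as well as infs and nans: inputs below TinyBound wrap
     around to very large values after the subtraction.  */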
  /* If any lane is special, fall back to the scalar routine for all lanes.  */
  if (unlikely (v_any_u32 (special)))
    return special_case (x, x, v_u32 (-1));
#endif

  /* Argument reduction:
     y := arctan(x) for x < 1
     y := pi/2 + arctan(-1/x) for x > 1
     Hence, use z=-1/a if x>=1, otherwise z=a.  */
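  /* For example, atan(2) = pi/2 + atan(-1/2) ~ 1.57080 - 0.46365 = 1.10715.  */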
  uint32x4_t red = vcagtq_f32 (x, v_f32 (1.0));
  /* Avoid dependency on abs(x) in division (and comparison).  */
  float32x4_t z = vbslq_f32 (red, vdivq_f32 (v_f32 (1.0f), x), x);
  float32x4_t shift = vreinterpretq_f32_u32 (
      vandq_u32 (red, vreinterpretq_u32_f32 (d->pi_over_2)));
  /* Use absolute value only when needed (odd powers of z): az is |z| in
     unreduced lanes and -|z| in reduced ones, taking its sign bit from
     red.  */
  float32x4_t az = vbslq_f32 (
      SignMask, vreinterpretq_f32_u32 (vandq_u32 (SignMask, red)), z);

  /* Calculate the polynomial approximation.
     Use 2-level Estrin scheme for P(z^2) with deg(P)=7. However,
     a standard implementation using z8 creates spurious underflow
     in the very last fma (when z^8 is small enough).
     Therefore, we split the last fma into a mul and an fma.
     Horner and single-level Estrin have higher errors that exceed
     the threshold.  */
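  /* Roughly, with ci = poly[i], the evaluation below computes
       lo = (c0 + c1*z2) + z4*(c2 + c3*z2)
       hi = (c4 + c5*z2) + z4*(c6 + c7*z2)
       y  = lo + z4 * (z4 * hi)
     so that z^8 is never formed explicitly.  */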
  float32x4_t z2 = vmulq_f32 (z, z);
  float32x4_t z4 = vmulq_f32 (z2, z2);

  float32x4_t y = vfmaq_f32 (
      v_pairwise_poly_3_f32 (z2, z4, d->poly), z4,
      vmulq_f32 (z4, v_pairwise_poly_3_f32 (z2, z4, d->poly + 4)));

  /* y = shift + az + az * z2 * P(z2).  */
  y = vaddq_f32 (vfmaq_f32 (az, y, vmulq_f32 (z2, az)), shift);
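  /* For unreduced lanes this gives atan(|x|) directly; for reduced lanes it
     gives pi/2 - atan(1/|x|), which also equals atan(|x|).  */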

  /* y = atan(x) if x>0, -atan(-x) otherwise.  */
  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), sign));

  return y;
}

PL_SIG (V, F, 1, atan, -10.0, 10.0)
PL_TEST_ULP (V_NAME_F1 (atan), 2.5)
PL_TEST_EXPECT_FENV (V_NAME_F1 (atan), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 0, 0x1p-30, 5000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 0x1p-30, 1, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 1, 0x1p30, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 0x1p30, inf, 1000)