/*
 * Double-precision vector tanh(x) function.
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "poly_advsimd_f64.h"
#include "mathlib.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64x2_t poly[11];
  float64x2_t inv_ln2, ln2_hi, ln2_lo, shift;
  uint64x2_t onef;
  uint64x2_t thresh, tiny_bound;
} data = {
  /* Generated using Remez, deg=12 in [-log(2)/2, log(2)/2].  */
  .poly = { V2 (0x1p-1), V2 (0x1.5555555555559p-3), V2 (0x1.555555555554bp-5),
            V2 (0x1.111111110f663p-7), V2 (0x1.6c16c16c1b5f3p-10),
            V2 (0x1.a01a01affa35dp-13), V2 (0x1.a01a018b4ecbbp-16),
            V2 (0x1.71ddf82db5bb4p-19), V2 (0x1.27e517fc0d54bp-22),
            V2 (0x1.af5eedae67435p-26), V2 (0x1.1f143d060a28ap-29), },

  .inv_ln2 = V2 (0x1.71547652b82fep0),
  .ln2_hi = V2 (-0x1.62e42fefa39efp-1),
  .ln2_lo = V2 (-0x1.abc9e3b39803fp-56),
  .shift = V2 (0x1.8p52),

  .onef = V2 (0x3ff0000000000000),
  .tiny_bound = V2 (0x3e40000000000000), /* asuint64 (0x1p-27).  */
  /* asuint64(0x1.241bf835f9d5fp+4) - asuint64(tiny_bound).  */
  .thresh = V2 (0x01f241bf835f9d5f),
};

static inline float64x2_t
expm1_inline (float64x2_t x, const struct data *d)
{
  /* Helper routine for calculating exp(x) - 1. Vector port of the helper from
     the scalar variant of tanh.  */

  /* Reduce argument: f in [-ln2/2, ln2/2], i is exact.  */
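  /* j = round(x/ln2): adding the large shift constant 0x1.8p52 pushes the
     rounded integer into the low significand bits, and subtracting the shift
     again recovers it as a double.  */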
  float64x2_t j = vsubq_f64 (vfmaq_f64 (d->shift, d->inv_ln2, x), d->shift);
  int64x2_t i = vcvtq_s64_f64 (j);
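  /* f = x - j*ln2, with ln2 split into high and low parts so that the
     reduction loses as little precision as possible.  */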
  float64x2_t f = vfmaq_f64 (x, j, d->ln2_hi);
  f = vfmaq_f64 (f, j, d->ln2_lo);

  /* Approximate expm1(f) using polynomial.  */
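  /* expm1(f) ~ f + f^2 * P(f); P is evaluated with Estrin's scheme on the
     powers f, f^2, f^4 and f^8 to shorten the dependency chain.  */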
  float64x2_t f2 = vmulq_f64 (f, f);
  float64x2_t f4 = vmulq_f64 (f2, f2);
  float64x2_t p = vfmaq_f64 (
      f, f2, v_estrin_10_f64 (f, f2, f4, vmulq_f64 (f4, f4), d->poly));

  /* t = 2 ^ i.  */
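  /* Construct 2^i directly: shift i into the exponent field and add it to
     the bit pattern of 1.0.  */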
  float64x2_t t = vreinterpretq_f64_u64 (
      vaddq_u64 (vreinterpretq_u64_s64 (i << 52), d->onef));
  /* expm1(x) = p * t + (t - 1).  */
  return vfmaq_f64 (vsubq_f64 (t, v_f64 (1)), p, t);
}

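/* Fall back to the scalar tanh for any lanes flagged as special.  */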
static float64x2_t NOINLINE VPCS_ATTR
special_case (float64x2_t x, float64x2_t y, uint64x2_t special)
{
  return v_call_f64 (tanh, x, y, special);
}

/* Vector approximation for double-precision tanh(x), using a simplified
   version of expm1. The greatest observed error is 2.77 ULP:
   _ZGVnN2v_tanh(-0x1.c4a4ca0f9f3b7p-3) got -0x1.bd6a21a163627p-3
                                       want -0x1.bd6a21a163624p-3.  */
float64x2_t VPCS_ATTR V_NAME_D1 (tanh) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  uint64x2_t ia = vreinterpretq_u64_f64 (vabsq_f64 (x));

  float64x2_t u = x;

  /* Trigger special-cases for tiny, boring and infinity/NaN.  */
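  /* Subtracting tiny_bound wraps tiny inputs (and NaN) around to very large
     unsigned values, so one unsigned compare against thresh catches
     |x| < 0x1p-27, |x| > 0x1.241bf835f9d5fp+4 and inf/NaN together.  */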
  uint64x2_t special = vcgtq_u64 (vsubq_u64 (ia, d->tiny_bound), d->thresh);
#if WANT_SIMD_EXCEPT
  /* To trigger fp exceptions correctly, set special lanes to a neutral value.
     They will be fixed up later by the special-case handler.  */
  if (unlikely (v_any_u64 (special)))
    u = v_zerofy_f64 (u, special);
#endif

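  /* tanh is computed from expm1 of the doubled argument, so form 2x first.  */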
  u = vaddq_f64 (u, u);

  /* tanh(x) = (e^2x - 1) / (e^2x + 1).  */
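  /* With q = expm1(2x), this becomes q / (q + 2).  */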
  float64x2_t q = expm1_inline (u, d);
  float64x2_t qp2 = vaddq_f64 (q, v_f64 (2));

  if (unlikely (v_any_u64 (special)))
    return special_case (x, vdivq_f64 (q, qp2), special);
  return vdivq_f64 (q, qp2);
}

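/* Test intervals are split at the tiny and large-input bounds used in the
   special-case check above.  */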
PL_SIG (V, D, 1, tanh, -10.0, 10.0)
PL_TEST_ULP (V_NAME_D1 (tanh), 2.27)
PL_TEST_EXPECT_FENV (V_NAME_D1 (tanh), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (tanh), 0, 0x1p-27, 5000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (tanh), 0x1p-27, 0x1.241bf835f9d5fp+4, 50000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (tanh), 0x1.241bf835f9d5fp+4, inf, 1000)