/*
 * Single-precision vector erf(x) function.
 *
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float32x4_t max, shift, third;
#if WANT_SIMD_EXCEPT
  float32x4_t tiny_bound, scale_minus_one;
#endif
} data = {
  .max = V4 (3.9375), /* 4 - 8/128.  */
  .shift = V4 (0x1p16f),
  .third = V4 (0x1.555556p-2f), /* 1/3.  */
#if WANT_SIMD_EXCEPT
  .tiny_bound = V4 (0x1p-62f),
  .scale_minus_one = V4 (0x1.06eba8p-3f), /* scale(0) - 1.0, i.e. 2/sqrt(pi) - 1.  */
#endif
};

#define AbsMask 0x7fffffff

struct entry
{
  float32x4_t erf;
  float32x4_t scale;
};

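/* Gather one (erf(r), scale(r)) pair per lane.  The layout implied by the
   casts below is that each table entry stores the two floats contiguously, so
   a single 64-bit load fetches both values, and uzp1/uzp2 de-interleave the
   four pairs into an erf vector and a scale vector.  */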
static inline struct entry
lookup (uint32x4_t i)
{
  struct entry e;
  float64_t t0 = *((float64_t *) (__erff_data.tab + i[0]));
  float64_t t1 = *((float64_t *) (__erff_data.tab + i[1]));
  float64_t t2 = *((float64_t *) (__erff_data.tab + i[2]));
  float64_t t3 = *((float64_t *) (__erff_data.tab + i[3]));
  float32x4_t e1 = vreinterpretq_f32_f64 ((float64x2_t){ t0, t1 });
  float32x4_t e2 = vreinterpretq_f32_f64 ((float64x2_t){ t2, t3 });
  e.erf = vuzp1q_f32 (e1, e2);
  e.scale = vuzp2q_f32 (e1, e2);
  return e;
}

/* Single-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [1 - r * d - 1/3 * d^2]

   Values of erf(r) and scale are read from lookup tables.
   For |x| > 3.9375, erf(|x|) rounds to 1.0f.

   Maximum error: 1.93 ULP
     _ZGVnN4v_erff(0x1.c373e6p-9) got 0x1.fd686cp-9
				 want 0x1.fd6868p-9.  */
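/* For reference, the same recipe as a scalar sketch (tab_erf and tab_scale
   are hypothetical arrays with tab_erf[i] = erf(i/128) and
   tab_scale[i] = 2/sqrt(pi) * exp(-(i/128)^2); the large-|x| and tiny-|x|
   special cases are ignored):

     float
     erff_sketch (float x)
     {
       float a = fabsf (x);
       int i = (int) roundf (128.0f * a);
       float r = (float) i / 128.0f;
       float d = a - r;
       float y = tab_erf[i] + tab_scale[i] * d * (1.0f - r * d - d * d / 3.0f);
       return copysignf (y, x);
     }
*/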
float32x4_t VPCS_ATTR V_NAME_F1 (erf) (float32x4_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-62.  */
  uint32x4_t cmp = vcaltq_f32 (x, dat->tiny_bound);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x to allow
     the special-case handler to fix those lanes later. This is only necessary
     if fenv exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = vbslq_f32 (cmp, v_f32 (1), x);
#endif

  float32x4_t a = vabsq_f32 (x);
  uint32x4_t a_gt_max = vcgtq_f32 (a, dat->max);

  /* Look up erf(r) and scale(r) in the tables; for example, when x reduces to
     r = 0, erf(r) = 0 and scale(r) = 2/sqrt(pi).  */
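  /* The rounding is done with an exponent trick: for a < 2^16, z = a + 2^16
     lies in [2^16, 2^17), where consecutive floats are spaced 2^-7 = 1/128
     apart.  The addition therefore rounds a to the nearest 1/128, and the
     difference between the bit patterns of z and shift is the integer
     round (128 * a).  Out-of-range inputs (including inf) give an oversized
     index, which the vminq below clamps to 512.  */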
  float32x4_t shift = dat->shift;
  float32x4_t z = vaddq_f32 (a, shift);

  uint32x4_t i
      = vsubq_u32 (vreinterpretq_u32_f32 (z), vreinterpretq_u32_f32 (shift));
  i = vminq_u32 (i, v_u32 (512));
  struct entry e = lookup (i);

  float32x4_t r = vsubq_f32 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * (1 - r * d - 1/3 * d^2).  */
  float32x4_t d = vsubq_f32 (a, r);
  float32x4_t d2 = vmulq_f32 (d, d);
  float32x4_t y = vfmaq_f32 (r, dat->third, d); /* r + d/3.  */
  /* erf(r) + scale * (d - d2 * (r + d/3))
     = erf(r) + scale * d * (1 - r * d - 1/3 * d^2).  */
  y = vfmaq_f32 (e.erf, e.scale, vfmsq_f32 (d, d2, y));

  /* Select 1.0 for |x| > 3.9375; this also fixes |x| = inf, where d = a - r
     would otherwise be NaN.  */
  y = vbslq_f32 (a_gt_max, v_f32 (1.0f), y);

  /* Copy sign.  */
  y = vbslq_f32 (v_u32 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  /* For tiny |x|, erf(x) ~ x * 2/sqrt(pi), computed here as
     x + x * (scale(0) - 1) so that the correct fenv exceptions are raised.  */
  if (unlikely (v_any_u32 (cmp)))
    return vbslq_f32 (cmp, vfmaq_f32 (xm, dat->scale_minus_one, xm), y);
#endif
  return y;
}

PL_SIG (V, F, 1, erf, -4.0, 4.0)
PL_TEST_ULP (V_NAME_F1 (erf), 1.43)
PL_TEST_EXPECT_FENV (V_NAME_F1 (erf), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, 3.9375, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 3.9375, inf, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, inf, 40000)
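
/* A minimal usage sketch, excluded from the build: it assumes this file's
   AdvSIMD variant is exported as _ZGVnN4v_erff (the symbol quoted in the
   worst-case error report above) and compares each lane with scalar erff
   from libm.  */
#if 0
#include <arm_neon.h>
#include <math.h>
#include <stdio.h>

float32x4_t _ZGVnN4v_erff (float32x4_t);

int
main (void)
{
  float32x4_t x = { -2.0f, -0.5f, 0.5f, 2.0f };
  float32x4_t y = _ZGVnN4v_erff (x);
  for (int i = 0; i < 4; i++)
    printf ("erf(% .2f) = %a (libm: %a)\n", (double) x[i], (double) y[i],
	    (double) erff (x[i]));
  return 0;
}
#endif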