2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
35 #ifndef GMX_SIMD_SIMD_MATH_H_
36 #define GMX_SIMD_SIMD_MATH_H_
38 /*! \libinternal \file
40 * \brief Math functions for SIMD datatypes.
42 * \attention This file is generic for all SIMD architectures, so you cannot
43 * assume that any of the optional SIMD features (as defined in simd.h) are
44 * present. In particular, this means you cannot assume support for integers,
45 * logical operations (neither on floating-point nor integer values), shifts,
46 * and the architecture might only have SIMD for either float or double.
47 * Second, to keep this file clean and general, any additions to this file
48 * must work for all possible SIMD architectures in both single and double
49 * precision (if they support it), and you cannot make any assumptions about
52 * \author Erik Lindahl <erik.lindahl@scilifelab.se>
55 * \ingroup module_simd
60 #include "gromacs/math/utilities.h"
61 #include "gromacs/simd/simd.h"
64 /*! \addtogroup module_simd */
67 /*! \name Implementation accuracy settings
71 /*! \brief We accept lsb errors for 1/sqrt(x) and 1/x, so float target is 22 bits */
72 #define GMX_SIMD_MATH_TARGET_SINGLE_BITS 22
74 /*! \brief We accept "double" that has 2x single precision - 44 bits.
76 * This way two Newton-Raphson iterations will suffice in double precision.
78 #define GMX_SIMD_MATH_TARGET_DOUBLE_BITS 44
82 #ifdef GMX_SIMD_HAVE_FLOAT
84 /*! \name Single precision SIMD math functions
86 * \note In most cases you should use the real-precision functions instead.
90 /****************************************
91 * SINGLE PRECISION SIMD MATH FUNCTIONS *
92 ****************************************/
94 /*! \brief SIMD float utility to sum a+b+c+d.
96 * You should normally call the real-precision routine \ref gmx_simd_sum4_r.
98 * \param a term 1 (multiple values)
99 * \param b term 2 (multiple values)
100 * \param c term 3 (multiple values)
101 * \param d term 4 (multiple values)
102 * \return sum of terms 1-4 (multiple values)
104 static gmx_inline gmx_simd_float_t gmx_simdcall
105 gmx_simd_sum4_f(gmx_simd_float_t a, gmx_simd_float_t b,
106 gmx_simd_float_t c, gmx_simd_float_t d)
108 return gmx_simd_add_f(gmx_simd_add_f(a, b), gmx_simd_add_f(c, d));
111 /*! \brief Return -a if b is negative, SIMD float.
113 * You should normally call the real-precision routine \ref gmx_simd_xor_sign_r.
115 * \param a Values to set sign for
116 * \param b Values used to set sign
117 * \return if b is negative, the sign of a will be changed.
119 * This is equivalent to doing an xor operation on a with the sign bit of b,
120 * with the exception that negative zero is not considered to be negative
121 * on architectures where \ref GMX_SIMD_HAVE_LOGICAL is not set.
123 static gmx_inline gmx_simd_float_t gmx_simdcall
124 gmx_simd_xor_sign_f(gmx_simd_float_t a, gmx_simd_float_t b)
126 #ifdef GMX_SIMD_HAVE_LOGICAL
127 return gmx_simd_xor_f(a, gmx_simd_and_f(gmx_simd_set1_f(GMX_FLOAT_NEGZERO), b));
129 return gmx_simd_blendv_f(a, gmx_simd_fneg_f(a), gmx_simd_cmplt_f(b, gmx_simd_setzero_f()));
133 /*! \brief Perform one Newton-Raphson iteration to improve 1/sqrt(x) for SIMD float.
135 * This is a low-level routine that should only be used by SIMD math routine
136 * that evaluates the inverse square root.
138 * \param lu Approximation of 1/sqrt(x), typically obtained from lookup.
139 * \param x The reference (starting) value x for which we want 1/sqrt(x).
140 * \return An improved approximation with roughly twice as many bits of accuracy.
142 static gmx_inline gmx_simd_float_t gmx_simdcall
143 gmx_simd_rsqrt_iter_f(gmx_simd_float_t lu, gmx_simd_float_t x)
145 # ifdef GMX_SIMD_HAVE_FMA
146 return gmx_simd_fmadd_f(gmx_simd_fnmadd_f(x, gmx_simd_mul_f(lu, lu), gmx_simd_set1_f(1.0f)), gmx_simd_mul_f(lu, gmx_simd_set1_f(0.5f)), lu);
148 return gmx_simd_mul_f(gmx_simd_set1_f(0.5f), gmx_simd_mul_f(gmx_simd_sub_f(gmx_simd_set1_f(3.0f), gmx_simd_mul_f(gmx_simd_mul_f(lu, lu), x)), lu));
152 /*! \brief Calculate 1/sqrt(x) for SIMD float.
154 * You should normally call the real-precision routine \ref gmx_simd_invsqrt_r.
156 * \param x Argument that must be >0. This routine does not check arguments.
157 * \return 1/sqrt(x). Result is undefined if your argument was invalid.
159 static gmx_inline gmx_simd_float_t gmx_simdcall
160 gmx_simd_invsqrt_f(gmx_simd_float_t x)
162 gmx_simd_float_t lu = gmx_simd_rsqrt_f(x);
163 #if (GMX_SIMD_RSQRT_BITS < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
164 lu = gmx_simd_rsqrt_iter_f(lu, x);
166 #if (GMX_SIMD_RSQRT_BITS*2 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
167 lu = gmx_simd_rsqrt_iter_f(lu, x);
169 #if (GMX_SIMD_RSQRT_BITS*4 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
170 lu = gmx_simd_rsqrt_iter_f(lu, x);
175 /*! \brief Calculate 1/sqrt(x) for two SIMD floats.
177 * You should normally call the real-precision routine \ref gmx_simd_invsqrt_pair_r.
179 * \param x0 First set of arguments, x0 must be positive - no argument checking.
180 * \param x1 Second set of arguments, x1 must be positive - no argument checking.
181 * \param[out] out0 Result 1/sqrt(x0)
182 * \param[out] out1 Result 1/sqrt(x1)
184 * In particular for double precision we can sometimes calculate square root
185 * pairs slightly faster by using single precision until the very last step.
187 static gmx_inline void gmx_simdcall
188 gmx_simd_invsqrt_pair_f(gmx_simd_float_t x0, gmx_simd_float_t x1,
189 gmx_simd_float_t *out0, gmx_simd_float_t *out1)
191 *out0 = gmx_simd_invsqrt_f(x0);
192 *out1 = gmx_simd_invsqrt_f(x1);
195 /*! \brief Perform one Newton-Raphson iteration to improve 1/x for SIMD float.
197 * This is a low-level routine that should only be used by SIMD math routine
198 * that evaluates the reciprocal.
200 * \param lu Approximation of 1/x, typically obtained from lookup.
201 * \param x The reference (starting) value x for which we want 1/x.
202 * \return An improved approximation with roughly twice as many bits of accuracy.
204 static gmx_inline gmx_simd_float_t gmx_simdcall
205 gmx_simd_rcp_iter_f(gmx_simd_float_t lu, gmx_simd_float_t x)
207 return gmx_simd_mul_f(lu, gmx_simd_fnmadd_f(lu, x, gmx_simd_set1_f(2.0f)));
210 /*! \brief Calculate 1/x for SIMD float.
212 * You should normally call the real-precision routine \ref gmx_simd_inv_r.
214 * \param x Argument that must be nonzero. This routine does not check arguments.
215 * \return 1/x. Result is undefined if your argument was invalid.
217 static gmx_inline gmx_simd_float_t gmx_simdcall
218 gmx_simd_inv_f(gmx_simd_float_t x)
220 gmx_simd_float_t lu = gmx_simd_rcp_f(x);
221 #if (GMX_SIMD_RCP_BITS < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
222 lu = gmx_simd_rcp_iter_f(lu, x);
224 #if (GMX_SIMD_RCP_BITS*2 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
225 lu = gmx_simd_rcp_iter_f(lu, x);
227 #if (GMX_SIMD_RCP_BITS*4 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
228 lu = gmx_simd_rcp_iter_f(lu, x);
233 /*! \brief Calculate sqrt(x) correctly for SIMD floats, including argument 0.0.
235 * You should normally call the real-precision routine \ref gmx_simd_sqrt_r.
237 * \param x Argument that must be >=0.
238 * \return sqrt(x). If x=0, the result will correctly be set to 0.
239 * The result is undefined if the input value is negative.
241 static gmx_inline gmx_simd_float_t gmx_simdcall
242 gmx_simd_sqrt_f(gmx_simd_float_t x)
244 gmx_simd_fbool_t mask;
245 gmx_simd_float_t res;
247 mask = gmx_simd_cmpeq_f(x, gmx_simd_setzero_f());
248 res = gmx_simd_blendnotzero_f(gmx_simd_invsqrt_f(x), mask);
249 return gmx_simd_mul_f(res, x);
252 /*! \brief SIMD float log(x). This is the natural logarithm.
254 * You should normally call the real-precision routine \ref gmx_simd_log_r.
256 * \param x Argument, should be >0.
257 * \result The natural logarithm of x. Undefined if argument is invalid.
259 #ifndef gmx_simd_log_f
260 static gmx_inline gmx_simd_float_t gmx_simdcall
261 gmx_simd_log_f(gmx_simd_float_t x)
263 const gmx_simd_float_t half = gmx_simd_set1_f(0.5f);
264 const gmx_simd_float_t one = gmx_simd_set1_f(1.0f);
265 const gmx_simd_float_t sqrt2 = gmx_simd_set1_f(sqrt(2.0f));
266 const gmx_simd_float_t corr = gmx_simd_set1_f(0.693147180559945286226764f);
267 const gmx_simd_float_t CL9 = gmx_simd_set1_f(0.2371599674224853515625f);
268 const gmx_simd_float_t CL7 = gmx_simd_set1_f(0.285279005765914916992188f);
269 const gmx_simd_float_t CL5 = gmx_simd_set1_f(0.400005519390106201171875f);
270 const gmx_simd_float_t CL3 = gmx_simd_set1_f(0.666666567325592041015625f);
271 const gmx_simd_float_t CL1 = gmx_simd_set1_f(2.0f);
272 gmx_simd_float_t fexp, x2, p;
273 gmx_simd_fbool_t mask;
275 fexp = gmx_simd_get_exponent_f(x);
276 x = gmx_simd_get_mantissa_f(x);
278 mask = gmx_simd_cmplt_f(sqrt2, x);
279 /* Adjust to non-IEEE format for x>sqrt(2): exponent += 1, mantissa *= 0.5 */
280 fexp = gmx_simd_add_f(fexp, gmx_simd_blendzero_f(one, mask));
281 x = gmx_simd_mul_f(x, gmx_simd_blendv_f(one, half, mask));
283 x = gmx_simd_mul_f( gmx_simd_sub_f(x, one), gmx_simd_inv_f( gmx_simd_add_f(x, one) ) );
284 x2 = gmx_simd_mul_f(x, x);
286 p = gmx_simd_fmadd_f(CL9, x2, CL7);
287 p = gmx_simd_fmadd_f(p, x2, CL5);
288 p = gmx_simd_fmadd_f(p, x2, CL3);
289 p = gmx_simd_fmadd_f(p, x2, CL1);
290 p = gmx_simd_fmadd_f(p, x, gmx_simd_mul_f(corr, fexp));
296 #ifndef gmx_simd_exp2_f
297 /*! \brief SIMD float 2^x.
299 * You should normally call the real-precision routine \ref gmx_simd_exp2_r.
302 * \result 2^x. Undefined if input argument caused overflow.
304 static gmx_inline gmx_simd_float_t gmx_simdcall
305 gmx_simd_exp2_f(gmx_simd_float_t x)
307 /* Lower bound: Disallow numbers that would lead to an IEEE fp exponent reaching +-127. */
308 const gmx_simd_float_t arglimit = gmx_simd_set1_f(126.0f);
309 const gmx_simd_float_t CC6 = gmx_simd_set1_f(0.0001534581200287996416911311);
310 const gmx_simd_float_t CC5 = gmx_simd_set1_f(0.001339993121934088894618990);
311 const gmx_simd_float_t CC4 = gmx_simd_set1_f(0.009618488957115180159497841);
312 const gmx_simd_float_t CC3 = gmx_simd_set1_f(0.05550328776964726865751735);
313 const gmx_simd_float_t CC2 = gmx_simd_set1_f(0.2402264689063408646490722);
314 const gmx_simd_float_t CC1 = gmx_simd_set1_f(0.6931472057372680777553816);
315 const gmx_simd_float_t one = gmx_simd_set1_f(1.0f);
317 gmx_simd_float_t fexppart;
318 gmx_simd_float_t intpart;
320 gmx_simd_fbool_t valuemask;
322 fexppart = gmx_simd_set_exponent_f(x);
323 intpart = gmx_simd_round_f(x);
324 valuemask = gmx_simd_cmple_f(gmx_simd_fabs_f(x), arglimit);
325 fexppart = gmx_simd_blendzero_f(fexppart, valuemask);
326 x = gmx_simd_sub_f(x, intpart);
328 p = gmx_simd_fmadd_f(CC6, x, CC5);
329 p = gmx_simd_fmadd_f(p, x, CC4);
330 p = gmx_simd_fmadd_f(p, x, CC3);
331 p = gmx_simd_fmadd_f(p, x, CC2);
332 p = gmx_simd_fmadd_f(p, x, CC1);
333 p = gmx_simd_fmadd_f(p, x, one);
334 x = gmx_simd_mul_f(p, fexppart);
339 #ifndef gmx_simd_exp_f
340 /*! \brief SIMD float exp(x).
342 * You should normally call the real-precision routine \ref gmx_simd_exp_r.
344 * In addition to scaling the argument for 2^x this routine correctly does
345 * extended precision arithmetics to improve accuracy.
348 * \result exp(x). Undefined if input argument caused overflow,
349 * which can happen if abs(x) \> 7e13.
351 static gmx_inline gmx_simd_float_t gmx_simdcall
352 gmx_simd_exp_f(gmx_simd_float_t x)
354 const gmx_simd_float_t argscale = gmx_simd_set1_f(1.44269504088896341f);
355 /* Lower bound: Disallow numbers that would lead to an IEEE fp exponent reaching +-127. */
356 const gmx_simd_float_t arglimit = gmx_simd_set1_f(126.0f);
357 const gmx_simd_float_t invargscale0 = gmx_simd_set1_f(0.693145751953125f);
358 const gmx_simd_float_t invargscale1 = gmx_simd_set1_f(1.428606765330187045e-06f);
359 const gmx_simd_float_t CC4 = gmx_simd_set1_f(0.00136324646882712841033936f);
360 const gmx_simd_float_t CC3 = gmx_simd_set1_f(0.00836596917361021041870117f);
361 const gmx_simd_float_t CC2 = gmx_simd_set1_f(0.0416710823774337768554688f);
362 const gmx_simd_float_t CC1 = gmx_simd_set1_f(0.166665524244308471679688f);
363 const gmx_simd_float_t CC0 = gmx_simd_set1_f(0.499999850988388061523438f);
364 const gmx_simd_float_t one = gmx_simd_set1_f(1.0f);
365 gmx_simd_float_t fexppart;
366 gmx_simd_float_t intpart;
367 gmx_simd_float_t y, p;
368 gmx_simd_fbool_t valuemask;
370 y = gmx_simd_mul_f(x, argscale);
371 fexppart = gmx_simd_set_exponent_f(y); /* rounds to nearest int internally */
372 intpart = gmx_simd_round_f(y); /* use same rounding algorithm here */
373 valuemask = gmx_simd_cmple_f(gmx_simd_fabs_f(y), arglimit);
374 fexppart = gmx_simd_blendzero_f(fexppart, valuemask);
376 /* Extended precision arithmetics */
377 x = gmx_simd_fnmadd_f(invargscale0, intpart, x);
378 x = gmx_simd_fnmadd_f(invargscale1, intpart, x);
380 p = gmx_simd_fmadd_f(CC4, x, CC3);
381 p = gmx_simd_fmadd_f(p, x, CC2);
382 p = gmx_simd_fmadd_f(p, x, CC1);
383 p = gmx_simd_fmadd_f(p, x, CC0);
384 p = gmx_simd_fmadd_f(gmx_simd_mul_f(x, x), p, x);
385 p = gmx_simd_add_f(p, one);
386 x = gmx_simd_mul_f(p, fexppart);
/*! \brief SIMD float erf(x).
 *
 * You should normally call the real-precision routine \ref gmx_simd_erf_r.
 *
 * \param x The value to calculate erf(x) for.
 * \result erf(x)
 *
 * This routine achieves very close to full precision, but we do not care about
 * the last bit or the subnormal result range.
 */
static gmx_inline gmx_simd_float_t gmx_simdcall
gmx_simd_erf_f(gmx_simd_float_t x)
{
    /* Coefficients for minimax approximation of erf(x)=x*P(x^2) in range [-1,1] */
    const gmx_simd_float_t  CA6      = gmx_simd_set1_f(7.853861353153693e-5f);
    const gmx_simd_float_t  CA5      = gmx_simd_set1_f(-8.010193625184903e-4f);
    const gmx_simd_float_t  CA4      = gmx_simd_set1_f(5.188327685732524e-3f);
    const gmx_simd_float_t  CA3      = gmx_simd_set1_f(-2.685381193529856e-2f);
    const gmx_simd_float_t  CA2      = gmx_simd_set1_f(1.128358514861418e-1f);
    const gmx_simd_float_t  CA1      = gmx_simd_set1_f(-3.761262582423300e-1f);
    const gmx_simd_float_t  CA0      = gmx_simd_set1_f(1.128379165726710f);
    /* Coefficients for minimax approximation of erfc(x)=Exp(-x^2)*P((1/(x-1))^2) in range [0.67,2] */
    const gmx_simd_float_t  CB9      = gmx_simd_set1_f(-0.0018629930017603923f);
    const gmx_simd_float_t  CB8      = gmx_simd_set1_f(0.003909821287598495f);
    const gmx_simd_float_t  CB7      = gmx_simd_set1_f(-0.0052094582210355615f);
    const gmx_simd_float_t  CB6      = gmx_simd_set1_f(0.005685614362160572f);
    const gmx_simd_float_t  CB5      = gmx_simd_set1_f(-0.0025367682853477272f);
    const gmx_simd_float_t  CB4      = gmx_simd_set1_f(-0.010199799682318782f);
    const gmx_simd_float_t  CB3      = gmx_simd_set1_f(0.04369575504816542f);
    const gmx_simd_float_t  CB2      = gmx_simd_set1_f(-0.11884063474674492f);
    const gmx_simd_float_t  CB1      = gmx_simd_set1_f(0.2732120154030589f);
    const gmx_simd_float_t  CB0      = gmx_simd_set1_f(0.42758357702025784f);
    /* Coefficients for minimax approximation of erfc(x)=Exp(-x^2)*(1/x)*P((1/x)^2) in range [2,9.19] */
    const gmx_simd_float_t  CC10     = gmx_simd_set1_f(-0.0445555913112064f);
    const gmx_simd_float_t  CC9      = gmx_simd_set1_f(0.21376355144663348f);
    const gmx_simd_float_t  CC8      = gmx_simd_set1_f(-0.3473187200259257f);
    const gmx_simd_float_t  CC7      = gmx_simd_set1_f(0.016690861551248114f);
    const gmx_simd_float_t  CC6      = gmx_simd_set1_f(0.7560973182491192f);
    const gmx_simd_float_t  CC5      = gmx_simd_set1_f(-1.2137903600145787f);
    const gmx_simd_float_t  CC4      = gmx_simd_set1_f(0.8411872321232948f);
    const gmx_simd_float_t  CC3      = gmx_simd_set1_f(-0.08670413896296343f);
    const gmx_simd_float_t  CC2      = gmx_simd_set1_f(-0.27124782687240334f);
    const gmx_simd_float_t  CC1      = gmx_simd_set1_f(-0.0007502488047806069f);
    const gmx_simd_float_t  CC0      = gmx_simd_set1_f(0.5642114853803148f);
    const gmx_simd_float_t  one      = gmx_simd_set1_f(1.0f);
    const gmx_simd_float_t  two      = gmx_simd_set1_f(2.0f);

    gmx_simd_float_t        x2, x4, y;
    gmx_simd_float_t        t, t2, w, w2;
    gmx_simd_float_t        pA0, pA1, pB0, pB1, pC0, pC1;
    gmx_simd_float_t        expmx2;
    gmx_simd_float_t        res_erf, res_erfc, res;
    gmx_simd_fbool_t        mask;

    /* Calculate erf() */
    x2   = gmx_simd_mul_f(x, x);
    x4   = gmx_simd_mul_f(x2, x2);

    /* Evaluate x*P(x^2) as two interleaved chains in x^4 for instruction-level
     * parallelism, then merge them with a final x^2 multiply-add.
     */
    pA0  = gmx_simd_fmadd_f(CA6, x4, CA4);
    pA1  = gmx_simd_fmadd_f(CA5, x4, CA3);
    pA0  = gmx_simd_fmadd_f(pA0, x4, CA2);
    pA1  = gmx_simd_fmadd_f(pA1, x4, CA1);
    pA0  = gmx_simd_mul_f(pA0, x4);
    pA0  = gmx_simd_fmadd_f(pA1, x2, pA0);
    /* Constant term must come last for precision reasons */
    pA0  = gmx_simd_add_f(pA0, CA0);

    res_erf = gmx_simd_mul_f(x, pA0);

    /* Calculate erfc() for |x| >= 0.75; the result is selected at the end. */
    y    = gmx_simd_fabs_f(x);
    t    = gmx_simd_inv_f(y);
    w    = gmx_simd_sub_f(t, one);
    t2   = gmx_simd_mul_f(t, t);
    w2   = gmx_simd_mul_f(w, w);
    /* No need for a floating-point sieve here (as in erfc), since erf()
     * will never return values that are extremely small for large args.
     */
    expmx2 = gmx_simd_exp_f( gmx_simd_fneg_f( gmx_simd_mul_f(y, y)));

    /* Polynomial in w2 = (1/y - 1)^2 for the [0.67,2] range */
    pB1  = gmx_simd_fmadd_f(CB9, w2, CB7);
    pB0  = gmx_simd_fmadd_f(CB8, w2, CB6);
    pB1  = gmx_simd_fmadd_f(pB1, w2, CB5);
    pB0  = gmx_simd_fmadd_f(pB0, w2, CB4);
    pB1  = gmx_simd_fmadd_f(pB1, w2, CB3);
    pB0  = gmx_simd_fmadd_f(pB0, w2, CB2);
    pB1  = gmx_simd_fmadd_f(pB1, w2, CB1);
    pB0  = gmx_simd_fmadd_f(pB0, w2, CB0);
    pB0  = gmx_simd_fmadd_f(pB1, w, pB0);

    /* Polynomial in t2 = (1/y)^2 for the [2,9.19] range */
    pC0  = gmx_simd_fmadd_f(CC10, t2, CC8);
    pC1  = gmx_simd_fmadd_f(CC9, t2, CC7);
    pC0  = gmx_simd_fmadd_f(pC0, t2, CC6);
    pC1  = gmx_simd_fmadd_f(pC1, t2, CC5);
    pC0  = gmx_simd_fmadd_f(pC0, t2, CC4);
    pC1  = gmx_simd_fmadd_f(pC1, t2, CC3);
    pC0  = gmx_simd_fmadd_f(pC0, t2, CC2);
    pC1  = gmx_simd_fmadd_f(pC1, t2, CC1);

    pC0  = gmx_simd_fmadd_f(pC0, t2, CC0);
    pC0  = gmx_simd_fmadd_f(pC1, t, pC0);
    pC0  = gmx_simd_mul_f(pC0, t);

    /* SELECT pB0 or pC0 for erfc() */
    mask     = gmx_simd_cmplt_f(two, y);
    res_erfc = gmx_simd_blendv_f(pB0, pC0, mask);
    res_erfc = gmx_simd_mul_f(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = gmx_simd_cmplt_f(x, gmx_simd_setzero_f());
    res_erfc = gmx_simd_blendv_f(res_erfc, gmx_simd_sub_f(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = gmx_simd_cmplt_f(y, gmx_simd_set1_f(0.75f));
    res  = gmx_simd_blendv_f(gmx_simd_sub_f(one, res_erfc), res_erf, mask);

    return res;
}
/*! \brief SIMD float erfc(x).
 *
 * You should normally call the real-precision routine \ref gmx_simd_erfc_r.
 *
 * \param x The value to calculate erfc(x) for.
 * \result erfc(x)
 *
 * This routine achieves full precision (bar the last bit) over most of the
 * input range, but for large arguments where the result is getting close
 * to the minimum representable numbers we accept slightly larger errors
 * (think results that are in the ballpark of 10^-30 for single precision,
 * or 10^-200 for double) since that is not relevant for MD.
 */
static gmx_inline gmx_simd_float_t gmx_simdcall
gmx_simd_erfc_f(gmx_simd_float_t x)
{
    /* Coefficients for minimax approximation of erf(x)=x*P(x^2) in range [-1,1] */
    const gmx_simd_float_t  CA6      = gmx_simd_set1_f(7.853861353153693e-5f);
    const gmx_simd_float_t  CA5      = gmx_simd_set1_f(-8.010193625184903e-4f);
    const gmx_simd_float_t  CA4      = gmx_simd_set1_f(5.188327685732524e-3f);
    const gmx_simd_float_t  CA3      = gmx_simd_set1_f(-2.685381193529856e-2f);
    const gmx_simd_float_t  CA2      = gmx_simd_set1_f(1.128358514861418e-1f);
    const gmx_simd_float_t  CA1      = gmx_simd_set1_f(-3.761262582423300e-1f);
    const gmx_simd_float_t  CA0      = gmx_simd_set1_f(1.128379165726710f);
    /* Coefficients for minimax approximation of erfc(x)=Exp(-x^2)*P((1/(x-1))^2) in range [0.67,2] */
    const gmx_simd_float_t  CB9      = gmx_simd_set1_f(-0.0018629930017603923f);
    const gmx_simd_float_t  CB8      = gmx_simd_set1_f(0.003909821287598495f);
    const gmx_simd_float_t  CB7      = gmx_simd_set1_f(-0.0052094582210355615f);
    const gmx_simd_float_t  CB6      = gmx_simd_set1_f(0.005685614362160572f);
    const gmx_simd_float_t  CB5      = gmx_simd_set1_f(-0.0025367682853477272f);
    const gmx_simd_float_t  CB4      = gmx_simd_set1_f(-0.010199799682318782f);
    const gmx_simd_float_t  CB3      = gmx_simd_set1_f(0.04369575504816542f);
    const gmx_simd_float_t  CB2      = gmx_simd_set1_f(-0.11884063474674492f);
    const gmx_simd_float_t  CB1      = gmx_simd_set1_f(0.2732120154030589f);
    const gmx_simd_float_t  CB0      = gmx_simd_set1_f(0.42758357702025784f);
    /* Coefficients for minimax approximation of erfc(x)=Exp(-x^2)*(1/x)*P((1/x)^2) in range [2,9.19] */
    const gmx_simd_float_t  CC10     = gmx_simd_set1_f(-0.0445555913112064f);
    const gmx_simd_float_t  CC9      = gmx_simd_set1_f(0.21376355144663348f);
    const gmx_simd_float_t  CC8      = gmx_simd_set1_f(-0.3473187200259257f);
    const gmx_simd_float_t  CC7      = gmx_simd_set1_f(0.016690861551248114f);
    const gmx_simd_float_t  CC6      = gmx_simd_set1_f(0.7560973182491192f);
    const gmx_simd_float_t  CC5      = gmx_simd_set1_f(-1.2137903600145787f);
    const gmx_simd_float_t  CC4      = gmx_simd_set1_f(0.8411872321232948f);
    const gmx_simd_float_t  CC3      = gmx_simd_set1_f(-0.08670413896296343f);
    const gmx_simd_float_t  CC2      = gmx_simd_set1_f(-0.27124782687240334f);
    const gmx_simd_float_t  CC1      = gmx_simd_set1_f(-0.0007502488047806069f);
    const gmx_simd_float_t  CC0      = gmx_simd_set1_f(0.5642114853803148f);
    /* Coefficients for expansion of exp(x) in [0,0.1] */
    /* CD0 and CD1 are both 1.0, so no need to declare them separately */
    const gmx_simd_float_t  CD2      = gmx_simd_set1_f(0.5000066608081202f);
    const gmx_simd_float_t  CD3      = gmx_simd_set1_f(0.1664795422874624f);
    const gmx_simd_float_t  CD4      = gmx_simd_set1_f(0.04379839977652482f);
    const gmx_simd_float_t  one      = gmx_simd_set1_f(1.0f);
    const gmx_simd_float_t  two      = gmx_simd_set1_f(2.0f);

    /* We need to use a small trick here, since we cannot assume all SIMD
     * architectures support integers, and the flag we want (0xfffff000) would
     * evaluate to NaN (i.e., it cannot be expressed as a floating-point num).
     * Instead, we represent the flags 0xf0f0f000 and 0x0f0f0000 as valid
     * fp numbers, and perform a logical or. Since the expression is constant,
     * we can at least hope it is evaluated at compile-time.
     */
#ifdef GMX_SIMD_HAVE_LOGICAL
    const gmx_simd_float_t  sieve    = gmx_simd_or_f(gmx_simd_set1_f(-5.965323564e+29f), gmx_simd_set1_f(7.05044434e-30f));
#else
    const int               isieve   = 0xFFFFF000;
    /* NOTE(review): mem is sized by GMX_SIMD_REAL_WIDTH but the loop below
     * iterates GMX_SIMD_FLOAT_WIDTH elements - confirm these agree in all
     * float/double build configurations (potential overflow otherwise).
     */
    float                   mem[GMX_SIMD_REAL_WIDTH*2];
    float *                 pmem = gmx_simd_align_f(mem);
    union {
        float f; int i;
    } conv;
    int                     i;
#endif

    gmx_simd_float_t        x2, x4, y;
    gmx_simd_float_t        q, z, t, t2, w, w2;
    gmx_simd_float_t        pA0, pA1, pB0, pB1, pC0, pC1;
    gmx_simd_float_t        expmx2, corr;
    gmx_simd_float_t        res_erf, res_erfc, res;
    gmx_simd_fbool_t        mask;

    /* Calculate erf() */
    x2   = gmx_simd_mul_f(x, x);
    x4   = gmx_simd_mul_f(x2, x2);

    /* Two interleaved polynomial chains in x^4, merged at the end */
    pA0  = gmx_simd_fmadd_f(CA6, x4, CA4);
    pA1  = gmx_simd_fmadd_f(CA5, x4, CA3);
    pA0  = gmx_simd_fmadd_f(pA0, x4, CA2);
    pA1  = gmx_simd_fmadd_f(pA1, x4, CA1);
    pA1  = gmx_simd_mul_f(pA1, x2);
    pA0  = gmx_simd_fmadd_f(pA0, x4, pA1);
    /* Constant term must come last for precision reasons */
    pA0  = gmx_simd_add_f(pA0, CA0);

    res_erf = gmx_simd_mul_f(x, pA0);

    /* Calculate erfc() */
    y    = gmx_simd_fabs_f(x);
    t    = gmx_simd_inv_f(y);
    w    = gmx_simd_sub_f(t, one);
    t2   = gmx_simd_mul_f(t, t);
    w2   = gmx_simd_mul_f(w, w);
    /*
     * We cannot simply calculate exp(-y2) directly in single precision, since
     * that will lose a couple of bits of precision due to the multiplication.
     * Instead, we introduce y=z+w, where the last 12 bits of precision are in w.
     * Then we get exp(-y2) = exp(-z2)*exp((z-y)*(z+y)).
     *
     * The only drawback with this is that it requires TWO separate exponential
     * evaluations, which would be horrible performance-wise. However, the argument
     * for the second exp() call is always small, so there we simply use a
     * low-order minimax expansion on [0,0.1].
     *
     * However, this neat idea requires support for logical ops (and) on
     * FP numbers, which some vendors decided isn't necessary in their SIMD
     * instruction sets (Hi, IBM VSX!). In principle we could use some tricks
     * in double, but we still need memory as a backup when that is not available,
     * and this case is rare enough that we go directly there...
     */
#ifdef GMX_SIMD_HAVE_LOGICAL
    z = gmx_simd_and_f(y, sieve);
#else
    /* Memory round-trip fallback: mask the low mantissa bits per element */
    gmx_simd_store_f(pmem, y);
    for (i = 0; i < GMX_SIMD_FLOAT_WIDTH; i++)
    {
        conv.f  = pmem[i];
        conv.i  = conv.i & isieve;
        pmem[i] = conv.f;
    }
    z = gmx_simd_load_f(pmem);
#endif
    q    = gmx_simd_mul_f( gmx_simd_sub_f(z, y), gmx_simd_add_f(z, y) );
    /* Low-order expansion of exp(q) for the small correction argument */
    corr = gmx_simd_fmadd_f(CD4, q, CD3);
    corr = gmx_simd_fmadd_f(corr, q, CD2);
    corr = gmx_simd_fmadd_f(corr, q, one);
    corr = gmx_simd_fmadd_f(corr, q, one);

    expmx2 = gmx_simd_exp_f( gmx_simd_fneg_f( gmx_simd_mul_f(z, z) ) );
    expmx2 = gmx_simd_mul_f(expmx2, corr);

    /* Polynomial in w2 = (1/y - 1)^2 for the [0.67,2] range */
    pB1  = gmx_simd_fmadd_f(CB9, w2, CB7);
    pB0  = gmx_simd_fmadd_f(CB8, w2, CB6);
    pB1  = gmx_simd_fmadd_f(pB1, w2, CB5);
    pB0  = gmx_simd_fmadd_f(pB0, w2, CB4);
    pB1  = gmx_simd_fmadd_f(pB1, w2, CB3);
    pB0  = gmx_simd_fmadd_f(pB0, w2, CB2);
    pB1  = gmx_simd_fmadd_f(pB1, w2, CB1);
    pB0  = gmx_simd_fmadd_f(pB0, w2, CB0);
    pB0  = gmx_simd_fmadd_f(pB1, w, pB0);

    /* Polynomial in t2 = (1/y)^2 for the [2,9.19] range */
    pC0  = gmx_simd_fmadd_f(CC10, t2, CC8);
    pC1  = gmx_simd_fmadd_f(CC9, t2, CC7);
    pC0  = gmx_simd_fmadd_f(pC0, t2, CC6);
    pC1  = gmx_simd_fmadd_f(pC1, t2, CC5);
    pC0  = gmx_simd_fmadd_f(pC0, t2, CC4);
    pC1  = gmx_simd_fmadd_f(pC1, t2, CC3);
    pC0  = gmx_simd_fmadd_f(pC0, t2, CC2);
    pC1  = gmx_simd_fmadd_f(pC1, t2, CC1);

    pC0  = gmx_simd_fmadd_f(pC0, t2, CC0);
    pC0  = gmx_simd_fmadd_f(pC1, t, pC0);
    pC0  = gmx_simd_mul_f(pC0, t);

    /* SELECT pB0 or pC0 for erfc() */
    mask     = gmx_simd_cmplt_f(two, y);
    res_erfc = gmx_simd_blendv_f(pB0, pC0, mask);
    res_erfc = gmx_simd_mul_f(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = gmx_simd_cmplt_f(x, gmx_simd_setzero_f());
    res_erfc = gmx_simd_blendv_f(res_erfc, gmx_simd_sub_f(two, res_erfc), mask);

    /* Select erf() or erfc() */
    mask = gmx_simd_cmplt_f(y, gmx_simd_set1_f(0.75f));
    res  = gmx_simd_blendv_f(res_erfc, gmx_simd_sub_f(one, res_erf), mask);

    return res;
}
/*! \brief SIMD float sin \& cos.
 *
 * You should normally call the real-precision routine \ref gmx_simd_sincos_r.
 *
 * \param x The argument to evaluate sin/cos for
 * \param[out] sinval Sin(x)
 * \param[out] cosval Cos(x)
 *
 * This version achieves close to machine precision, but for very large
 * magnitudes of the argument we inherently begin to lose accuracy due to the
 * argument reduction, despite using extended precision arithmetics internally.
 */
static gmx_inline void gmx_simdcall
gmx_simd_sincos_f(gmx_simd_float_t x, gmx_simd_float_t *sinval, gmx_simd_float_t *cosval)
{
    /* Constants to subtract Pi/4*x from y while minimizing precision loss */
    const gmx_simd_float_t  argred0         = gmx_simd_set1_f(1.5703125);
    const gmx_simd_float_t  argred1         = gmx_simd_set1_f(4.83751296997070312500e-04f);
    const gmx_simd_float_t  argred2         = gmx_simd_set1_f(7.54953362047672271729e-08f);
    const gmx_simd_float_t  argred3         = gmx_simd_set1_f(2.56334406825708960298e-12f);
    const gmx_simd_float_t  two_over_pi     = gmx_simd_set1_f(2.0f/M_PI);
    const gmx_simd_float_t  const_sin2      = gmx_simd_set1_f(-1.9515295891e-4f);
    const gmx_simd_float_t  const_sin1      = gmx_simd_set1_f( 8.3321608736e-3f);
    const gmx_simd_float_t  const_sin0      = gmx_simd_set1_f(-1.6666654611e-1f);
    const gmx_simd_float_t  const_cos2      = gmx_simd_set1_f( 2.443315711809948e-5f);
    const gmx_simd_float_t  const_cos1      = gmx_simd_set1_f(-1.388731625493765e-3f);
    const gmx_simd_float_t  const_cos0      = gmx_simd_set1_f( 4.166664568298827e-2f);
    const gmx_simd_float_t  half            = gmx_simd_set1_f(0.5f);
    const gmx_simd_float_t  one             = gmx_simd_set1_f(1.0f);
    gmx_simd_float_t        ssign, csign;
    gmx_simd_float_t        x2, y, z, psin, pcos, sss, ccc;
    gmx_simd_fbool_t        mask;
#if (defined GMX_SIMD_HAVE_FINT32) && (defined GMX_SIMD_HAVE_FINT32_ARITHMETICS) && (defined GMX_SIMD_HAVE_LOGICAL)
    /* Integer path: quadrant from low bits of round(x*2/pi) */
    const gmx_simd_fint32_t ione            = gmx_simd_set1_fi(1);
    const gmx_simd_fint32_t itwo            = gmx_simd_set1_fi(2);
    gmx_simd_fint32_t       iy;

    z  = gmx_simd_mul_f(x, two_over_pi);
    iy = gmx_simd_cvt_f2i(z);
    y  = gmx_simd_round_f(z);

    /* mask=TRUE for even quadrants (use sin polynomial for sin output) */
    mask  = gmx_simd_cvt_fib2fb(gmx_simd_cmpeq_fi(gmx_simd_and_fi(iy, ione), gmx_simd_setzero_fi()));
    /* Sign flips encoded directly as sign bits (0.0 or -0.0) */
    ssign = gmx_simd_blendzero_f(gmx_simd_set1_f(GMX_FLOAT_NEGZERO), gmx_simd_cvt_fib2fb(gmx_simd_cmpeq_fi(gmx_simd_and_fi(iy, itwo), itwo)));
    csign = gmx_simd_blendzero_f(gmx_simd_set1_f(GMX_FLOAT_NEGZERO), gmx_simd_cvt_fib2fb(gmx_simd_cmpeq_fi(gmx_simd_and_fi(gmx_simd_add_fi(iy, ione), itwo), itwo)));
#else
    const gmx_simd_float_t  quarter         = gmx_simd_set1_f(0.25f);
    const gmx_simd_float_t  minusquarter    = gmx_simd_set1_f(-0.25f);
    gmx_simd_float_t        q;
    gmx_simd_fbool_t        m1, m2, m3;

    /* The most obvious way to find the arguments quadrant in the unit circle
     * to calculate the sign is to use integer arithmetic, but that is not
     * present in all SIMD implementations. As an alternative, we have devised a
     * pure floating-point algorithm that uses truncation for argument reduction
     * so that we get a new value 0<=q<1 over the unit circle, and then
     * do floating-point comparisons with fractions. This is likely to be
     * slightly slower (~10%) due to the longer latencies of floating-point, so
     * we only use it when integer SIMD arithmetic is not present.
     */
    ssign = x;
    x     = gmx_simd_fabs_f(x);
    /* It is critical that half-way cases are rounded down */
    z = gmx_simd_fmadd_f(x, two_over_pi, half);
    y = gmx_simd_trunc_f(z);
    q = gmx_simd_mul_f(z, quarter);
    q = gmx_simd_sub_f(q, gmx_simd_trunc_f(q));
    /* z now starts at 0.0 for x=-pi/4 (although neg. values cannot occur), and
     * then increased by 1.0 as x increases by 2*Pi, when it resets to 0.0.
     * This removes the 2*Pi periodicity without using any integer arithmetic.
     * First check if y had the value 2 or 3, set csign if true.
     */
    q = gmx_simd_sub_f(q, half);
    /* If we have logical operations we can work directly on the signbit, which
     * saves instructions. Otherwise we need to represent signs as +1.0/-1.0.
     * Thus, if you are altering defines to debug alternative code paths, the
     * two GMX_SIMD_HAVE_LOGICAL sections in this routine must either both be
     * active or inactive - you will get errors if only one is used.
     */
#    ifdef GMX_SIMD_HAVE_LOGICAL
    ssign = gmx_simd_and_f(ssign, gmx_simd_set1_f(GMX_FLOAT_NEGZERO));
    csign = gmx_simd_andnot_f(q, gmx_simd_set1_f(GMX_FLOAT_NEGZERO));
    ssign = gmx_simd_xor_f(ssign, csign);
#    else
    csign = gmx_simd_xor_sign_f(gmx_simd_set1_f(-1.0f), q);
    // ALT: csign = gmx_simd_fneg_f(gmx_simd_copysign(gmx_simd_set1_f(1.0),q));
    ssign = gmx_simd_xor_sign_f(ssign, csign);    /* swap ssign if csign was set. */
#    endif
    /* Check if y had value 1 or 3 (remember we subtracted 0.5 from q) */
    m1   = gmx_simd_cmplt_f(q, minusquarter);
    m2   = gmx_simd_cmple_f(gmx_simd_setzero_f(), q);
    m3   = gmx_simd_cmplt_f(q, quarter);
    m2   = gmx_simd_and_fb(m2, m3);
    mask = gmx_simd_or_fb(m1, m2);
    /* where mask is FALSE, set sign. */
    csign = gmx_simd_xor_sign_f(csign, gmx_simd_blendv_f(gmx_simd_set1_f(-1.0f), one, mask));
#endif
    /* Extended-precision argument reduction: subtract y*pi/2 in four steps */
    x  = gmx_simd_fnmadd_f(y, argred0, x);
    x  = gmx_simd_fnmadd_f(y, argred1, x);
    x  = gmx_simd_fnmadd_f(y, argred2, x);
    x  = gmx_simd_fnmadd_f(y, argred3, x);
    x2 = gmx_simd_mul_f(x, x);

    /* Minimax polynomials for sin and cos on the reduced argument */
    psin = gmx_simd_fmadd_f(const_sin2, x2, const_sin1);
    psin = gmx_simd_fmadd_f(psin, x2, const_sin0);
    psin = gmx_simd_fmadd_f(psin, gmx_simd_mul_f(x, x2), x);
    pcos = gmx_simd_fmadd_f(const_cos2, x2, const_cos1);
    pcos = gmx_simd_fmadd_f(pcos, x2, const_cos0);
    pcos = gmx_simd_fmsub_f(pcos, x2, half);
    pcos = gmx_simd_fmadd_f(pcos, x2, one);

    /* Swap sin/cos polynomials depending on quadrant */
    sss = gmx_simd_blendv_f(pcos, psin, mask);
    ccc = gmx_simd_blendv_f(psin, pcos, mask);
    /* See comment for GMX_SIMD_HAVE_LOGICAL section above. */
#ifdef GMX_SIMD_HAVE_LOGICAL
    *sinval = gmx_simd_xor_f(sss, ssign);
    *cosval = gmx_simd_xor_f(ccc, csign);
#else
    *sinval = gmx_simd_xor_sign_f(sss, ssign);
    *cosval = gmx_simd_xor_sign_f(ccc, csign);
#endif
}
813 /*! \brief SIMD float sin(x).
815 * You should normally call the real-precision routine \ref gmx_simd_sin_r.
817 * \param x The argument to evaluate sin for
820 * \attention Do NOT call both sin & cos if you need both results, since each of them
821 * will then call \ref gmx_simd_sincos_r and waste a factor 2 in performance.
823 static gmx_inline gmx_simd_float_t gmx_simdcall
824 gmx_simd_sin_f(gmx_simd_float_t x)
826 gmx_simd_float_t s, c;
827 gmx_simd_sincos_f(x, &s, &c);
831 /*! \brief SIMD float cos(x).
833 * You should normally call the real-precision routine \ref gmx_simd_cos_r.
835 * \param x The argument to evaluate cos for
838 * \attention Do NOT call both sin & cos if you need both results, since each of them
839 * will then call \ref gmx_simd_sincos_r and waste a factor 2 in performance.
841 static gmx_inline gmx_simd_float_t gmx_simdcall
842 gmx_simd_cos_f(gmx_simd_float_t x)
844 gmx_simd_float_t s, c;
845 gmx_simd_sincos_f(x, &s, &c);
849 /*! \brief SIMD float tan(x).
851 * You should normally call the real-precision routine \ref gmx_simd_tan_r.
853 * \param x The argument to evaluate tan for
856 static gmx_inline gmx_simd_float_t gmx_simdcall
857 gmx_simd_tan_f(gmx_simd_float_t x)
859 const gmx_simd_float_t argred0 = gmx_simd_set1_f(1.5703125);
860 const gmx_simd_float_t argred1 = gmx_simd_set1_f(4.83751296997070312500e-04f);
861 const gmx_simd_float_t argred2 = gmx_simd_set1_f(7.54953362047672271729e-08f);
862 const gmx_simd_float_t argred3 = gmx_simd_set1_f(2.56334406825708960298e-12f);
863 const gmx_simd_float_t two_over_pi = gmx_simd_set1_f(2.0f/M_PI);
864 const gmx_simd_float_t CT6 = gmx_simd_set1_f(0.009498288995810566122993911);
865 const gmx_simd_float_t CT5 = gmx_simd_set1_f(0.002895755790837379295226923);
866 const gmx_simd_float_t CT4 = gmx_simd_set1_f(0.02460087336161924491836265);
867 const gmx_simd_float_t CT3 = gmx_simd_set1_f(0.05334912882656359828045988);
868 const gmx_simd_float_t CT2 = gmx_simd_set1_f(0.1333989091464957704418495);
869 const gmx_simd_float_t CT1 = gmx_simd_set1_f(0.3333307599244198227797507);
871 gmx_simd_float_t x2, p, y, z;
872 gmx_simd_fbool_t mask;
874 #if (defined GMX_SIMD_HAVE_FINT32) && (defined GMX_SIMD_HAVE_FINT32_ARITHMETICS) && (defined GMX_SIMD_HAVE_LOGICAL)
875 gmx_simd_fint32_t iy;
876 gmx_simd_fint32_t ione = gmx_simd_set1_fi(1);
878 z = gmx_simd_mul_f(x, two_over_pi);
879 iy = gmx_simd_cvt_f2i(z);
880 y = gmx_simd_round_f(z);
881 mask = gmx_simd_cvt_fib2fb(gmx_simd_cmpeq_fi(gmx_simd_and_fi(iy, ione), ione));
883 x = gmx_simd_fnmadd_f(y, argred0, x);
884 x = gmx_simd_fnmadd_f(y, argred1, x);
885 x = gmx_simd_fnmadd_f(y, argred2, x);
886 x = gmx_simd_fnmadd_f(y, argred3, x);
887 x = gmx_simd_xor_f(gmx_simd_blendzero_f(gmx_simd_set1_f(GMX_FLOAT_NEGZERO), mask), x);
889 const gmx_simd_float_t quarter = gmx_simd_set1_f(0.25f);
890 const gmx_simd_float_t half = gmx_simd_set1_f(0.5f);
891 const gmx_simd_float_t threequarter = gmx_simd_set1_f(0.75f);
892 gmx_simd_float_t w, q;
893 gmx_simd_fbool_t m1, m2, m3;
895 w = gmx_simd_fabs_f(x);
896 z = gmx_simd_fmadd_f(w, two_over_pi, half);
897 y = gmx_simd_trunc_f(z);
898 q = gmx_simd_mul_f(z, quarter);
899 q = gmx_simd_sub_f(q, gmx_simd_trunc_f(q));
900 m1 = gmx_simd_cmple_f(quarter, q);
901 m2 = gmx_simd_cmplt_f(q, half);
902 m3 = gmx_simd_cmple_f(threequarter, q);
903 m1 = gmx_simd_and_fb(m1, m2);
904 mask = gmx_simd_or_fb(m1, m3);
905 w = gmx_simd_fnmadd_f(y, argred0, w);
906 w = gmx_simd_fnmadd_f(y, argred1, w);
907 w = gmx_simd_fnmadd_f(y, argred2, w);
908 w = gmx_simd_fnmadd_f(y, argred3, w);
910 w = gmx_simd_blendv_f(w, gmx_simd_fneg_f(w), mask);
911 x = gmx_simd_xor_sign_f(w, x);
913 x2 = gmx_simd_mul_f(x, x);
914 p = gmx_simd_fmadd_f(CT6, x2, CT5);
915 p = gmx_simd_fmadd_f(p, x2, CT4);
916 p = gmx_simd_fmadd_f(p, x2, CT3);
917 p = gmx_simd_fmadd_f(p, x2, CT2);
918 p = gmx_simd_fmadd_f(p, x2, CT1);
919 p = gmx_simd_fmadd_f(x2, gmx_simd_mul_f(p, x), x);
921 p = gmx_simd_blendv_f( p, gmx_simd_inv_f(p), mask);
925 /*! \brief SIMD float asin(x).
927 * You should normally call the real-precision routine \ref gmx_simd_asin_r.
929 * \param x The argument to evaluate asin for
932 static gmx_inline gmx_simd_float_t gmx_simdcall
933 gmx_simd_asin_f(gmx_simd_float_t x)
935 const gmx_simd_float_t limitlow = gmx_simd_set1_f(1e-4f);
936 const gmx_simd_float_t half = gmx_simd_set1_f(0.5f);
937 const gmx_simd_float_t one = gmx_simd_set1_f(1.0f);
938 const gmx_simd_float_t halfpi = gmx_simd_set1_f((float)M_PI/2.0f);
939 const gmx_simd_float_t CC5 = gmx_simd_set1_f(4.2163199048E-2f);
940 const gmx_simd_float_t CC4 = gmx_simd_set1_f(2.4181311049E-2f);
941 const gmx_simd_float_t CC3 = gmx_simd_set1_f(4.5470025998E-2f);
942 const gmx_simd_float_t CC2 = gmx_simd_set1_f(7.4953002686E-2f);
943 const gmx_simd_float_t CC1 = gmx_simd_set1_f(1.6666752422E-1f);
944 gmx_simd_float_t xabs;
945 gmx_simd_float_t z, z1, z2, q, q1, q2;
946 gmx_simd_float_t pA, pB;
947 gmx_simd_fbool_t mask;
949 xabs = gmx_simd_fabs_f(x);
950 mask = gmx_simd_cmplt_f(half, xabs);
951 z1 = gmx_simd_mul_f(half, gmx_simd_sub_f(one, xabs));
952 q1 = gmx_simd_mul_f(z1, gmx_simd_invsqrt_f(z1));
953 q1 = gmx_simd_blendnotzero_f(q1, gmx_simd_cmpeq_f(xabs, one));
955 z2 = gmx_simd_mul_f(q2, q2);
956 z = gmx_simd_blendv_f(z2, z1, mask);
957 q = gmx_simd_blendv_f(q2, q1, mask);
959 z2 = gmx_simd_mul_f(z, z);
960 pA = gmx_simd_fmadd_f(CC5, z2, CC3);
961 pB = gmx_simd_fmadd_f(CC4, z2, CC2);
962 pA = gmx_simd_fmadd_f(pA, z2, CC1);
963 pA = gmx_simd_mul_f(pA, z);
964 z = gmx_simd_fmadd_f(pB, z2, pA);
965 z = gmx_simd_fmadd_f(z, q, q);
966 q2 = gmx_simd_sub_f(halfpi, z);
967 q2 = gmx_simd_sub_f(q2, z);
968 z = gmx_simd_blendv_f(z, q2, mask);
970 mask = gmx_simd_cmplt_f(limitlow, xabs);
971 z = gmx_simd_blendv_f( xabs, z, mask );
972 z = gmx_simd_xor_sign_f(z, x);
977 /*! \brief SIMD float acos(x).
979 * You should normally call the real-precision routine \ref gmx_simd_acos_r.
981 * \param x The argument to evaluate acos for
984 static gmx_inline gmx_simd_float_t gmx_simdcall
985 gmx_simd_acos_f(gmx_simd_float_t x)
987 const gmx_simd_float_t one = gmx_simd_set1_f(1.0f);
988 const gmx_simd_float_t half = gmx_simd_set1_f(0.5f);
989 const gmx_simd_float_t pi = gmx_simd_set1_f((float)M_PI);
990 const gmx_simd_float_t halfpi = gmx_simd_set1_f((float)M_PI/2.0f);
991 gmx_simd_float_t xabs;
992 gmx_simd_float_t z, z1, z2, z3;
993 gmx_simd_fbool_t mask1, mask2;
995 xabs = gmx_simd_fabs_f(x);
996 mask1 = gmx_simd_cmplt_f(half, xabs);
997 mask2 = gmx_simd_cmplt_f(gmx_simd_setzero_f(), x);
999 z = gmx_simd_mul_f(half, gmx_simd_sub_f(one, xabs));
1000 z = gmx_simd_mul_f(z, gmx_simd_invsqrt_f(z));
1001 z = gmx_simd_blendnotzero_f(z, gmx_simd_cmpeq_f(xabs, one));
1002 z = gmx_simd_blendv_f(x, z, mask1);
1003 z = gmx_simd_asin_f(z);
1005 z2 = gmx_simd_add_f(z, z);
1006 z1 = gmx_simd_sub_f(pi, z2);
1007 z3 = gmx_simd_sub_f(halfpi, z);
1008 z = gmx_simd_blendv_f(z1, z2, mask2);
1009 z = gmx_simd_blendv_f(z3, z, mask1);
1014 /*! \brief SIMD float asin(x).
1016 * You should normally call the real-precision routine \ref gmx_simd_atan_r.
1018 * \param x The argument to evaluate atan for
1019 * \result Atan(x), same argument/value range as standard math library.
1021 static gmx_inline gmx_simd_float_t gmx_simdcall
1022 gmx_simd_atan_f(gmx_simd_float_t x)
1024 const gmx_simd_float_t halfpi = gmx_simd_set1_f(M_PI/2);
1025 const gmx_simd_float_t CA17 = gmx_simd_set1_f(0.002823638962581753730774f);
1026 const gmx_simd_float_t CA15 = gmx_simd_set1_f(-0.01595690287649631500244f);
1027 const gmx_simd_float_t CA13 = gmx_simd_set1_f(0.04250498861074447631836f);
1028 const gmx_simd_float_t CA11 = gmx_simd_set1_f(-0.07489009201526641845703f);
1029 const gmx_simd_float_t CA9 = gmx_simd_set1_f(0.1063479334115982055664f);
1030 const gmx_simd_float_t CA7 = gmx_simd_set1_f(-0.1420273631811141967773f);
1031 const gmx_simd_float_t CA5 = gmx_simd_set1_f(0.1999269574880599975585f);
1032 const gmx_simd_float_t CA3 = gmx_simd_set1_f(-0.3333310186862945556640f);
1033 gmx_simd_float_t x2, x3, x4, pA, pB;
1034 gmx_simd_fbool_t mask, mask2;
1036 mask = gmx_simd_cmplt_f(x, gmx_simd_setzero_f());
1037 x = gmx_simd_fabs_f(x);
1038 mask2 = gmx_simd_cmplt_f(gmx_simd_set1_f(1.0f), x);
1039 x = gmx_simd_blendv_f(x, gmx_simd_inv_f(x), mask2);
1041 x2 = gmx_simd_mul_f(x, x);
1042 x3 = gmx_simd_mul_f(x2, x);
1043 x4 = gmx_simd_mul_f(x2, x2);
1044 pA = gmx_simd_fmadd_f(CA17, x4, CA13);
1045 pB = gmx_simd_fmadd_f(CA15, x4, CA11);
1046 pA = gmx_simd_fmadd_f(pA, x4, CA9);
1047 pB = gmx_simd_fmadd_f(pB, x4, CA7);
1048 pA = gmx_simd_fmadd_f(pA, x4, CA5);
1049 pB = gmx_simd_fmadd_f(pB, x4, CA3);
1050 pA = gmx_simd_fmadd_f(pA, x2, pB);
1051 pA = gmx_simd_fmadd_f(pA, x3, x);
1053 pA = gmx_simd_blendv_f(pA, gmx_simd_sub_f(halfpi, pA), mask2);
1054 pA = gmx_simd_blendv_f(pA, gmx_simd_fneg_f(pA), mask);
1059 /*! \brief SIMD float atan2(y,x).
1061 * You should normally call the real-precision routine \ref gmx_simd_atan2_r.
1063 * \param y Y component of vector, any quartile
1064 * \param x X component of vector, any quartile
1065 * \result Atan(y,x), same argument/value range as standard math library.
1067 * \note This routine should provide correct results for all finite
1068 * non-zero or positive-zero arguments. However, negative zero arguments will
1069 * be treated as positive zero, which means the return value will deviate from
1070 * the standard math library atan2(y,x) for those cases. That should not be
1071 * of any concern in Gromacs, and in particular it will not affect calculations
1072 * of angles from vectors.
1074 static gmx_inline gmx_simd_float_t gmx_simdcall
1075 gmx_simd_atan2_f(gmx_simd_float_t y, gmx_simd_float_t x)
1077 const gmx_simd_float_t pi = gmx_simd_set1_f(M_PI);
1078 const gmx_simd_float_t halfpi = gmx_simd_set1_f(M_PI/2.0);
1079 gmx_simd_float_t xinv, p, aoffset;
1080 gmx_simd_fbool_t mask_x0, mask_y0, mask_xlt0, mask_ylt0;
1082 mask_x0 = gmx_simd_cmpeq_f(x, gmx_simd_setzero_f());
1083 mask_y0 = gmx_simd_cmpeq_f(y, gmx_simd_setzero_f());
1084 mask_xlt0 = gmx_simd_cmplt_f(x, gmx_simd_setzero_f());
1085 mask_ylt0 = gmx_simd_cmplt_f(y, gmx_simd_setzero_f());
1087 aoffset = gmx_simd_blendzero_f(halfpi, mask_x0);
1088 aoffset = gmx_simd_blendnotzero_f(aoffset, mask_y0);
1090 aoffset = gmx_simd_blendv_f(aoffset, pi, mask_xlt0);
1091 aoffset = gmx_simd_blendv_f(aoffset, gmx_simd_fneg_f(aoffset), mask_ylt0);
1093 xinv = gmx_simd_blendnotzero_f(gmx_simd_inv_f(x), mask_x0);
1094 p = gmx_simd_mul_f(y, xinv);
1095 p = gmx_simd_atan_f(p);
1096 p = gmx_simd_add_f(p, aoffset);
1101 /*! \brief Calculate the force correction due to PME analytically in SIMD float.
1103 * You should normally call the real-precision routine \ref gmx_simd_pmecorrF_r.
1105 * \param z2 \f$(r \beta)^2\f$ - see below for details.
1106 * \result Correction factor to coulomb force - see below for details.
1108 * This routine is meant to enable analytical evaluation of the
1109 * direct-space PME electrostatic force to avoid tables.
1111 * The direct-space potential should be \f$ \mbox{erfc}(\beta r)/r\f$, but there
1112 * are some problems evaluating that:
1114 * First, the error function is difficult (read: expensive) to
1115 * approxmiate accurately for intermediate to large arguments, and
1116 * this happens already in ranges of \f$(\beta r)\f$ that occur in simulations.
1117 * Second, we now try to avoid calculating potentials in Gromacs but
1118 * use forces directly.
1120 * We can simply things slight by noting that the PME part is really
1121 * a correction to the normal Coulomb force since \f$\mbox{erfc}(z)=1-\mbox{erf}(z)\f$, i.e.
1123 * V = \frac{1}{r} - \frac{\mbox{erf}(\beta r)}{r}
1125 * The first term we already have from the inverse square root, so
1126 * that we can leave out of this routine.
1128 * For pme tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
1129 * the argument \f$beta r\f$ will be in the range 0.15 to ~4, which is
1130 * the range used for the minimax fit. Use your favorite plotting program
1131 * to realize how well-behaved \f$\frac{\mbox{erf}(z)}{z}\f$ is in this range!
1133 * We approximate \f$f(z)=\mbox{erf}(z)/z\f$ with a rational minimax polynomial.
1134 * However, it turns out it is more efficient to approximate \f$f(z)/z\f$ and
1135 * then only use even powers. This is another minor optimization, since
1136 * we actually \a want \f$f(z)/z\f$, because it is going to be multiplied by
1137 * the vector between the two atoms to get the vectorial force. The
1138 * fastest flops are the ones we can avoid calculating!
1140 * So, here's how it should be used:
1142 * 1. Calculate \f$r^2\f$.
1143 * 2. Multiply by \f$\beta^2\f$, so you get \f$z^2=(\beta r)^2\f$.
1144 * 3. Evaluate this routine with \f$z^2\f$ as the argument.
1145 * 4. The return value is the expression:
1148 * \frac{2 \exp{-z^2}}{\sqrt{\pi} z^2}-\frac{\mbox{erf}(z)}{z^3}
1151 * 5. Multiply the entire expression by \f$\beta^3\f$. This will get you
1154 * \frac{2 \beta^3 \exp(-z^2)}{\sqrt{\pi} z^2} - \frac{\beta^3 \mbox{erf}(z)}{z^3}
1157 * or, switching back to \f$r\f$ (since \f$z=r \beta\f$):
1160 * \frac{2 \beta \exp(-r^2 \beta^2)}{\sqrt{\pi} r^2} - \frac{\mbox{erf}(r \beta)}{r^3}
1163 * With a bit of math exercise you should be able to confirm that
1167 * \frac{\frac{d}{dr}\left( \frac{\mbox{erf}(\beta r)}{r} \right)}{r}
1170 * 6. Add the result to \f$r^{-3}\f$, multiply by the product of the charges,
1171 * and you have your force (divided by \f$r\f$). A final multiplication
1172 * with the vector connecting the two particles and you have your
1173 * vectorial force to add to the particles.
1175 * This approximation achieves an error slightly lower than 1e-6
1176 * in single precision and 1e-11 in double precision
1177 * for arguments smaller than 16 (\f$\beta r \leq 4 \f$);
1178 * when added to \f$1/r\f$ the error will be insignificant.
1179 * For \f$\beta r \geq 7206\f$ the return value can be inf or NaN.
1182 static gmx_inline gmx_simd_float_t gmx_simdcall
1183 gmx_simd_pmecorrF_f(gmx_simd_float_t z2)
1185 const gmx_simd_float_t FN6 = gmx_simd_set1_f(-1.7357322914161492954e-8f);
1186 const gmx_simd_float_t FN5 = gmx_simd_set1_f(1.4703624142580877519e-6f);
1187 const gmx_simd_float_t FN4 = gmx_simd_set1_f(-0.000053401640219807709149f);
1188 const gmx_simd_float_t FN3 = gmx_simd_set1_f(0.0010054721316683106153f);
1189 const gmx_simd_float_t FN2 = gmx_simd_set1_f(-0.019278317264888380590f);
1190 const gmx_simd_float_t FN1 = gmx_simd_set1_f(0.069670166153766424023f);
1191 const gmx_simd_float_t FN0 = gmx_simd_set1_f(-0.75225204789749321333f);
1193 const gmx_simd_float_t FD4 = gmx_simd_set1_f(0.0011193462567257629232f);
1194 const gmx_simd_float_t FD3 = gmx_simd_set1_f(0.014866955030185295499f);
1195 const gmx_simd_float_t FD2 = gmx_simd_set1_f(0.11583842382862377919f);
1196 const gmx_simd_float_t FD1 = gmx_simd_set1_f(0.50736591960530292870f);
1197 const gmx_simd_float_t FD0 = gmx_simd_set1_f(1.0f);
1199 gmx_simd_float_t z4;
1200 gmx_simd_float_t polyFN0, polyFN1, polyFD0, polyFD1;
1202 z4 = gmx_simd_mul_f(z2, z2);
1204 polyFD0 = gmx_simd_fmadd_f(FD4, z4, FD2);
1205 polyFD1 = gmx_simd_fmadd_f(FD3, z4, FD1);
1206 polyFD0 = gmx_simd_fmadd_f(polyFD0, z4, FD0);
1207 polyFD0 = gmx_simd_fmadd_f(polyFD1, z2, polyFD0);
1209 polyFD0 = gmx_simd_inv_f(polyFD0);
1211 polyFN0 = gmx_simd_fmadd_f(FN6, z4, FN4);
1212 polyFN1 = gmx_simd_fmadd_f(FN5, z4, FN3);
1213 polyFN0 = gmx_simd_fmadd_f(polyFN0, z4, FN2);
1214 polyFN1 = gmx_simd_fmadd_f(polyFN1, z4, FN1);
1215 polyFN0 = gmx_simd_fmadd_f(polyFN0, z4, FN0);
1216 polyFN0 = gmx_simd_fmadd_f(polyFN1, z2, polyFN0);
1218 return gmx_simd_mul_f(polyFN0, polyFD0);
1223 /*! \brief Calculate the potential correction due to PME analytically in SIMD float.
1225 * You should normally call the real-precision routine \ref gmx_simd_pmecorrV_r.
1227 * \param z2 \f$(r \beta)^2\f$ - see below for details.
1228 * \result Correction factor to coulomb potential - see below for details.
1230 * See \ref gmx_simd_pmecorrF_f for details about the approximation.
1232 * This routine calculates \f$\mbox{erf}(z)/z\f$, although you should provide \f$z^2\f$
1233 * as the input argument.
1235 * Here's how it should be used:
1237 * 1. Calculate \f$r^2\f$.
1238 * 2. Multiply by \f$\beta^2\f$, so you get \f$z^2=\beta^2*r^2\f$.
1239 * 3. Evaluate this routine with z^2 as the argument.
1240 * 4. The return value is the expression:
1243 * \frac{\mbox{erf}(z)}{z}
1246 * 5. Multiply the entire expression by beta and switching back to \f$r\f$ (since \f$z=r \beta\f$):
1249 * \frac{\mbox{erf}(r \beta)}{r}
1252 * 6. Subtract the result from \f$1/r\f$, multiply by the product of the charges,
1253 * and you have your potential.
1255 * This approximation achieves an error slightly lower than 1e-6
1256 * in single precision and 4e-11 in double precision
1257 * for arguments smaller than 16 (\f$ 0.15 \leq \beta r \leq 4 \f$);
1258 * for \f$ \beta r \leq 0.15\f$ the error can be twice as high;
1259 * when added to \f$1/r\f$ the error will be insignificant.
1260 * For \f$\beta r \geq 7142\f$ the return value can be inf or NaN.
1262 static gmx_inline gmx_simd_float_t gmx_simdcall
1263 gmx_simd_pmecorrV_f(gmx_simd_float_t z2)
1265 const gmx_simd_float_t VN6 = gmx_simd_set1_f(1.9296833005951166339e-8f);
1266 const gmx_simd_float_t VN5 = gmx_simd_set1_f(-1.4213390571557850962e-6f);
1267 const gmx_simd_float_t VN4 = gmx_simd_set1_f(0.000041603292906656984871f);
1268 const gmx_simd_float_t VN3 = gmx_simd_set1_f(-0.00013134036773265025626f);
1269 const gmx_simd_float_t VN2 = gmx_simd_set1_f(0.038657983986041781264f);
1270 const gmx_simd_float_t VN1 = gmx_simd_set1_f(0.11285044772717598220f);
1271 const gmx_simd_float_t VN0 = gmx_simd_set1_f(1.1283802385263030286f);
1273 const gmx_simd_float_t VD3 = gmx_simd_set1_f(0.0066752224023576045451f);
1274 const gmx_simd_float_t VD2 = gmx_simd_set1_f(0.078647795836373922256f);
1275 const gmx_simd_float_t VD1 = gmx_simd_set1_f(0.43336185284710920150f);
1276 const gmx_simd_float_t VD0 = gmx_simd_set1_f(1.0f);
1278 gmx_simd_float_t z4;
1279 gmx_simd_float_t polyVN0, polyVN1, polyVD0, polyVD1;
1281 z4 = gmx_simd_mul_f(z2, z2);
1283 polyVD1 = gmx_simd_fmadd_f(VD3, z4, VD1);
1284 polyVD0 = gmx_simd_fmadd_f(VD2, z4, VD0);
1285 polyVD0 = gmx_simd_fmadd_f(polyVD1, z2, polyVD0);
1287 polyVD0 = gmx_simd_inv_f(polyVD0);
1289 polyVN0 = gmx_simd_fmadd_f(VN6, z4, VN4);
1290 polyVN1 = gmx_simd_fmadd_f(VN5, z4, VN3);
1291 polyVN0 = gmx_simd_fmadd_f(polyVN0, z4, VN2);
1292 polyVN1 = gmx_simd_fmadd_f(polyVN1, z4, VN1);
1293 polyVN0 = gmx_simd_fmadd_f(polyVN0, z4, VN0);
1294 polyVN0 = gmx_simd_fmadd_f(polyVN1, z2, polyVN0);
1296 return gmx_simd_mul_f(polyVN0, polyVD0);
1302 #ifdef GMX_SIMD_HAVE_DOUBLE
1304 /*! \name Double precision SIMD math functions
1306 * \note In most cases you should use the real-precision functions instead.
1310 /****************************************
1311 * DOUBLE PRECISION SIMD MATH FUNCTIONS *
1312 ****************************************/
1314 /*! \brief SIMD utility function to sum a+b+c+d for SIMD doubles.
1316 * \copydetails gmx_simd_sum4_f
1318 static gmx_inline gmx_simd_double_t gmx_simdcall
1319 gmx_simd_sum4_d(gmx_simd_double_t a, gmx_simd_double_t b,
1320 gmx_simd_double_t c, gmx_simd_double_t d)
1322 return gmx_simd_add_d(gmx_simd_add_d(a, b), gmx_simd_add_d(c, d));
1325 /*! \brief Return -a if b is negative, SIMD double.
1327 * You should normally call the real-precision routine \ref gmx_simd_xor_sign_r.
1329 * \param a Values to set sign for
1330 * \param b Values used to set sign
1331 * \return if b is negative, the sign of a will be changed.
1333 * This is equivalent to doing an xor operation on a with the sign bit of b,
1334 * with the exception that negative zero is not considered to be negative
1335 * on architectures where \ref GMX_SIMD_HAVE_LOGICAL is not set.
1337 static gmx_inline gmx_simd_double_t gmx_simdcall
1338 gmx_simd_xor_sign_d(gmx_simd_double_t a, gmx_simd_double_t b)
1340 #ifdef GMX_SIMD_HAVE_LOGICAL
1341 return gmx_simd_xor_d(a, gmx_simd_and_d(gmx_simd_set1_d(GMX_DOUBLE_NEGZERO), b));
1343 return gmx_simd_blendv_d(a, gmx_simd_fneg_d(a), gmx_simd_cmplt_d(b, gmx_simd_setzero_d()));
1347 /*! \brief Perform one Newton-Raphson iteration to improve 1/sqrt(x) for SIMD double.
1349 * \copydetails gmx_simd_rsqrt_iter_f
1351 static gmx_inline gmx_simd_double_t gmx_simdcall
1352 gmx_simd_rsqrt_iter_d(gmx_simd_double_t lu, gmx_simd_double_t x)
1354 #ifdef GMX_SIMD_HAVE_FMA
1355 return gmx_simd_fmadd_d(gmx_simd_fnmadd_d(x, gmx_simd_mul_d(lu, lu), gmx_simd_set1_d(1.0)), gmx_simd_mul_d(lu, gmx_simd_set1_d(0.5)), lu);
1357 return gmx_simd_mul_d(gmx_simd_set1_d(0.5), gmx_simd_mul_d(gmx_simd_sub_d(gmx_simd_set1_d(3.0), gmx_simd_mul_d(gmx_simd_mul_d(lu, lu), x)), lu));
1362 /*! \brief Calculate 1/sqrt(x) for SIMD double
1364 * \copydetails gmx_simd_invsqrt_f
1366 static gmx_inline gmx_simd_double_t gmx_simdcall
1367 gmx_simd_invsqrt_d(gmx_simd_double_t x)
1369 gmx_simd_double_t lu = gmx_simd_rsqrt_d(x);
1370 #if (GMX_SIMD_RSQRT_BITS < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1371 lu = gmx_simd_rsqrt_iter_d(lu, x);
1373 #if (GMX_SIMD_RSQRT_BITS*2 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1374 lu = gmx_simd_rsqrt_iter_d(lu, x);
1376 #if (GMX_SIMD_RSQRT_BITS*4 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1377 lu = gmx_simd_rsqrt_iter_d(lu, x);
1379 #if (GMX_SIMD_RSQRT_BITS*8 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1380 lu = gmx_simd_rsqrt_iter_d(lu, x);
1385 /*! \brief Calculate 1/sqrt(x) for two SIMD doubles.
1387 * \copydetails gmx_simd_invsqrt_pair_f
1389 static gmx_inline void gmx_simdcall
1390 gmx_simd_invsqrt_pair_d(gmx_simd_double_t x0, gmx_simd_double_t x1,
1391 gmx_simd_double_t *out0, gmx_simd_double_t *out1)
1393 #if (defined GMX_SIMD_HAVE_FLOAT) && (GMX_SIMD_FLOAT_WIDTH == 2*GMX_SIMD_DOUBLE_WIDTH) && (GMX_SIMD_RSQRT_BITS < 22)
1394 gmx_simd_float_t xf = gmx_simd_cvt_dd2f(x0, x1);
1395 gmx_simd_float_t luf = gmx_simd_rsqrt_f(xf);
1396 gmx_simd_double_t lu0, lu1;
1397 /* Intermediate target is single - mantissa+1 bits */
1398 #if (GMX_SIMD_RSQRT_BITS < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
1399 luf = gmx_simd_rsqrt_iter_f(luf, xf);
1401 #if (GMX_SIMD_RSQRT_BITS*2 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
1402 luf = gmx_simd_rsqrt_iter_f(luf, xf);
1404 #if (GMX_SIMD_RSQRT_BITS*4 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
1405 luf = gmx_simd_rsqrt_iter_f(luf, xf);
1407 gmx_simd_cvt_f2dd(luf, &lu0, &lu1);
1408 /* Last iteration(s) performed in double - if we had 22 bits, this gets us to 44 (~1e-15) */
1409 #if (GMX_SIMD_MATH_TARGET_SINGLE_BITS < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1410 lu0 = gmx_simd_rsqrt_iter_d(lu0, x0);
1411 lu1 = gmx_simd_rsqrt_iter_d(lu1, x1);
1413 #if (GMX_SIMD_MATH_TARGET_SINGLE_BITS*2 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1414 lu0 = gmx_simd_rsqrt_iter_d(lu0, x0);
1415 lu1 = gmx_simd_rsqrt_iter_d(lu1, x1);
1420 *out0 = gmx_simd_invsqrt_d(x0);
1421 *out1 = gmx_simd_invsqrt_d(x1);
1425 /*! \brief Perform one Newton-Raphson iteration to improve 1/x for SIMD double.
1427 * \copydetails gmx_simd_rcp_iter_f
1429 static gmx_inline gmx_simd_double_t gmx_simdcall
1430 gmx_simd_rcp_iter_d(gmx_simd_double_t lu, gmx_simd_double_t x)
1432 return gmx_simd_mul_d(lu, gmx_simd_fnmadd_d(lu, x, gmx_simd_set1_d(2.0)));
1435 /*! \brief Calculate 1/x for SIMD double.
1437 * \copydetails gmx_simd_inv_f
1439 static gmx_inline gmx_simd_double_t gmx_simdcall
1440 gmx_simd_inv_d(gmx_simd_double_t x)
1442 gmx_simd_double_t lu = gmx_simd_rcp_d(x);
1443 #if (GMX_SIMD_RCP_BITS < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1444 lu = gmx_simd_rcp_iter_d(lu, x);
1446 #if (GMX_SIMD_RCP_BITS*2 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1447 lu = gmx_simd_rcp_iter_d(lu, x);
1449 #if (GMX_SIMD_RCP_BITS*4 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1450 lu = gmx_simd_rcp_iter_d(lu, x);
1452 #if (GMX_SIMD_RCP_BITS*8 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
1453 lu = gmx_simd_rcp_iter_d(lu, x);
1458 /*! \brief Calculate sqrt(x) correctly for SIMD doubles, including argument 0.0.
1460 * \copydetails gmx_simd_sqrt_f
1462 static gmx_inline gmx_simd_double_t gmx_simdcall
1463 gmx_simd_sqrt_d(gmx_simd_double_t x)
1465 gmx_simd_dbool_t mask;
1466 gmx_simd_double_t res;
1468 mask = gmx_simd_cmpeq_d(x, gmx_simd_setzero_d());
1469 res = gmx_simd_blendnotzero_d(gmx_simd_invsqrt_d(x), mask);
1470 return gmx_simd_mul_d(res, x);
1473 /*! \brief SIMD double log(x). This is the natural logarithm.
1475 * \copydetails gmx_simd_log_f
1477 static gmx_inline gmx_simd_double_t gmx_simdcall
1478 gmx_simd_log_d(gmx_simd_double_t x)
1480 const gmx_simd_double_t half = gmx_simd_set1_d(0.5);
1481 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
1482 const gmx_simd_double_t sqrt2 = gmx_simd_set1_d(sqrt(2.0));
1483 const gmx_simd_double_t corr = gmx_simd_set1_d(0.693147180559945286226764);
1484 const gmx_simd_double_t CL15 = gmx_simd_set1_d(0.148197055177935105296783);
1485 const gmx_simd_double_t CL13 = gmx_simd_set1_d(0.153108178020442575739679);
1486 const gmx_simd_double_t CL11 = gmx_simd_set1_d(0.181837339521549679055568);
1487 const gmx_simd_double_t CL9 = gmx_simd_set1_d(0.22222194152736701733275);
1488 const gmx_simd_double_t CL7 = gmx_simd_set1_d(0.285714288030134544449368);
1489 const gmx_simd_double_t CL5 = gmx_simd_set1_d(0.399999999989941956712869);
1490 const gmx_simd_double_t CL3 = gmx_simd_set1_d(0.666666666666685503450651);
1491 const gmx_simd_double_t CL1 = gmx_simd_set1_d(2.0);
1492 gmx_simd_double_t fexp, x2, p;
1493 gmx_simd_dbool_t mask;
1495 fexp = gmx_simd_get_exponent_d(x);
1496 x = gmx_simd_get_mantissa_d(x);
1498 mask = gmx_simd_cmplt_d(sqrt2, x);
1499 /* Adjust to non-IEEE format for x>sqrt(2): exponent += 1, mantissa *= 0.5 */
1500 fexp = gmx_simd_add_d(fexp, gmx_simd_blendzero_d(one, mask));
1501 x = gmx_simd_mul_d(x, gmx_simd_blendv_d(one, half, mask));
1503 x = gmx_simd_mul_d( gmx_simd_sub_d(x, one), gmx_simd_inv_d( gmx_simd_add_d(x, one) ) );
1504 x2 = gmx_simd_mul_d(x, x);
1506 p = gmx_simd_fmadd_d(CL15, x2, CL13);
1507 p = gmx_simd_fmadd_d(p, x2, CL11);
1508 p = gmx_simd_fmadd_d(p, x2, CL9);
1509 p = gmx_simd_fmadd_d(p, x2, CL7);
1510 p = gmx_simd_fmadd_d(p, x2, CL5);
1511 p = gmx_simd_fmadd_d(p, x2, CL3);
1512 p = gmx_simd_fmadd_d(p, x2, CL1);
1513 p = gmx_simd_fmadd_d(p, x, gmx_simd_mul_d(corr, fexp));
1518 /*! \brief SIMD double 2^x.
1520 * \copydetails gmx_simd_exp2_f
1522 static gmx_inline gmx_simd_double_t gmx_simdcall
1523 gmx_simd_exp2_d(gmx_simd_double_t x)
1525 const gmx_simd_double_t arglimit = gmx_simd_set1_d(1022.0);
1526 const gmx_simd_double_t CE11 = gmx_simd_set1_d(4.435280790452730022081181e-10);
1527 const gmx_simd_double_t CE10 = gmx_simd_set1_d(7.074105630863314448024247e-09);
1528 const gmx_simd_double_t CE9 = gmx_simd_set1_d(1.017819803432096698472621e-07);
1529 const gmx_simd_double_t CE8 = gmx_simd_set1_d(1.321543308956718799557863e-06);
1530 const gmx_simd_double_t CE7 = gmx_simd_set1_d(0.00001525273348995851746990884);
1531 const gmx_simd_double_t CE6 = gmx_simd_set1_d(0.0001540353046251466849082632);
1532 const gmx_simd_double_t CE5 = gmx_simd_set1_d(0.001333355814678995257307880);
1533 const gmx_simd_double_t CE4 = gmx_simd_set1_d(0.009618129107588335039176502);
1534 const gmx_simd_double_t CE3 = gmx_simd_set1_d(0.05550410866481992147457793);
1535 const gmx_simd_double_t CE2 = gmx_simd_set1_d(0.2402265069591015620470894);
1536 const gmx_simd_double_t CE1 = gmx_simd_set1_d(0.6931471805599453304615075);
1537 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
1538 gmx_simd_double_t fexppart;
1539 gmx_simd_double_t intpart;
1540 gmx_simd_double_t p;
1541 gmx_simd_dbool_t valuemask;
1543 fexppart = gmx_simd_set_exponent_d(x); /* rounds to nearest int internally */
1544 intpart = gmx_simd_round_d(x); /* use same rounding mode here */
1545 valuemask = gmx_simd_cmple_d(gmx_simd_fabs_d(x), arglimit);
1546 fexppart = gmx_simd_blendzero_d(fexppart, valuemask);
1547 x = gmx_simd_sub_d(x, intpart);
1549 p = gmx_simd_fmadd_d(CE11, x, CE10);
1550 p = gmx_simd_fmadd_d(p, x, CE9);
1551 p = gmx_simd_fmadd_d(p, x, CE8);
1552 p = gmx_simd_fmadd_d(p, x, CE7);
1553 p = gmx_simd_fmadd_d(p, x, CE6);
1554 p = gmx_simd_fmadd_d(p, x, CE5);
1555 p = gmx_simd_fmadd_d(p, x, CE4);
1556 p = gmx_simd_fmadd_d(p, x, CE3);
1557 p = gmx_simd_fmadd_d(p, x, CE2);
1558 p = gmx_simd_fmadd_d(p, x, CE1);
1559 p = gmx_simd_fmadd_d(p, x, one);
1560 x = gmx_simd_mul_d(p, fexppart);
1564 /*! \brief SIMD double exp(x).
1566 * \copydetails gmx_simd_exp_f
/* NOTE(review): classic table-free exp: exp(x) = 2^(x*log2(e)). The rounded
 * integer part of the scaled argument is placed directly in the IEEE-754
 * exponent field via gmx_simd_set_exponent_d(), and the remainder is
 * evaluated with a degree-12 polynomial.
 */
1568 static gmx_inline gmx_simd_double_t gmx_simdcall
1569 gmx_simd_exp_d(gmx_simd_double_t x)
/* log2(e): rescales the argument from base e to base 2 */
1571 const gmx_simd_double_t argscale = gmx_simd_set1_d(1.44269504088896340735992468100);
/* |x*log2(e)| beyond 1022 would leave the representable double exponent range */
1572 const gmx_simd_double_t arglimit = gmx_simd_set1_d(1022.0);
/* ln(2) split into high + low parts so the reduction below keeps extra precision */
1573 const gmx_simd_double_t invargscale0 = gmx_simd_set1_d(0.69314718055966295651160180568695068359375);
1574 const gmx_simd_double_t invargscale1 = gmx_simd_set1_d(2.8235290563031577122588448175013436025525412068e-13);
/* Polynomial coefficients for exp() of the reduced (small) argument */
1575 const gmx_simd_double_t CE12 = gmx_simd_set1_d(2.078375306791423699350304e-09);
1576 const gmx_simd_double_t CE11 = gmx_simd_set1_d(2.518173854179933105218635e-08);
1577 const gmx_simd_double_t CE10 = gmx_simd_set1_d(2.755842049600488770111608e-07);
1578 const gmx_simd_double_t CE9 = gmx_simd_set1_d(2.755691815216689746619849e-06);
1579 const gmx_simd_double_t CE8 = gmx_simd_set1_d(2.480158383706245033920920e-05);
1580 const gmx_simd_double_t CE7 = gmx_simd_set1_d(0.0001984127043518048611841321);
1581 const gmx_simd_double_t CE6 = gmx_simd_set1_d(0.001388888889360258341755930);
1582 const gmx_simd_double_t CE5 = gmx_simd_set1_d(0.008333333332907368102819109);
1583 const gmx_simd_double_t CE4 = gmx_simd_set1_d(0.04166666666663836745814631);
1584 const gmx_simd_double_t CE3 = gmx_simd_set1_d(0.1666666666666796929434570);
1585 const gmx_simd_double_t CE2 = gmx_simd_set1_d(0.5);
1586 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
1587 gmx_simd_double_t fexppart;
1588 gmx_simd_double_t intpart;
1589 gmx_simd_double_t y, p;
1590 gmx_simd_dbool_t valuemask;
1592 y = gmx_simd_mul_d(x, argscale);
/* fexppart = 2^round(y); round() and set_exponent() must use the same rounding mode */
1593 fexppart = gmx_simd_set_exponent_d(y); /* rounds to nearest int internally */
1594 intpart = gmx_simd_round_d(y); /* use same rounding mode here */
/* Zero the exponent factor where |y| is out of range, so huge args return 0 */
1595 valuemask = gmx_simd_cmple_d(gmx_simd_fabs_d(y), arglimit);
1596 fexppart = gmx_simd_blendzero_d(fexppart, valuemask);
1598 /* Extended precision arithmetics */
/* x -= intpart*ln(2), subtracting the high and low parts separately */
1599 x = gmx_simd_fnmadd_d(invargscale0, intpart, x);
1600 x = gmx_simd_fnmadd_d(invargscale1, intpart, x);
/* Horner evaluation of exp() polynomial on the reduced argument */
1602 p = gmx_simd_fmadd_d(CE12, x, CE11);
1603 p = gmx_simd_fmadd_d(p, x, CE10);
1604 p = gmx_simd_fmadd_d(p, x, CE9);
1605 p = gmx_simd_fmadd_d(p, x, CE8);
1606 p = gmx_simd_fmadd_d(p, x, CE7);
1607 p = gmx_simd_fmadd_d(p, x, CE6);
1608 p = gmx_simd_fmadd_d(p, x, CE5);
1609 p = gmx_simd_fmadd_d(p, x, CE4);
1610 p = gmx_simd_fmadd_d(p, x, CE3);
1611 p = gmx_simd_fmadd_d(p, x, CE2);
/* p = p*x^2 + (x + 1), i.e. the constant and linear terms are added exactly */
1612 p = gmx_simd_fmadd_d(p, gmx_simd_mul_d(x, x), gmx_simd_add_d(x, one));
/* Combine polynomial with the 2^intpart exponent factor */
1613 x = gmx_simd_mul_d(p, fexppart);
1617 /*! \brief SIMD double erf(x).
1619 * \copydetails gmx_simd_erf_f
/* NOTE(review): three-range rational (minimax) approximation:
 *   |x| <  1    : erf directly from P(x^2)/Q(x^2)
 *   1 <= |x| <= 4.5 : erfc from exp(-x^2)*x*P(t)/Q(t), t = |x|-1
 *   |x| >  4.5  : erfc from exp(-x^2)/x*P(w)/Q(w), w = 1/|x|
 * The final result selects erf or 1-erfc per SIMD lane.
 */
1621 static gmx_inline gmx_simd_double_t gmx_simdcall
1622 gmx_simd_erf_d(gmx_simd_double_t x)
1624 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
1625 const gmx_simd_double_t CAP4 = gmx_simd_set1_d(-0.431780540597889301512e-4);
1626 const gmx_simd_double_t CAP3 = gmx_simd_set1_d(-0.00578562306260059236059);
1627 const gmx_simd_double_t CAP2 = gmx_simd_set1_d(-0.028593586920219752446);
1628 const gmx_simd_double_t CAP1 = gmx_simd_set1_d(-0.315924962948621698209);
1629 const gmx_simd_double_t CAP0 = gmx_simd_set1_d(0.14952975608477029151);
1631 const gmx_simd_double_t CAQ5 = gmx_simd_set1_d(-0.374089300177174709737e-5);
1632 const gmx_simd_double_t CAQ4 = gmx_simd_set1_d(0.00015126584532155383535);
1633 const gmx_simd_double_t CAQ3 = gmx_simd_set1_d(0.00536692680669480725423);
1634 const gmx_simd_double_t CAQ2 = gmx_simd_set1_d(0.0668686825594046122636);
1635 const gmx_simd_double_t CAQ1 = gmx_simd_set1_d(0.402604990869284362773);
1637 const gmx_simd_double_t CAoffset = gmx_simd_set1_d(0.9788494110107421875);
1639 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
1640 const gmx_simd_double_t CBP6 = gmx_simd_set1_d(2.49650423685462752497647637088e-10);
1641 const gmx_simd_double_t CBP5 = gmx_simd_set1_d(0.00119770193298159629350136085658);
1642 const gmx_simd_double_t CBP4 = gmx_simd_set1_d(0.0164944422378370965881008942733);
1643 const gmx_simd_double_t CBP3 = gmx_simd_set1_d(0.0984581468691775932063932439252);
1644 const gmx_simd_double_t CBP2 = gmx_simd_set1_d(0.317364595806937763843589437418);
1645 const gmx_simd_double_t CBP1 = gmx_simd_set1_d(0.554167062641455850932670067075);
1646 const gmx_simd_double_t CBP0 = gmx_simd_set1_d(0.427583576155807163756925301060);
1647 const gmx_simd_double_t CBQ7 = gmx_simd_set1_d(0.00212288829699830145976198384930);
1648 const gmx_simd_double_t CBQ6 = gmx_simd_set1_d(0.0334810979522685300554606393425);
1649 const gmx_simd_double_t CBQ5 = gmx_simd_set1_d(0.2361713785181450957579508850717);
1650 const gmx_simd_double_t CBQ4 = gmx_simd_set1_d(0.955364736493055670530981883072);
1651 const gmx_simd_double_t CBQ3 = gmx_simd_set1_d(2.36815675631420037315349279199);
1652 const gmx_simd_double_t CBQ2 = gmx_simd_set1_d(3.55261649184083035537184223542);
1653 const gmx_simd_double_t CBQ1 = gmx_simd_set1_d(2.93501136050160872574376997993);
1656 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
1657 const gmx_simd_double_t CCP6 = gmx_simd_set1_d(-2.8175401114513378771);
1658 const gmx_simd_double_t CCP5 = gmx_simd_set1_d(-3.22729451764143718517);
1659 const gmx_simd_double_t CCP4 = gmx_simd_set1_d(-2.5518551727311523996);
1660 const gmx_simd_double_t CCP3 = gmx_simd_set1_d(-0.687717681153649930619);
1661 const gmx_simd_double_t CCP2 = gmx_simd_set1_d(-0.212652252872804219852);
1662 const gmx_simd_double_t CCP1 = gmx_simd_set1_d(0.0175389834052493308818);
1663 const gmx_simd_double_t CCP0 = gmx_simd_set1_d(0.00628057170626964891937);
1665 const gmx_simd_double_t CCQ6 = gmx_simd_set1_d(5.48409182238641741584);
1666 const gmx_simd_double_t CCQ5 = gmx_simd_set1_d(13.5064170191802889145);
1667 const gmx_simd_double_t CCQ4 = gmx_simd_set1_d(22.9367376522880577224);
1668 const gmx_simd_double_t CCQ3 = gmx_simd_set1_d(15.930646027911794143);
1669 const gmx_simd_double_t CCQ2 = gmx_simd_set1_d(11.0567237927800161565);
1670 const gmx_simd_double_t CCQ1 = gmx_simd_set1_d(2.79257750980575282228);
1672 const gmx_simd_double_t CCoffset = gmx_simd_set1_d(0.5579090118408203125);
1674 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
1675 const gmx_simd_double_t two = gmx_simd_set1_d(2.0);
1677 gmx_simd_double_t xabs, x2, x4, t, t2, w, w2;
1678 gmx_simd_double_t PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
1679 gmx_simd_double_t PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
1680 gmx_simd_double_t PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
1681 gmx_simd_double_t res_erf, res_erfcB, res_erfcC, res_erfc, res;
1682 gmx_simd_double_t expmx2;
1683 gmx_simd_dbool_t mask;
1685 /* Calculate erf() */
1686 xabs = gmx_simd_fabs_d(x);
1687 x2 = gmx_simd_mul_d(x, x);
1688 x4 = gmx_simd_mul_d(x2, x2);
/* Numerator P(x^2), evaluated as two interleaved chains (PolyAP0/PolyAP1),
 * presumably to expose instruction-level parallelism - TODO confirm intent */
1690 PolyAP0 = gmx_simd_mul_d(CAP4, x4);
1691 PolyAP1 = gmx_simd_mul_d(CAP3, x4);
1692 PolyAP0 = gmx_simd_add_d(PolyAP0, CAP2);
1693 PolyAP1 = gmx_simd_add_d(PolyAP1, CAP1);
1694 PolyAP0 = gmx_simd_mul_d(PolyAP0, x4);
1695 PolyAP1 = gmx_simd_mul_d(PolyAP1, x2);
1696 PolyAP0 = gmx_simd_add_d(PolyAP0, CAP0);
1697 PolyAP0 = gmx_simd_add_d(PolyAP0, PolyAP1);
/* Denominator Q(x^2), same two-chain evaluation */
1699 PolyAQ1 = gmx_simd_mul_d(CAQ5, x4);
1700 PolyAQ0 = gmx_simd_mul_d(CAQ4, x4);
1701 PolyAQ1 = gmx_simd_add_d(PolyAQ1, CAQ3);
1702 PolyAQ0 = gmx_simd_add_d(PolyAQ0, CAQ2);
1703 PolyAQ1 = gmx_simd_mul_d(PolyAQ1, x4);
1704 PolyAQ0 = gmx_simd_mul_d(PolyAQ0, x4);
1705 PolyAQ1 = gmx_simd_add_d(PolyAQ1, CAQ1);
1706 PolyAQ0 = gmx_simd_add_d(PolyAQ0, one);
1707 PolyAQ1 = gmx_simd_mul_d(PolyAQ1, x2);
1708 PolyAQ0 = gmx_simd_add_d(PolyAQ0, PolyAQ1);
/* erf(x) = x*(CAoffset + P/Q) */
1710 res_erf = gmx_simd_mul_d(PolyAP0, gmx_simd_inv_d(PolyAQ0));
1711 res_erf = gmx_simd_add_d(CAoffset, res_erf);
1712 res_erf = gmx_simd_mul_d(x, res_erf);
1714 /* Calculate erfc() in range [1,4.5] */
1715 t = gmx_simd_sub_d(xabs, one);
1716 t2 = gmx_simd_mul_d(t, t);
1718 PolyBP0 = gmx_simd_mul_d(CBP6, t2);
1719 PolyBP1 = gmx_simd_mul_d(CBP5, t2);
1720 PolyBP0 = gmx_simd_add_d(PolyBP0, CBP4);
1721 PolyBP1 = gmx_simd_add_d(PolyBP1, CBP3);
1722 PolyBP0 = gmx_simd_mul_d(PolyBP0, t2);
1723 PolyBP1 = gmx_simd_mul_d(PolyBP1, t2);
1724 PolyBP0 = gmx_simd_add_d(PolyBP0, CBP2);
1725 PolyBP1 = gmx_simd_add_d(PolyBP1, CBP1);
1726 PolyBP0 = gmx_simd_mul_d(PolyBP0, t2);
1727 PolyBP1 = gmx_simd_mul_d(PolyBP1, t);
1728 PolyBP0 = gmx_simd_add_d(PolyBP0, CBP0);
1729 PolyBP0 = gmx_simd_add_d(PolyBP0, PolyBP1);
1731 PolyBQ1 = gmx_simd_mul_d(CBQ7, t2);
1732 PolyBQ0 = gmx_simd_mul_d(CBQ6, t2);
1733 PolyBQ1 = gmx_simd_add_d(PolyBQ1, CBQ5);
1734 PolyBQ0 = gmx_simd_add_d(PolyBQ0, CBQ4);
1735 PolyBQ1 = gmx_simd_mul_d(PolyBQ1, t2);
1736 PolyBQ0 = gmx_simd_mul_d(PolyBQ0, t2);
1737 PolyBQ1 = gmx_simd_add_d(PolyBQ1, CBQ3);
1738 PolyBQ0 = gmx_simd_add_d(PolyBQ0, CBQ2);
1739 PolyBQ1 = gmx_simd_mul_d(PolyBQ1, t2);
1740 PolyBQ0 = gmx_simd_mul_d(PolyBQ0, t2);
1741 PolyBQ1 = gmx_simd_add_d(PolyBQ1, CBQ1);
1742 PolyBQ0 = gmx_simd_add_d(PolyBQ0, one);
1743 PolyBQ1 = gmx_simd_mul_d(PolyBQ1, t);
1744 PolyBQ0 = gmx_simd_add_d(PolyBQ0, PolyBQ1);
/* Note: the exp(-x^2) factor is applied later, after range selection */
1746 res_erfcB = gmx_simd_mul_d(PolyBP0, gmx_simd_inv_d(PolyBQ0));
1748 res_erfcB = gmx_simd_mul_d(res_erfcB, xabs);
1750 /* Calculate erfc() in range [4.5,inf] */
1751 w = gmx_simd_inv_d(xabs);
1752 w2 = gmx_simd_mul_d(w, w);
1754 PolyCP0 = gmx_simd_mul_d(CCP6, w2);
1755 PolyCP1 = gmx_simd_mul_d(CCP5, w2);
1756 PolyCP0 = gmx_simd_add_d(PolyCP0, CCP4);
1757 PolyCP1 = gmx_simd_add_d(PolyCP1, CCP3);
1758 PolyCP0 = gmx_simd_mul_d(PolyCP0, w2);
1759 PolyCP1 = gmx_simd_mul_d(PolyCP1, w2);
1760 PolyCP0 = gmx_simd_add_d(PolyCP0, CCP2);
1761 PolyCP1 = gmx_simd_add_d(PolyCP1, CCP1);
1762 PolyCP0 = gmx_simd_mul_d(PolyCP0, w2);
1763 PolyCP1 = gmx_simd_mul_d(PolyCP1, w);
1764 PolyCP0 = gmx_simd_add_d(PolyCP0, CCP0);
1765 PolyCP0 = gmx_simd_add_d(PolyCP0, PolyCP1);
1767 PolyCQ0 = gmx_simd_mul_d(CCQ6, w2);
1768 PolyCQ1 = gmx_simd_mul_d(CCQ5, w2);
1769 PolyCQ0 = gmx_simd_add_d(PolyCQ0, CCQ4);
1770 PolyCQ1 = gmx_simd_add_d(PolyCQ1, CCQ3);
1771 PolyCQ0 = gmx_simd_mul_d(PolyCQ0, w2);
1772 PolyCQ1 = gmx_simd_mul_d(PolyCQ1, w2);
1773 PolyCQ0 = gmx_simd_add_d(PolyCQ0, CCQ2);
1774 PolyCQ1 = gmx_simd_add_d(PolyCQ1, CCQ1);
1775 PolyCQ0 = gmx_simd_mul_d(PolyCQ0, w2);
1776 PolyCQ1 = gmx_simd_mul_d(PolyCQ1, w);
1777 PolyCQ0 = gmx_simd_add_d(PolyCQ0, one);
1778 PolyCQ0 = gmx_simd_add_d(PolyCQ0, PolyCQ1);
/* Shared exp(-x^2) factor for both erfc branches */
1780 expmx2 = gmx_simd_exp_d( gmx_simd_fneg_d(x2) );
1782 res_erfcC = gmx_simd_mul_d(PolyCP0, gmx_simd_inv_d(PolyCQ0));
1783 res_erfcC = gmx_simd_add_d(res_erfcC, CCoffset);
1784 res_erfcC = gmx_simd_mul_d(res_erfcC, w);
/* Per-lane choice between the [1,4.5] and [4.5,inf] erfc approximations */
1786 mask = gmx_simd_cmplt_d(gmx_simd_set1_d(4.5), xabs);
1787 res_erfc = gmx_simd_blendv_d(res_erfcB, res_erfcC, mask);
1789 res_erfc = gmx_simd_mul_d(res_erfc, expmx2);
1791 /* erfc(x<0) = 2-erfc(|x|) */
1792 mask = gmx_simd_cmplt_d(x, gmx_simd_setzero_d());
1793 res_erfc = gmx_simd_blendv_d(res_erfc, gmx_simd_sub_d(two, res_erfc), mask);
1795 /* Select erf() or erfc() */
/* For |x| < 1 use the direct erf result; otherwise erf(x) = 1 - erfc(x) */
1796 mask = gmx_simd_cmplt_d(xabs, one);
1797 res = gmx_simd_blendv_d(gmx_simd_sub_d(one, res_erfc), res_erf, mask);
1802 /*! \brief SIMD double erfc(x).
1804 * \copydetails gmx_simd_erfc_f
/* NOTE(review): mirrors gmx_simd_erf_d() above - same three-range rational
 * approximation and identical coefficient tables - but the final selection
 * returns erfc directly (1-erf for |x|<1, erfc otherwise). Keeping erfc as a
 * separate routine avoids the cancellation 1-erf(x) would suffer for large x.
 */
1806 static gmx_inline gmx_simd_double_t gmx_simdcall
1807 gmx_simd_erfc_d(gmx_simd_double_t x)
1809 /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
1810 const gmx_simd_double_t CAP4 = gmx_simd_set1_d(-0.431780540597889301512e-4);
1811 const gmx_simd_double_t CAP3 = gmx_simd_set1_d(-0.00578562306260059236059);
1812 const gmx_simd_double_t CAP2 = gmx_simd_set1_d(-0.028593586920219752446);
1813 const gmx_simd_double_t CAP1 = gmx_simd_set1_d(-0.315924962948621698209);
1814 const gmx_simd_double_t CAP0 = gmx_simd_set1_d(0.14952975608477029151);
1816 const gmx_simd_double_t CAQ5 = gmx_simd_set1_d(-0.374089300177174709737e-5);
1817 const gmx_simd_double_t CAQ4 = gmx_simd_set1_d(0.00015126584532155383535);
1818 const gmx_simd_double_t CAQ3 = gmx_simd_set1_d(0.00536692680669480725423);
1819 const gmx_simd_double_t CAQ2 = gmx_simd_set1_d(0.0668686825594046122636);
1820 const gmx_simd_double_t CAQ1 = gmx_simd_set1_d(0.402604990869284362773);
1822 const gmx_simd_double_t CAoffset = gmx_simd_set1_d(0.9788494110107421875);
1824 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
1825 const gmx_simd_double_t CBP6 = gmx_simd_set1_d(2.49650423685462752497647637088e-10);
1826 const gmx_simd_double_t CBP5 = gmx_simd_set1_d(0.00119770193298159629350136085658);
1827 const gmx_simd_double_t CBP4 = gmx_simd_set1_d(0.0164944422378370965881008942733);
1828 const gmx_simd_double_t CBP3 = gmx_simd_set1_d(0.0984581468691775932063932439252);
1829 const gmx_simd_double_t CBP2 = gmx_simd_set1_d(0.317364595806937763843589437418);
1830 const gmx_simd_double_t CBP1 = gmx_simd_set1_d(0.554167062641455850932670067075);
1831 const gmx_simd_double_t CBP0 = gmx_simd_set1_d(0.427583576155807163756925301060);
1832 const gmx_simd_double_t CBQ7 = gmx_simd_set1_d(0.00212288829699830145976198384930);
1833 const gmx_simd_double_t CBQ6 = gmx_simd_set1_d(0.0334810979522685300554606393425);
1834 const gmx_simd_double_t CBQ5 = gmx_simd_set1_d(0.2361713785181450957579508850717);
1835 const gmx_simd_double_t CBQ4 = gmx_simd_set1_d(0.955364736493055670530981883072);
1836 const gmx_simd_double_t CBQ3 = gmx_simd_set1_d(2.36815675631420037315349279199);
1837 const gmx_simd_double_t CBQ2 = gmx_simd_set1_d(3.55261649184083035537184223542);
1838 const gmx_simd_double_t CBQ1 = gmx_simd_set1_d(2.93501136050160872574376997993);
1841 /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
1842 const gmx_simd_double_t CCP6 = gmx_simd_set1_d(-2.8175401114513378771);
1843 const gmx_simd_double_t CCP5 = gmx_simd_set1_d(-3.22729451764143718517);
1844 const gmx_simd_double_t CCP4 = gmx_simd_set1_d(-2.5518551727311523996);
1845 const gmx_simd_double_t CCP3 = gmx_simd_set1_d(-0.687717681153649930619);
1846 const gmx_simd_double_t CCP2 = gmx_simd_set1_d(-0.212652252872804219852);
1847 const gmx_simd_double_t CCP1 = gmx_simd_set1_d(0.0175389834052493308818);
1848 const gmx_simd_double_t CCP0 = gmx_simd_set1_d(0.00628057170626964891937);
1850 const gmx_simd_double_t CCQ6 = gmx_simd_set1_d(5.48409182238641741584);
1851 const gmx_simd_double_t CCQ5 = gmx_simd_set1_d(13.5064170191802889145);
1852 const gmx_simd_double_t CCQ4 = gmx_simd_set1_d(22.9367376522880577224);
1853 const gmx_simd_double_t CCQ3 = gmx_simd_set1_d(15.930646027911794143);
1854 const gmx_simd_double_t CCQ2 = gmx_simd_set1_d(11.0567237927800161565);
1855 const gmx_simd_double_t CCQ1 = gmx_simd_set1_d(2.79257750980575282228);
1857 const gmx_simd_double_t CCoffset = gmx_simd_set1_d(0.5579090118408203125);
1859 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
1860 const gmx_simd_double_t two = gmx_simd_set1_d(2.0);
1862 gmx_simd_double_t xabs, x2, x4, t, t2, w, w2;
1863 gmx_simd_double_t PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
1864 gmx_simd_double_t PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
1865 gmx_simd_double_t PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
1866 gmx_simd_double_t res_erf, res_erfcB, res_erfcC, res_erfc, res;
1867 gmx_simd_double_t expmx2;
1868 gmx_simd_dbool_t mask;
1870 /* Calculate erf() */
1871 xabs = gmx_simd_fabs_d(x);
1872 x2 = gmx_simd_mul_d(x, x);
1873 x4 = gmx_simd_mul_d(x2, x2);
/* P(x^2) and Q(x^2) each evaluated as two interleaved chains (see erf above) */
1875 PolyAP0 = gmx_simd_mul_d(CAP4, x4);
1876 PolyAP1 = gmx_simd_mul_d(CAP3, x4);
1877 PolyAP0 = gmx_simd_add_d(PolyAP0, CAP2);
1878 PolyAP1 = gmx_simd_add_d(PolyAP1, CAP1);
1879 PolyAP0 = gmx_simd_mul_d(PolyAP0, x4);
1880 PolyAP1 = gmx_simd_mul_d(PolyAP1, x2);
1881 PolyAP0 = gmx_simd_add_d(PolyAP0, CAP0);
1882 PolyAP0 = gmx_simd_add_d(PolyAP0, PolyAP1);
1884 PolyAQ1 = gmx_simd_mul_d(CAQ5, x4);
1885 PolyAQ0 = gmx_simd_mul_d(CAQ4, x4);
1886 PolyAQ1 = gmx_simd_add_d(PolyAQ1, CAQ3);
1887 PolyAQ0 = gmx_simd_add_d(PolyAQ0, CAQ2);
1888 PolyAQ1 = gmx_simd_mul_d(PolyAQ1, x4);
1889 PolyAQ0 = gmx_simd_mul_d(PolyAQ0, x4);
1890 PolyAQ1 = gmx_simd_add_d(PolyAQ1, CAQ1);
1891 PolyAQ0 = gmx_simd_add_d(PolyAQ0, one);
1892 PolyAQ1 = gmx_simd_mul_d(PolyAQ1, x2);
1893 PolyAQ0 = gmx_simd_add_d(PolyAQ0, PolyAQ1);
1895 res_erf = gmx_simd_mul_d(PolyAP0, gmx_simd_inv_d(PolyAQ0));
1896 res_erf = gmx_simd_add_d(CAoffset, res_erf);
1897 res_erf = gmx_simd_mul_d(x, res_erf);
1899 /* Calculate erfc() in range [1,4.5] */
1900 t = gmx_simd_sub_d(xabs, one);
1901 t2 = gmx_simd_mul_d(t, t);
1903 PolyBP0 = gmx_simd_mul_d(CBP6, t2);
1904 PolyBP1 = gmx_simd_mul_d(CBP5, t2);
1905 PolyBP0 = gmx_simd_add_d(PolyBP0, CBP4);
1906 PolyBP1 = gmx_simd_add_d(PolyBP1, CBP3);
1907 PolyBP0 = gmx_simd_mul_d(PolyBP0, t2);
1908 PolyBP1 = gmx_simd_mul_d(PolyBP1, t2);
1909 PolyBP0 = gmx_simd_add_d(PolyBP0, CBP2);
1910 PolyBP1 = gmx_simd_add_d(PolyBP1, CBP1);
1911 PolyBP0 = gmx_simd_mul_d(PolyBP0, t2);
1912 PolyBP1 = gmx_simd_mul_d(PolyBP1, t);
1913 PolyBP0 = gmx_simd_add_d(PolyBP0, CBP0);
1914 PolyBP0 = gmx_simd_add_d(PolyBP0, PolyBP1);
1916 PolyBQ1 = gmx_simd_mul_d(CBQ7, t2);
1917 PolyBQ0 = gmx_simd_mul_d(CBQ6, t2);
1918 PolyBQ1 = gmx_simd_add_d(PolyBQ1, CBQ5);
1919 PolyBQ0 = gmx_simd_add_d(PolyBQ0, CBQ4);
1920 PolyBQ1 = gmx_simd_mul_d(PolyBQ1, t2);
1921 PolyBQ0 = gmx_simd_mul_d(PolyBQ0, t2);
1922 PolyBQ1 = gmx_simd_add_d(PolyBQ1, CBQ3);
1923 PolyBQ0 = gmx_simd_add_d(PolyBQ0, CBQ2);
1924 PolyBQ1 = gmx_simd_mul_d(PolyBQ1, t2);
1925 PolyBQ0 = gmx_simd_mul_d(PolyBQ0, t2);
1926 PolyBQ1 = gmx_simd_add_d(PolyBQ1, CBQ1);
1927 PolyBQ0 = gmx_simd_add_d(PolyBQ0, one);
1928 PolyBQ1 = gmx_simd_mul_d(PolyBQ1, t);
1929 PolyBQ0 = gmx_simd_add_d(PolyBQ0, PolyBQ1);
/* exp(-x^2) factor is multiplied in after the range selection below */
1931 res_erfcB = gmx_simd_mul_d(PolyBP0, gmx_simd_inv_d(PolyBQ0));
1933 res_erfcB = gmx_simd_mul_d(res_erfcB, xabs);
1935 /* Calculate erfc() in range [4.5,inf] */
1936 w = gmx_simd_inv_d(xabs);
1937 w2 = gmx_simd_mul_d(w, w);
1939 PolyCP0 = gmx_simd_mul_d(CCP6, w2);
1940 PolyCP1 = gmx_simd_mul_d(CCP5, w2);
1941 PolyCP0 = gmx_simd_add_d(PolyCP0, CCP4);
1942 PolyCP1 = gmx_simd_add_d(PolyCP1, CCP3);
1943 PolyCP0 = gmx_simd_mul_d(PolyCP0, w2);
1944 PolyCP1 = gmx_simd_mul_d(PolyCP1, w2);
1945 PolyCP0 = gmx_simd_add_d(PolyCP0, CCP2);
1946 PolyCP1 = gmx_simd_add_d(PolyCP1, CCP1);
1947 PolyCP0 = gmx_simd_mul_d(PolyCP0, w2);
1948 PolyCP1 = gmx_simd_mul_d(PolyCP1, w);
1949 PolyCP0 = gmx_simd_add_d(PolyCP0, CCP0);
1950 PolyCP0 = gmx_simd_add_d(PolyCP0, PolyCP1);
1952 PolyCQ0 = gmx_simd_mul_d(CCQ6, w2);
1953 PolyCQ1 = gmx_simd_mul_d(CCQ5, w2);
1954 PolyCQ0 = gmx_simd_add_d(PolyCQ0, CCQ4);
1955 PolyCQ1 = gmx_simd_add_d(PolyCQ1, CCQ3);
1956 PolyCQ0 = gmx_simd_mul_d(PolyCQ0, w2);
1957 PolyCQ1 = gmx_simd_mul_d(PolyCQ1, w2);
1958 PolyCQ0 = gmx_simd_add_d(PolyCQ0, CCQ2);
1959 PolyCQ1 = gmx_simd_add_d(PolyCQ1, CCQ1);
1960 PolyCQ0 = gmx_simd_mul_d(PolyCQ0, w2);
1961 PolyCQ1 = gmx_simd_mul_d(PolyCQ1, w);
1962 PolyCQ0 = gmx_simd_add_d(PolyCQ0, one);
1963 PolyCQ0 = gmx_simd_add_d(PolyCQ0, PolyCQ1);
1965 expmx2 = gmx_simd_exp_d( gmx_simd_fneg_d(x2) );
1967 res_erfcC = gmx_simd_mul_d(PolyCP0, gmx_simd_inv_d(PolyCQ0));
1968 res_erfcC = gmx_simd_add_d(res_erfcC, CCoffset);
1969 res_erfcC = gmx_simd_mul_d(res_erfcC, w);
/* Per-lane choice between the [1,4.5] and [4.5,inf] approximations */
1971 mask = gmx_simd_cmplt_d(gmx_simd_set1_d(4.5), xabs);
1972 res_erfc = gmx_simd_blendv_d(res_erfcB, res_erfcC, mask);
1974 res_erfc = gmx_simd_mul_d(res_erfc, expmx2);
1976 /* erfc(x<0) = 2-erfc(|x|) */
1977 mask = gmx_simd_cmplt_d(x, gmx_simd_setzero_d());
1978 res_erfc = gmx_simd_blendv_d(res_erfc, gmx_simd_sub_d(two, res_erfc), mask);
1980 /* Select erf() or erfc() */
/* Unlike erf() above, for |x| < 1 return erfc(x) = 1 - erf(x) */
1981 mask = gmx_simd_cmplt_d(xabs, one);
1982 res = gmx_simd_blendv_d(res_erfc, gmx_simd_sub_d(one, res_erf), mask);
1987 /*! \brief SIMD double sin \& cos.
1989 * \copydetails gmx_simd_sincos_f
/* NOTE(review): computes sin and cos together since they share the costly
 * argument reduction. The quadrant (and thus signs and sin/cos swap) is
 * found either with SIMD integer ops (first preprocessor path) or with a
 * pure floating-point fallback when integer SIMD is unavailable.
 */
1991 static gmx_inline void gmx_simdcall
1992 gmx_simd_sincos_d(gmx_simd_double_t x, gmx_simd_double_t *sinval, gmx_simd_double_t *cosval)
1994 /* Constants to subtract Pi/4*x from y while minimizing precision loss */
/* pi/2 split into four decreasing parts for extended-precision reduction */
1995 const gmx_simd_double_t argred0 = gmx_simd_set1_d(2*0.78539816290140151978);
1996 const gmx_simd_double_t argred1 = gmx_simd_set1_d(2*4.9604678871439933374e-10);
1997 const gmx_simd_double_t argred2 = gmx_simd_set1_d(2*1.1258708853173288931e-18);
1998 const gmx_simd_double_t argred3 = gmx_simd_set1_d(2*1.7607799325916000908e-27);
1999 const gmx_simd_double_t two_over_pi = gmx_simd_set1_d(2.0/M_PI);
/* Taylor-like polynomial coefficients for sin() on the reduced interval */
2000 const gmx_simd_double_t const_sin5 = gmx_simd_set1_d( 1.58938307283228937328511e-10);
2001 const gmx_simd_double_t const_sin4 = gmx_simd_set1_d(-2.50506943502539773349318e-08);
2002 const gmx_simd_double_t const_sin3 = gmx_simd_set1_d( 2.75573131776846360512547e-06);
2003 const gmx_simd_double_t const_sin2 = gmx_simd_set1_d(-0.000198412698278911770864914);
2004 const gmx_simd_double_t const_sin1 = gmx_simd_set1_d( 0.0083333333333191845961746);
2005 const gmx_simd_double_t const_sin0 = gmx_simd_set1_d(-0.166666666666666130709393);
/* Polynomial coefficients for cos() on the reduced interval */
2007 const gmx_simd_double_t const_cos7 = gmx_simd_set1_d(-1.13615350239097429531523e-11);
2008 const gmx_simd_double_t const_cos6 = gmx_simd_set1_d( 2.08757471207040055479366e-09);
2009 const gmx_simd_double_t const_cos5 = gmx_simd_set1_d(-2.75573144028847567498567e-07);
2010 const gmx_simd_double_t const_cos4 = gmx_simd_set1_d( 2.48015872890001867311915e-05);
2011 const gmx_simd_double_t const_cos3 = gmx_simd_set1_d(-0.00138888888888714019282329);
2012 const gmx_simd_double_t const_cos2 = gmx_simd_set1_d( 0.0416666666666665519592062);
2013 const gmx_simd_double_t half = gmx_simd_set1_d(0.5);
2014 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
2015 gmx_simd_double_t ssign, csign;
2016 gmx_simd_double_t x2, y, z, psin, pcos, sss, ccc;
2017 gmx_simd_dbool_t mask;
/* Integer path: quadrant from low bits of round(x*2/pi) */
2018 #if (defined GMX_SIMD_HAVE_DINT32) && (defined GMX_SIMD_HAVE_DINT32_ARITHMETICS) && (defined GMX_SIMD_HAVE_LOGICAL)
2019 const gmx_simd_dint32_t ione = gmx_simd_set1_di(1);
2020 const gmx_simd_dint32_t itwo = gmx_simd_set1_di(2);
2021 gmx_simd_dint32_t iy;
2023 z = gmx_simd_mul_d(x, two_over_pi);
2024 iy = gmx_simd_cvt_d2i(z);
2025 y = gmx_simd_round_d(z);
/* mask: even quadrant (bit0 of iy clear); ssign/csign: sign bits from bit1 */
2027 mask = gmx_simd_cvt_dib2db(gmx_simd_cmpeq_di(gmx_simd_and_di(iy, ione), gmx_simd_setzero_di()));
2028 ssign = gmx_simd_blendzero_d(gmx_simd_set1_d(GMX_DOUBLE_NEGZERO), gmx_simd_cvt_dib2db(gmx_simd_cmpeq_di(gmx_simd_and_di(iy, itwo), itwo)));
2029 csign = gmx_simd_blendzero_d(gmx_simd_set1_d(GMX_DOUBLE_NEGZERO), gmx_simd_cvt_dib2db(gmx_simd_cmpeq_di(gmx_simd_and_di(gmx_simd_add_di(iy, ione), itwo), itwo)));
/* Floating-point fallback path (no integer SIMD arithmetic available) */
2031 const gmx_simd_double_t quarter = gmx_simd_set1_d(0.25);
2032 const gmx_simd_double_t minusquarter = gmx_simd_set1_d(-0.25);
2033 gmx_simd_double_t q;
2034 gmx_simd_dbool_t m1, m2, m3;
2036 /* The most obvious way to find the arguments quadrant in the unit circle
2037 * to calculate the sign is to use integer arithmetic, but that is not
2038 * present in all SIMD implementations. As an alternative, we have devised a
2039 * pure floating-point algorithm that uses truncation for argument reduction
2040 * so that we get a new value 0<=q<1 over the unit circle, and then
2041 * do floating-point comparisons with fractions. This is likely to be
2042 * slightly slower (~10%) due to the longer latencies of floating-point, so
2043 * we only use it when integer SIMD arithmetic is not present.
2046 x = gmx_simd_fabs_d(x);
2047 /* It is critical that half-way cases are rounded down */
2048 z = gmx_simd_fmadd_d(x, two_over_pi, half);
2049 y = gmx_simd_trunc_d(z);
2050 q = gmx_simd_mul_d(z, quarter);
2051 q = gmx_simd_sub_d(q, gmx_simd_trunc_d(q));
2052 /* z now starts at 0.0 for x=-pi/4 (although neg. values cannot occur), and
2053 * then increased by 1.0 as x increases by 2*Pi, when it resets to 0.0.
2054 * This removes the 2*Pi periodicity without using any integer arithmetic.
2055 * First check if y had the value 2 or 3, set csign if true.
2057 q = gmx_simd_sub_d(q, half);
2058 /* If we have logical operations we can work directly on the signbit, which
2059 * saves instructions. Otherwise we need to represent signs as +1.0/-1.0.
2060 * Thus, if you are altering defines to debug alternative code paths, the
2061 * two GMX_SIMD_HAVE_LOGICAL sections in this routine must either both be
2062 * active or inactive - you will get errors if only one is used.
2064 # ifdef GMX_SIMD_HAVE_LOGICAL
2065 ssign = gmx_simd_and_d(ssign, gmx_simd_set1_d(GMX_DOUBLE_NEGZERO));
2066 csign = gmx_simd_andnot_d(q, gmx_simd_set1_d(GMX_DOUBLE_NEGZERO));
2067 ssign = gmx_simd_xor_d(ssign, csign);
/* Non-logical alternative: carry signs as +-1.0 values instead of sign bits */
2069 csign = gmx_simd_xor_sign_d(gmx_simd_set1_d(-1.0), q);
2070 ssign = gmx_simd_xor_sign_d(ssign, csign); /* swap ssign if csign was set. */
2072 /* Check if y had value 1 or 3 (remember we subtracted 0.5 from q) */
2073 m1 = gmx_simd_cmplt_d(q, minusquarter);
2074 m2 = gmx_simd_cmple_d(gmx_simd_setzero_d(), q);
2075 m3 = gmx_simd_cmplt_d(q, quarter);
2076 m2 = gmx_simd_and_db(m2, m3);
2077 mask = gmx_simd_or_db(m1, m2);
2078 /* where mask is FALSE, set sign. */
2079 csign = gmx_simd_xor_sign_d(csign, gmx_simd_blendv_d(gmx_simd_set1_d(-1.0), one, mask));
/* Extended-precision reduction: x -= y*pi/2, subtracting the four parts in turn */
2081 x = gmx_simd_fnmadd_d(y, argred0, x);
2082 x = gmx_simd_fnmadd_d(y, argred1, x);
2083 x = gmx_simd_fnmadd_d(y, argred2, x);
2084 x = gmx_simd_fnmadd_d(y, argred3, x);
2085 x2 = gmx_simd_mul_d(x, x);
/* Horner evaluation of sin polynomial: psin = x + x^3*P(x^2) */
2087 psin = gmx_simd_fmadd_d(const_sin5, x2, const_sin4);
2088 psin = gmx_simd_fmadd_d(psin, x2, const_sin3);
2089 psin = gmx_simd_fmadd_d(psin, x2, const_sin2);
2090 psin = gmx_simd_fmadd_d(psin, x2, const_sin1);
2091 psin = gmx_simd_fmadd_d(psin, x2, const_sin0);
2092 psin = gmx_simd_fmadd_d(psin, gmx_simd_mul_d(x2, x), x);
/* Horner evaluation of cos polynomial: pcos = 1 - x^2/2 + x^4*P(x^2) */
2094 pcos = gmx_simd_fmadd_d(const_cos7, x2, const_cos6);
2095 pcos = gmx_simd_fmadd_d(pcos, x2, const_cos5);
2096 pcos = gmx_simd_fmadd_d(pcos, x2, const_cos4);
2097 pcos = gmx_simd_fmadd_d(pcos, x2, const_cos3);
2098 pcos = gmx_simd_fmadd_d(pcos, x2, const_cos2);
2099 pcos = gmx_simd_fmsub_d(pcos, x2, half);
2100 pcos = gmx_simd_fmadd_d(pcos, x2, one);
/* Swap sin/cos results for odd quadrants, then apply the signs */
2102 sss = gmx_simd_blendv_d(pcos, psin, mask);
2103 ccc = gmx_simd_blendv_d(psin, pcos, mask);
2104 /* See comment for GMX_SIMD_HAVE_LOGICAL section above. */
2105 #ifdef GMX_SIMD_HAVE_LOGICAL
2106 *sinval = gmx_simd_xor_d(sss, ssign);
2107 *cosval = gmx_simd_xor_d(ccc, csign);
/* Non-logical alternative: apply signs via +-1.0 multiplication semantics */
2109 *sinval = gmx_simd_xor_sign_d(sss, ssign);
2110 *cosval = gmx_simd_xor_sign_d(ccc, csign);
2114 /*! \brief SIMD double sin(x).
2116 * \copydetails gmx_simd_sin_f
/* Thin wrapper over gmx_simd_sincos_d(); the cosine result is discarded. */
2118 static gmx_inline gmx_simd_double_t gmx_simdcall
2119 gmx_simd_sin_d(gmx_simd_double_t x)
2121 gmx_simd_double_t s, c;
2122 gmx_simd_sincos_d(x, &s, &c);
2126 /*! \brief SIMD double cos(x).
2128 * \copydetails gmx_simd_cos_f
/* Thin wrapper over gmx_simd_sincos_d(); the sine result is discarded. */
2130 static gmx_inline gmx_simd_double_t gmx_simdcall
2131 gmx_simd_cos_d(gmx_simd_double_t x)
2133 gmx_simd_double_t s, c;
2134 gmx_simd_sincos_d(x, &s, &c);
2138 /*! \brief SIMD double tan(x).
2140 * \copydetails gmx_simd_tan_f
/* NOTE(review): reduces the argument modulo pi/2 (integer or pure-FP path,
 * as in sincos above), evaluates a tan polynomial on the reduced argument,
 * and takes the reciprocal in odd octants where tan(x) = -1/tan(x-pi/2).
 */
2142 static gmx_inline gmx_simd_double_t gmx_simdcall
2143 gmx_simd_tan_d(gmx_simd_double_t x)
/* pi/2 split into four decreasing parts for extended-precision reduction */
2145 const gmx_simd_double_t argred0 = gmx_simd_set1_d(2*0.78539816290140151978);
2146 const gmx_simd_double_t argred1 = gmx_simd_set1_d(2*4.9604678871439933374e-10);
2147 const gmx_simd_double_t argred2 = gmx_simd_set1_d(2*1.1258708853173288931e-18);
2148 const gmx_simd_double_t argred3 = gmx_simd_set1_d(2*1.7607799325916000908e-27);
2149 const gmx_simd_double_t two_over_pi = gmx_simd_set1_d(2.0/M_PI);
/* Polynomial coefficients for tan() on the reduced interval */
2150 const gmx_simd_double_t CT15 = gmx_simd_set1_d(1.01419718511083373224408e-05);
2151 const gmx_simd_double_t CT14 = gmx_simd_set1_d(-2.59519791585924697698614e-05);
2152 const gmx_simd_double_t CT13 = gmx_simd_set1_d(5.23388081915899855325186e-05);
2153 const gmx_simd_double_t CT12 = gmx_simd_set1_d(-3.05033014433946488225616e-05);
2154 const gmx_simd_double_t CT11 = gmx_simd_set1_d(7.14707504084242744267497e-05);
2155 const gmx_simd_double_t CT10 = gmx_simd_set1_d(8.09674518280159187045078e-05);
2156 const gmx_simd_double_t CT9 = gmx_simd_set1_d(0.000244884931879331847054404);
2157 const gmx_simd_double_t CT8 = gmx_simd_set1_d(0.000588505168743587154904506);
2158 const gmx_simd_double_t CT7 = gmx_simd_set1_d(0.00145612788922812427978848);
2159 const gmx_simd_double_t CT6 = gmx_simd_set1_d(0.00359208743836906619142924);
2160 const gmx_simd_double_t CT5 = gmx_simd_set1_d(0.00886323944362401618113356);
2161 const gmx_simd_double_t CT4 = gmx_simd_set1_d(0.0218694882853846389592078);
2162 const gmx_simd_double_t CT3 = gmx_simd_set1_d(0.0539682539781298417636002);
2163 const gmx_simd_double_t CT2 = gmx_simd_set1_d(0.133333333333125941821962);
2164 const gmx_simd_double_t CT1 = gmx_simd_set1_d(0.333333333333334980164153);
2166 gmx_simd_double_t x2, p, y, z;
2167 gmx_simd_dbool_t mask;
/* Integer path: odd/even octant from bit0 of round(x*2/pi) */
2169 #if (defined GMX_SIMD_HAVE_DINT32) && (defined GMX_SIMD_HAVE_DINT32_ARITHMETICS) && (defined GMX_SIMD_HAVE_LOGICAL)
2170 gmx_simd_dint32_t iy;
2171 gmx_simd_dint32_t ione = gmx_simd_set1_di(1);
2173 z = gmx_simd_mul_d(x, two_over_pi);
2174 iy = gmx_simd_cvt_d2i(z);
2175 y = gmx_simd_round_d(z);
2176 mask = gmx_simd_cvt_dib2db(gmx_simd_cmpeq_di(gmx_simd_and_di(iy, ione), ione));
/* Extended-precision reduction x -= y*pi/2, then flip sign in odd octants */
2178 x = gmx_simd_fnmadd_d(y, argred0, x);
2179 x = gmx_simd_fnmadd_d(y, argred1, x);
2180 x = gmx_simd_fnmadd_d(y, argred2, x);
2181 x = gmx_simd_fnmadd_d(y, argred3, x);
2182 x = gmx_simd_xor_d(gmx_simd_blendzero_d(gmx_simd_set1_d(GMX_DOUBLE_NEGZERO), mask), x);
/* Pure floating-point fallback: octant from fractional comparisons (cf. sincos) */
2184 const gmx_simd_double_t quarter = gmx_simd_set1_d(0.25);
2185 const gmx_simd_double_t half = gmx_simd_set1_d(0.5);
2186 const gmx_simd_double_t threequarter = gmx_simd_set1_d(0.75);
2187 gmx_simd_double_t w, q;
2188 gmx_simd_dbool_t m1, m2, m3;
2190 w = gmx_simd_fabs_d(x);
2191 z = gmx_simd_fmadd_d(w, two_over_pi, half);
2192 y = gmx_simd_trunc_d(z);
2193 q = gmx_simd_mul_d(z, quarter);
2194 q = gmx_simd_sub_d(q, gmx_simd_trunc_d(q));
/* mask true when the fraction q lies in [0.25,0.5) or [0.75,1) - odd octants */
2195 m1 = gmx_simd_cmple_d(quarter, q);
2196 m2 = gmx_simd_cmplt_d(q, half);
2197 m3 = gmx_simd_cmple_d(threequarter, q);
2198 m1 = gmx_simd_and_db(m1, m2);
2199 mask = gmx_simd_or_db(m1, m3);
2200 w = gmx_simd_fnmadd_d(y, argred0, w);
2201 w = gmx_simd_fnmadd_d(y, argred1, w);
2202 w = gmx_simd_fnmadd_d(y, argred2, w);
2203 w = gmx_simd_fnmadd_d(y, argred3, w);
/* Negate reduced value in odd octants, then restore the sign of the input */
2205 w = gmx_simd_blendv_d(w, gmx_simd_fneg_d(w), mask);
2206 x = gmx_simd_xor_sign_d(w, x);
/* Horner evaluation: p = x + x^3*P(x^2) */
2208 x2 = gmx_simd_mul_d(x, x);
2209 p = gmx_simd_fmadd_d(CT15, x2, CT14);
2210 p = gmx_simd_fmadd_d(p, x2, CT13);
2211 p = gmx_simd_fmadd_d(p, x2, CT12);
2212 p = gmx_simd_fmadd_d(p, x2, CT11);
2213 p = gmx_simd_fmadd_d(p, x2, CT10);
2214 p = gmx_simd_fmadd_d(p, x2, CT9);
2215 p = gmx_simd_fmadd_d(p, x2, CT8);
2216 p = gmx_simd_fmadd_d(p, x2, CT7);
2217 p = gmx_simd_fmadd_d(p, x2, CT6);
2218 p = gmx_simd_fmadd_d(p, x2, CT5);
2219 p = gmx_simd_fmadd_d(p, x2, CT4);
2220 p = gmx_simd_fmadd_d(p, x2, CT3);
2221 p = gmx_simd_fmadd_d(p, x2, CT2);
2222 p = gmx_simd_fmadd_d(p, x2, CT1);
2223 p = gmx_simd_fmadd_d(x2, gmx_simd_mul_d(p, x), x);
/* In odd octants tan is the reciprocal of the evaluated polynomial */
2225 p = gmx_simd_blendv_d( p, gmx_simd_inv_d(p), mask);
2229 /*! \brief SIMD double asin(x).
2231 * \copydetails gmx_simd_asin_f
/* NOTE(review): Cephes-style asin with two rational approximations:
 *   |x| <= 0.625 : asin(x) ~= x + x*ww*P(ww)/Q(ww), ww = x^2
 *   |x| >  0.625 : asin(x) = pi/4 - (sqrt(2*zz) corrected by R(zz)/S(zz)),
 *                  zz = 1-|x|
 * Very small |x| (< 1e-8) returns x directly; sign is restored at the end.
 */
2233 static gmx_inline gmx_simd_double_t gmx_simdcall
2234 gmx_simd_asin_d(gmx_simd_double_t x)
2236 /* Same algorithm as cephes library */
2237 const gmx_simd_double_t limit1 = gmx_simd_set1_d(0.625);
2238 const gmx_simd_double_t limit2 = gmx_simd_set1_d(1e-8);
2239 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
2240 const gmx_simd_double_t quarterpi = gmx_simd_set1_d(M_PI/4.0);
/* Low-order correction bits of pi/4 for extended precision */
2241 const gmx_simd_double_t morebits = gmx_simd_set1_d(6.123233995736765886130e-17);
2243 const gmx_simd_double_t P5 = gmx_simd_set1_d(4.253011369004428248960e-3);
2244 const gmx_simd_double_t P4 = gmx_simd_set1_d(-6.019598008014123785661e-1);
2245 const gmx_simd_double_t P3 = gmx_simd_set1_d(5.444622390564711410273e0);
2246 const gmx_simd_double_t P2 = gmx_simd_set1_d(-1.626247967210700244449e1);
2247 const gmx_simd_double_t P1 = gmx_simd_set1_d(1.956261983317594739197e1);
2248 const gmx_simd_double_t P0 = gmx_simd_set1_d(-8.198089802484824371615e0);
2250 const gmx_simd_double_t Q4 = gmx_simd_set1_d(-1.474091372988853791896e1);
2251 const gmx_simd_double_t Q3 = gmx_simd_set1_d(7.049610280856842141659e1);
2252 const gmx_simd_double_t Q2 = gmx_simd_set1_d(-1.471791292232726029859e2);
2253 const gmx_simd_double_t Q1 = gmx_simd_set1_d(1.395105614657485689735e2);
2254 const gmx_simd_double_t Q0 = gmx_simd_set1_d(-4.918853881490881290097e1);
2256 const gmx_simd_double_t R4 = gmx_simd_set1_d(2.967721961301243206100e-3);
2257 const gmx_simd_double_t R3 = gmx_simd_set1_d(-5.634242780008963776856e-1);
2258 const gmx_simd_double_t R2 = gmx_simd_set1_d(6.968710824104713396794e0);
2259 const gmx_simd_double_t R1 = gmx_simd_set1_d(-2.556901049652824852289e1);
2260 const gmx_simd_double_t R0 = gmx_simd_set1_d(2.853665548261061424989e1);
2262 const gmx_simd_double_t S3 = gmx_simd_set1_d(-2.194779531642920639778e1);
2263 const gmx_simd_double_t S2 = gmx_simd_set1_d(1.470656354026814941758e2);
2264 const gmx_simd_double_t S1 = gmx_simd_set1_d(-3.838770957603691357202e2);
2265 const gmx_simd_double_t S0 = gmx_simd_set1_d(3.424398657913078477438e2);
2267 gmx_simd_double_t xabs;
2268 gmx_simd_double_t zz, ww, z, q, w, zz2, ww2;
2269 gmx_simd_double_t PA, PB;
2270 gmx_simd_double_t QA, QB;
2271 gmx_simd_double_t RA, RB;
2272 gmx_simd_double_t SA, SB;
2273 gmx_simd_double_t nom, denom;
2274 gmx_simd_dbool_t mask;
2276 xabs = gmx_simd_fabs_d(x);
/* mask selects the large-argument (|x| > 0.625) branch */
2278 mask = gmx_simd_cmplt_d(limit1, xabs);
2280 zz = gmx_simd_sub_d(one, xabs);
2281 ww = gmx_simd_mul_d(xabs, xabs);
2282 zz2 = gmx_simd_mul_d(zz, zz);
2283 ww2 = gmx_simd_mul_d(ww, ww);
/* R(zz): numerator for the large-argument branch, two interleaved chains */
2286 RA = gmx_simd_mul_d(R4, zz2);
2287 RB = gmx_simd_mul_d(R3, zz2);
2288 RA = gmx_simd_add_d(RA, R2);
2289 RB = gmx_simd_add_d(RB, R1);
2290 RA = gmx_simd_mul_d(RA, zz2);
2291 RB = gmx_simd_mul_d(RB, zz);
2292 RA = gmx_simd_add_d(RA, R0);
2293 RA = gmx_simd_add_d(RA, RB);
/* S(zz): denominator for the large-argument branch (monic, leading 1*zz^4) */
2296 SB = gmx_simd_mul_d(S3, zz2);
2297 SA = gmx_simd_add_d(zz2, S2);
2298 SB = gmx_simd_add_d(SB, S1);
2299 SA = gmx_simd_mul_d(SA, zz2);
2300 SB = gmx_simd_mul_d(SB, zz);
2301 SA = gmx_simd_add_d(SA, S0);
2302 SA = gmx_simd_add_d(SA, SB);
/* P(ww): numerator for the small-argument branch */
2305 PA = gmx_simd_mul_d(P5, ww2);
2306 PB = gmx_simd_mul_d(P4, ww2);
2307 PA = gmx_simd_add_d(PA, P3);
2308 PB = gmx_simd_add_d(PB, P2);
2309 PA = gmx_simd_mul_d(PA, ww2);
2310 PB = gmx_simd_mul_d(PB, ww2);
2311 PA = gmx_simd_add_d(PA, P1);
2312 PB = gmx_simd_add_d(PB, P0);
2313 PA = gmx_simd_mul_d(PA, ww);
2314 PA = gmx_simd_add_d(PA, PB);
/* Q(ww): denominator for the small-argument branch (monic, leading 1*ww^4) */
2317 QB = gmx_simd_mul_d(Q4, ww2);
2318 QA = gmx_simd_add_d(ww2, Q3);
2319 QB = gmx_simd_add_d(QB, Q2);
2320 QA = gmx_simd_mul_d(QA, ww2);
2321 QB = gmx_simd_mul_d(QB, ww2);
2322 QA = gmx_simd_add_d(QA, Q1);
2323 QB = gmx_simd_add_d(QB, Q0);
2324 QA = gmx_simd_mul_d(QA, ww);
2325 QA = gmx_simd_add_d(QA, QB);
2327 RA = gmx_simd_mul_d(RA, zz);
2328 PA = gmx_simd_mul_d(PA, ww);
/* Pick numerator/denominator per lane, then form the shared correction q */
2330 nom = gmx_simd_blendv_d( PA, RA, mask );
2331 denom = gmx_simd_blendv_d( QA, SA, mask );
2333 q = gmx_simd_mul_d( nom, gmx_simd_inv_d(denom) );
/* Large-argument branch: asin(x) = pi/4 - (sqrt(2*zz) + sqrt(2*zz)*q - morebits) + pi/4 */
2335 zz = gmx_simd_add_d(zz, zz);
2336 zz = gmx_simd_sqrt_d(zz);
2337 z = gmx_simd_sub_d(quarterpi, zz);
2338 zz = gmx_simd_mul_d(zz, q);
2339 zz = gmx_simd_sub_d(zz, morebits);
2340 z = gmx_simd_sub_d(z, zz);
2341 z = gmx_simd_add_d(z, quarterpi);
/* Small-argument branch: asin(x) = |x| + |x|*q */
2343 w = gmx_simd_mul_d(xabs, q);
2344 w = gmx_simd_add_d(w, xabs);
2346 z = gmx_simd_blendv_d( w, z, mask );
/* For tiny |x| (< 1e-8) asin(x) ~= x; avoids needless polynomial noise */
2348 mask = gmx_simd_cmplt_d(limit2, xabs);
2349 z = gmx_simd_blendv_d( xabs, z, mask );
/* Restore the sign of the original argument (asin is odd) */
2351 z = gmx_simd_xor_sign_d(z, x);
2356 /*! \brief SIMD double acos(x).
2358 * \copydetails gmx_simd_acos_f
2360 static gmx_inline gmx_simd_double_t gmx_simdcall
2361 gmx_simd_acos_d(gmx_simd_double_t x)
2363 const gmx_simd_double_t one = gmx_simd_set1_d(1.0);
2364 const gmx_simd_double_t half = gmx_simd_set1_d(0.5);
2365 const gmx_simd_double_t quarterpi0 = gmx_simd_set1_d(7.85398163397448309616e-1);
2366 const gmx_simd_double_t quarterpi1 = gmx_simd_set1_d(6.123233995736765886130e-17);
2368 gmx_simd_dbool_t mask1;
2369 gmx_simd_double_t z, z1, z2;
2371 mask1 = gmx_simd_cmplt_d(half, x);
2372 z1 = gmx_simd_mul_d(half, gmx_simd_sub_d(one, x));
2373 z1 = gmx_simd_sqrt_d(z1);
2374 z = gmx_simd_blendv_d( x, z1, mask1 );
2376 z = gmx_simd_asin_d(z);
2378 z1 = gmx_simd_add_d(z, z);
2380 z2 = gmx_simd_sub_d(quarterpi0, z);
2381 z2 = gmx_simd_add_d(z2, quarterpi1);
2382 z2 = gmx_simd_add_d(z2, quarterpi0);
2384 z = gmx_simd_blendv_d(z2, z1, mask1);
2389 /*! \brief SIMD double atan(x).
2391 * \copydetails gmx_simd_atan_f
2393 static gmx_inline gmx_simd_double_t gmx_simdcall
2394 gmx_simd_atan_d(gmx_simd_double_t x)
2396 /* Same algorithm as cephes library */
2397 const gmx_simd_double_t limit1 = gmx_simd_set1_d(0.66);
2398 const gmx_simd_double_t limit2 = gmx_simd_set1_d(2.41421356237309504880);
2399 const gmx_simd_double_t quarterpi = gmx_simd_set1_d(M_PI/4.0);
2400 const gmx_simd_double_t halfpi = gmx_simd_set1_d(M_PI/2.0);
2401 const gmx_simd_double_t mone = gmx_simd_set1_d(-1.0);
2402 const gmx_simd_double_t morebits1 = gmx_simd_set1_d(0.5*6.123233995736765886130E-17);
2403 const gmx_simd_double_t morebits2 = gmx_simd_set1_d(6.123233995736765886130E-17);
2405 const gmx_simd_double_t P4 = gmx_simd_set1_d(-8.750608600031904122785E-1);
2406 const gmx_simd_double_t P3 = gmx_simd_set1_d(-1.615753718733365076637E1);
2407 const gmx_simd_double_t P2 = gmx_simd_set1_d(-7.500855792314704667340E1);
2408 const gmx_simd_double_t P1 = gmx_simd_set1_d(-1.228866684490136173410E2);
2409 const gmx_simd_double_t P0 = gmx_simd_set1_d(-6.485021904942025371773E1);
2411 const gmx_simd_double_t Q4 = gmx_simd_set1_d(2.485846490142306297962E1);
2412 const gmx_simd_double_t Q3 = gmx_simd_set1_d(1.650270098316988542046E2);
2413 const gmx_simd_double_t Q2 = gmx_simd_set1_d(4.328810604912902668951E2);
2414 const gmx_simd_double_t Q1 = gmx_simd_set1_d(4.853903996359136964868E2);
2415 const gmx_simd_double_t Q0 = gmx_simd_set1_d(1.945506571482613964425E2);
2417 gmx_simd_double_t y, xabs, t1, t2;
2418 gmx_simd_double_t z, z2;
2419 gmx_simd_double_t P_A, P_B, Q_A, Q_B;
2420 gmx_simd_dbool_t mask1, mask2;
2422 xabs = gmx_simd_fabs_d(x);
2424 mask1 = gmx_simd_cmplt_d(limit1, xabs);
2425 mask2 = gmx_simd_cmplt_d(limit2, xabs);
2427 t1 = gmx_simd_mul_d(gmx_simd_add_d(xabs, mone), gmx_simd_inv_d(gmx_simd_sub_d(xabs, mone)));
2428 t2 = gmx_simd_mul_d(mone, gmx_simd_inv_d(xabs));
2430 y = gmx_simd_blendzero_d(quarterpi, mask1);
2431 y = gmx_simd_blendv_d(y, halfpi, mask2);
2432 xabs = gmx_simd_blendv_d(xabs, t1, mask1);
2433 xabs = gmx_simd_blendv_d(xabs, t2, mask2);
2435 z = gmx_simd_mul_d(xabs, xabs);
2436 z2 = gmx_simd_mul_d(z, z);
2438 P_A = gmx_simd_mul_d(P4, z2);
2439 P_B = gmx_simd_mul_d(P3, z2);
2440 P_A = gmx_simd_add_d(P_A, P2);
2441 P_B = gmx_simd_add_d(P_B, P1);
2442 P_A = gmx_simd_mul_d(P_A, z2);
2443 P_B = gmx_simd_mul_d(P_B, z);
2444 P_A = gmx_simd_add_d(P_A, P0);
2445 P_A = gmx_simd_add_d(P_A, P_B);
2448 Q_B = gmx_simd_mul_d(Q4, z2);
2449 Q_A = gmx_simd_add_d(z2, Q3);
2450 Q_B = gmx_simd_add_d(Q_B, Q2);
2451 Q_A = gmx_simd_mul_d(Q_A, z2);
2452 Q_B = gmx_simd_mul_d(Q_B, z2);
2453 Q_A = gmx_simd_add_d(Q_A, Q1);
2454 Q_B = gmx_simd_add_d(Q_B, Q0);
2455 Q_A = gmx_simd_mul_d(Q_A, z);
2456 Q_A = gmx_simd_add_d(Q_A, Q_B);
2458 z = gmx_simd_mul_d(z, P_A);
2459 z = gmx_simd_mul_d(z, gmx_simd_inv_d(Q_A));
2460 z = gmx_simd_mul_d(z, xabs);
2461 z = gmx_simd_add_d(z, xabs);
2463 t1 = gmx_simd_blendzero_d(morebits1, mask1);
2464 t1 = gmx_simd_blendv_d(t1, morebits2, mask2);
2466 z = gmx_simd_add_d(z, t1);
2467 y = gmx_simd_add_d(y, z);
2469 y = gmx_simd_xor_sign_d(y, x);
2474 /*! \brief SIMD double atan2(y,x).
2476 * \copydetails gmx_simd_atan2_f
2478 static gmx_inline gmx_simd_double_t gmx_simdcall
2479 gmx_simd_atan2_d(gmx_simd_double_t y, gmx_simd_double_t x)
2481 const gmx_simd_double_t pi = gmx_simd_set1_d(M_PI);
2482 const gmx_simd_double_t halfpi = gmx_simd_set1_d(M_PI/2.0);
2483 gmx_simd_double_t xinv, p, aoffset;
2484 gmx_simd_dbool_t mask_x0, mask_y0, mask_xlt0, mask_ylt0;
2486 mask_x0 = gmx_simd_cmpeq_d(x, gmx_simd_setzero_d());
2487 mask_y0 = gmx_simd_cmpeq_d(y, gmx_simd_setzero_d());
2488 mask_xlt0 = gmx_simd_cmplt_d(x, gmx_simd_setzero_d());
2489 mask_ylt0 = gmx_simd_cmplt_d(y, gmx_simd_setzero_d());
2491 aoffset = gmx_simd_blendzero_d(halfpi, mask_x0);
2492 aoffset = gmx_simd_blendnotzero_d(aoffset, mask_y0);
2494 aoffset = gmx_simd_blendv_d(aoffset, pi, mask_xlt0);
2495 aoffset = gmx_simd_blendv_d(aoffset, gmx_simd_fneg_d(aoffset), mask_ylt0);
2497 xinv = gmx_simd_blendnotzero_d(gmx_simd_inv_d(x), mask_x0);
2498 p = gmx_simd_mul_d(y, xinv);
2499 p = gmx_simd_atan_d(p);
2500 p = gmx_simd_add_d(p, aoffset);
2506 /*! \brief Calculate the force correction due to PME analytically for SIMD double.
2508 * \copydetails gmx_simd_pmecorrF_f
2510 static gmx_inline gmx_simd_double_t gmx_simdcall
2511 gmx_simd_pmecorrF_d(gmx_simd_double_t z2)
2513 const gmx_simd_double_t FN10 = gmx_simd_set1_d(-8.0072854618360083154e-14);
2514 const gmx_simd_double_t FN9 = gmx_simd_set1_d(1.1859116242260148027e-11);
2515 const gmx_simd_double_t FN8 = gmx_simd_set1_d(-8.1490406329798423616e-10);
2516 const gmx_simd_double_t FN7 = gmx_simd_set1_d(3.4404793543907847655e-8);
2517 const gmx_simd_double_t FN6 = gmx_simd_set1_d(-9.9471420832602741006e-7);
2518 const gmx_simd_double_t FN5 = gmx_simd_set1_d(0.000020740315999115847456);
2519 const gmx_simd_double_t FN4 = gmx_simd_set1_d(-0.00031991745139313364005);
2520 const gmx_simd_double_t FN3 = gmx_simd_set1_d(0.0035074449373659008203);
2521 const gmx_simd_double_t FN2 = gmx_simd_set1_d(-0.031750380176100813405);
2522 const gmx_simd_double_t FN1 = gmx_simd_set1_d(0.13884101728898463426);
2523 const gmx_simd_double_t FN0 = gmx_simd_set1_d(-0.75225277815249618847);
2525 const gmx_simd_double_t FD5 = gmx_simd_set1_d(0.000016009278224355026701);
2526 const gmx_simd_double_t FD4 = gmx_simd_set1_d(0.00051055686934806966046);
2527 const gmx_simd_double_t FD3 = gmx_simd_set1_d(0.0081803507497974289008);
2528 const gmx_simd_double_t FD2 = gmx_simd_set1_d(0.077181146026670287235);
2529 const gmx_simd_double_t FD1 = gmx_simd_set1_d(0.41543303143712535988);
2530 const gmx_simd_double_t FD0 = gmx_simd_set1_d(1.0);
2532 gmx_simd_double_t z4;
2533 gmx_simd_double_t polyFN0, polyFN1, polyFD0, polyFD1;
2535 z4 = gmx_simd_mul_d(z2, z2);
2537 polyFD1 = gmx_simd_fmadd_d(FD5, z4, FD3);
2538 polyFD1 = gmx_simd_fmadd_d(polyFD1, z4, FD1);
2539 polyFD1 = gmx_simd_mul_d(polyFD1, z2);
2540 polyFD0 = gmx_simd_fmadd_d(FD4, z4, FD2);
2541 polyFD0 = gmx_simd_fmadd_d(polyFD0, z4, FD0);
2542 polyFD0 = gmx_simd_add_d(polyFD0, polyFD1);
2544 polyFD0 = gmx_simd_inv_d(polyFD0);
2546 polyFN0 = gmx_simd_fmadd_d(FN10, z4, FN8);
2547 polyFN0 = gmx_simd_fmadd_d(polyFN0, z4, FN6);
2548 polyFN0 = gmx_simd_fmadd_d(polyFN0, z4, FN4);
2549 polyFN0 = gmx_simd_fmadd_d(polyFN0, z4, FN2);
2550 polyFN0 = gmx_simd_fmadd_d(polyFN0, z4, FN0);
2551 polyFN1 = gmx_simd_fmadd_d(FN9, z4, FN7);
2552 polyFN1 = gmx_simd_fmadd_d(polyFN1, z4, FN5);
2553 polyFN1 = gmx_simd_fmadd_d(polyFN1, z4, FN3);
2554 polyFN1 = gmx_simd_fmadd_d(polyFN1, z4, FN1);
2555 polyFN0 = gmx_simd_fmadd_d(polyFN1, z2, polyFN0);
2558 return gmx_simd_mul_d(polyFN0, polyFD0);
2563 /*! \brief Calculate the potential correction due to PME analytically for SIMD double.
2565 * \copydetails gmx_simd_pmecorrV_f
2567 static gmx_inline gmx_simd_double_t gmx_simdcall
2568 gmx_simd_pmecorrV_d(gmx_simd_double_t z2)
2570 const gmx_simd_double_t VN9 = gmx_simd_set1_d(-9.3723776169321855475e-13);
2571 const gmx_simd_double_t VN8 = gmx_simd_set1_d(1.2280156762674215741e-10);
2572 const gmx_simd_double_t VN7 = gmx_simd_set1_d(-7.3562157912251309487e-9);
2573 const gmx_simd_double_t VN6 = gmx_simd_set1_d(2.6215886208032517509e-7);
2574 const gmx_simd_double_t VN5 = gmx_simd_set1_d(-4.9532491651265819499e-6);
2575 const gmx_simd_double_t VN4 = gmx_simd_set1_d(0.00025907400778966060389);
2576 const gmx_simd_double_t VN3 = gmx_simd_set1_d(0.0010585044856156469792);
2577 const gmx_simd_double_t VN2 = gmx_simd_set1_d(0.045247661136833092885);
2578 const gmx_simd_double_t VN1 = gmx_simd_set1_d(0.11643931522926034421);
2579 const gmx_simd_double_t VN0 = gmx_simd_set1_d(1.1283791671726767970);
2581 const gmx_simd_double_t VD5 = gmx_simd_set1_d(0.000021784709867336150342);
2582 const gmx_simd_double_t VD4 = gmx_simd_set1_d(0.00064293662010911388448);
2583 const gmx_simd_double_t VD3 = gmx_simd_set1_d(0.0096311444822588683504);
2584 const gmx_simd_double_t VD2 = gmx_simd_set1_d(0.085608012351550627051);
2585 const gmx_simd_double_t VD1 = gmx_simd_set1_d(0.43652499166614811084);
2586 const gmx_simd_double_t VD0 = gmx_simd_set1_d(1.0);
2588 gmx_simd_double_t z4;
2589 gmx_simd_double_t polyVN0, polyVN1, polyVD0, polyVD1;
2591 z4 = gmx_simd_mul_d(z2, z2);
2593 polyVD1 = gmx_simd_fmadd_d(VD5, z4, VD3);
2594 polyVD0 = gmx_simd_fmadd_d(VD4, z4, VD2);
2595 polyVD1 = gmx_simd_fmadd_d(polyVD1, z4, VD1);
2596 polyVD0 = gmx_simd_fmadd_d(polyVD0, z4, VD0);
2597 polyVD0 = gmx_simd_fmadd_d(polyVD1, z2, polyVD0);
2599 polyVD0 = gmx_simd_inv_d(polyVD0);
2601 polyVN1 = gmx_simd_fmadd_d(VN9, z4, VN7);
2602 polyVN0 = gmx_simd_fmadd_d(VN8, z4, VN6);
2603 polyVN1 = gmx_simd_fmadd_d(polyVN1, z4, VN5);
2604 polyVN0 = gmx_simd_fmadd_d(polyVN0, z4, VN4);
2605 polyVN1 = gmx_simd_fmadd_d(polyVN1, z4, VN3);
2606 polyVN0 = gmx_simd_fmadd_d(polyVN0, z4, VN2);
2607 polyVN1 = gmx_simd_fmadd_d(polyVN1, z4, VN1);
2608 polyVN0 = gmx_simd_fmadd_d(polyVN0, z4, VN0);
2609 polyVN0 = gmx_simd_fmadd_d(polyVN1, z2, polyVN0);
2611 return gmx_simd_mul_d(polyVN0, polyVD0);
2619 /*! \name SIMD4 math functions
2621 * \note Only a subset of the math functions are implemented for SIMD4.
2626 #ifdef GMX_SIMD4_HAVE_FLOAT
2628 /*************************************************************************
2629 * SINGLE PRECISION SIMD4 MATH FUNCTIONS - JUST A SMALL SUBSET SUPPORTED *
2630 *************************************************************************/
2632 /*! \brief SIMD4 utility function to sum a+b+c+d for SIMD4 floats.
2634 * \copydetails gmx_simd_sum4_f
2636 static gmx_inline gmx_simd4_float_t gmx_simdcall
2637 gmx_simd4_sum4_f(gmx_simd4_float_t a, gmx_simd4_float_t b,
2638 gmx_simd4_float_t c, gmx_simd4_float_t d)
2640 return gmx_simd4_add_f(gmx_simd4_add_f(a, b), gmx_simd4_add_f(c, d));
2643 /*! \brief Perform one Newton-Raphson iteration to improve 1/sqrt(x) for SIMD4 float.
2645 * \copydetails gmx_simd_rsqrt_iter_f
2647 static gmx_inline gmx_simd4_float_t gmx_simdcall
2648 gmx_simd4_rsqrt_iter_f(gmx_simd4_float_t lu, gmx_simd4_float_t x)
2650 # ifdef GMX_SIMD_HAVE_FMA
2651 return gmx_simd4_fmadd_f(gmx_simd4_fnmadd_f(x, gmx_simd4_mul_f(lu, lu), gmx_simd4_set1_f(1.0f)), gmx_simd4_mul_f(lu, gmx_simd4_set1_f(0.5f)), lu);
2653 return gmx_simd4_mul_f(gmx_simd4_set1_f(0.5f), gmx_simd4_mul_f(gmx_simd4_sub_f(gmx_simd4_set1_f(3.0f), gmx_simd4_mul_f(gmx_simd4_mul_f(lu, lu), x)), lu));
2657 /*! \brief Calculate 1/sqrt(x) for SIMD4 float.
2659 * \copydetails gmx_simd_invsqrt_f
2661 static gmx_inline gmx_simd4_float_t gmx_simdcall
2662 gmx_simd4_invsqrt_f(gmx_simd4_float_t x)
2664 gmx_simd4_float_t lu = gmx_simd4_rsqrt_f(x);
2665 #if (GMX_SIMD_RSQRT_BITS < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
2666 lu = gmx_simd4_rsqrt_iter_f(lu, x);
2668 #if (GMX_SIMD_RSQRT_BITS*2 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
2669 lu = gmx_simd4_rsqrt_iter_f(lu, x);
2671 #if (GMX_SIMD_RSQRT_BITS*4 < GMX_SIMD_MATH_TARGET_SINGLE_BITS)
2672 lu = gmx_simd4_rsqrt_iter_f(lu, x);
2677 #endif /* GMX_SIMD4_HAVE_FLOAT */
2681 #ifdef GMX_SIMD4_HAVE_DOUBLE
2682 /*************************************************************************
2683 * DOUBLE PRECISION SIMD4 MATH FUNCTIONS - JUST A SMALL SUBSET SUPPORTED *
2684 *************************************************************************/
2687 /*! \brief SIMD4 utility function to sum a+b+c+d for SIMD4 doubles.
2689 * \copydetails gmx_simd_sum4_f
2691 static gmx_inline gmx_simd4_double_t gmx_simdcall
2692 gmx_simd4_sum4_d(gmx_simd4_double_t a, gmx_simd4_double_t b,
2693 gmx_simd4_double_t c, gmx_simd4_double_t d)
2695 return gmx_simd4_add_d(gmx_simd4_add_d(a, b), gmx_simd4_add_d(c, d));
2698 /*! \brief Perform one Newton-Raphson iteration to improve 1/sqrt(x) for SIMD4 double.
2700 * \copydetails gmx_simd_rsqrt_iter_f
2702 static gmx_inline gmx_simd4_double_t gmx_simdcall
2703 gmx_simd4_rsqrt_iter_d(gmx_simd4_double_t lu, gmx_simd4_double_t x)
2705 #ifdef GMX_SIMD_HAVE_FMA
2706 return gmx_simd4_fmadd_d(gmx_simd4_fnmadd_d(x, gmx_simd4_mul_d(lu, lu), gmx_simd4_set1_d(1.0)), gmx_simd4_mul_d(lu, gmx_simd4_set1_d(0.5)), lu);
2708 return gmx_simd4_mul_d(gmx_simd4_set1_d(0.5), gmx_simd4_mul_d(gmx_simd4_sub_d(gmx_simd4_set1_d(3.0), gmx_simd4_mul_d(gmx_simd4_mul_d(lu, lu), x)), lu));
2712 /*! \brief Calculate 1/sqrt(x) for SIMD4 double.
2714 * \copydetails gmx_simd_invsqrt_f
2716 static gmx_inline gmx_simd4_double_t gmx_simdcall
2717 gmx_simd4_invsqrt_d(gmx_simd4_double_t x)
2719 gmx_simd4_double_t lu = gmx_simd4_rsqrt_d(x);
2720 #if (GMX_SIMD_RSQRT_BITS < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
2721 lu = gmx_simd4_rsqrt_iter_d(lu, x);
2723 #if (GMX_SIMD_RSQRT_BITS*2 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
2724 lu = gmx_simd4_rsqrt_iter_d(lu, x);
2726 #if (GMX_SIMD_RSQRT_BITS*4 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
2727 lu = gmx_simd4_rsqrt_iter_d(lu, x);
2729 #if (GMX_SIMD_RSQRT_BITS*8 < GMX_SIMD_MATH_TARGET_DOUBLE_BITS)
2730 lu = gmx_simd4_rsqrt_iter_d(lu, x);
2734 #endif /* GMX_SIMD4_HAVE_DOUBLE */
2739 /* Set defines based on default Gromacs precision */
2741 /* Documentation in single branch below */
2742 # define gmx_simd_sum4_r gmx_simd_sum4_d
2743 # define gmx_simd_xor_sign_r gmx_simd_xor_sign_d
2744 # define gmx_simd_invsqrt_r gmx_simd_invsqrt_d
2745 # define gmx_simd_invsqrt_pair_r gmx_simd_invsqrt_pair_d
2746 # define gmx_simd_sqrt_r gmx_simd_sqrt_d
2747 # define gmx_simd_inv_r gmx_simd_inv_d
2748 # define gmx_simd_log_r gmx_simd_log_d
2749 # define gmx_simd_exp2_r gmx_simd_exp2_d
2750 # define gmx_simd_exp_r gmx_simd_exp_d
2751 # define gmx_simd_erf_r gmx_simd_erf_d
2752 # define gmx_simd_erfc_r gmx_simd_erfc_d
2753 # define gmx_simd_sincos_r gmx_simd_sincos_d
2754 # define gmx_simd_sin_r gmx_simd_sin_d
2755 # define gmx_simd_cos_r gmx_simd_cos_d
2756 # define gmx_simd_tan_r gmx_simd_tan_d
2757 # define gmx_simd_asin_r gmx_simd_asin_d
2758 # define gmx_simd_acos_r gmx_simd_acos_d
2759 # define gmx_simd_atan_r gmx_simd_atan_d
2760 # define gmx_simd_atan2_r gmx_simd_atan2_d
2761 # define gmx_simd_pmecorrF_r gmx_simd_pmecorrF_d
2762 # define gmx_simd_pmecorrV_r gmx_simd_pmecorrV_d
2763 # define gmx_simd4_sum4_r gmx_simd4_sum4_d
2764 # define gmx_simd4_invsqrt_r gmx_simd4_invsqrt_d
#else /* GMX_DOUBLE */

/*! \name Real-precision SIMD math functions
 *
 *  These are the ones you should typically call in Gromacs.
 *  In single precision they simply alias the _f flavors.
 * \{
 */

/*! \brief SIMD utility function to sum a+b+c+d for SIMD reals.
 *
 * \copydetails gmx_simd_sum4_f
 */
#    define gmx_simd_sum4_r           gmx_simd_sum4_f

/*! \brief Return -a if b is negative, SIMD real.
 *
 * \copydetails gmx_simd_xor_sign_f
 */
#    define gmx_simd_xor_sign_r       gmx_simd_xor_sign_f

/*! \brief Calculate 1/sqrt(x) for SIMD real.
 *
 * \copydetails gmx_simd_invsqrt_f
 */
#    define gmx_simd_invsqrt_r        gmx_simd_invsqrt_f

/*! \brief Calculate 1/sqrt(x) for two SIMD reals.
 *
 * \copydetails gmx_simd_invsqrt_pair_f
 */
#    define gmx_simd_invsqrt_pair_r   gmx_simd_invsqrt_pair_f

/*! \brief Calculate sqrt(x) correctly for SIMD real, including argument 0.0.
 *
 * \copydetails gmx_simd_sqrt_f
 */
#    define gmx_simd_sqrt_r           gmx_simd_sqrt_f

/*! \brief Calculate 1/x for SIMD real.
 *
 * \copydetails gmx_simd_inv_f
 */
#    define gmx_simd_inv_r            gmx_simd_inv_f

/*! \brief SIMD real log(x). This is the natural logarithm.
 *
 * \copydetails gmx_simd_log_f
 */
#    define gmx_simd_log_r            gmx_simd_log_f

/*! \brief SIMD real 2^x.
 *
 * \copydetails gmx_simd_exp2_f
 */
#    define gmx_simd_exp2_r           gmx_simd_exp2_f

/*! \brief SIMD real e^x.
 *
 * \copydetails gmx_simd_exp_f
 */
#    define gmx_simd_exp_r            gmx_simd_exp_f

/*! \brief SIMD real erf(x).
 *
 * \copydetails gmx_simd_erf_f
 */
#    define gmx_simd_erf_r            gmx_simd_erf_f

/*! \brief SIMD real erfc(x).
 *
 * \copydetails gmx_simd_erfc_f
 */
#    define gmx_simd_erfc_r           gmx_simd_erfc_f

/*! \brief SIMD real sin \& cos.
 *
 * \copydetails gmx_simd_sincos_f
 */
#    define gmx_simd_sincos_r         gmx_simd_sincos_f

/*! \brief SIMD real sin(x).
 *
 * \copydetails gmx_simd_sin_f
 */
#    define gmx_simd_sin_r            gmx_simd_sin_f

/*! \brief SIMD real cos(x).
 *
 * \copydetails gmx_simd_cos_f
 */
#    define gmx_simd_cos_r            gmx_simd_cos_f

/*! \brief SIMD real tan(x).
 *
 * \copydetails gmx_simd_tan_f
 */
#    define gmx_simd_tan_r            gmx_simd_tan_f

/*! \brief SIMD real asin(x).
 *
 * \copydetails gmx_simd_asin_f
 */
#    define gmx_simd_asin_r           gmx_simd_asin_f

/*! \brief SIMD real acos(x).
 *
 * \copydetails gmx_simd_acos_f
 */
#    define gmx_simd_acos_r           gmx_simd_acos_f

/*! \brief SIMD real atan(x).
 *
 * \copydetails gmx_simd_atan_f
 */
#    define gmx_simd_atan_r           gmx_simd_atan_f

/*! \brief SIMD real atan2(y,x).
 *
 * \copydetails gmx_simd_atan2_f
 */
#    define gmx_simd_atan2_r          gmx_simd_atan2_f

/*! \brief SIMD Analytic PME force correction.
 *
 * \copydetails gmx_simd_pmecorrF_f
 */
#    define gmx_simd_pmecorrF_r       gmx_simd_pmecorrF_f

/*! \brief SIMD Analytic PME potential correction.
 *
 * \copydetails gmx_simd_pmecorrV_f
 */
#    define gmx_simd_pmecorrV_r       gmx_simd_pmecorrV_f

/*! \}
 * \name SIMD4 math functions
 * \{
 */

/*! \brief SIMD4 utility function to sum a+b+c+d for SIMD4 reals.
 *
 * \copydetails gmx_simd_sum4_f
 */
#    define gmx_simd4_sum4_r          gmx_simd4_sum4_f

/*! \brief Calculate 1/sqrt(x) for SIMD4 real.
 *
 * \copydetails gmx_simd_invsqrt_f
 */
#    define gmx_simd4_invsqrt_r       gmx_simd4_invsqrt_f

/*! \} */
2919 #endif /* GMX_DOUBLE */
2924 #endif /* GMX_SIMD_SIMD_MATH_H_ */