/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _gmx_math_x86_avx_128_fma_double_h_
#define _gmx_math_x86_avx_128_fma_double_h_

#include <math.h>

#include "gmx_x86_avx_128_fma.h"


#ifndef M_PI
#  define M_PI 3.14159265358979323846264338327950288
#endif


/************************
 *                      *
 * Simple math routines *
 *                      *
 ************************/

/* 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_invsqrt_pd(__m128d x)
{
    const __m128d half  = _mm_set1_pd(0.5);
    const __m128d three = _mm_set1_pd(3.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));

    /* Perform two Newton-Raphson steps, lu' = 0.5*lu*(3-x*lu*lu), to reach double precision */
    lu = _mm_mul_pd(_mm_mul_pd(half,lu),_mm_nmacc_pd(_mm_mul_pd(lu,lu),x,three));
    return _mm_mul_pd(_mm_mul_pd(half,lu),_mm_nmacc_pd(_mm_mul_pd(lu,lu),x,three));
}
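
/* Usage sketch (illustrative only; this helper is hypothetical, not part of
 * GROMACS): a typical use of gmx_mm_invsqrt_pd() is getting 1/r directly
 * from a squared distance, here accumulated with FMA operations.
 */
static gmx_inline __m128d
gmx_mm_example_rinv_pd(__m128d dx, __m128d dy, __m128d dz)
{
    /* r2 = dx*dx + dy*dy + dz*dz */
    __m128d r2 = _mm_macc_pd(dx,dx,_mm_macc_pd(dy,dy,_mm_mul_pd(dz,dz)));

    /* 1/sqrt(r2) = 1/r, without ever forming r itself */
    return gmx_mm_invsqrt_pd(r2);
}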

/* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
static void
gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
{
    const __m128d half   = _mm_set1_pd(0.5);
    const __m128d three  = _mm_set1_pd(3.0);
    const __m128  halff  = _mm_set1_ps(0.5f);
    const __m128  threef = _mm_set1_ps(3.0f);

    __m128 xf,luf;
    __m128d lu1,lu2;

    /* Do first N-R step in float for 2x throughput */
    xf  = _mm_shuffle_ps(_mm_cvtpd_ps(x1),_mm_cvtpd_ps(x2),_MM_SHUFFLE(1,0,1,0));
    luf = _mm_rsqrt_ps(xf);

    luf = _mm_mul_ps(_mm_mul_ps(halff,luf),_mm_nmacc_ps(_mm_mul_ps(luf,luf),xf,threef));

    lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf,luf,_MM_SHUFFLE(3,2,3,2)));
    lu1 = _mm_cvtps_pd(luf);

    *invsqrt1 = _mm_mul_pd(_mm_mul_pd(half,lu1),_mm_nmacc_pd(_mm_mul_pd(lu1,lu1),x1,three));
    *invsqrt2 = _mm_mul_pd(_mm_mul_pd(half,lu2),_mm_nmacc_pd(_mm_mul_pd(lu2,lu2),x2,three));
}

/* sqrt(x) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_sqrt_pd(__m128d x)
{
    __m128d mask;
    __m128d res;

    /* invsqrt(0) is Inf, so zero those lanes before multiplying by x to get sqrt(0)=0 */
    mask = _mm_cmpeq_pd(x,_mm_setzero_pd());
    res  = _mm_andnot_pd(mask,gmx_mm_invsqrt_pd(x));

    res  = _mm_mul_pd(x,res);

    return res;
}

/* 1.0/x */
static gmx_inline __m128d
gmx_mm_inv_pd(__m128d x)
{
    const __m128d two  = _mm_set1_pd(2.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu         = _mm_mul_pd(lu,_mm_nmacc_pd(lu,x,two));
    return _mm_mul_pd(lu,_mm_nmacc_pd(lu,x,two));
}

/* |x| - clear the sign bit of both elements */
static gmx_inline __m128d
gmx_mm_abs_pd(__m128d x)
{
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );

    return _mm_and_pd(x,signmask);
}


/*
 * 2^x function.
 *
 * The integer part of the argument is inserted directly into the IEEE754
 * exponent field, and the remaining term 2^w, with w on the interval [-0.5,0.5],
 * is approximated with a rational Padé form, 1+2*w*P(w^2)/(Q(w^2)-w*P(w^2)),
 * according to the same algorithm as used in the Cephes/netlib math routines.
 */
static __m128d
gmx_mm_exp2_pd(__m128d x)
{
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d P2       = _mm_set1_pd(2.30933477057345225087e-2);
    const __m128d P1       = _mm_set1_pd(2.02020656693165307700e1);
    const __m128d P0       = _mm_set1_pd(1.51390680115615096133e3);
    /* Q2 == 1.0 */
    const __m128d Q1       = _mm_set1_pd(2.33184211722314911771e2);
    const __m128d Q0       = _mm_set1_pd(4.36821166879210612817e3);
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d valuemask;
    __m128i iexppart;
    __m128d fexppart;
    __m128d intpart;
    __m128d z,z2;
    __m128d PolyP,PolyQ;

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart,expbase);
    iexppart  = _mm_slli_epi64(iexppart,52);

    valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(x,intpart);
    z2        = _mm_mul_pd(z,z);

    PolyP     = _mm_macc_pd(P2,z2,P1);
    PolyQ     = _mm_add_pd(z2,Q1);
    PolyP     = _mm_macc_pd(PolyP,z2,P0);
    PolyQ     = _mm_macc_pd(PolyQ,z2,Q0);
    PolyP     = _mm_mul_pd(PolyP,z);

    z         = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
    z         = _mm_macc_pd(two,z,one);

    z         = _mm_mul_pd(z,fexppart);

    return  z;
}
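
/* Usage sketch (illustrative only; the helper and its arrays are hypothetical):
 * evaluating 2^x for two packed doubles.
 */
static void
gmx_mm_example_exp2_pd(const double in[2], double out[2])
{
    __m128d x = _mm_loadu_pd(in);

    _mm_storeu_pd(out,gmx_mm_exp2_pd(x));
    /* e.g. in = {0.0, 3.0} should give out = {1.0, 8.0} up to rounding */
}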

/* Exponential function. This could be calculated from 2^x as Exp(x)=2^y, where y=log2(e)*x,
 * but there would then be a small rounding error, since we lose some precision in the
 * multiplication, which would then be magnified a lot by the exponential.
 *
 * Instead, we calculate the fractional part directly as a Padé approximation of
 * Exp(z) on [-0.5,0.5]. We use extended precision arithmetic to calculate the fraction
 * remaining after 2^y, which avoids the precision loss.
 */
static __m128d
gmx_mm_exp_pd(__m128d exparg)
{
    const __m128d argscale = _mm_set1_pd(1.4426950408889634073599);
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d invargscale0  = _mm_set1_pd(6.93145751953125e-1);
    const __m128d invargscale1  = _mm_set1_pd(1.42860682030941723212e-6);

    const __m128d P2       = _mm_set1_pd(1.26177193074810590878e-4);
    const __m128d P1       = _mm_set1_pd(3.02994407707441961300e-2);
    /* P0 == 1.0 */
    const __m128d Q3       = _mm_set1_pd(3.00198505138664455042E-6);
    const __m128d Q2       = _mm_set1_pd(2.52448340349684104192E-3);
    const __m128d Q1       = _mm_set1_pd(2.27265548208155028766E-1);
    /* Q0 == 2.0 */
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d valuemask;
    __m128i iexppart;
    __m128d fexppart;
    __m128d intpart;
    __m128d x,z,z2;
    __m128d PolyP,PolyQ;

    x             = _mm_mul_pd(exparg,argscale);

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_round_pd(x,_MM_FROUND_TO_NEAREST_INT);

    /* The two lowest elements of iexppart now contain 32-bit numbers with a correctly biased exponent.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart,_MM_SHUFFLE(2,1,2,0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart,expbase);
    iexppart  = _mm_slli_epi64(iexppart,52);

    valuemask = _mm_cmpge_pd(arglimit,gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask,gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(exparg,_mm_mul_pd(invargscale0,intpart));
    z         = _mm_sub_pd(z,_mm_mul_pd(invargscale1,intpart));

    z2        = _mm_mul_pd(z,z);

    PolyQ     = _mm_macc_pd(Q3,z2,Q2);
    PolyP     = _mm_macc_pd(P2,z2,P1);
    PolyQ     = _mm_macc_pd(PolyQ,z2,Q1);

    PolyP     = _mm_macc_pd(PolyP,z2,one);
    PolyQ     = _mm_macc_pd(PolyQ,z2,two);

    PolyP     = _mm_mul_pd(PolyP,z);

    z         = _mm_mul_pd(PolyP,gmx_mm_inv_pd(_mm_sub_pd(PolyQ,PolyP)));
    z         = _mm_macc_pd(two,z,one);

    z         = _mm_mul_pd(z,fexppart);

    return  z;
}
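
/* Usage sketch (illustrative only; helper name and parameters hypothetical):
 * a Boltzmann factor exp(-E/kT) for two energies at once.
 */
static gmx_inline __m128d
gmx_mm_example_boltzmann_pd(__m128d energy, double kT)
{
    /* exp(-E/kT) */
    return gmx_mm_exp_pd(_mm_mul_pd(_mm_set1_pd(-1.0/kT),energy));
}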


static __m128d
gmx_mm_log_pd(__m128d x)
{
    /* Same algorithm as the Cephes library */
    const __m128d expmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );

    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */

    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d two        = _mm_set1_pd(2.0);
    const __m128d invsq2     = _mm_set1_pd(1.0/sqrt(2.0));

    const __m128d corr1      = _mm_set1_pd(-2.121944400546905827679e-4);
    const __m128d corr2      = _mm_set1_pd(0.693359375);

    const __m128d P5         = _mm_set1_pd(1.01875663804580931796e-4);
    const __m128d P4         = _mm_set1_pd(4.97494994976747001425e-1);
    const __m128d P3         = _mm_set1_pd(4.70579119878881725854e0);
    const __m128d P2         = _mm_set1_pd(1.44989225341610930846e1);
    const __m128d P1         = _mm_set1_pd(1.79368678507819816313e1);
    const __m128d P0         = _mm_set1_pd(7.70838733755885391666e0);

    const __m128d Q4         = _mm_set1_pd(1.12873587189167450590e1);
    const __m128d Q3         = _mm_set1_pd(4.52279145837532221105e1);
    const __m128d Q2         = _mm_set1_pd(8.29875266912776603211e1);
    const __m128d Q1         = _mm_set1_pd(7.11544750618563894466e1);
    const __m128d Q0         = _mm_set1_pd(2.31251620126765340583e1);

    const __m128d R2         = _mm_set1_pd(-7.89580278884799154124e-1);
    const __m128d R1         = _mm_set1_pd(1.63866645699558079767e1);
    const __m128d R0         = _mm_set1_pd(-6.41409952958715622951e1);

    const __m128d S2         = _mm_set1_pd(-3.56722798256324312549E1);
    const __m128d S1         = _mm_set1_pd(3.12093766372244180303E2);
    const __m128d S0         = _mm_set1_pd(-7.69691943550460008604E2);

    __m128d fexp;
    __m128i iexp;

    __m128d mask1,mask2;
    __m128d corr,t1,t2,q;
    __m128d zA,yA,xA,zB,yB,xB,z;
    __m128d polyR,polyS;
    __m128d polyP1,polyP2,polyQ1,polyQ2;

    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
    fexp   = _mm_and_pd(x,expmask);
    iexp   = gmx_mm_castpd_si128(fexp);
    iexp   = _mm_srli_epi64(iexp,52);
    iexp   = _mm_sub_epi32(iexp,expbase_m1);
    iexp   = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1,1,2,0) );
    fexp   = _mm_cvtepi32_pd(iexp);

    x      = _mm_andnot_pd(expmask,x);
    x      = _mm_or_pd(x,one);
    x      = _mm_mul_pd(x,half);

    mask1     = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp),two);
    mask2     = _mm_cmplt_pd(x,invsq2);

    fexp   = _mm_sub_pd(fexp,_mm_and_pd(mask2,one));

    /* If mask1 is set ('A') */
    zA     = _mm_sub_pd(x,half);
    t1     = _mm_blendv_pd( zA,x,mask2 );
    zA     = _mm_sub_pd(t1,half);
    t2     = _mm_blendv_pd( x,zA,mask2 );
    yA     = _mm_mul_pd(half,_mm_add_pd(t2,one));

    xA     = _mm_mul_pd(zA,gmx_mm_inv_pd(yA));
    zA     = _mm_mul_pd(xA,xA);

    /* EVALUATE POLY */
    polyR  = _mm_macc_pd(R2,zA,R1);
    polyR  = _mm_macc_pd(polyR,zA,R0);

    polyS  = _mm_add_pd(zA,S2);
    polyS  = _mm_macc_pd(polyS,zA,S1);
    polyS  = _mm_macc_pd(polyS,zA,S0);

    q      = _mm_mul_pd(polyR,gmx_mm_inv_pd(polyS));
    zA     = _mm_mul_pd(_mm_mul_pd(xA,zA),q);

    zA     = _mm_macc_pd(corr1,fexp,zA);
    zA     = _mm_add_pd(zA,xA);
    zA     = _mm_macc_pd(corr2,fexp,zA);

    /* If mask1 is not set ('B') */
    corr   = _mm_and_pd(mask2,x);
    xB     = _mm_add_pd(x,corr);
    xB     = _mm_sub_pd(xB,one);
    zB     = _mm_mul_pd(xB,xB);

    polyP1 = _mm_macc_pd(P5,zB,P3);
    polyP2 = _mm_macc_pd(P4,zB,P2);
    polyP1 = _mm_macc_pd(polyP1,zB,P1);
    polyP2 = _mm_macc_pd(polyP2,zB,P0);
    polyP1 = _mm_macc_pd(polyP1,xB,polyP2);

    polyQ2 = _mm_macc_pd(Q4,zB,Q2);
    polyQ1 = _mm_add_pd(zB,Q3);
    polyQ1 = _mm_macc_pd(polyQ1,zB,Q1);
    polyQ2 = _mm_macc_pd(polyQ2,zB,Q0);
    polyQ1 = _mm_macc_pd(polyQ1,xB,polyQ2);

    fexp   = _mm_and_pd(fexp,_mm_cmpneq_pd(fexp,_mm_setzero_pd()));

    q      = _mm_mul_pd(polyP1,gmx_mm_inv_pd(polyQ1));
    yB     = _mm_macc_pd(_mm_mul_pd(xB,zB),q,_mm_mul_pd(corr1,fexp));

    yB     = _mm_nmacc_pd(half,zB,yB);
    zB     = _mm_add_pd(xB,yB);
    zB     = _mm_macc_pd(corr2,fexp,zB);

    z      = _mm_blendv_pd( zB,zA,mask1 );

    return z;
}
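
/* Usage sketch (illustrative only; the helper and its interpretation are
 * hypothetical): -kT*ln(p), e.g. turning a probability into a free energy.
 */
static gmx_inline __m128d
gmx_mm_example_neg_kT_log_pd(__m128d p, double kT)
{
    return _mm_mul_pd(_mm_set1_pd(-kT),gmx_mm_log_pd(p));
}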


static __m128d
gmx_mm_erf_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
    /* CAQ0 == 1.0 */
    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
    /* CBQ0 == 1.0 */

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
    /* CCQ0 == 1.0 */
    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);

    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d two       = _mm_set1_pd(2.0);

    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );

    __m128d xabs,x2,x4,t,t2,w,w2;
    __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
    __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
    __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
    __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
    __m128d mask,expmx2;

    /* Calculate erf() */
    xabs     = gmx_mm_abs_pd(x);
    x2       = _mm_mul_pd(x,x);
    x4       = _mm_mul_pd(x2,x2);

    PolyAP0  = _mm_macc_pd(CAP4,x4,CAP2);
    PolyAP1  = _mm_macc_pd(CAP3,x4,CAP1);
    PolyAP0  = _mm_macc_pd(PolyAP0,x4,CAP0);
    PolyAP0  = _mm_macc_pd(PolyAP1,x2,PolyAP0);

    PolyAQ1  = _mm_macc_pd(CAQ5,x4,CAQ3);
    PolyAQ0  = _mm_macc_pd(CAQ4,x4,CAQ2);
    PolyAQ1  = _mm_macc_pd(PolyAQ1,x4,CAQ1);
    PolyAQ0  = _mm_macc_pd(PolyAQ0,x4,one);
    PolyAQ0  = _mm_macc_pd(PolyAQ1,x2,PolyAQ0);

    res_erf  = _mm_macc_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0),CAoffset);
    res_erf  = _mm_mul_pd(x,res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm_sub_pd(xabs,one);
    t2      = _mm_mul_pd(t,t);

    PolyBP0  = _mm_macc_pd(CBP6,t2,CBP4);
    PolyBP1  = _mm_macc_pd(CBP5,t2,CBP3);
    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP2);
    PolyBP1  = _mm_macc_pd(PolyBP1,t2,CBP1);
    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP0);
    PolyBP0  = _mm_macc_pd(PolyBP1,t,PolyBP0);

    PolyBQ1 = _mm_macc_pd(CBQ7,t2,CBQ5);
    PolyBQ0 = _mm_macc_pd(CBQ6,t2,CBQ4);
    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ3);
    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,CBQ2);
    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ1);
    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,one);
    PolyBQ0 = _mm_macc_pd(PolyBQ1,t,PolyBQ0);

    res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB,xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm_inv_pd(xabs);
    w2      = _mm_mul_pd(w,w);

    PolyCP0  = _mm_macc_pd(CCP6,w2,CCP4);
    PolyCP1  = _mm_macc_pd(CCP5,w2,CCP3);
    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP2);
    PolyCP1  = _mm_macc_pd(PolyCP1,w2,CCP1);
    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP0);
    PolyCP0  = _mm_macc_pd(PolyCP1,w,PolyCP0);

    PolyCQ0  = _mm_macc_pd(CCQ6,w2,CCQ4);
    PolyCQ1  = _mm_macc_pd(CCQ5,w2,CCQ3);
    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,CCQ2);
    PolyCQ1  = _mm_macc_pd(PolyCQ1,w2,CCQ1);
    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,one);
    PolyCQ0  = _mm_macc_pd(PolyCQ1,w,PolyCQ0);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_macc_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0),CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC,w);

    mask = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
    res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);

    res_erfc = _mm_mul_pd(res_erfc,expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask = _mm_cmplt_pd(x,_mm_setzero_pd());
    res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs,one);
    res  = _mm_blendv_pd(_mm_sub_pd(one,res_erfc),res_erf,mask);

    return res;
}
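
/* Usage sketch (illustrative only; helper and parameters hypothetical): a
 * smooth step 0.5*(1+erf((x-x0)/w)), e.g. usable as a switching factor.
 */
static gmx_inline __m128d
gmx_mm_example_smoothstep_pd(__m128d x, double x0, double w)
{
    __m128d arg = _mm_mul_pd(_mm_sub_pd(x,_mm_set1_pd(x0)),_mm_set1_pd(1.0/w));

    return _mm_mul_pd(_mm_set1_pd(0.5),_mm_add_pd(_mm_set1_pd(1.0),gmx_mm_erf_pd(arg)));
}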

static __m128d
gmx_mm_erfc_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
    /* CAQ0 == 1.0 */
    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
    /* CBQ0 == 1.0 */

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
    /* CCQ0 == 1.0 */
    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);

    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d two       = _mm_set1_pd(2.0);

    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );

    __m128d xabs,x2,x4,t,t2,w,w2;
    __m128d PolyAP0,PolyAP1,PolyAQ0,PolyAQ1;
    __m128d PolyBP0,PolyBP1,PolyBQ0,PolyBQ1;
    __m128d PolyCP0,PolyCP1,PolyCQ0,PolyCQ1;
    __m128d res_erf,res_erfcB,res_erfcC,res_erfc,res;
    __m128d mask,expmx2;

    /* Calculate erf() */
    xabs     = gmx_mm_abs_pd(x);
    x2       = _mm_mul_pd(x,x);
    x4       = _mm_mul_pd(x2,x2);

    PolyAP0  = _mm_macc_pd(CAP4,x4,CAP2);
    PolyAP1  = _mm_macc_pd(CAP3,x4,CAP1);
    PolyAP0  = _mm_macc_pd(PolyAP0,x4,CAP0);
    PolyAP0  = _mm_macc_pd(PolyAP1,x2,PolyAP0);

    PolyAQ1  = _mm_macc_pd(CAQ5,x4,CAQ3);
    PolyAQ0  = _mm_macc_pd(CAQ4,x4,CAQ2);
    PolyAQ1  = _mm_macc_pd(PolyAQ1,x4,CAQ1);
    PolyAQ0  = _mm_macc_pd(PolyAQ0,x4,one);
    PolyAQ0  = _mm_macc_pd(PolyAQ1,x2,PolyAQ0);

    res_erf  = _mm_macc_pd(PolyAP0,gmx_mm_inv_pd(PolyAQ0),CAoffset);
    res_erf  = _mm_mul_pd(x,res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm_sub_pd(xabs,one);
    t2      = _mm_mul_pd(t,t);

    PolyBP0  = _mm_macc_pd(CBP6,t2,CBP4);
    PolyBP1  = _mm_macc_pd(CBP5,t2,CBP3);
    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP2);
    PolyBP1  = _mm_macc_pd(PolyBP1,t2,CBP1);
    PolyBP0  = _mm_macc_pd(PolyBP0,t2,CBP0);
    PolyBP0  = _mm_macc_pd(PolyBP1,t,PolyBP0);

    PolyBQ1 = _mm_macc_pd(CBQ7,t2,CBQ5);
    PolyBQ0 = _mm_macc_pd(CBQ6,t2,CBQ4);
    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ3);
    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,CBQ2);
    PolyBQ1 = _mm_macc_pd(PolyBQ1,t2,CBQ1);
    PolyBQ0 = _mm_macc_pd(PolyBQ0,t2,one);
    PolyBQ0 = _mm_macc_pd(PolyBQ1,t,PolyBQ0);

    res_erfcB = _mm_mul_pd(PolyBP0,gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB,xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm_inv_pd(xabs);
    w2      = _mm_mul_pd(w,w);

    PolyCP0  = _mm_macc_pd(CCP6,w2,CCP4);
    PolyCP1  = _mm_macc_pd(CCP5,w2,CCP3);
    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP2);
    PolyCP1  = _mm_macc_pd(PolyCP1,w2,CCP1);
    PolyCP0  = _mm_macc_pd(PolyCP0,w2,CCP0);
    PolyCP0  = _mm_macc_pd(PolyCP1,w,PolyCP0);

    PolyCQ0  = _mm_macc_pd(CCQ6,w2,CCQ4);
    PolyCQ1  = _mm_macc_pd(CCQ5,w2,CCQ3);
    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,CCQ2);
    PolyCQ1  = _mm_macc_pd(PolyCQ1,w2,CCQ1);
    PolyCQ0  = _mm_macc_pd(PolyCQ0,w2,one);
    PolyCQ0  = _mm_macc_pd(PolyCQ1,w,PolyCQ0);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_macc_pd(PolyCP0,gmx_mm_inv_pd(PolyCQ0),CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC,w);

    mask = _mm_cmpgt_pd(xabs,_mm_set1_pd(4.5));
    res_erfc = _mm_blendv_pd(res_erfcB,res_erfcC,mask);

    res_erfc = _mm_mul_pd(res_erfc,expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask = _mm_cmplt_pd(x,_mm_setzero_pd());
    res_erfc = _mm_blendv_pd(res_erfc,_mm_sub_pd(two,res_erfc),mask);

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs,one);
    res  = _mm_blendv_pd(res_erfc,_mm_sub_pd(one,res_erf),mask);

    return res;
}
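
/* Usage sketch (illustrative only; helper name hypothetical): the Ewald/PME
 * direct-space potential qq*erfc(beta*r)/r, starting from the squared
 * distance r2 so that r is recovered as r2*(1/r).
 */
static gmx_inline __m128d
gmx_mm_example_ewald_pot_pd(__m128d qq, __m128d r2, double beta)
{
    __m128d rinv = gmx_mm_invsqrt_pd(r2);
    __m128d r    = _mm_mul_pd(r2,rinv);
    __m128d z    = _mm_mul_pd(_mm_set1_pd(beta),r);

    return _mm_mul_pd(qq,_mm_mul_pd(gmx_mm_erfc_pd(z),rinv));
}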


/* Calculate the force correction due to PME analytically.
 *
 * This routine is meant to enable analytical evaluation of the
 * direct-space PME electrostatic force to avoid tables.
 *
 * The direct-space potential should be Erfc(beta*r)/r, but there
 * are some problems evaluating that:
 *
 * First, the error function is difficult (read: expensive) to
 * approximate accurately for intermediate to large arguments, and
 * this happens already in ranges of beta*r that occur in simulations.
 * Second, we now try to avoid calculating potentials in GROMACS, and
 * instead use forces directly.
 *
 * We can simplify things slightly by noting that the PME part is really
 * a correction to the normal Coulomb force, since Erfc(z)=1-Erf(z), i.e.
 *
 * V = 1/r - Erf(beta*r)/r
 *
 * The first term we already have from the inverse square root, so
 * we can leave it out of this routine.
 *
 * For PME tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
 * the argument beta*r will be in the range 0.15 to ~4. Use your
 * favorite plotting program to realize how well-behaved Erf(z)/z is
 * in this range!
 *
 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
 * However, it turns out it is more efficient to approximate f(z)/z and
 * then only use even powers. This is another minor optimization, since
 * we actually WANT f(z)/z, because it is going to be multiplied by
 * the vector between the two atoms to get the vectorial force. The
 * fastest flops are the ones we can avoid calculating!
 *
 * So, here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *       2*exp(-z^2)     erf(z)
 *       ------------ - --------
 *       sqrt(Pi)*z^2      z^3
 *
 * 5. Multiply the entire expression by beta^3. This will get you
 *
 *       beta^3*2*exp(-z^2)     beta^3*erf(z)
 *       ------------------  - ---------------
 *          sqrt(Pi)*z^2            z^3
 *
 *    or, switching back to r (z=r*beta):
 *
 *       2*beta*exp(-r^2*beta^2)   erf(r*beta)
 *       ----------------------- - -----------
 *            sqrt(Pi)*r^2            r^3
 *
 *    With a bit of math exercise you should be able to confirm that
 *    this is exactly D[Erf[beta*r]/r,r] divided by r once more.
 *
 * 6. Add the result to 1/r^3, multiply by the product of the charges,
 *    and you have your force (divided by r). A final multiplication
 *    with the vector connecting the two particles and you have your
 *    vectorial force to add to the particles.
 *
 * The steps above are illustrated in the sketch following this routine.
 */
static __m128d
gmx_mm_pmecorrF_pd(__m128d z2)
{
    const __m128d  FN10     = _mm_set1_pd(-8.0072854618360083154e-14);
    const __m128d  FN9      = _mm_set1_pd(1.1859116242260148027e-11);
    const __m128d  FN8      = _mm_set1_pd(-8.1490406329798423616e-10);
    const __m128d  FN7      = _mm_set1_pd(3.4404793543907847655e-8);
    const __m128d  FN6      = _mm_set1_pd(-9.9471420832602741006e-7);
    const __m128d  FN5      = _mm_set1_pd(0.000020740315999115847456);
    const __m128d  FN4      = _mm_set1_pd(-0.00031991745139313364005);
    const __m128d  FN3      = _mm_set1_pd(0.0035074449373659008203);
    const __m128d  FN2      = _mm_set1_pd(-0.031750380176100813405);
    const __m128d  FN1      = _mm_set1_pd(0.13884101728898463426);
    const __m128d  FN0      = _mm_set1_pd(-0.75225277815249618847);

    const __m128d  FD5      = _mm_set1_pd(0.000016009278224355026701);
    const __m128d  FD4      = _mm_set1_pd(0.00051055686934806966046);
    const __m128d  FD3      = _mm_set1_pd(0.0081803507497974289008);
    const __m128d  FD2      = _mm_set1_pd(0.077181146026670287235);
    const __m128d  FD1      = _mm_set1_pd(0.41543303143712535988);
    const __m128d  FD0      = _mm_set1_pd(1.0);

    __m128d z4;
    __m128d polyFN0,polyFN1,polyFD0,polyFD1;

    z4             = _mm_mul_pd(z2,z2);

    polyFD1        = _mm_macc_pd(FD5,z4,FD3);
    polyFD1        = _mm_macc_pd(polyFD1,z4,FD1);
    polyFD1        = _mm_mul_pd(polyFD1,z2);
    polyFD0        = _mm_macc_pd(FD4,z4,FD2);
    polyFD0        = _mm_macc_pd(polyFD0,z4,FD0);
    polyFD0        = _mm_add_pd(polyFD0,polyFD1);

    polyFD0        = gmx_mm_inv_pd(polyFD0);

    polyFN0        = _mm_macc_pd(FN10,z4,FN8);
    polyFN0        = _mm_macc_pd(polyFN0,z4,FN6);
    polyFN0        = _mm_macc_pd(polyFN0,z4,FN4);
    polyFN0        = _mm_macc_pd(polyFN0,z4,FN2);
    polyFN0        = _mm_macc_pd(polyFN0,z4,FN0);
    polyFN1        = _mm_macc_pd(FN9,z4,FN7);
    polyFN1        = _mm_macc_pd(polyFN1,z4,FN5);
    polyFN1        = _mm_macc_pd(polyFN1,z4,FN3);
    polyFN1        = _mm_macc_pd(polyFN1,z4,FN1);
    polyFN0        = _mm_macc_pd(polyFN1,z2,polyFN0);

    return   _mm_mul_pd(polyFN0,polyFD0);
}
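
/* The recipe from the comment above as code (a minimal sketch; the helper and
 * its argument names are hypothetical): given the charge product qq, the
 * squared distance r2 and the Ewald parameter beta, return the scalar force
 * divided by r, ready to be multiplied by the distance vector.
 */
static gmx_inline __m128d
gmx_mm_example_pme_fscal_pd(__m128d qq, __m128d r2, double beta)
{
    __m128d rinv   = gmx_mm_invsqrt_pd(r2);                          /* 1/r   */
    __m128d rinv3  = _mm_mul_pd(_mm_mul_pd(rinv,rinv),rinv);         /* 1/r^3 */
    __m128d z2     = _mm_mul_pd(_mm_set1_pd(beta*beta),r2);          /* steps 1-2 */
    __m128d corr   = _mm_mul_pd(_mm_set1_pd(beta*beta*beta),
                                gmx_mm_pmecorrF_pd(z2));             /* steps 3-5 */

    return _mm_mul_pd(qq,_mm_add_pd(corr,rinv3));                    /* step 6 */
}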


/* Calculate the potential correction due to PME analytically.
 *
 * This routine calculates Erf(z)/z, although you should provide z^2
 * as the input argument.
 *
 * Here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *        erf(z)
 *       --------
 *          z
 *
 * 5. Multiply the entire expression by beta and switch back to r (z=r*beta):
 *
 *       erf(r*beta)
 *       -----------
 *           r
 *
 * 6. Subtract the result from 1/r, multiply by the product of the charges,
 *    and you have your potential.
 *
 * The steps above are illustrated in the sketch following this routine.
 */
static __m128d
gmx_mm_pmecorrV_pd(__m128d z2)
{
    const __m128d  VN9      = _mm_set1_pd(-9.3723776169321855475e-13);
    const __m128d  VN8      = _mm_set1_pd(1.2280156762674215741e-10);
    const __m128d  VN7      = _mm_set1_pd(-7.3562157912251309487e-9);
    const __m128d  VN6      = _mm_set1_pd(2.6215886208032517509e-7);
    const __m128d  VN5      = _mm_set1_pd(-4.9532491651265819499e-6);
    const __m128d  VN4      = _mm_set1_pd(0.00025907400778966060389);
    const __m128d  VN3      = _mm_set1_pd(0.0010585044856156469792);
    const __m128d  VN2      = _mm_set1_pd(0.045247661136833092885);
    const __m128d  VN1      = _mm_set1_pd(0.11643931522926034421);
    const __m128d  VN0      = _mm_set1_pd(1.1283791671726767970);

    const __m128d  VD5      = _mm_set1_pd(0.000021784709867336150342);
    const __m128d  VD4      = _mm_set1_pd(0.00064293662010911388448);
    const __m128d  VD3      = _mm_set1_pd(0.0096311444822588683504);
    const __m128d  VD2      = _mm_set1_pd(0.085608012351550627051);
    const __m128d  VD1      = _mm_set1_pd(0.43652499166614811084);
    const __m128d  VD0      = _mm_set1_pd(1.0);

    __m128d z4;
    __m128d polyVN0,polyVN1,polyVD0,polyVD1;

    z4             = _mm_mul_pd(z2,z2);

    polyVD1        = _mm_macc_pd(VD5,z4,VD3);
    polyVD0        = _mm_macc_pd(VD4,z4,VD2);
    polyVD1        = _mm_macc_pd(polyVD1,z4,VD1);
    polyVD0        = _mm_macc_pd(polyVD0,z4,VD0);
    polyVD0        = _mm_macc_pd(polyVD1,z2,polyVD0);

    polyVD0        = gmx_mm_inv_pd(polyVD0);

    polyVN1        = _mm_macc_pd(VN9,z4,VN7);
    polyVN0        = _mm_macc_pd(VN8,z4,VN6);
    polyVN1        = _mm_macc_pd(polyVN1,z4,VN5);
    polyVN0        = _mm_macc_pd(polyVN0,z4,VN4);
    polyVN1        = _mm_macc_pd(polyVN1,z4,VN3);
    polyVN0        = _mm_macc_pd(polyVN0,z4,VN2);
    polyVN1        = _mm_macc_pd(polyVN1,z4,VN1);
    polyVN0        = _mm_macc_pd(polyVN0,z4,VN0);
    polyVN0        = _mm_macc_pd(polyVN1,z2,polyVN0);

    return   _mm_mul_pd(polyVN0,polyVD0);
}
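
/* The recipe from the comment above as code (a minimal sketch; helper and
 * argument names hypothetical): qq*(1/r - erf(beta*r)/r), i.e. the PME-
 * corrected direct-space potential qq*erfc(beta*r)/r.
 */
static gmx_inline __m128d
gmx_mm_example_pme_vcoul_pd(__m128d qq, __m128d r2, double beta)
{
    __m128d rinv = gmx_mm_invsqrt_pd(r2);                            /* 1/r       */
    __m128d z2   = _mm_mul_pd(_mm_set1_pd(beta*beta),r2);            /* steps 1-2 */
    __m128d verf = _mm_mul_pd(_mm_set1_pd(beta),
                              gmx_mm_pmecorrV_pd(z2));               /* steps 3-5: erf(beta*r)/r */

    return _mm_mul_pd(qq,_mm_sub_pd(rinv,verf));                     /* step 6 */
}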


static int
gmx_mm_sincos_pd(__m128d x,
                 __m128d *sinval,
                 __m128d *cosval)
{
#ifdef _MSC_VER
    __declspec(align(16))
    const double sintable[34] =
    {
        1.00000000000000000e+00 , 0.00000000000000000e+00 ,
        9.95184726672196929e-01 , 9.80171403295606036e-02 ,
        9.80785280403230431e-01 , 1.95090322016128248e-01 ,
        9.56940335732208824e-01 , 2.90284677254462331e-01 ,
        9.23879532511286738e-01 , 3.82683432365089782e-01 ,
        8.81921264348355050e-01 , 4.71396736825997642e-01 ,
        8.31469612302545236e-01 , 5.55570233019602178e-01 ,
        7.73010453362736993e-01 , 6.34393284163645488e-01 ,
        7.07106781186547573e-01 , 7.07106781186547462e-01 ,
        6.34393284163645599e-01 , 7.73010453362736882e-01 ,
        5.55570233019602289e-01 , 8.31469612302545125e-01 ,
        4.71396736825997809e-01 , 8.81921264348354939e-01 ,
        3.82683432365089837e-01 , 9.23879532511286738e-01 ,
        2.90284677254462276e-01 , 9.56940335732208935e-01 ,
        1.95090322016128304e-01 , 9.80785280403230431e-01 ,
        9.80171403295607702e-02 , 9.95184726672196818e-01 ,
        0.0 , 1.00000000000000000e+00
    };
#else
    const __m128d sintable[17] =
    {
        _mm_set_pd( 0.0 , 1.0 ),
        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0) , cos(  1.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0) , cos(  2.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0) , cos(  3.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0) , cos(  4.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0) , cos(  5.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0) , cos(  6.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0) , cos(  7.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0) , cos(  8.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0) , cos(  9.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0) , cos( 10.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0) , cos( 11.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0) , cos( 12.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0) , cos( 13.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0) , cos( 14.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0) , cos( 15.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd(  1.0 , 0.0 )
    };
#endif

    const __m128d signmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128i signbit_epi32  = _mm_set1_epi32(0x80000000);

    const __m128d tabscale      = _mm_set1_pd(32.0/M_PI);
    const __m128d invtabscale0  = _mm_set1_pd(9.81747508049011230469e-02);
    const __m128d invtabscale1  = _mm_set1_pd(1.96197799156550576057e-08);
    const __m128i ione          = _mm_set1_epi32(1);
    const __m128i i32           = _mm_set1_epi32(32);
    const __m128i i16           = _mm_set1_epi32(16);
    const __m128i tabmask       = _mm_set1_epi32(0x3F);
    const __m128d sinP7         = _mm_set1_pd(-1.0/5040.0);
    const __m128d sinP5         = _mm_set1_pd(1.0/120.0);
    const __m128d sinP3         = _mm_set1_pd(-1.0/6.0);
    const __m128d sinP1         = _mm_set1_pd(1.0);

    const __m128d cosP6         = _mm_set1_pd(-1.0/720.0);
    const __m128d cosP4         = _mm_set1_pd(1.0/24.0);
    const __m128d cosP2         = _mm_set1_pd(-1.0/2.0);
    const __m128d cosP0         = _mm_set1_pd(1.0);

    __m128d scalex;
    __m128i tabidx,corridx;
    __m128d xabs,z,z2,polySin,polyCos;
    __m128d xpoint;
    __m128d ypoint0,ypoint1;

    __m128d sinpoint,cospoint;
    __m128d xsign,ssign,csign;
    __m128i imask,sswapsign,cswapsign;
    __m128d minusone;

    xsign    = _mm_andnot_pd(signmask,x);
    xabs     = _mm_and_pd(x,signmask);

    scalex   = _mm_mul_pd(tabscale,xabs);
    tabidx   = _mm_cvtpd_epi32(scalex);

    xpoint   = _mm_round_pd(scalex,_MM_FROUND_TO_NEAREST_INT);

    /* Extended precision arithmetic */
    z        = _mm_nmacc_pd(invtabscale0,xpoint,xabs);
    z        = _mm_nmacc_pd(invtabscale1,xpoint,z);

    /* Range reduction to 0..2*Pi */
    tabidx   = _mm_and_si128(tabidx,tabmask);

    /* tabidx is now in range [0..63] */
    imask     = _mm_cmpgt_epi32(tabidx,i32);
    sswapsign = imask;
    cswapsign = imask;
    corridx   = _mm_and_si128(imask,i32);
    tabidx    = _mm_sub_epi32(tabidx,corridx);

    /* tabidx is now in range [0..32] */
    imask     = _mm_cmpgt_epi32(tabidx,i16);
    cswapsign = _mm_xor_si128(cswapsign,imask);
    corridx   = _mm_sub_epi32(i32,tabidx);
    tabidx    = _mm_blendv_epi8(tabidx,corridx,imask);
    /* tabidx is now in range [0..16] */
    ssign     = _mm_cvtepi32_pd( _mm_or_si128( sswapsign , ione ) );
    csign     = _mm_cvtepi32_pd( _mm_or_si128( cswapsign , ione ) );

#ifdef _MSC_VER
    ypoint0  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,0));
    ypoint1  = _mm_load_pd(sintable + 2*_mm_extract_epi32(tabidx,1));
#else
    ypoint0  = sintable[_mm_extract_epi32(tabidx,0)];
    ypoint1  = sintable[_mm_extract_epi32(tabidx,1)];
#endif
    sinpoint = _mm_unpackhi_pd(ypoint0,ypoint1);
    cospoint = _mm_unpacklo_pd(ypoint0,ypoint1);

    sinpoint = _mm_mul_pd(sinpoint,ssign);
    cospoint = _mm_mul_pd(cospoint,csign);

    z2       = _mm_mul_pd(z,z);

    polySin  = _mm_macc_pd(sinP7,z2,sinP5);
    polySin  = _mm_macc_pd(polySin,z2,sinP3);
    polySin  = _mm_macc_pd(polySin,z2,sinP1);
    polySin  = _mm_mul_pd(polySin,z);

    polyCos  = _mm_macc_pd(cosP6,z2,cosP4);
    polyCos  = _mm_macc_pd(polyCos,z2,cosP2);
    polyCos  = _mm_macc_pd(polyCos,z2,cosP0);

    *sinval  = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint,polyCos) , _mm_mul_pd(cospoint,polySin) ),xsign);
    *cosval  = _mm_sub_pd( _mm_mul_pd(cospoint,polyCos) , _mm_mul_pd(sinpoint,polySin) );

    return 0;
}
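
/* Usage sketch (illustrative only; helper hypothetical): rotating two packed
 * 2D vectors by two packed angles; sine and cosine come from a single
 * gmx_mm_sincos_pd() call, as recommended below.
 */
static void
gmx_mm_example_rotate2d_pd(__m128d angle, __m128d *x, __m128d *y)
{
    __m128d s,c,xnew;

    gmx_mm_sincos_pd(angle,&s,&c);

    /* x' = x*cos - y*sin,  y' = x*sin + y*cos */
    xnew = _mm_sub_pd(_mm_mul_pd(c,*x),_mm_mul_pd(s,*y));
    *y   = _mm_macc_pd(s,*x,_mm_mul_pd(c,*y));
    *x   = xnew;
}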

/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor of 2 in performance!
 */
static __m128d
gmx_mm_sin_pd(__m128d x)
{
    __m128d s,c;
    gmx_mm_sincos_pd(x,&s,&c);
    return s;
}

/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor of 2 in performance!
 */
static __m128d
gmx_mm_cos_pd(__m128d x)
{
    __m128d s,c;
    gmx_mm_sincos_pd(x,&s,&c);
    return c;
}


static __m128d
gmx_mm_tan_pd(__m128d x)
{
    __m128d sinval,cosval;
    __m128d tanval;

    gmx_mm_sincos_pd(x,&sinval,&cosval);

    tanval = _mm_mul_pd(sinval,gmx_mm_inv_pd(cosval));

    return tanval;
}


static __m128d
gmx_mm_asin_pd(__m128d x)
{
    /* Same algorithm as the Cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.625);
    const __m128d limit2    = _mm_set1_pd(1e-8);
    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d morebits  = _mm_set1_pd(6.123233995736765886130e-17);

    const __m128d P5        = _mm_set1_pd(4.253011369004428248960e-3);
    const __m128d P4        = _mm_set1_pd(-6.019598008014123785661e-1);
    const __m128d P3        = _mm_set1_pd(5.444622390564711410273e0);
    const __m128d P2        = _mm_set1_pd(-1.626247967210700244449e1);
    const __m128d P1        = _mm_set1_pd(1.956261983317594739197e1);
    const __m128d P0        = _mm_set1_pd(-8.198089802484824371615e0);

    const __m128d Q4        = _mm_set1_pd(-1.474091372988853791896e1);
    const __m128d Q3        = _mm_set1_pd(7.049610280856842141659e1);
    const __m128d Q2        = _mm_set1_pd(-1.471791292232726029859e2);
    const __m128d Q1        = _mm_set1_pd(1.395105614657485689735e2);
    const __m128d Q0        = _mm_set1_pd(-4.918853881490881290097e1);

    const __m128d R4        = _mm_set1_pd(2.967721961301243206100e-3);
    const __m128d R3        = _mm_set1_pd(-5.634242780008963776856e-1);
    const __m128d R2        = _mm_set1_pd(6.968710824104713396794e0);
    const __m128d R1        = _mm_set1_pd(-2.556901049652824852289e1);
    const __m128d R0        = _mm_set1_pd(2.853665548261061424989e1);

    const __m128d S3        = _mm_set1_pd(-2.194779531642920639778e1);
    const __m128d S2        = _mm_set1_pd(1.470656354026814941758e2);
    const __m128d S1        = _mm_set1_pd(-3.838770957603691357202e2);
    const __m128d S0        = _mm_set1_pd(3.424398657913078477438e2);

    __m128d sign;
    __m128d mask;
    __m128d xabs;
    __m128d zz,ww,z,q,w,y,zz2,ww2;
    __m128d PA,PB;
    __m128d QA,QB;
    __m128d RA,RB;
    __m128d SA,SB;
    __m128d nom,denom;

    sign  = _mm_andnot_pd(signmask,x);
    xabs  = _mm_and_pd(x,signmask);

    mask  = _mm_cmpgt_pd(xabs,limit1);

    zz    = _mm_sub_pd(one,xabs);
    ww    = _mm_mul_pd(xabs,xabs);
    zz2   = _mm_mul_pd(zz,zz);
    ww2   = _mm_mul_pd(ww,ww);

    /* R */
    RA    = _mm_macc_pd(R4,zz2,R2);
    RB    = _mm_macc_pd(R3,zz2,R1);
    RA    = _mm_macc_pd(RA,zz2,R0);
    RA    = _mm_macc_pd(RB,zz,RA);

    /* S, SA = zz2 */
    SB    = _mm_macc_pd(S3,zz2,S1);
    SA    = _mm_add_pd(zz2,S2);
    SA    = _mm_macc_pd(SA,zz2,S0);
    SA    = _mm_macc_pd(SB,zz,SA);

    /* P */
    PA    = _mm_macc_pd(P5,ww2,P3);
    PB    = _mm_macc_pd(P4,ww2,P2);
    PA    = _mm_macc_pd(PA,ww2,P1);
    PB    = _mm_macc_pd(PB,ww2,P0);
    PA    = _mm_macc_pd(PA,ww,PB);

    /* Q, QA = ww2 */
    QB    = _mm_macc_pd(Q4,ww2,Q2);
    QA    = _mm_add_pd(ww2,Q3);
    QA    = _mm_macc_pd(QA,ww2,Q1);
    QB    = _mm_macc_pd(QB,ww2,Q0);
    QA    = _mm_macc_pd(QA,ww,QB);

    RA    = _mm_mul_pd(RA,zz);
    PA    = _mm_mul_pd(PA,ww);

    nom   = _mm_blendv_pd( PA,RA,mask );
    denom = _mm_blendv_pd( QA,SA,mask );

    q     = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );

    zz    = _mm_add_pd(zz,zz);
    zz    = gmx_mm_sqrt_pd(zz);
    z     = _mm_sub_pd(quarterpi,zz);
    zz    = _mm_mul_pd(zz,q);
    zz    = _mm_sub_pd(zz,morebits);
    z     = _mm_sub_pd(z,zz);
    z     = _mm_add_pd(z,quarterpi);

    w     = _mm_macc_pd(xabs,q,xabs);

    z     = _mm_blendv_pd( w,z,mask );

    mask  = _mm_cmpgt_pd(xabs,limit2);
    z     = _mm_blendv_pd( xabs,z,mask );

    z = _mm_xor_pd(z,sign);

    return z;
}


static __m128d
gmx_mm_acos_pd(__m128d x)
{
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d pi         = _mm_set1_pd(M_PI);
    const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
    const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);

    __m128d mask1;

    __m128d z,z1,z2;

    mask1 = _mm_cmpgt_pd(x,half);
    z1    = _mm_mul_pd(half,_mm_sub_pd(one,x));
    z1    = gmx_mm_sqrt_pd(z1);
    z     = _mm_blendv_pd( x,z1,mask1 );

    z     = gmx_mm_asin_pd(z);

    z1    = _mm_add_pd(z,z);

    z2    = _mm_sub_pd(quarterpi0,z);
    z2    = _mm_add_pd(z2,quarterpi1);
    z2    = _mm_add_pd(z2,quarterpi0);

    z     = _mm_blendv_pd(z2,z1,mask1);

    return z;
}
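
/* Usage sketch (illustrative only; helper hypothetical): a bond angle from
 * the dot product of two bond vectors and their squared lengths, using
 * invsqrt for the normalization.
 */
static gmx_inline __m128d
gmx_mm_example_angle_pd(__m128d dot, __m128d len2a, __m128d len2b)
{
    /* cos(theta) = a.b/(|a|*|b|) */
    __m128d costheta = _mm_mul_pd(dot,gmx_mm_invsqrt_pd(_mm_mul_pd(len2a,len2b)));

    return gmx_mm_acos_pd(costheta);
}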

static __m128d
gmx_mm_atan_pd(__m128d x)
{
    /* Same algorithm as the Cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.66);
    const __m128d limit2    = _mm_set1_pd(2.41421356237309504880);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d mone      = _mm_set1_pd(-1.0);
    const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
    const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);

    const __m128d P4        = _mm_set1_pd(-8.750608600031904122785E-1);
    const __m128d P3        = _mm_set1_pd(-1.615753718733365076637E1);
    const __m128d P2        = _mm_set1_pd(-7.500855792314704667340E1);
    const __m128d P1        = _mm_set1_pd(-1.228866684490136173410E2);
    const __m128d P0        = _mm_set1_pd(-6.485021904942025371773E1);

    const __m128d Q4        = _mm_set1_pd(2.485846490142306297962E1);
    const __m128d Q3        = _mm_set1_pd(1.650270098316988542046E2);
    const __m128d Q2        = _mm_set1_pd(4.328810604912902668951E2);
    const __m128d Q1        = _mm_set1_pd(4.853903996359136964868E2);
    const __m128d Q0        = _mm_set1_pd(1.945506571482613964425E2);

    __m128d sign;
    __m128d mask1,mask2;
    __m128d y,t1,t2;
    __m128d z,z2;
    __m128d P_A,P_B,Q_A,Q_B;

    sign   = _mm_andnot_pd(signmask,x);
    x      = _mm_and_pd(x,signmask);

    mask1  = _mm_cmpgt_pd(x,limit1);
    mask2  = _mm_cmpgt_pd(x,limit2);

    t1     = _mm_mul_pd(_mm_add_pd(x,mone),gmx_mm_inv_pd(_mm_sub_pd(x,mone)));
    t2     = _mm_mul_pd(mone,gmx_mm_inv_pd(x));

    y      = _mm_and_pd(mask1,quarterpi);
    y      = _mm_or_pd( _mm_and_pd(mask2,halfpi) , _mm_andnot_pd(mask2,y) );

    x      = _mm_or_pd( _mm_and_pd(mask1,t1) , _mm_andnot_pd(mask1,x) );
    x      = _mm_or_pd( _mm_and_pd(mask2,t2) , _mm_andnot_pd(mask2,x) );

    z      = _mm_mul_pd(x,x);
    z2     = _mm_mul_pd(z,z);

    P_A    = _mm_macc_pd(P4,z2,P2);
    P_B    = _mm_macc_pd(P3,z2,P1);
    P_A    = _mm_macc_pd(P_A,z2,P0);
    P_A    = _mm_macc_pd(P_B,z,P_A);

    /* Q_A = z2 */
    Q_B    = _mm_macc_pd(Q4,z2,Q2);
    Q_A    = _mm_add_pd(z2,Q3);
    Q_A    = _mm_macc_pd(Q_A,z2,Q1);
    Q_B    = _mm_macc_pd(Q_B,z2,Q0);
    Q_A    = _mm_macc_pd(Q_A,z,Q_B);

    z      = _mm_mul_pd(z,P_A);
    z      = _mm_mul_pd(z,gmx_mm_inv_pd(Q_A));
    z      = _mm_macc_pd(z,x,x);

    t1     = _mm_and_pd(mask1,morebits1);
    t1     = _mm_or_pd( _mm_and_pd(mask2,morebits2) , _mm_andnot_pd(mask2,t1) );

    z      = _mm_add_pd(z,t1);
    y      = _mm_add_pd(y,z);

    y      = _mm_xor_pd(y,sign);

    return y;
}


static __m128d
gmx_mm_atan2_pd(__m128d y, __m128d x)
{
    const __m128d pi          = _mm_set1_pd(M_PI);
    const __m128d minuspi     = _mm_set1_pd(-M_PI);
    const __m128d halfpi      = _mm_set1_pd(M_PI/2.0);
    const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);

    __m128d z,z1,z3,z4;
    __m128d w;
    __m128d maskx_lt,maskx_eq;
    __m128d masky_lt,masky_eq;
    __m128d mask1,mask2,mask3,mask4,maskall;

    maskx_lt  = _mm_cmplt_pd(x,_mm_setzero_pd());
    masky_lt  = _mm_cmplt_pd(y,_mm_setzero_pd());
    maskx_eq  = _mm_cmpeq_pd(x,_mm_setzero_pd());
    masky_eq  = _mm_cmpeq_pd(y,_mm_setzero_pd());

    z         = _mm_mul_pd(y,gmx_mm_inv_pd(x));
    z         = gmx_mm_atan_pd(z);

    mask1     = _mm_and_pd(maskx_eq,masky_lt);
    mask2     = _mm_andnot_pd(maskx_lt,masky_eq);
    mask3     = _mm_andnot_pd( _mm_or_pd(masky_lt,masky_eq) , maskx_eq);
    mask4     = _mm_and_pd(masky_eq,maskx_lt);

    maskall   = _mm_or_pd( _mm_or_pd(mask1,mask2), _mm_or_pd(mask3,mask4) );

    z         = _mm_andnot_pd(maskall,z);
    z1        = _mm_and_pd(mask1,minushalfpi);
    z3        = _mm_and_pd(mask3,halfpi);
    z4        = _mm_and_pd(mask4,pi);

    z         = _mm_or_pd( _mm_or_pd(z,z1), _mm_or_pd(z3,z4) );

    w         = _mm_blendv_pd(pi,minuspi,masky_lt);
    w         = _mm_and_pd(w,maskx_lt);

    w         = _mm_andnot_pd(maskall,w);

    z         = _mm_add_pd(z,w);

    return z;
}

#endif /*_gmx_math_x86_avx_128_fma_double_h_ */