/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *
 * This file is part of GROMACS.
 * Copyright (c) 2012-
 *
 * Written by the Gromacs development team under coordination of
 * David van der Spoel, Berk Hess, and Erik Lindahl.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org
 *
 * And Hey:
 * Gnomes, ROck Monsters And Chili Sauce
 */
#ifndef _gmx_math_x86_sse2_double_h_
#define _gmx_math_x86_sse2_double_h_


#include <stdio.h>
#include <math.h>

#include "gmx_x86_sse2.h"


#ifndef M_PI
#  define M_PI 3.14159265358979323846264338327950288
#endif



/************************
 *                      *
 * Simple math routines *
 *                      *
 ************************/

/* 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_invsqrt_pd(__m128d x)
{
    const __m128d half  = _mm_set1_pd(0.5);
    const __m128d three = _mm_set1_pd(3.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rsqrt_ps( _mm_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu, lu), x)), lu));
    return _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu, lu), x)), lu));
}
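
/*
 * Illustrative sketch, not part of the GROMACS API: each of the two statements
 * above performs one Newton-Raphson step lu' = 0.5*lu*(3 - lu*lu*x), which
 * roughly doubles the number of correct bits, taking the ~12-bit rsqrt lookup
 * to full double precision. The helper below is hypothetical, guarded by the
 * made-up macro GMX_SSE2_DOUBLE_MATH_EXAMPLES, and just compares against libm.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static void
gmx_example_check_invsqrt(double x)
{
    double res[2];

    _mm_storeu_pd(res, gmx_mm_invsqrt_pd(_mm_set1_pd(x)));
    /* Both lanes hold the same value; compare lane 0 against libm */
    printf("invsqrt(%g): simd=%.17g libm=%.17g\n", x, res[0], 1.0/sqrt(x));
}
#endif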

/* 1.0/sqrt(x), done for a pair of arguments to improve throughput */
static void
gmx_mm_invsqrt_pair_pd(__m128d x1, __m128d x2, __m128d *invsqrt1, __m128d *invsqrt2)
{
    const __m128d half   = _mm_set1_pd(0.5);
    const __m128d three  = _mm_set1_pd(3.0);
    const __m128  halff  = _mm_set1_ps(0.5f);
    const __m128  threef = _mm_set1_ps(3.0f);

    __m128        xf, luf;
    __m128d       lu1, lu2;

    /* Do first N-R step in float for 2x throughput */
    xf  = _mm_shuffle_ps(_mm_cvtpd_ps(x1), _mm_cvtpd_ps(x2), _MM_SHUFFLE(1, 0, 1, 0));
    luf = _mm_rsqrt_ps(xf);
    luf = _mm_mul_ps(halff, _mm_mul_ps(_mm_sub_ps(threef, _mm_mul_ps(_mm_mul_ps(luf, luf), xf)), luf));

    lu2 = _mm_cvtps_pd(_mm_shuffle_ps(luf, luf, _MM_SHUFFLE(3, 2, 3, 2)));
    lu1 = _mm_cvtps_pd(luf);

    *invsqrt1 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu1, lu1), x1)), lu1));
    *invsqrt2 = _mm_mul_pd(half, _mm_mul_pd(_mm_sub_pd(three, _mm_mul_pd(_mm_mul_pd(lu2, lu2), x2)), lu2));
}
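
/*
 * Illustrative usage sketch (hypothetical helper; the guard macro is made up):
 * the pair version issues a single _mm_rsqrt_ps lookup plus one float N-R step
 * for both arguments before switching to double, which is why it beats two
 * independent gmx_mm_invsqrt_pd calls in throughput.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static void
gmx_example_invsqrt_pair(void)
{
    __m128d a = _mm_set_pd(2.0, 1.0);
    __m128d b = _mm_set_pd(4.0, 3.0);
    __m128d ia, ib;
    double  va[2], vb[2];

    gmx_mm_invsqrt_pair_pd(a, b, &ia, &ib);
    _mm_storeu_pd(va, ia);
    _mm_storeu_pd(vb, ib);
    printf("1/sqrt of 1..4: %g %g %g %g\n", va[0], va[1], vb[0], vb[1]);
}
#endif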


/* sqrt(x) - Do NOT use this (but rather invsqrt) if you actually need 1.0/sqrt(x) */
static gmx_inline __m128d
gmx_mm_sqrt_pd(__m128d x)
{
    __m128d mask;
    __m128d res;

    mask = _mm_cmpeq_pd(x, _mm_setzero_pd());
    res  = _mm_andnot_pd(mask, gmx_mm_invsqrt_pd(x));

    res  = _mm_mul_pd(x, res);

    return res;
}

/* 1.0/x */
static gmx_inline __m128d
gmx_mm_inv_pd(__m128d x)
{
    const __m128d two  = _mm_set1_pd(2.0);

    /* Lookup instruction only exists in single precision, convert back and forth... */
    __m128d lu = _mm_cvtps_pd(_mm_rcp_ps( _mm_cvtpd_ps(x)));

    /* Perform two N-R steps for double precision */
    lu         = _mm_mul_pd(lu, _mm_sub_pd(two, _mm_mul_pd(x, lu)));
    return _mm_mul_pd(lu, _mm_sub_pd(two, _mm_mul_pd(x, lu)));
}

static gmx_inline __m128d
gmx_mm_abs_pd(__m128d x)
{
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );

    return _mm_and_pd(x, signmask);
}

/*
 * 2^x function.
 *
 * The argument is split as x = intpart + w, with w on the interval [-0.5,0.5].
 * 2^intpart is assembled directly in the IEEE754 exponent field, while the
 * fractional term 2^w is evaluated as a rational Padé approximation,
 * 1+2*P(w^2)/(Q(w^2)-P(w^2)), according to the same algorithm as used in the
 * Cephes/netlib math routines.
 */
static __m128d
gmx_mm_exp2_pd(__m128d x)
{
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d P2       = _mm_set1_pd(2.30933477057345225087e-2);
    const __m128d P1       = _mm_set1_pd(2.02020656693165307700e1);
    const __m128d P0       = _mm_set1_pd(1.51390680115615096133e3);
    /* Q2 == 1.0 */
    const __m128d Q1       = _mm_set1_pd(2.33184211722314911771e2);
    const __m128d Q0       = _mm_set1_pd(4.36821166879210612817e3);
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       z, z2;
    __m128d       PolyP, PolyQ;

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_cvtepi32_pd(iexppart);

    /* The two lowest elements of iexppart now contain the integer part as 32-bit numbers;
     * adding expbase below turns them into correctly biased exponents.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart, expbase);
    iexppart  = _mm_slli_epi64(iexppart, 52);

    valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(x, intpart);
    z2        = _mm_mul_pd(z, z);

    PolyP     = _mm_mul_pd(P2, z2);
    PolyP     = _mm_add_pd(PolyP, P1);
    PolyQ     = _mm_add_pd(z2, Q1);
    PolyP     = _mm_mul_pd(PolyP, z2);
    PolyQ     = _mm_mul_pd(PolyQ, z2);
    PolyP     = _mm_add_pd(PolyP, P0);
    PolyQ     = _mm_add_pd(PolyQ, Q0);
    PolyP     = _mm_mul_pd(PolyP, z);

    z         = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
    z         = _mm_add_pd(one, _mm_mul_pd(two, z));

    z         = _mm_mul_pd(z, fexppart);

    return z;
}
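
/*
 * Illustrative sketch, not GROMACS code: the shuffle/add/shift sequence above
 * builds 2^intpart directly in the IEEE754 exponent field. The scalar
 * equivalent below (hypothetical helper, hypothetical guard macro) shows the
 * same trick: bias the integer by 1023 and move it into bits 52..62.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static double
gmx_example_exp2_intpart(int n)
{
    union { unsigned long long u; double d; } conv;

    /* Valid for -1022 <= n <= 1023, i.e. normal numbers only */
    conv.u = ((unsigned long long)(n + 1023)) << 52;
    return conv.d;   /* equals pow(2.0, n) */
}
#endif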

/* Exponential function. This could be calculated from 2^x as Exp(x)=2^y, where y=log2(e)*x,
 * but there would then be a small rounding error, since we lose some precision in the
 * multiplication, and that error is magnified a lot by the exponential.
 *
 * Instead, we calculate the fractional part directly as a Padé approximation of
 * Exp(z) on [-0.5,0.5]. We use extended-precision arithmetic to calculate the fraction
 * remaining after 2^y, which avoids the precision loss.
 */
static __m128d
gmx_mm_exp_pd(__m128d exparg)
{
    const __m128d argscale = _mm_set1_pd(1.4426950408889634073599);
    /* Lower bound: We do not allow numbers that would lead to an IEEE fp representation exponent smaller than -1022. */
    const __m128d arglimit = _mm_set1_pd(1022.0);
    const __m128i expbase  = _mm_set1_epi32(1023);

    const __m128d invargscale0  = _mm_set1_pd(6.93145751953125e-1);
    const __m128d invargscale1  = _mm_set1_pd(1.42860682030941723212e-6);

    const __m128d P2       = _mm_set1_pd(1.26177193074810590878e-4);
    const __m128d P1       = _mm_set1_pd(3.02994407707441961300e-2);
    /* P0 == 1.0 */
    const __m128d Q3       = _mm_set1_pd(3.00198505138664455042E-6);
    const __m128d Q2       = _mm_set1_pd(2.52448340349684104192E-3);
    const __m128d Q1       = _mm_set1_pd(2.27265548208155028766E-1);
    /* Q0 == 2.0 */
    const __m128d one      = _mm_set1_pd(1.0);
    const __m128d two      = _mm_set1_pd(2.0);

    __m128d       valuemask;
    __m128i       iexppart;
    __m128d       fexppart;
    __m128d       intpart;
    __m128d       x, z, z2;
    __m128d       PolyP, PolyQ;

    x             = _mm_mul_pd(exparg, argscale);

    iexppart  = _mm_cvtpd_epi32(x);
    intpart   = _mm_cvtepi32_pd(iexppart);

    /* The two lowest elements of iexppart now contain the integer part as 32-bit numbers;
     * adding expbase below turns them into correctly biased exponents.
     * To be able to shift it into the exponent for a double precision number we first need to
     * shuffle so that the lower half contains the first element, and the upper half the second.
     * This should really be done as a zero-extension, but since the next instructions will shift
     * the registers left by 52 bits it doesn't matter what we put there - it will be shifted out.
     * (thus we just use element 2 from iexppart).
     */
    iexppart  = _mm_shuffle_epi32(iexppart, _MM_SHUFFLE(2, 1, 2, 0));

    /* Do the shift operation on the 64-bit registers */
    iexppart  = _mm_add_epi32(iexppart, expbase);
    iexppart  = _mm_slli_epi64(iexppart, 52);

    valuemask = _mm_cmpge_pd(arglimit, gmx_mm_abs_pd(x));
    fexppart  = _mm_and_pd(valuemask, gmx_mm_castsi128_pd(iexppart));

    z         = _mm_sub_pd(exparg, _mm_mul_pd(invargscale0, intpart));
    z         = _mm_sub_pd(z, _mm_mul_pd(invargscale1, intpart));

    z2        = _mm_mul_pd(z, z);

    PolyQ     = _mm_mul_pd(Q3, z2);
    PolyQ     = _mm_add_pd(PolyQ, Q2);
    PolyP     = _mm_mul_pd(P2, z2);
    PolyQ     = _mm_mul_pd(PolyQ, z2);
    PolyP     = _mm_add_pd(PolyP, P1);
    PolyQ     = _mm_add_pd(PolyQ, Q1);
    PolyP     = _mm_mul_pd(PolyP, z2);
    PolyQ     = _mm_mul_pd(PolyQ, z2);
    PolyP     = _mm_add_pd(PolyP, one);
    PolyQ     = _mm_add_pd(PolyQ, two);

    PolyP     = _mm_mul_pd(PolyP, z);

    z         = _mm_mul_pd(PolyP, gmx_mm_inv_pd(_mm_sub_pd(PolyQ, PolyP)));
    z         = _mm_add_pd(one, _mm_mul_pd(two, z));

    z         = _mm_mul_pd(z, fexppart);

    return z;
}
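
/*
 * Illustrative note with a hypothetical check helper (made-up guard macro):
 * invargscale0+invargscale1 is ln(2) split Cody-Waite style into a high part
 * that is exact in double precision plus a small remainder, so z =
 * exparg - intpart*ln(2) is formed in two subtractions without cancellation.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static void
gmx_example_check_exp(double x)
{
    double res[2];

    _mm_storeu_pd(res, gmx_mm_exp_pd(_mm_set1_pd(x)));
    printf("exp(%g): simd=%.17g libm=%.17g\n", x, res[0], exp(x));
}
#endif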



static __m128d
gmx_mm_log_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d expmask    = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000) );

    const __m128i expbase_m1 = _mm_set1_epi32(1023-1); /* We want non-IEEE format */

    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d two        = _mm_set1_pd(2.0);
    const __m128d invsq2     = _mm_set1_pd(1.0/sqrt(2.0));

    const __m128d corr1      = _mm_set1_pd(-2.121944400546905827679e-4);
    const __m128d corr2      = _mm_set1_pd(0.693359375);

    const __m128d P5         = _mm_set1_pd(1.01875663804580931796e-4);
    const __m128d P4         = _mm_set1_pd(4.97494994976747001425e-1);
    const __m128d P3         = _mm_set1_pd(4.70579119878881725854e0);
    const __m128d P2         = _mm_set1_pd(1.44989225341610930846e1);
    const __m128d P1         = _mm_set1_pd(1.79368678507819816313e1);
    const __m128d P0         = _mm_set1_pd(7.70838733755885391666e0);

    const __m128d Q4         = _mm_set1_pd(1.12873587189167450590e1);
    const __m128d Q3         = _mm_set1_pd(4.52279145837532221105e1);
    const __m128d Q2         = _mm_set1_pd(8.29875266912776603211e1);
    const __m128d Q1         = _mm_set1_pd(7.11544750618563894466e1);
    const __m128d Q0         = _mm_set1_pd(2.31251620126765340583e1);

    const __m128d R2         = _mm_set1_pd(-7.89580278884799154124e-1);
    const __m128d R1         = _mm_set1_pd(1.63866645699558079767e1);
    const __m128d R0         = _mm_set1_pd(-6.41409952958715622951e1);

    const __m128d S2         = _mm_set1_pd(-3.56722798256324312549E1);
    const __m128d S1         = _mm_set1_pd(3.12093766372244180303E2);
    const __m128d S0         = _mm_set1_pd(-7.69691943550460008604E2);

    __m128d       fexp;
    __m128i       iexp;

    __m128d       mask1, mask2;
    __m128d       corr, t1, t2, q;
    __m128d       zA, yA, xA, zB, yB, xB, z;
    __m128d       polyR, polyS;
    __m128d       polyP1, polyP2, polyQ1, polyQ2;

    /* Separate x into exponent and mantissa, with a mantissa in the range [0.5..1[ (not IEEE754 standard!) */
    fexp   = _mm_and_pd(x, expmask);
    iexp   = gmx_mm_castpd_si128(fexp);
    iexp   = _mm_srli_epi64(iexp, 52);
    iexp   = _mm_sub_epi32(iexp, expbase_m1);
    iexp   = _mm_shuffle_epi32(iexp, _MM_SHUFFLE(1, 1, 2, 0) );
    fexp   = _mm_cvtepi32_pd(iexp);

    x      = _mm_andnot_pd(expmask, x);
    x      = _mm_or_pd(x, one);
    x      = _mm_mul_pd(x, half);

    mask1     = _mm_cmpgt_pd(gmx_mm_abs_pd(fexp), two);
    mask2     = _mm_cmplt_pd(x, invsq2);

    fexp   = _mm_sub_pd(fexp, _mm_and_pd(mask2, one));

    /* If mask1 is set ('A') */
    zA     = _mm_sub_pd(x, half);
    t1     = _mm_or_pd( _mm_andnot_pd(mask2, zA), _mm_and_pd(mask2, x) );
    zA     = _mm_sub_pd(t1, half);
    t2     = _mm_or_pd( _mm_andnot_pd(mask2, x), _mm_and_pd(mask2, zA) );
    yA     = _mm_mul_pd(half, _mm_add_pd(t2, one));

    xA     = _mm_mul_pd(zA, gmx_mm_inv_pd(yA));
    zA     = _mm_mul_pd(xA, xA);

    /* EVALUATE POLY */
    polyR  = _mm_mul_pd(R2, zA);
    polyR  = _mm_add_pd(polyR, R1);
    polyR  = _mm_mul_pd(polyR, zA);
    polyR  = _mm_add_pd(polyR, R0);

    polyS  = _mm_add_pd(zA, S2);
    polyS  = _mm_mul_pd(polyS, zA);
    polyS  = _mm_add_pd(polyS, S1);
    polyS  = _mm_mul_pd(polyS, zA);
    polyS  = _mm_add_pd(polyS, S0);

    q      = _mm_mul_pd(polyR, gmx_mm_inv_pd(polyS));
    zA     = _mm_mul_pd(_mm_mul_pd(xA, zA), q);

    zA     = _mm_add_pd(zA, _mm_mul_pd(corr1, fexp));
    zA     = _mm_add_pd(zA, xA);
    zA     = _mm_add_pd(zA, _mm_mul_pd(corr2, fexp));

    /* If mask1 is not set ('B') */
    corr   = _mm_and_pd(mask2, x);
    xB     = _mm_add_pd(x, corr);
    xB     = _mm_sub_pd(xB, one);
    zB     = _mm_mul_pd(xB, xB);

    polyP1 = _mm_mul_pd(P5, zB);
    polyP2 = _mm_mul_pd(P4, zB);
    polyP1 = _mm_add_pd(polyP1, P3);
    polyP2 = _mm_add_pd(polyP2, P2);
    polyP1 = _mm_mul_pd(polyP1, zB);
    polyP2 = _mm_mul_pd(polyP2, zB);
    polyP1 = _mm_add_pd(polyP1, P1);
    polyP2 = _mm_add_pd(polyP2, P0);
    polyP1 = _mm_mul_pd(polyP1, xB);
    polyP1 = _mm_add_pd(polyP1, polyP2);

    polyQ2 = _mm_mul_pd(Q4, zB);
    polyQ1 = _mm_add_pd(zB, Q3);
    polyQ2 = _mm_add_pd(polyQ2, Q2);
    polyQ1 = _mm_mul_pd(polyQ1, zB);
    polyQ2 = _mm_mul_pd(polyQ2, zB);
    polyQ1 = _mm_add_pd(polyQ1, Q1);
    polyQ2 = _mm_add_pd(polyQ2, Q0);
    polyQ1 = _mm_mul_pd(polyQ1, xB);
    polyQ1 = _mm_add_pd(polyQ1, polyQ2);

    fexp   = _mm_and_pd(fexp, _mm_cmpneq_pd(fexp, _mm_setzero_pd()));

    q      = _mm_mul_pd(polyP1, gmx_mm_inv_pd(polyQ1));
    yB     = _mm_mul_pd(_mm_mul_pd(xB, zB), q);

    yB     = _mm_add_pd(yB, _mm_mul_pd(corr1, fexp));
    yB     = _mm_sub_pd(yB, _mm_mul_pd(half, zB));
    zB     = _mm_add_pd(xB, yB);
    zB     = _mm_add_pd(zB, _mm_mul_pd(corr2, fexp));

    z      = _mm_or_pd( _mm_andnot_pd(mask1, zB), _mm_and_pd(mask1, zA) );

    return z;
}
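
/*
 * Illustrative sketch (hypothetical helper, made-up guard macro): the bit
 * fiddling at the top of gmx_mm_log_pd is a vectorized frexp(), splitting x
 * into a mantissa in [0.5,1) and an integer exponent, so that
 * log(x) = log(m) + e*log(2).
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static void
gmx_example_log_split(double x)
{
    int    e;
    double m = frexp(x, &e);   /* x = m * 2^e with 0.5 <= m < 1 */

    printf("log(%g) = log(%g) + %d*log(2) = %.17g\n", x, m, e, log(m) + e*log(2.0));
}
#endif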



static __m128d
gmx_mm_erf_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
    /* CAQ0 == 1.0 */
    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
    /* CBQ0 == 1.0 */

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
    /* CCQ0 == 1.0 */
    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);

    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d two       = _mm_set1_pd(2.0);

    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m128d       xabs, x2, x4, t, t2, w, w2;
    __m128d       PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m128d       PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m128d       PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m128d       res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m128d       mask, expmx2;

    /* Calculate erf() */
    xabs     = gmx_mm_abs_pd(x);
    x2       = _mm_mul_pd(x, x);
    x4       = _mm_mul_pd(x2, x2);

    PolyAP0  = _mm_mul_pd(CAP4, x4);
    PolyAP1  = _mm_mul_pd(CAP3, x4);
    PolyAP0  = _mm_add_pd(PolyAP0, CAP2);
    PolyAP1  = _mm_add_pd(PolyAP1, CAP1);
    PolyAP0  = _mm_mul_pd(PolyAP0, x4);
    PolyAP1  = _mm_mul_pd(PolyAP1, x2);
    PolyAP0  = _mm_add_pd(PolyAP0, CAP0);
    PolyAP0  = _mm_add_pd(PolyAP0, PolyAP1);

    PolyAQ1  = _mm_mul_pd(CAQ5, x4);
    PolyAQ0  = _mm_mul_pd(CAQ4, x4);
    PolyAQ1  = _mm_add_pd(PolyAQ1, CAQ3);
    PolyAQ0  = _mm_add_pd(PolyAQ0, CAQ2);
    PolyAQ1  = _mm_mul_pd(PolyAQ1, x4);
    PolyAQ0  = _mm_mul_pd(PolyAQ0, x4);
    PolyAQ1  = _mm_add_pd(PolyAQ1, CAQ1);
    PolyAQ0  = _mm_add_pd(PolyAQ0, one);
    PolyAQ1  = _mm_mul_pd(PolyAQ1, x2);
    PolyAQ0  = _mm_add_pd(PolyAQ0, PolyAQ1);

    res_erf  = _mm_mul_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0));
    res_erf  = _mm_add_pd(CAoffset, res_erf);
    res_erf  = _mm_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm_sub_pd(xabs, one);
    t2      = _mm_mul_pd(t, t);

    PolyBP0  = _mm_mul_pd(CBP6, t2);
    PolyBP1  = _mm_mul_pd(CBP5, t2);
    PolyBP0  = _mm_add_pd(PolyBP0, CBP4);
    PolyBP1  = _mm_add_pd(PolyBP1, CBP3);
    PolyBP0  = _mm_mul_pd(PolyBP0, t2);
    PolyBP1  = _mm_mul_pd(PolyBP1, t2);
    PolyBP0  = _mm_add_pd(PolyBP0, CBP2);
    PolyBP1  = _mm_add_pd(PolyBP1, CBP1);
    PolyBP0  = _mm_mul_pd(PolyBP0, t2);
    PolyBP1  = _mm_mul_pd(PolyBP1, t);
    PolyBP0  = _mm_add_pd(PolyBP0, CBP0);
    PolyBP0  = _mm_add_pd(PolyBP0, PolyBP1);

    PolyBQ1 = _mm_mul_pd(CBQ7, t2);
    PolyBQ0 = _mm_mul_pd(CBQ6, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ5);
    PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ4);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ3);
    PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ2);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ1);
    PolyBQ0 = _mm_add_pd(PolyBQ0, one);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t);
    PolyBQ0 = _mm_add_pd(PolyBQ0, PolyBQ1);

    res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm_inv_pd(xabs);
    w2      = _mm_mul_pd(w, w);

    PolyCP0  = _mm_mul_pd(CCP6, w2);
    PolyCP1  = _mm_mul_pd(CCP5, w2);
    PolyCP0  = _mm_add_pd(PolyCP0, CCP4);
    PolyCP1  = _mm_add_pd(PolyCP1, CCP3);
    PolyCP0  = _mm_mul_pd(PolyCP0, w2);
    PolyCP1  = _mm_mul_pd(PolyCP1, w2);
    PolyCP0  = _mm_add_pd(PolyCP0, CCP2);
    PolyCP1  = _mm_add_pd(PolyCP1, CCP1);
    PolyCP0  = _mm_mul_pd(PolyCP0, w2);
    PolyCP1  = _mm_mul_pd(PolyCP1, w);
    PolyCP0  = _mm_add_pd(PolyCP0, CCP0);
    PolyCP0  = _mm_add_pd(PolyCP0, PolyCP1);

    PolyCQ0  = _mm_mul_pd(CCQ6, w2);
    PolyCQ1  = _mm_mul_pd(CCQ5, w2);
    PolyCQ0  = _mm_add_pd(PolyCQ0, CCQ4);
    PolyCQ1  = _mm_add_pd(PolyCQ1, CCQ3);
    PolyCQ0  = _mm_mul_pd(PolyCQ0, w2);
    PolyCQ1  = _mm_mul_pd(PolyCQ1, w2);
    PolyCQ0  = _mm_add_pd(PolyCQ0, CCQ2);
    PolyCQ1  = _mm_add_pd(PolyCQ1, CCQ1);
    PolyCQ0  = _mm_mul_pd(PolyCQ0, w2);
    PolyCQ1  = _mm_mul_pd(PolyCQ1, w);
    PolyCQ0  = _mm_add_pd(PolyCQ0, one);
    PolyCQ0  = _mm_add_pd(PolyCQ0, PolyCQ1);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_mul_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0));
    res_erfcC = _mm_add_pd(res_erfcC, CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC, w);

    mask     = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
    res_erfc = _mm_or_pd(_mm_andnot_pd(mask, res_erfcB), _mm_and_pd(mask, res_erfcC));

    res_erfc = _mm_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm_cmplt_pd(x, _mm_setzero_pd());
    res_erfc = _mm_or_pd(_mm_andnot_pd(mask, res_erfc), _mm_and_pd(mask, _mm_sub_pd(two, res_erfc)));

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs, one);
    res  = _mm_or_pd(_mm_andnot_pd(mask, _mm_sub_pd(one, res_erfc)), _mm_and_pd(mask, res_erf));

    return res;
}


static __m128d
gmx_mm_erfc_pd(__m128d x)
{
    /* Coefficients for minimax approximation of erf(x)=x*(CAoffset + P(x^2)/Q(x^2)) in range [-0.75,0.75] */
    const __m128d CAP4      = _mm_set1_pd(-0.431780540597889301512e-4);
    const __m128d CAP3      = _mm_set1_pd(-0.00578562306260059236059);
    const __m128d CAP2      = _mm_set1_pd(-0.028593586920219752446);
    const __m128d CAP1      = _mm_set1_pd(-0.315924962948621698209);
    const __m128d CAP0      = _mm_set1_pd(0.14952975608477029151);

    const __m128d CAQ5      = _mm_set1_pd(-0.374089300177174709737e-5);
    const __m128d CAQ4      = _mm_set1_pd(0.00015126584532155383535);
    const __m128d CAQ3      = _mm_set1_pd(0.00536692680669480725423);
    const __m128d CAQ2      = _mm_set1_pd(0.0668686825594046122636);
    const __m128d CAQ1      = _mm_set1_pd(0.402604990869284362773);
    /* CAQ0 == 1.0 */
    const __m128d CAoffset  = _mm_set1_pd(0.9788494110107421875);

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)*x*(P(x-1)/Q(x-1)) in range [1.0,4.5] */
    const __m128d CBP6      = _mm_set1_pd(2.49650423685462752497647637088e-10);
    const __m128d CBP5      = _mm_set1_pd(0.00119770193298159629350136085658);
    const __m128d CBP4      = _mm_set1_pd(0.0164944422378370965881008942733);
    const __m128d CBP3      = _mm_set1_pd(0.0984581468691775932063932439252);
    const __m128d CBP2      = _mm_set1_pd(0.317364595806937763843589437418);
    const __m128d CBP1      = _mm_set1_pd(0.554167062641455850932670067075);
    const __m128d CBP0      = _mm_set1_pd(0.427583576155807163756925301060);
    const __m128d CBQ7      = _mm_set1_pd(0.00212288829699830145976198384930);
    const __m128d CBQ6      = _mm_set1_pd(0.0334810979522685300554606393425);
    const __m128d CBQ5      = _mm_set1_pd(0.2361713785181450957579508850717);
    const __m128d CBQ4      = _mm_set1_pd(0.955364736493055670530981883072);
    const __m128d CBQ3      = _mm_set1_pd(2.36815675631420037315349279199);
    const __m128d CBQ2      = _mm_set1_pd(3.55261649184083035537184223542);
    const __m128d CBQ1      = _mm_set1_pd(2.93501136050160872574376997993);
    /* CBQ0 == 1.0 */

    /* Coefficients for minimax approximation of erfc(x)=exp(-x^2)/x*(P(1/x)/Q(1/x)) in range [4.5,inf] */
    const __m128d CCP6      = _mm_set1_pd(-2.8175401114513378771);
    const __m128d CCP5      = _mm_set1_pd(-3.22729451764143718517);
    const __m128d CCP4      = _mm_set1_pd(-2.5518551727311523996);
    const __m128d CCP3      = _mm_set1_pd(-0.687717681153649930619);
    const __m128d CCP2      = _mm_set1_pd(-0.212652252872804219852);
    const __m128d CCP1      = _mm_set1_pd(0.0175389834052493308818);
    const __m128d CCP0      = _mm_set1_pd(0.00628057170626964891937);

    const __m128d CCQ6      = _mm_set1_pd(5.48409182238641741584);
    const __m128d CCQ5      = _mm_set1_pd(13.5064170191802889145);
    const __m128d CCQ4      = _mm_set1_pd(22.9367376522880577224);
    const __m128d CCQ3      = _mm_set1_pd(15.930646027911794143);
    const __m128d CCQ2      = _mm_set1_pd(11.0567237927800161565);
    const __m128d CCQ1      = _mm_set1_pd(2.79257750980575282228);
    /* CCQ0 == 1.0 */
    const __m128d CCoffset  = _mm_set1_pd(0.5579090118408203125);

    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d two       = _mm_set1_pd(2.0);

    const __m128d signbit   = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000, 0x00000000, 0x80000000, 0x00000000) );

    __m128d       xabs, x2, x4, t, t2, w, w2;
    __m128d       PolyAP0, PolyAP1, PolyAQ0, PolyAQ1;
    __m128d       PolyBP0, PolyBP1, PolyBQ0, PolyBQ1;
    __m128d       PolyCP0, PolyCP1, PolyCQ0, PolyCQ1;
    __m128d       res_erf, res_erfcB, res_erfcC, res_erfc, res;
    __m128d       mask, expmx2;

    /* Calculate erf() */
    xabs     = gmx_mm_abs_pd(x);
    x2       = _mm_mul_pd(x, x);
    x4       = _mm_mul_pd(x2, x2);

    PolyAP0  = _mm_mul_pd(CAP4, x4);
    PolyAP1  = _mm_mul_pd(CAP3, x4);
    PolyAP0  = _mm_add_pd(PolyAP0, CAP2);
    PolyAP1  = _mm_add_pd(PolyAP1, CAP1);
    PolyAP0  = _mm_mul_pd(PolyAP0, x4);
    PolyAP1  = _mm_mul_pd(PolyAP1, x2);
    PolyAP0  = _mm_add_pd(PolyAP0, CAP0);
    PolyAP0  = _mm_add_pd(PolyAP0, PolyAP1);

    PolyAQ1  = _mm_mul_pd(CAQ5, x4);
    PolyAQ0  = _mm_mul_pd(CAQ4, x4);
    PolyAQ1  = _mm_add_pd(PolyAQ1, CAQ3);
    PolyAQ0  = _mm_add_pd(PolyAQ0, CAQ2);
    PolyAQ1  = _mm_mul_pd(PolyAQ1, x4);
    PolyAQ0  = _mm_mul_pd(PolyAQ0, x4);
    PolyAQ1  = _mm_add_pd(PolyAQ1, CAQ1);
    PolyAQ0  = _mm_add_pd(PolyAQ0, one);
    PolyAQ1  = _mm_mul_pd(PolyAQ1, x2);
    PolyAQ0  = _mm_add_pd(PolyAQ0, PolyAQ1);

    res_erf  = _mm_mul_pd(PolyAP0, gmx_mm_inv_pd(PolyAQ0));
    res_erf  = _mm_add_pd(CAoffset, res_erf);
    res_erf  = _mm_mul_pd(x, res_erf);

    /* Calculate erfc() in range [1,4.5] */
    t       = _mm_sub_pd(xabs, one);
    t2      = _mm_mul_pd(t, t);

    PolyBP0  = _mm_mul_pd(CBP6, t2);
    PolyBP1  = _mm_mul_pd(CBP5, t2);
    PolyBP0  = _mm_add_pd(PolyBP0, CBP4);
    PolyBP1  = _mm_add_pd(PolyBP1, CBP3);
    PolyBP0  = _mm_mul_pd(PolyBP0, t2);
    PolyBP1  = _mm_mul_pd(PolyBP1, t2);
    PolyBP0  = _mm_add_pd(PolyBP0, CBP2);
    PolyBP1  = _mm_add_pd(PolyBP1, CBP1);
    PolyBP0  = _mm_mul_pd(PolyBP0, t2);
    PolyBP1  = _mm_mul_pd(PolyBP1, t);
    PolyBP0  = _mm_add_pd(PolyBP0, CBP0);
    PolyBP0  = _mm_add_pd(PolyBP0, PolyBP1);

    PolyBQ1 = _mm_mul_pd(CBQ7, t2);
    PolyBQ0 = _mm_mul_pd(CBQ6, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ5);
    PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ4);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ3);
    PolyBQ0 = _mm_add_pd(PolyBQ0, CBQ2);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t2);
    PolyBQ0 = _mm_mul_pd(PolyBQ0, t2);
    PolyBQ1 = _mm_add_pd(PolyBQ1, CBQ1);
    PolyBQ0 = _mm_add_pd(PolyBQ0, one);
    PolyBQ1 = _mm_mul_pd(PolyBQ1, t);
    PolyBQ0 = _mm_add_pd(PolyBQ0, PolyBQ1);

    res_erfcB = _mm_mul_pd(PolyBP0, gmx_mm_inv_pd(PolyBQ0));

    res_erfcB = _mm_mul_pd(res_erfcB, xabs);

    /* Calculate erfc() in range [4.5,inf] */
    w       = gmx_mm_inv_pd(xabs);
    w2      = _mm_mul_pd(w, w);

    PolyCP0  = _mm_mul_pd(CCP6, w2);
    PolyCP1  = _mm_mul_pd(CCP5, w2);
    PolyCP0  = _mm_add_pd(PolyCP0, CCP4);
    PolyCP1  = _mm_add_pd(PolyCP1, CCP3);
    PolyCP0  = _mm_mul_pd(PolyCP0, w2);
    PolyCP1  = _mm_mul_pd(PolyCP1, w2);
    PolyCP0  = _mm_add_pd(PolyCP0, CCP2);
    PolyCP1  = _mm_add_pd(PolyCP1, CCP1);
    PolyCP0  = _mm_mul_pd(PolyCP0, w2);
    PolyCP1  = _mm_mul_pd(PolyCP1, w);
    PolyCP0  = _mm_add_pd(PolyCP0, CCP0);
    PolyCP0  = _mm_add_pd(PolyCP0, PolyCP1);

    PolyCQ0  = _mm_mul_pd(CCQ6, w2);
    PolyCQ1  = _mm_mul_pd(CCQ5, w2);
    PolyCQ0  = _mm_add_pd(PolyCQ0, CCQ4);
    PolyCQ1  = _mm_add_pd(PolyCQ1, CCQ3);
    PolyCQ0  = _mm_mul_pd(PolyCQ0, w2);
    PolyCQ1  = _mm_mul_pd(PolyCQ1, w2);
    PolyCQ0  = _mm_add_pd(PolyCQ0, CCQ2);
    PolyCQ1  = _mm_add_pd(PolyCQ1, CCQ1);
    PolyCQ0  = _mm_mul_pd(PolyCQ0, w2);
    PolyCQ1  = _mm_mul_pd(PolyCQ1, w);
    PolyCQ0  = _mm_add_pd(PolyCQ0, one);
    PolyCQ0  = _mm_add_pd(PolyCQ0, PolyCQ1);

    expmx2   = gmx_mm_exp_pd( _mm_or_pd(signbit, x2) );

    res_erfcC = _mm_mul_pd(PolyCP0, gmx_mm_inv_pd(PolyCQ0));
    res_erfcC = _mm_add_pd(res_erfcC, CCoffset);
    res_erfcC = _mm_mul_pd(res_erfcC, w);

    mask     = _mm_cmpgt_pd(xabs, _mm_set1_pd(4.5));
    res_erfc = _mm_or_pd(_mm_andnot_pd(mask, res_erfcB), _mm_and_pd(mask, res_erfcC));

    res_erfc = _mm_mul_pd(res_erfc, expmx2);

    /* erfc(x<0) = 2-erfc(|x|) */
    mask     = _mm_cmplt_pd(x, _mm_setzero_pd());
    res_erfc = _mm_or_pd(_mm_andnot_pd(mask, res_erfc), _mm_and_pd(mask, _mm_sub_pd(two, res_erfc)));

    /* Select erf() or erfc() */
    mask = _mm_cmplt_pd(xabs, one);
    res  = _mm_or_pd(_mm_andnot_pd(mask, res_erfc), _mm_and_pd(mask, _mm_sub_pd(one, res_erf)));

    return res;
}
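
/*
 * Illustrative sketch (hypothetical helper, made-up guard macro): erf and erfc
 * are exact complements, but for large |x| one should call gmx_mm_erfc_pd()
 * directly, since forming 1-erf(x) in double precision loses all significant
 * bits once erf(x) rounds to 1.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static void
gmx_example_check_erf(double x)
{
    double verf[2], verfc[2];

    _mm_storeu_pd(verf,  gmx_mm_erf_pd(_mm_set1_pd(x)));
    _mm_storeu_pd(verfc, gmx_mm_erfc_pd(_mm_set1_pd(x)));
    printf("x=%g erf=%.17g erfc=%.17g sum=%.17g (should be 1)\n",
           x, verf[0], verfc[0], verf[0] + verfc[0]);
}
#endif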


/* Calculate the force correction due to PME analytically.
 *
 * This routine is meant to enable analytical evaluation of the
 * direct-space PME electrostatic force to avoid tables.
 *
 * The direct-space potential should be Erfc(beta*r)/r, but there
 * are some problems evaluating that:
 *
 * First, the error function is difficult (read: expensive) to
 * approximate accurately for intermediate to large arguments, and
 * this happens already in ranges of beta*r that occur in simulations.
 * Second, we now try to avoid calculating potentials in Gromacs and
 * use forces directly instead.
 *
 * We can simplify things slightly by noting that the PME part is really
 * a correction to the normal Coulomb force since Erfc(z)=1-Erf(z), i.e.
 *
 * V= 1/r - Erf(beta*r)/r
 *
 * The first term we already have from the inverse square root, so
 * we can leave it out of this routine.
 *
 * For PME tolerances of 1e-3 to 1e-8 and cutoffs of 0.5nm to 1.8nm,
 * the argument beta*r will be in the range 0.15 to ~4. Use your
 * favorite plotting program to realize how well-behaved Erf(z)/z is
 * in this range!
 *
 * We approximate f(z)=erf(z)/z with a rational minimax polynomial.
 * However, it turns out it is more efficient to approximate f(z)/z and
 * then only use even powers. This is another minor optimization, since
 * we actually WANT f(z)/z, because it is going to be multiplied by
 * the vector between the two atoms to get the vectorial force. The
 * fastest flops are the ones we can avoid calculating!
 *
 * So, here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *
 *       2*exp(-z^2)     erf(z)
 *       ------------ - --------
 *       sqrt(Pi)*z^2      z^3
 *
 * 5. Multiply the entire expression by beta^3. This will get you
 *
 *       beta^3*2*exp(-z^2)     beta^3*erf(z)
 *       ------------------  - ---------------
 *          sqrt(Pi)*z^2            z^3
 *
 *    or, switching back to r (z=r*beta):
 *
 *       2*beta*exp(-r^2*beta^2)   erf(r*beta)
 *       ----------------------- - -----------
 *            sqrt(Pi)*r^2            r^3
 *
 *
 *    With a bit of math exercise you should be able to confirm that
 *    this is exactly D[Erf[beta*r]/r,r] divided by r another time.
 *
 * 6. Add the result to 1/r^3, multiply by the product of the charges,
 *    and you have your force (divided by r). A final multiplication
 *    with the vector connecting the two particles and you have your
 *    vectorial force to add to the particles.
 *
 */
static __m128d
gmx_mm_pmecorrF_pd(__m128d z2)
{
    const __m128d  FN10     = _mm_set1_pd(-8.0072854618360083154e-14);
    const __m128d  FN9      = _mm_set1_pd(1.1859116242260148027e-11);
    const __m128d  FN8      = _mm_set1_pd(-8.1490406329798423616e-10);
    const __m128d  FN7      = _mm_set1_pd(3.4404793543907847655e-8);
    const __m128d  FN6      = _mm_set1_pd(-9.9471420832602741006e-7);
    const __m128d  FN5      = _mm_set1_pd(0.000020740315999115847456);
    const __m128d  FN4      = _mm_set1_pd(-0.00031991745139313364005);
    const __m128d  FN3      = _mm_set1_pd(0.0035074449373659008203);
    const __m128d  FN2      = _mm_set1_pd(-0.031750380176100813405);
    const __m128d  FN1      = _mm_set1_pd(0.13884101728898463426);
    const __m128d  FN0      = _mm_set1_pd(-0.75225277815249618847);

    const __m128d  FD5      = _mm_set1_pd(0.000016009278224355026701);
    const __m128d  FD4      = _mm_set1_pd(0.00051055686934806966046);
    const __m128d  FD3      = _mm_set1_pd(0.0081803507497974289008);
    const __m128d  FD2      = _mm_set1_pd(0.077181146026670287235);
    const __m128d  FD1      = _mm_set1_pd(0.41543303143712535988);
    const __m128d  FD0      = _mm_set1_pd(1.0);

    __m128d        z4;
    __m128d        polyFN0, polyFN1, polyFD0, polyFD1;

    z4             = _mm_mul_pd(z2, z2);

    polyFD1        = _mm_mul_pd(FD5, z4);
    polyFD0        = _mm_mul_pd(FD4, z4);
    polyFD1        = _mm_add_pd(polyFD1, FD3);
    polyFD0        = _mm_add_pd(polyFD0, FD2);
    polyFD1        = _mm_mul_pd(polyFD1, z4);
    polyFD0        = _mm_mul_pd(polyFD0, z4);
    polyFD1        = _mm_add_pd(polyFD1, FD1);
    polyFD0        = _mm_add_pd(polyFD0, FD0);
    polyFD1        = _mm_mul_pd(polyFD1, z2);
    polyFD0        = _mm_add_pd(polyFD0, polyFD1);

    polyFD0        = gmx_mm_inv_pd(polyFD0);

    polyFN0        = _mm_mul_pd(FN10, z4);
    polyFN1        = _mm_mul_pd(FN9, z4);
    polyFN0        = _mm_add_pd(polyFN0, FN8);
    polyFN1        = _mm_add_pd(polyFN1, FN7);
    polyFN0        = _mm_mul_pd(polyFN0, z4);
    polyFN1        = _mm_mul_pd(polyFN1, z4);
    polyFN0        = _mm_add_pd(polyFN0, FN6);
    polyFN1        = _mm_add_pd(polyFN1, FN5);
    polyFN0        = _mm_mul_pd(polyFN0, z4);
    polyFN1        = _mm_mul_pd(polyFN1, z4);
    polyFN0        = _mm_add_pd(polyFN0, FN4);
    polyFN1        = _mm_add_pd(polyFN1, FN3);
    polyFN0        = _mm_mul_pd(polyFN0, z4);
    polyFN1        = _mm_mul_pd(polyFN1, z4);
    polyFN0        = _mm_add_pd(polyFN0, FN2);
    polyFN1        = _mm_add_pd(polyFN1, FN1);
    polyFN0        = _mm_mul_pd(polyFN0, z4);
    polyFN1        = _mm_mul_pd(polyFN1, z2);
    polyFN0        = _mm_add_pd(polyFN0, FN0);
    polyFN0        = _mm_add_pd(polyFN0, polyFN1);

    return _mm_mul_pd(polyFN0, polyFD0);
}
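
/*
 * Illustrative sketch of the six usage steps above (hypothetical helper, not
 * a GROMACS kernel; the guard macro is made up): given r^2 for a particle
 * pair, beta, and the charge product qq, this returns the scalar force F/r,
 * ready to be multiplied by the connecting vector.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static __m128d
gmx_example_pme_fscal(__m128d r2, double beta, double qq)
{
    __m128d z2    = _mm_mul_pd(_mm_set1_pd(beta*beta), r2);   /* steps 1-2 */
    __m128d corr  = gmx_mm_pmecorrF_pd(z2);                   /* steps 3-4 */
    __m128d rinv  = gmx_mm_invsqrt_pd(r2);
    __m128d rinv3 = _mm_mul_pd(_mm_mul_pd(rinv, rinv), rinv);

    /* step 5: scale by beta^3; step 6: add 1/r^3 and multiply by q_i*q_j */
    corr = _mm_mul_pd(corr, _mm_set1_pd(beta*beta*beta));
    return _mm_mul_pd(_mm_set1_pd(qq), _mm_add_pd(rinv3, corr));
}
#endif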




/* Calculate the potential correction due to PME analytically.
 *
 * See gmx_mm_pmecorrF_pd() above for details about the approximation.
 *
 * This routine calculates Erf(z)/z, although you should provide z^2
 * as the input argument.
 *
 * Here's how it should be used:
 *
 * 1. Calculate r^2.
 * 2. Multiply by beta^2, so you get z^2=beta^2*r^2.
 * 3. Evaluate this routine with z^2 as the argument.
 * 4. The return value is the expression:
 *
 *
 *        erf(z)
 *       --------
 *          z
 *
 * 5. Multiply the entire expression by beta and switch back to r (z=r*beta):
 *
 *       erf(r*beta)
 *       -----------
 *           r
 *
 * 6. Subtract the result from 1/r, multiply by the product of the charges,
 *    and you have your potential.
 *
 */
static __m128d
gmx_mm_pmecorrV_pd(__m128d z2)
{
    const __m128d  VN9      = _mm_set1_pd(-9.3723776169321855475e-13);
    const __m128d  VN8      = _mm_set1_pd(1.2280156762674215741e-10);
    const __m128d  VN7      = _mm_set1_pd(-7.3562157912251309487e-9);
    const __m128d  VN6      = _mm_set1_pd(2.6215886208032517509e-7);
    const __m128d  VN5      = _mm_set1_pd(-4.9532491651265819499e-6);
    const __m128d  VN4      = _mm_set1_pd(0.00025907400778966060389);
    const __m128d  VN3      = _mm_set1_pd(0.0010585044856156469792);
    const __m128d  VN2      = _mm_set1_pd(0.045247661136833092885);
    const __m128d  VN1      = _mm_set1_pd(0.11643931522926034421);
    const __m128d  VN0      = _mm_set1_pd(1.1283791671726767970);

    const __m128d  VD5      = _mm_set1_pd(0.000021784709867336150342);
    const __m128d  VD4      = _mm_set1_pd(0.00064293662010911388448);
    const __m128d  VD3      = _mm_set1_pd(0.0096311444822588683504);
    const __m128d  VD2      = _mm_set1_pd(0.085608012351550627051);
    const __m128d  VD1      = _mm_set1_pd(0.43652499166614811084);
    const __m128d  VD0      = _mm_set1_pd(1.0);

    __m128d        z4;
    __m128d        polyVN0, polyVN1, polyVD0, polyVD1;

    z4             = _mm_mul_pd(z2, z2);

    polyVD1        = _mm_mul_pd(VD5, z4);
    polyVD0        = _mm_mul_pd(VD4, z4);
    polyVD1        = _mm_add_pd(polyVD1, VD3);
    polyVD0        = _mm_add_pd(polyVD0, VD2);
    polyVD1        = _mm_mul_pd(polyVD1, z4);
    polyVD0        = _mm_mul_pd(polyVD0, z4);
    polyVD1        = _mm_add_pd(polyVD1, VD1);
    polyVD0        = _mm_add_pd(polyVD0, VD0);
    polyVD1        = _mm_mul_pd(polyVD1, z2);
    polyVD0        = _mm_add_pd(polyVD0, polyVD1);

    polyVD0        = gmx_mm_inv_pd(polyVD0);

    polyVN1        = _mm_mul_pd(VN9, z4);
    polyVN0        = _mm_mul_pd(VN8, z4);
    polyVN1        = _mm_add_pd(polyVN1, VN7);
    polyVN0        = _mm_add_pd(polyVN0, VN6);
    polyVN1        = _mm_mul_pd(polyVN1, z4);
    polyVN0        = _mm_mul_pd(polyVN0, z4);
    polyVN1        = _mm_add_pd(polyVN1, VN5);
    polyVN0        = _mm_add_pd(polyVN0, VN4);
    polyVN1        = _mm_mul_pd(polyVN1, z4);
    polyVN0        = _mm_mul_pd(polyVN0, z4);
    polyVN1        = _mm_add_pd(polyVN1, VN3);
    polyVN0        = _mm_add_pd(polyVN0, VN2);
    polyVN1        = _mm_mul_pd(polyVN1, z4);
    polyVN0        = _mm_mul_pd(polyVN0, z4);
    polyVN1        = _mm_add_pd(polyVN1, VN1);
    polyVN0        = _mm_add_pd(polyVN0, VN0);
    polyVN1        = _mm_mul_pd(polyVN1, z2);
    polyVN0        = _mm_add_pd(polyVN0, polyVN1);

    return _mm_mul_pd(polyVN0, polyVD0);
}
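
/*
 * Illustrative sketch of the usage steps above (hypothetical helper, made-up
 * guard macro): given r^2, beta, and the charge product qq, this evaluates
 * the corrected direct-space potential qq*(1/r - erf(beta*r)/r).
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static __m128d
gmx_example_pme_vcorr(__m128d r2, double beta, double qq)
{
    __m128d z2   = _mm_mul_pd(_mm_set1_pd(beta*beta), r2);   /* steps 1-2 */
    __m128d corr = gmx_mm_pmecorrV_pd(z2);                   /* steps 3-4: erf(z)/z */
    __m128d rinv = gmx_mm_invsqrt_pd(r2);

    /* step 5: multiply by beta to get erf(beta*r)/r; step 6: subtract from 1/r */
    corr = _mm_mul_pd(corr, _mm_set1_pd(beta));
    return _mm_mul_pd(_mm_set1_pd(qq), _mm_sub_pd(rinv, corr));
}
#endif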



static int
gmx_mm_sincos_pd(__m128d  x,
                 __m128d *sinval,
                 __m128d *cosval)
{
#ifdef _MSC_VER
    __declspec(align(16))
    const double sintable[34] =
    {
        1.00000000000000000e+00, 0.00000000000000000e+00,
        9.95184726672196929e-01, 9.80171403295606036e-02,
        9.80785280403230431e-01, 1.95090322016128248e-01,
        9.56940335732208824e-01, 2.90284677254462331e-01,
        9.23879532511286738e-01, 3.82683432365089782e-01,
        8.81921264348355050e-01, 4.71396736825997642e-01,
        8.31469612302545236e-01, 5.55570233019602178e-01,
        7.73010453362736993e-01, 6.34393284163645488e-01,
        7.07106781186547573e-01, 7.07106781186547462e-01,
        6.34393284163645599e-01, 7.73010453362736882e-01,
        5.55570233019602289e-01, 8.31469612302545125e-01,
        4.71396736825997809e-01, 8.81921264348354939e-01,
        3.82683432365089837e-01, 9.23879532511286738e-01,
        2.90284677254462276e-01, 9.56940335732208935e-01,
        1.95090322016128304e-01, 9.80785280403230431e-01,
        9.80171403295607702e-02, 9.95184726672196818e-01,
        0.0, 1.00000000000000000e+00
    };
#else
    const __m128d sintable[17] =
    {
        _mm_set_pd( 0.0, 1.0 ),
        _mm_set_pd( sin(  1.0 * (M_PI/2.0) / 16.0), cos(  1.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  2.0 * (M_PI/2.0) / 16.0), cos(  2.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  3.0 * (M_PI/2.0) / 16.0), cos(  3.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  4.0 * (M_PI/2.0) / 16.0), cos(  4.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  5.0 * (M_PI/2.0) / 16.0), cos(  5.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  6.0 * (M_PI/2.0) / 16.0), cos(  6.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  7.0 * (M_PI/2.0) / 16.0), cos(  7.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  8.0 * (M_PI/2.0) / 16.0), cos(  8.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin(  9.0 * (M_PI/2.0) / 16.0), cos(  9.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 10.0 * (M_PI/2.0) / 16.0), cos( 10.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 11.0 * (M_PI/2.0) / 16.0), cos( 11.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 12.0 * (M_PI/2.0) / 16.0), cos( 12.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 13.0 * (M_PI/2.0) / 16.0), cos( 13.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 14.0 * (M_PI/2.0) / 16.0), cos( 14.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd( sin( 15.0 * (M_PI/2.0) / 16.0), cos( 15.0 * (M_PI/2.0) / 16.0) ),
        _mm_set_pd(  1.0, 0.0 )
    };
#endif

    const __m128d signmask       = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128i signbit_epi32  = _mm_set1_epi32(0x80000000);

    const __m128d tabscale      = _mm_set1_pd(32.0/M_PI);
    const __m128d invtabscale0  = _mm_set1_pd(9.81747508049011230469e-02);
    const __m128d invtabscale1  = _mm_set1_pd(1.96197799156550576057e-08);
    const __m128i ione          = _mm_set1_epi32(1);
    const __m128i i32           = _mm_set1_epi32(32);
    const __m128i i16           = _mm_set1_epi32(16);
    const __m128i tabmask       = _mm_set1_epi32(0x3F);
    const __m128d sinP7         = _mm_set1_pd(-1.0/5040.0);
    const __m128d sinP5         = _mm_set1_pd(1.0/120.0);
    const __m128d sinP3         = _mm_set1_pd(-1.0/6.0);
    const __m128d sinP1         = _mm_set1_pd(1.0);

    const __m128d cosP6         = _mm_set1_pd(-1.0/720.0);
    const __m128d cosP4         = _mm_set1_pd(1.0/24.0);
    const __m128d cosP2         = _mm_set1_pd(-1.0/2.0);
    const __m128d cosP0         = _mm_set1_pd(1.0);

    __m128d       scalex;
    __m128i       tabidx, corridx;
    __m128d       xabs, z, z2, polySin, polyCos;
    __m128d       xpoint;
    __m128d       ypoint0, ypoint1;

    __m128d       sinpoint, cospoint;
    __m128d       xsign, ssign, csign;
    __m128i       imask, sswapsign, cswapsign;
    __m128d       minusone;

    xsign    = _mm_andnot_pd(signmask, x);
    xabs     = _mm_and_pd(x, signmask);

    scalex   = _mm_mul_pd(tabscale, xabs);
    tabidx   = _mm_cvtpd_epi32(scalex);

    xpoint   = _mm_cvtepi32_pd(tabidx);

    /* Extended precision arithmetics */
    z        = _mm_sub_pd(xabs, _mm_mul_pd(invtabscale0, xpoint));
    z        = _mm_sub_pd(z, _mm_mul_pd(invtabscale1, xpoint));

    /* Range reduction to 0..2*Pi */
    tabidx   = _mm_and_si128(tabidx, tabmask);

    /* tabidx is now in range [0..63] */
    imask     = _mm_cmpgt_epi32(tabidx, i32);
    sswapsign = imask;
    cswapsign = imask;
    corridx   = _mm_and_si128(imask, i32);
    tabidx    = _mm_sub_epi32(tabidx, corridx);

    /* tabidx is now in range [0..32] */
    imask     = _mm_cmpgt_epi32(tabidx, i16);
    cswapsign = _mm_xor_si128(cswapsign, imask);
    corridx   = _mm_sub_epi32(i32, tabidx);
    tabidx    = _mm_or_si128( _mm_and_si128(imask, corridx), _mm_andnot_si128(imask, tabidx) );
    /* tabidx is now in range [0..16] */
    ssign     = _mm_cvtepi32_pd( _mm_or_si128( sswapsign, ione ) );
    csign     = _mm_cvtepi32_pd( _mm_or_si128( cswapsign, ione ) );

#ifdef _MSC_VER
    ypoint0  = _mm_load_pd(sintable + 2*gmx_mm_extract_epi32(tabidx, 0));
    ypoint1  = _mm_load_pd(sintable + 2*gmx_mm_extract_epi32(tabidx, 1));
#else
    ypoint0  = sintable[gmx_mm_extract_epi32(tabidx, 0)];
    ypoint1  = sintable[gmx_mm_extract_epi32(tabidx, 1)];
#endif
    sinpoint = _mm_unpackhi_pd(ypoint0, ypoint1);
    cospoint = _mm_unpacklo_pd(ypoint0, ypoint1);

    sinpoint = _mm_mul_pd(sinpoint, ssign);
    cospoint = _mm_mul_pd(cospoint, csign);

    z2       = _mm_mul_pd(z, z);

    polySin  = _mm_mul_pd(sinP7, z2);
    polySin  = _mm_add_pd(polySin, sinP5);
    polySin  = _mm_mul_pd(polySin, z2);
    polySin  = _mm_add_pd(polySin, sinP3);
    polySin  = _mm_mul_pd(polySin, z2);
    polySin  = _mm_add_pd(polySin, sinP1);
    polySin  = _mm_mul_pd(polySin, z);

    polyCos  = _mm_mul_pd(cosP6, z2);
    polyCos  = _mm_add_pd(polyCos, cosP4);
    polyCos  = _mm_mul_pd(polyCos, z2);
    polyCos  = _mm_add_pd(polyCos, cosP2);
    polyCos  = _mm_mul_pd(polyCos, z2);
    polyCos  = _mm_add_pd(polyCos, cosP0);

    *sinval  = _mm_xor_pd(_mm_add_pd( _mm_mul_pd(sinpoint, polyCos), _mm_mul_pd(cospoint, polySin) ), xsign);
    *cosval  = _mm_sub_pd( _mm_mul_pd(cospoint, polyCos), _mm_mul_pd(sinpoint, polySin) );

    return 0;
}
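
/*
 * Illustrative usage sketch (hypothetical helper, made-up guard macro): one
 * gmx_mm_sincos_pd call returns both functions for two angles at once; note
 * that _mm_set_pd takes the high element first, so lane 0 holds angle a.
 */
#ifdef GMX_SSE2_DOUBLE_MATH_EXAMPLES
static void
gmx_example_sincos(double a, double b)
{
    __m128d s, c;
    double  sv[2], cv[2];

    gmx_mm_sincos_pd(_mm_set_pd(b, a), &s, &c);
    _mm_storeu_pd(sv, s);
    _mm_storeu_pd(cv, c);
    printf("sin/cos(%g)=%.17g/%.17g  sin/cos(%g)=%.17g/%.17g\n",
           a, sv[0], cv[0], b, sv[1], cv[1]);
}
#endif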

/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static __m128d
gmx_mm_sin_pd(__m128d x)
{
    __m128d s, c;
    gmx_mm_sincos_pd(x, &s, &c);
    return s;
}

/*
 * IMPORTANT: Do NOT call both sin & cos if you need both results, since each of them
 * will then call the sincos() routine and waste a factor 2 in performance!
 */
static __m128d
gmx_mm_cos_pd(__m128d x)
{
    __m128d s, c;
    gmx_mm_sincos_pd(x, &s, &c);
    return c;
}



static __m128d
gmx_mm_tan_pd(__m128d x)
{
    __m128d sinval, cosval;
    __m128d tanval;

    gmx_mm_sincos_pd(x, &sinval, &cosval);

    tanval = _mm_mul_pd(sinval, gmx_mm_inv_pd(cosval));

    return tanval;
}



static __m128d
gmx_mm_asin_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.625);
    const __m128d limit2    = _mm_set1_pd(1e-8);
    const __m128d one       = _mm_set1_pd(1.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d morebits  = _mm_set1_pd(6.123233995736765886130e-17);

    const __m128d P5        = _mm_set1_pd(4.253011369004428248960e-3);
    const __m128d P4        = _mm_set1_pd(-6.019598008014123785661e-1);
    const __m128d P3        = _mm_set1_pd(5.444622390564711410273e0);
    const __m128d P2        = _mm_set1_pd(-1.626247967210700244449e1);
    const __m128d P1        = _mm_set1_pd(1.956261983317594739197e1);
    const __m128d P0        = _mm_set1_pd(-8.198089802484824371615e0);

    const __m128d Q4        = _mm_set1_pd(-1.474091372988853791896e1);
    const __m128d Q3        = _mm_set1_pd(7.049610280856842141659e1);
    const __m128d Q2        = _mm_set1_pd(-1.471791292232726029859e2);
    const __m128d Q1        = _mm_set1_pd(1.395105614657485689735e2);
    const __m128d Q0        = _mm_set1_pd(-4.918853881490881290097e1);

    const __m128d R4        = _mm_set1_pd(2.967721961301243206100e-3);
    const __m128d R3        = _mm_set1_pd(-5.634242780008963776856e-1);
    const __m128d R2        = _mm_set1_pd(6.968710824104713396794e0);
    const __m128d R1        = _mm_set1_pd(-2.556901049652824852289e1);
    const __m128d R0        = _mm_set1_pd(2.853665548261061424989e1);

    const __m128d S3        = _mm_set1_pd(-2.194779531642920639778e1);
    const __m128d S2        = _mm_set1_pd(1.470656354026814941758e2);
    const __m128d S1        = _mm_set1_pd(-3.838770957603691357202e2);
    const __m128d S0        = _mm_set1_pd(3.424398657913078477438e2);

    __m128d       sign;
    __m128d       mask;
    __m128d       xabs;
    __m128d       zz, ww, z, q, w, y, zz2, ww2;
    __m128d       PA, PB;
    __m128d       QA, QB;
    __m128d       RA, RB;
    __m128d       SA, SB;
    __m128d       nom, denom;

    sign  = _mm_andnot_pd(signmask, x);
    xabs  = _mm_and_pd(x, signmask);

    mask  = _mm_cmpgt_pd(xabs, limit1);

    zz    = _mm_sub_pd(one, xabs);
    ww    = _mm_mul_pd(xabs, xabs);
    zz2   = _mm_mul_pd(zz, zz);
    ww2   = _mm_mul_pd(ww, ww);

    /* R */
    RA    = _mm_mul_pd(R4, zz2);
    RB    = _mm_mul_pd(R3, zz2);
    RA    = _mm_add_pd(RA, R2);
    RB    = _mm_add_pd(RB, R1);
    RA    = _mm_mul_pd(RA, zz2);
    RB    = _mm_mul_pd(RB, zz);
    RA    = _mm_add_pd(RA, R0);
    RA    = _mm_add_pd(RA, RB);

    /* S, SA = zz2 */
    SB    = _mm_mul_pd(S3, zz2);
    SA    = _mm_add_pd(zz2, S2);
    SB    = _mm_add_pd(SB, S1);
    SA    = _mm_mul_pd(SA, zz2);
    SB    = _mm_mul_pd(SB, zz);
    SA    = _mm_add_pd(SA, S0);
    SA    = _mm_add_pd(SA, SB);

    /* P */
    PA    = _mm_mul_pd(P5, ww2);
    PB    = _mm_mul_pd(P4, ww2);
    PA    = _mm_add_pd(PA, P3);
    PB    = _mm_add_pd(PB, P2);
    PA    = _mm_mul_pd(PA, ww2);
    PB    = _mm_mul_pd(PB, ww2);
    PA    = _mm_add_pd(PA, P1);
    PB    = _mm_add_pd(PB, P0);
    PA    = _mm_mul_pd(PA, ww);
    PA    = _mm_add_pd(PA, PB);

    /* Q, QA = ww2 */
    QB    = _mm_mul_pd(Q4, ww2);
    QA    = _mm_add_pd(ww2, Q3);
    QB    = _mm_add_pd(QB, Q2);
    QA    = _mm_mul_pd(QA, ww2);
    QB    = _mm_mul_pd(QB, ww2);
    QA    = _mm_add_pd(QA, Q1);
    QB    = _mm_add_pd(QB, Q0);
    QA    = _mm_mul_pd(QA, ww);
    QA    = _mm_add_pd(QA, QB);

    RA    = _mm_mul_pd(RA, zz);
    PA    = _mm_mul_pd(PA, ww);

    nom   = _mm_or_pd( _mm_andnot_pd(mask, PA), _mm_and_pd(mask, RA) );
    denom = _mm_or_pd( _mm_andnot_pd(mask, QA), _mm_and_pd(mask, SA) );

    q     = _mm_mul_pd( nom, gmx_mm_inv_pd(denom) );

    zz    = _mm_add_pd(zz, zz);
    zz    = gmx_mm_sqrt_pd(zz);
    z     = _mm_sub_pd(quarterpi, zz);
    zz    = _mm_mul_pd(zz, q);
    zz    = _mm_sub_pd(zz, morebits);
    z     = _mm_sub_pd(z, zz);
    z     = _mm_add_pd(z, quarterpi);

    w     = _mm_mul_pd(xabs, q);
    w     = _mm_add_pd(w, xabs);

    z     = _mm_or_pd( _mm_andnot_pd(mask, w), _mm_and_pd(mask, z) );

    mask  = _mm_cmpgt_pd(xabs, limit2);
    z     = _mm_or_pd( _mm_andnot_pd(mask, xabs), _mm_and_pd(mask, z) );

    z = _mm_xor_pd(z, sign);

    return z;
}


static __m128d
gmx_mm_acos_pd(__m128d x)
{
    const __m128d signmask   = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128d one        = _mm_set1_pd(1.0);
    const __m128d half       = _mm_set1_pd(0.5);
    const __m128d pi         = _mm_set1_pd(M_PI);
    const __m128d quarterpi0 = _mm_set1_pd(7.85398163397448309616e-1);
    const __m128d quarterpi1 = _mm_set1_pd(6.123233995736765886130e-17);


    __m128d mask1;

    __m128d z, z1, z2;

    mask1 = _mm_cmpgt_pd(x, half);
    z1    = _mm_mul_pd(half, _mm_sub_pd(one, x));
    z1    = gmx_mm_sqrt_pd(z1);
    z     = _mm_or_pd( _mm_andnot_pd(mask1, x), _mm_and_pd(mask1, z1) );

    z     = gmx_mm_asin_pd(z);

    z1    = _mm_add_pd(z, z);

    z2    = _mm_sub_pd(quarterpi0, z);
    z2    = _mm_add_pd(z2, quarterpi1);
    z2    = _mm_add_pd(z2, quarterpi0);

    z     = _mm_or_pd(_mm_andnot_pd(mask1, z2), _mm_and_pd(mask1, z1));

    return z;
}

static __m128d
gmx_mm_atan_pd(__m128d x)
{
    /* Same algorithm as cephes library */
    const __m128d signmask  = gmx_mm_castsi128_pd( _mm_set_epi32(0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF) );
    const __m128d limit1    = _mm_set1_pd(0.66);
    const __m128d limit2    = _mm_set1_pd(2.41421356237309504880);
    const __m128d quarterpi = _mm_set1_pd(M_PI/4.0);
    const __m128d halfpi    = _mm_set1_pd(M_PI/2.0);
    const __m128d mone      = _mm_set1_pd(-1.0);
    const __m128d morebits1 = _mm_set1_pd(0.5*6.123233995736765886130E-17);
    const __m128d morebits2 = _mm_set1_pd(6.123233995736765886130E-17);

    const __m128d P4        = _mm_set1_pd(-8.750608600031904122785E-1);
    const __m128d P3        = _mm_set1_pd(-1.615753718733365076637E1);
    const __m128d P2        = _mm_set1_pd(-7.500855792314704667340E1);
    const __m128d P1        = _mm_set1_pd(-1.228866684490136173410E2);
    const __m128d P0        = _mm_set1_pd(-6.485021904942025371773E1);

    const __m128d Q4        = _mm_set1_pd(2.485846490142306297962E1);
    const __m128d Q3        = _mm_set1_pd(1.650270098316988542046E2);
    const __m128d Q2        = _mm_set1_pd(4.328810604912902668951E2);
    const __m128d Q1        = _mm_set1_pd(4.853903996359136964868E2);
    const __m128d Q0        = _mm_set1_pd(1.945506571482613964425E2);

    __m128d       sign;
    __m128d       mask1, mask2;
    __m128d       y, t1, t2;
    __m128d       z, z2;
    __m128d       P_A, P_B, Q_A, Q_B;

    sign   = _mm_andnot_pd(signmask, x);
    x      = _mm_and_pd(x, signmask);

    mask1  = _mm_cmpgt_pd(x, limit1);
    mask2  = _mm_cmpgt_pd(x, limit2);

    t1     = _mm_mul_pd(_mm_add_pd(x, mone), gmx_mm_inv_pd(_mm_sub_pd(x, mone)));
    t2     = _mm_mul_pd(mone, gmx_mm_inv_pd(x));

    y      = _mm_and_pd(mask1, quarterpi);
    y      = _mm_or_pd( _mm_and_pd(mask2, halfpi), _mm_andnot_pd(mask2, y) );

    x      = _mm_or_pd( _mm_and_pd(mask1, t1), _mm_andnot_pd(mask1, x) );
    x      = _mm_or_pd( _mm_and_pd(mask2, t2), _mm_andnot_pd(mask2, x) );

    z      = _mm_mul_pd(x, x);
    z2     = _mm_mul_pd(z, z);

    P_A    = _mm_mul_pd(P4, z2);
    P_B    = _mm_mul_pd(P3, z2);
    P_A    = _mm_add_pd(P_A, P2);
    P_B    = _mm_add_pd(P_B, P1);
    P_A    = _mm_mul_pd(P_A, z2);
    P_B    = _mm_mul_pd(P_B, z);
    P_A    = _mm_add_pd(P_A, P0);
    P_A    = _mm_add_pd(P_A, P_B);

    /* Q_A = z2 */
    Q_B    = _mm_mul_pd(Q4, z2);
    Q_A    = _mm_add_pd(z2, Q3);
    Q_B    = _mm_add_pd(Q_B, Q2);
    Q_A    = _mm_mul_pd(Q_A, z2);
    Q_B    = _mm_mul_pd(Q_B, z2);
    Q_A    = _mm_add_pd(Q_A, Q1);
    Q_B    = _mm_add_pd(Q_B, Q0);
    Q_A    = _mm_mul_pd(Q_A, z);
    Q_A    = _mm_add_pd(Q_A, Q_B);

    z      = _mm_mul_pd(z, P_A);
    z      = _mm_mul_pd(z, gmx_mm_inv_pd(Q_A));
    z      = _mm_mul_pd(z, x);
    z      = _mm_add_pd(z, x);

    t1     = _mm_and_pd(mask1, morebits1);
    t1     = _mm_or_pd( _mm_and_pd(mask2, morebits2), _mm_andnot_pd(mask2, t1) );

    z      = _mm_add_pd(z, t1);
    y      = _mm_add_pd(y, z);

    y      = _mm_xor_pd(y, sign);

    return y;
}


static __m128d
gmx_mm_atan2_pd(__m128d y, __m128d x)
{
    const __m128d pi          = _mm_set1_pd(M_PI);
    const __m128d minuspi     = _mm_set1_pd(-M_PI);
    const __m128d halfpi      = _mm_set1_pd(M_PI/2.0);
    const __m128d minushalfpi = _mm_set1_pd(-M_PI/2.0);

    __m128d       z, z1, z3, z4;
    __m128d       w;
    __m128d       maskx_lt, maskx_eq;
    __m128d       masky_lt, masky_eq;
    __m128d       mask1, mask2, mask3, mask4, maskall;

    maskx_lt  = _mm_cmplt_pd(x, _mm_setzero_pd());
    masky_lt  = _mm_cmplt_pd(y, _mm_setzero_pd());
    maskx_eq  = _mm_cmpeq_pd(x, _mm_setzero_pd());
    masky_eq  = _mm_cmpeq_pd(y, _mm_setzero_pd());

    z         = _mm_mul_pd(y, gmx_mm_inv_pd(x));
    z         = gmx_mm_atan_pd(z);

    mask1     = _mm_and_pd(maskx_eq, masky_lt);
    mask2     = _mm_andnot_pd(maskx_lt, masky_eq);
    mask3     = _mm_andnot_pd( _mm_or_pd(masky_lt, masky_eq), maskx_eq);
    mask4     = _mm_and_pd(masky_eq, maskx_lt);

    maskall   = _mm_or_pd( _mm_or_pd(mask1, mask2), _mm_or_pd(mask3, mask4) );

    z         = _mm_andnot_pd(maskall, z);
    z1        = _mm_and_pd(mask1, minushalfpi);
    z3        = _mm_and_pd(mask3, halfpi);
    z4        = _mm_and_pd(mask4, pi);

    z         = _mm_or_pd( _mm_or_pd(z, z1), _mm_or_pd(z3, z4) );

    w         = _mm_or_pd(_mm_andnot_pd(masky_lt, pi), _mm_and_pd(masky_lt, minuspi));
    w         = _mm_and_pd(w, maskx_lt);

    w         = _mm_andnot_pd(maskall, w);

    z         = _mm_add_pd(z, w);

    return z;
}

#endif /*_gmx_math_x86_sse2_double_h_ */