/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _kernelutil_x86_sse4_1_double_h_
#define _kernelutil_x86_sse4_1_double_h_

#include <math.h>
#include <stdio.h>

#include "gromacs/simd/general_x86_sse4_1.h"


/* Normal sum of four xmm registers */
#define gmx_mm_sum4_pd(t0, t1, t2, t3)  _mm_add_pd(_mm_add_pd(t0, t1), _mm_add_pd(t2, t3))

static int
gmx_mm_any_lt(__m128d a, __m128d b)
{
    return _mm_movemask_pd(_mm_cmplt_pd(a, b));
}

static gmx_inline __m128d
gmx_mm_calc_rsq_pd(__m128d dx, __m128d dy, __m128d dz)
{
    return _mm_add_pd( _mm_add_pd( _mm_mul_pd(dx, dx), _mm_mul_pd(dy, dy) ), _mm_mul_pd(dz, dz) );
}

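/*
 * Illustrative usage sketch added here; it is not part of the original kernel
 * API. It shows how gmx_mm_calc_rsq_pd() and gmx_mm_any_lt() typically
 * combine: form r^2 for two interactions at once and test whether either
 * distance is inside the cutoff. The function name and the rcut2 argument
 * are hypothetical.
 */
static gmx_inline int
gmx_mm_example_any_within_cutoff(__m128d dx, __m128d dy, __m128d dz, __m128d rcut2)
{
    __m128d rsq = gmx_mm_calc_rsq_pd(dx, dy, dz);

    /* Nonzero if at least one of the two lanes has rsq < rcut2 */
    return gmx_mm_any_lt(rsq, rcut2);
}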

/* Load a double value from 1-2 places, merge into xmm register */
static gmx_inline __m128d
gmx_mm_load_2real_swizzle_pd(const double * gmx_restrict ptrA,
                             const double * gmx_restrict ptrB)
{
    return _mm_unpacklo_pd(_mm_load_sd(ptrA), _mm_load_sd(ptrB));
}

static gmx_inline __m128d
gmx_mm_load_1real_pd(const double * gmx_restrict ptrA)
{
    return _mm_load_sd(ptrA);
}


static gmx_inline void
gmx_mm_store_2real_swizzle_pd(double * gmx_restrict ptrA,
                              double * gmx_restrict ptrB,
                              __m128d               xmm1)
{
    __m128d t2;

    t2       = _mm_unpackhi_pd(xmm1, xmm1);
    _mm_store_sd(ptrA, xmm1);
    _mm_store_sd(ptrB, t2);
}

static gmx_inline void
gmx_mm_store_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    _mm_store_sd(ptrA, xmm1);
}


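/*
 * Data-layout note added for clarity (annotation, not original kernel
 * documentation): gmx_mm_load_2real_swizzle_pd() returns { *ptrA, *ptrB } in
 * lanes 0 and 1 of the xmm register, and gmx_mm_store_2real_swizzle_pd() is
 * its inverse, writing lane 0 to ptrA and lane 1 to ptrB. For example,
 * loading two charges and storing them back leaves memory unchanged
 * (chargeA/chargeB are hypothetical pointers used only for illustration):
 *
 *     __m128d q = gmx_mm_load_2real_swizzle_pd(chargeA, chargeB);
 *     gmx_mm_store_2real_swizzle_pd(chargeA, chargeB, q);
 */
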
/* Similar to store, but increments value in memory */
static gmx_inline void
gmx_mm_increment_2real_swizzle_pd(double * gmx_restrict ptrA,
                                  double * gmx_restrict ptrB, __m128d xmm1)
{
    __m128d t1;

    t1   = _mm_unpackhi_pd(xmm1, xmm1);
    xmm1 = _mm_add_sd(xmm1, _mm_load_sd(ptrA));
    t1   = _mm_add_sd(t1, _mm_load_sd(ptrB));
    _mm_store_sd(ptrA, xmm1);
    _mm_store_sd(ptrB, t1);
}

static gmx_inline void
gmx_mm_increment_1real_pd(double * gmx_restrict ptrA, __m128d xmm1)
{
    __m128d tmp;

    tmp = gmx_mm_load_1real_pd(ptrA);
    tmp = _mm_add_sd(tmp, xmm1);
    gmx_mm_store_1real_pd(ptrA, tmp);
}

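/*
 * Note added for clarity (lane0/lane1 is illustrative notation only, not
 * original documentation): the increment helpers above are read-modify-write
 * operations, e.g. gmx_mm_increment_2real_swizzle_pd(pA, pB, v) is equivalent
 * to
 *
 *     *pA += lane0(v);    *pB += lane1(v);
 *
 * which is how per-lane energy or force contributions are folded back into
 * scalar memory.
 */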

static gmx_inline void
gmx_mm_load_2pair_swizzle_pd(const double * gmx_restrict p1,
                             const double * gmx_restrict p2,
                             __m128d * gmx_restrict      c6,
                             __m128d * gmx_restrict      c12)
{
    __m128d t1, t2, t3;

    t1   = _mm_loadu_pd(p1);
    t2   = _mm_loadu_pd(p2);
    *c6  = _mm_unpacklo_pd(t1, t2);
    *c12 = _mm_unpackhi_pd(t1, t2);
}

static gmx_inline void
gmx_mm_load_1pair_swizzle_pd(const double * gmx_restrict p1,
                             __m128d * gmx_restrict      c6,
                             __m128d * gmx_restrict      c12)
{
    *c6     = _mm_load_sd(p1);
    *c12    = _mm_load_sd(p1+1);
}



static gmx_inline void
gmx_mm_load_shift_and_1rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict      x1,
                                         __m128d * gmx_restrict      y1,
                                         __m128d * gmx_restrict      z1)
{
    __m128d mem_xy, mem_z, mem_sxy, mem_sz;

    mem_xy  = _mm_loadu_pd(xyz);
    mem_z   = _mm_load_sd(xyz+2);
    mem_sxy = _mm_loadu_pd(xyz_shift);
    mem_sz  = _mm_load_sd(xyz_shift+2);

    mem_xy  = _mm_add_pd(mem_xy, mem_sxy);
    mem_z   = _mm_add_pd(mem_z, mem_sz);

    *x1  = _mm_shuffle_pd(mem_xy, mem_xy, _MM_SHUFFLE2(0, 0));
    *y1  = _mm_shuffle_pd(mem_xy, mem_xy, _MM_SHUFFLE2(1, 1));
    *z1  = _mm_shuffle_pd(mem_z, mem_z, _MM_SHUFFLE2(0, 0));
}


static gmx_inline void
gmx_mm_load_shift_and_3rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1, t2, t3, t4, t5, sxy, sz, szx, syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_load_sd(xyz+8);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz, sxy, _MM_SHUFFLE2(0, 0));
    syz = _mm_shuffle_pd(sxy, sz, _MM_SHUFFLE2(0, 1));

    t1  = _mm_add_pd(t1, sxy);
    t2  = _mm_add_pd(t2, szx);
    t3  = _mm_add_pd(t3, syz);
    t4  = _mm_add_pd(t4, sxy);
    t5  = _mm_add_sd(t5, sz);

    *x1  = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(0, 0));
    *y1  = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(1, 1));
    *z1  = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(0, 0));
    *x2  = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(1, 1));
    *y2  = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(0, 0));
    *z2  = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(1, 1));
    *x3  = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(0, 0));
    *y3  = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(1, 1));
    *z3  = _mm_shuffle_pd(t5, t5, _MM_SHUFFLE2(0, 0));
}

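/*
 * Layout note added for clarity (annotation, not original documentation):
 * the shift-and-broadcast routines read an interleaved coordinate block
 * x1,y1,z1,x2,y2,z2,... two doubles at a time, so after the first pair the
 * xyz pattern rotates and the shift vector must be presented as (sx,sy),
 * (sz,sx), (sy,sz), ... That is exactly what the szx/syz shuffles above
 * construct before the adds; each shifted component is then broadcast to
 * both lanes of its output register.
 */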

static gmx_inline void
gmx_mm_load_shift_and_4rvec_broadcast_pd(const double * gmx_restrict xyz_shift,
                                         const double * gmx_restrict xyz,
                                         __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                         __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                         __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                         __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1, t2, t3, t4, t5, t6, sxy, sz, szx, syz;

    t1  = _mm_loadu_pd(xyz);
    t2  = _mm_loadu_pd(xyz+2);
    t3  = _mm_loadu_pd(xyz+4);
    t4  = _mm_loadu_pd(xyz+6);
    t5  = _mm_loadu_pd(xyz+8);
    t6  = _mm_loadu_pd(xyz+10);

    sxy = _mm_loadu_pd(xyz_shift);
    sz  = _mm_load_sd(xyz_shift+2);
    szx = _mm_shuffle_pd(sz, sxy, _MM_SHUFFLE2(0, 0));
    syz = _mm_shuffle_pd(sxy, sz, _MM_SHUFFLE2(0, 1));

    t1  = _mm_add_pd(t1, sxy);
    t2  = _mm_add_pd(t2, szx);
    t3  = _mm_add_pd(t3, syz);
    t4  = _mm_add_pd(t4, sxy);
    t5  = _mm_add_pd(t5, szx);
    t6  = _mm_add_pd(t6, syz);

    *x1  = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(0, 0));
    *y1  = _mm_shuffle_pd(t1, t1, _MM_SHUFFLE2(1, 1));
    *z1  = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(0, 0));
    *x2  = _mm_shuffle_pd(t2, t2, _MM_SHUFFLE2(1, 1));
    *y2  = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(0, 0));
    *z2  = _mm_shuffle_pd(t3, t3, _MM_SHUFFLE2(1, 1));
    *x3  = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(0, 0));
    *y3  = _mm_shuffle_pd(t4, t4, _MM_SHUFFLE2(1, 1));
    *z3  = _mm_shuffle_pd(t5, t5, _MM_SHUFFLE2(0, 0));
    *x4  = _mm_shuffle_pd(t5, t5, _MM_SHUFFLE2(1, 1));
    *y4  = _mm_shuffle_pd(t6, t6, _MM_SHUFFLE2(0, 0));
    *z4  = _mm_shuffle_pd(t6, t6, _MM_SHUFFLE2(1, 1));
}




static gmx_inline void
gmx_mm_load_1rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x, __m128d * gmx_restrict y, __m128d * gmx_restrict z)
{
    *x            = _mm_load_sd(p1);
    *y            = _mm_load_sd(p1+1);
    *z            = _mm_load_sd(p1+2);
}

static gmx_inline void
gmx_mm_load_3rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    *x1            = _mm_load_sd(p1);
    *y1            = _mm_load_sd(p1+1);
    *z1            = _mm_load_sd(p1+2);
    *x2            = _mm_load_sd(p1+3);
    *y2            = _mm_load_sd(p1+4);
    *z2            = _mm_load_sd(p1+5);
    *x3            = _mm_load_sd(p1+6);
    *y3            = _mm_load_sd(p1+7);
    *z3            = _mm_load_sd(p1+8);
}

static gmx_inline void
gmx_mm_load_4rvec_1ptr_swizzle_pd(const double * gmx_restrict p1,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    *x1            = _mm_load_sd(p1);
    *y1            = _mm_load_sd(p1+1);
    *z1            = _mm_load_sd(p1+2);
    *x2            = _mm_load_sd(p1+3);
    *y2            = _mm_load_sd(p1+4);
    *z2            = _mm_load_sd(p1+5);
    *x3            = _mm_load_sd(p1+6);
    *y3            = _mm_load_sd(p1+7);
    *z3            = _mm_load_sd(p1+8);
    *x4            = _mm_load_sd(p1+9);
    *y4            = _mm_load_sd(p1+10);
    *z4            = _mm_load_sd(p1+11);
}


static gmx_inline void
gmx_mm_load_1rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA,
                                  const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1)
{
    __m128d t1, t2, t3, t4;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_load_sd(ptrA+2);
    t4           = _mm_load_sd(ptrB+2);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    *x1          = t1;
    *y1          = t2;
    *z1          = _mm_unpacklo_pd(t3, t4);
}

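/*
 * Note added for clarity (annotation, not original documentation): the
 * two-pointer loaders below follow the pattern of
 * gmx_mm_load_1rvec_2ptr_swizzle_pd() above: load the interleaved coordinates
 * of both j particles and use GMX_MM_TRANSPOSE2_PD to turn per-particle pairs
 * into per-component registers, e.g.
 *
 *     t1 = { xA, yA }, t2 = { xB, yB }   ->   t1 = { xA, xB }, t2 = { yA, yB }
 *
 * so that x, y and z of the two particles end up in separate xmm registers.
 */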

static gmx_inline void
gmx_mm_load_3rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3)
{
    __m128d t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_loadu_pd(ptrA+2);
    t4           = _mm_loadu_pd(ptrB+2);
    t5           = _mm_loadu_pd(ptrA+4);
    t6           = _mm_loadu_pd(ptrB+4);
    t7           = _mm_loadu_pd(ptrA+6);
    t8           = _mm_loadu_pd(ptrB+6);
    t9           = _mm_load_sd(ptrA+8);
    t10          = _mm_load_sd(ptrB+8);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    GMX_MM_TRANSPOSE2_PD(t3, t4);
    GMX_MM_TRANSPOSE2_PD(t5, t6);
    GMX_MM_TRANSPOSE2_PD(t7, t8);
    *x1          = t1;
    *y1          = t2;
    *z1          = t3;
    *x2          = t4;
    *y2          = t5;
    *z2          = t6;
    *x3          = t7;
    *y3          = t8;
    *z3          = _mm_unpacklo_pd(t9, t10);
}


static gmx_inline void
gmx_mm_load_4rvec_2ptr_swizzle_pd(const double * gmx_restrict ptrA, const double * gmx_restrict ptrB,
                                  __m128d * gmx_restrict x1, __m128d * gmx_restrict y1, __m128d * gmx_restrict z1,
                                  __m128d * gmx_restrict x2, __m128d * gmx_restrict y2, __m128d * gmx_restrict z2,
                                  __m128d * gmx_restrict x3, __m128d * gmx_restrict y3, __m128d * gmx_restrict z3,
                                  __m128d * gmx_restrict x4, __m128d * gmx_restrict y4, __m128d * gmx_restrict z4)
{
    __m128d t1, t2, t3, t4, t5, t6;
    t1           = _mm_loadu_pd(ptrA);
    t2           = _mm_loadu_pd(ptrB);
    t3           = _mm_loadu_pd(ptrA+2);
    t4           = _mm_loadu_pd(ptrB+2);
    t5           = _mm_loadu_pd(ptrA+4);
    t6           = _mm_loadu_pd(ptrB+4);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    GMX_MM_TRANSPOSE2_PD(t3, t4);
    GMX_MM_TRANSPOSE2_PD(t5, t6);
    *x1          = t1;
    *y1          = t2;
    *z1          = t3;
    *x2          = t4;
    *y2          = t5;
    *z2          = t6;
    t1           = _mm_loadu_pd(ptrA+6);
    t2           = _mm_loadu_pd(ptrB+6);
    t3           = _mm_loadu_pd(ptrA+8);
    t4           = _mm_loadu_pd(ptrB+8);
    t5           = _mm_loadu_pd(ptrA+10);
    t6           = _mm_loadu_pd(ptrB+10);
    GMX_MM_TRANSPOSE2_PD(t1, t2);
    GMX_MM_TRANSPOSE2_PD(t3, t4);
    GMX_MM_TRANSPOSE2_PD(t5, t6);
    *x3          = t1;
    *y3          = t2;
    *z3          = t3;
    *x4          = t4;
    *y4          = t5;
    *z4          = t6;
}


/* Routines to decrement rvec in memory, typically used for j particle force updates */
static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_noswizzle_pd(double * gmx_restrict ptrA,
                                         __m128d xy, __m128d z)
{
    __m128d t1, t2;

    t1 = _mm_loadu_pd(ptrA);
    t2 = _mm_load_sd(ptrA+2);

    t1 = _mm_sub_pd(t1, xy);
    t2 = _mm_sub_sd(t2, z);

    _mm_storeu_pd(ptrA, t1);
    _mm_store_sd(ptrA+2, t2);
}


static gmx_inline void
gmx_mm_decrement_1rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1, t2, t3;

    t1           = _mm_load_sd(ptrA);
    t2           = _mm_load_sd(ptrA+1);
    t3           = _mm_load_sd(ptrA+2);

    t1           = _mm_sub_sd(t1, x1);
    t2           = _mm_sub_sd(t2, y1);
    t3           = _mm_sub_sd(t3, z1);
    _mm_store_sd(ptrA, t1);
    _mm_store_sd(ptrA+1, t2);
    _mm_store_sd(ptrA+2, t3);
}


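/*
 * Note added for clarity (lane0/lane1 is illustrative notation only):
 * gmx_mm_decrement_1rvec_1ptr_swizzle_pd() above performs
 *
 *     ptrA[0] -= lane0(x1);  ptrA[1] -= lane0(y1);  ptrA[2] -= lane0(z1);
 *
 * while the noswizzle variant expects x and y already packed in one register.
 * The 2ptr variants further down subtract lane 0 into ptrA and lane 1 into
 * ptrB, matching the two-particle layout produced by the load_*_2ptr loaders.
 */
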
#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_1ptr_swizzle_pd(ptrA, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5; \
        _t1          = _mm_loadu_pd(ptrA); \
        _t2          = _mm_loadu_pd(ptrA+2); \
        _t3          = _mm_loadu_pd(ptrA+4); \
        _t4          = _mm_loadu_pd(ptrA+6); \
        _t5          = _mm_load_sd(ptrA+8); \
        _x1          = _mm_unpacklo_pd(_x1, _y1); \
        _z1          = _mm_unpacklo_pd(_z1, _x2); \
        _y2          = _mm_unpacklo_pd(_y2, _z2); \
        _x3          = _mm_unpacklo_pd(_x3, _y3); \
        _t1          = _mm_sub_pd(_t1, _x1); \
        _t2          = _mm_sub_pd(_t2, _z1); \
        _t3          = _mm_sub_pd(_t3, _y2); \
        _t4          = _mm_sub_pd(_t4, _x3); \
        _t5          = _mm_sub_sd(_t5, _z3); \
        _mm_storeu_pd(ptrA, _t1); \
        _mm_storeu_pd(ptrA+2, _t2); \
        _mm_storeu_pd(ptrA+4, _t3); \
        _mm_storeu_pd(ptrA+6, _t4); \
        _mm_store_sd(ptrA+8, _t5); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1, t2, t3, t4, t5;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_load_sd(ptrA+8);

    x1          = _mm_unpacklo_pd(x1, y1);
    z1          = _mm_unpacklo_pd(z1, x2);
    y2          = _mm_unpacklo_pd(y2, z2);
    x3          = _mm_unpacklo_pd(x3, y3);
    /* nothing to be done for z3 */

    t1          = _mm_sub_pd(t1, x1);
    t2          = _mm_sub_pd(t2, z1);
    t3          = _mm_sub_pd(t3, y2);
    t4          = _mm_sub_pd(t4, x3);
    t5          = _mm_sub_sd(t5, z3);
    _mm_storeu_pd(ptrA, t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_store_sd(ptrA+8, t5);
}
#endif


#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_1ptr_swizzle_pd(ptrA, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5, _t6; \
        _t1          = _mm_loadu_pd(ptrA); \
        _t2          = _mm_loadu_pd(ptrA+2); \
        _t3          = _mm_loadu_pd(ptrA+4); \
        _t4          = _mm_loadu_pd(ptrA+6); \
        _t5          = _mm_loadu_pd(ptrA+8); \
        _t6          = _mm_loadu_pd(ptrA+10); \
        _x1          = _mm_unpacklo_pd(_x1, _y1); \
        _z1          = _mm_unpacklo_pd(_z1, _x2); \
        _y2          = _mm_unpacklo_pd(_y2, _z2); \
        _x3          = _mm_unpacklo_pd(_x3, _y3); \
        _z3          = _mm_unpacklo_pd(_z3, _x4); \
        _y4          = _mm_unpacklo_pd(_y4, _z4); \
        _mm_storeu_pd(ptrA,    _mm_sub_pd( _t1, _x1 )); \
        _mm_storeu_pd(ptrA+2,  _mm_sub_pd( _t2, _z1 )); \
        _mm_storeu_pd(ptrA+4,  _mm_sub_pd( _t3, _y2 )); \
        _mm_storeu_pd(ptrA+6,  _mm_sub_pd( _t4, _x3 )); \
        _mm_storeu_pd(ptrA+8,  _mm_sub_pd( _t5, _z3 )); \
        _mm_storeu_pd(ptrA+10, _mm_sub_pd( _t6, _y4 )); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_1ptr_swizzle_pd(double * gmx_restrict ptrA,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1, t2, t3, t4, t5, t6;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_loadu_pd(ptrA+8);
    t6          = _mm_loadu_pd(ptrA+10);

    x1          = _mm_unpacklo_pd(x1, y1);
    z1          = _mm_unpacklo_pd(z1, x2);
    y2          = _mm_unpacklo_pd(y2, z2);
    x3          = _mm_unpacklo_pd(x3, y3);
    z3          = _mm_unpacklo_pd(z3, x4);
    y4          = _mm_unpacklo_pd(y4, z4);

    _mm_storeu_pd(ptrA,    _mm_sub_pd( t1, x1 ));
    _mm_storeu_pd(ptrA+2,  _mm_sub_pd( t2, z1 ));
    _mm_storeu_pd(ptrA+4,  _mm_sub_pd( t3, y2 ));
    _mm_storeu_pd(ptrA+6,  _mm_sub_pd( t4, x3 ));
    _mm_storeu_pd(ptrA+8,  _mm_sub_pd( t5, z3 ));
    _mm_storeu_pd(ptrA+10, _mm_sub_pd( t6, y4 ));
}
#endif


static gmx_inline void
gmx_mm_decrement_1rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1)
{
    __m128d t1, t2, t3, t4, t5, t6, t7;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_load_sd(ptrA+2);
    t3          = _mm_loadu_pd(ptrB);
    t4          = _mm_load_sd(ptrB+2);

    t5          = _mm_unpacklo_pd(x1, y1);
    t6          = _mm_unpackhi_pd(x1, y1);
    t7          = _mm_unpackhi_pd(z1, z1);

    t1          = _mm_sub_pd(t1, t5);
    t2          = _mm_sub_sd(t2, z1);

    t3          = _mm_sub_pd(t3, t6);
    t4          = _mm_sub_sd(t4, t7);

    _mm_storeu_pd(ptrA, t1);
    _mm_store_sd(ptrA+2, t2);
    _mm_storeu_pd(ptrB, t3);
    _mm_store_sd(ptrB+2, t4);
}

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_3rvec_2ptr_swizzle_pd(ptrA, ptrB, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10; \
        __m128d _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH, _tI; \
        _t1          = _mm_loadu_pd(ptrA); \
        _t2          = _mm_loadu_pd(ptrA+2); \
        _t3          = _mm_loadu_pd(ptrA+4); \
        _t4          = _mm_loadu_pd(ptrA+6); \
        _t5          = _mm_load_sd(ptrA+8); \
        _t6          = _mm_loadu_pd(ptrB); \
        _t7          = _mm_loadu_pd(ptrB+2); \
        _t8          = _mm_loadu_pd(ptrB+4); \
        _t9          = _mm_loadu_pd(ptrB+6); \
        _t10         = _mm_load_sd(ptrB+8); \
        _tA          = _mm_unpacklo_pd(_x1, _y1); \
        _tB          = _mm_unpackhi_pd(_x1, _y1); \
        _tC          = _mm_unpacklo_pd(_z1, _x2); \
        _tD          = _mm_unpackhi_pd(_z1, _x2); \
        _tE          = _mm_unpacklo_pd(_y2, _z2); \
        _tF          = _mm_unpackhi_pd(_y2, _z2); \
        _tG          = _mm_unpacklo_pd(_x3, _y3); \
        _tH          = _mm_unpackhi_pd(_x3, _y3); \
        _tI          = _mm_unpackhi_pd(_z3, _z3); \
        _t1          = _mm_sub_pd(_t1, _tA); \
        _t2          = _mm_sub_pd(_t2, _tC); \
        _t3          = _mm_sub_pd(_t3, _tE); \
        _t4          = _mm_sub_pd(_t4, _tG); \
        _t5          = _mm_sub_sd(_t5, _z3); \
        _t6          = _mm_sub_pd(_t6, _tB); \
        _t7          = _mm_sub_pd(_t7, _tD); \
        _t8          = _mm_sub_pd(_t8, _tF); \
        _t9          = _mm_sub_pd(_t9, _tH); \
        _t10         = _mm_sub_sd(_t10, _tI); \
        _mm_storeu_pd(ptrA, _t1); \
        _mm_storeu_pd(ptrA+2, _t2); \
        _mm_storeu_pd(ptrA+4, _t3); \
        _mm_storeu_pd(ptrA+6, _t4); \
        _mm_store_sd(ptrA+8, _t5); \
        _mm_storeu_pd(ptrB, _t6); \
        _mm_storeu_pd(ptrB+2, _t7); \
        _mm_storeu_pd(ptrB+4, _t8); \
        _mm_storeu_pd(ptrB+6, _t9); \
        _mm_store_sd(ptrB+8, _t10); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_3rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3)
{
    __m128d t1, t2, t3, t4, t5, t6, t7, t8, t9, t10;
    __m128d tA, tB, tC, tD, tE, tF, tG, tH, tI;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_load_sd(ptrA+8);
    t6          = _mm_loadu_pd(ptrB);
    t7          = _mm_loadu_pd(ptrB+2);
    t8          = _mm_loadu_pd(ptrB+4);
    t9          = _mm_loadu_pd(ptrB+6);
    t10         = _mm_load_sd(ptrB+8);

    tA          = _mm_unpacklo_pd(x1, y1);
    tB          = _mm_unpackhi_pd(x1, y1);
    tC          = _mm_unpacklo_pd(z1, x2);
    tD          = _mm_unpackhi_pd(z1, x2);
    tE          = _mm_unpacklo_pd(y2, z2);
    tF          = _mm_unpackhi_pd(y2, z2);
    tG          = _mm_unpacklo_pd(x3, y3);
    tH          = _mm_unpackhi_pd(x3, y3);
    tI          = _mm_unpackhi_pd(z3, z3);

    t1          = _mm_sub_pd(t1, tA);
    t2          = _mm_sub_pd(t2, tC);
    t3          = _mm_sub_pd(t3, tE);
    t4          = _mm_sub_pd(t4, tG);
    t5          = _mm_sub_sd(t5, z3);

    t6          = _mm_sub_pd(t6, tB);
    t7          = _mm_sub_pd(t7, tD);
    t8          = _mm_sub_pd(t8, tF);
    t9          = _mm_sub_pd(t9, tH);
    t10         = _mm_sub_sd(t10, tI);

    _mm_storeu_pd(ptrA, t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_store_sd(ptrA+8, t5);
    _mm_storeu_pd(ptrB, t6);
    _mm_storeu_pd(ptrB+2, t7);
    _mm_storeu_pd(ptrB+4, t8);
    _mm_storeu_pd(ptrB+6, t9);
    _mm_store_sd(ptrB+8, t10);
}
#endif


#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_decrement_4rvec_2ptr_swizzle_pd(ptrA, ptrB, _x1, _y1, _z1, _x2, _y2, _z2, _x3, _y3, _z3, _x4, _y4, _z4) \
    { \
        __m128d _t1, _t2, _t3, _t4, _t5, _t6, _t7, _t8, _t9, _t10, _t11, _t12; \
        __m128d _tA, _tB, _tC, _tD, _tE, _tF, _tG, _tH, _tI, _tJ, _tK, _tL; \
        _t1          = _mm_loadu_pd(ptrA); \
        _t2          = _mm_loadu_pd(ptrA+2); \
        _t3          = _mm_loadu_pd(ptrA+4); \
        _t4          = _mm_loadu_pd(ptrA+6); \
        _t5          = _mm_loadu_pd(ptrA+8); \
        _t6          = _mm_loadu_pd(ptrA+10); \
        _t7          = _mm_loadu_pd(ptrB); \
        _t8          = _mm_loadu_pd(ptrB+2); \
        _t9          = _mm_loadu_pd(ptrB+4); \
        _t10         = _mm_loadu_pd(ptrB+6); \
        _t11         = _mm_loadu_pd(ptrB+8); \
        _t12         = _mm_loadu_pd(ptrB+10); \
        _tA          = _mm_unpacklo_pd(_x1, _y1); \
        _tB          = _mm_unpackhi_pd(_x1, _y1); \
        _tC          = _mm_unpacklo_pd(_z1, _x2); \
        _tD          = _mm_unpackhi_pd(_z1, _x2); \
        _tE          = _mm_unpacklo_pd(_y2, _z2); \
        _tF          = _mm_unpackhi_pd(_y2, _z2); \
        _tG          = _mm_unpacklo_pd(_x3, _y3); \
        _tH          = _mm_unpackhi_pd(_x3, _y3); \
        _tI          = _mm_unpacklo_pd(_z3, _x4); \
        _tJ          = _mm_unpackhi_pd(_z3, _x4); \
        _tK          = _mm_unpacklo_pd(_y4, _z4); \
        _tL          = _mm_unpackhi_pd(_y4, _z4); \
        _t1          = _mm_sub_pd(_t1, _tA); \
        _t2          = _mm_sub_pd(_t2, _tC); \
        _t3          = _mm_sub_pd(_t3, _tE); \
        _t4          = _mm_sub_pd(_t4, _tG); \
        _t5          = _mm_sub_pd(_t5, _tI); \
        _t6          = _mm_sub_pd(_t6, _tK); \
        _t7          = _mm_sub_pd(_t7, _tB); \
        _t8          = _mm_sub_pd(_t8, _tD); \
        _t9          = _mm_sub_pd(_t9, _tF); \
        _t10         = _mm_sub_pd(_t10, _tH); \
        _t11         = _mm_sub_pd(_t11, _tJ); \
        _t12         = _mm_sub_pd(_t12, _tL); \
        _mm_storeu_pd(ptrA,  _t1); \
        _mm_storeu_pd(ptrA+2, _t2); \
        _mm_storeu_pd(ptrA+4, _t3); \
        _mm_storeu_pd(ptrA+6, _t4); \
        _mm_storeu_pd(ptrA+8, _t5); \
        _mm_storeu_pd(ptrA+10, _t6); \
        _mm_storeu_pd(ptrB,  _t7); \
        _mm_storeu_pd(ptrB+2, _t8); \
        _mm_storeu_pd(ptrB+4, _t9); \
        _mm_storeu_pd(ptrB+6, _t10); \
        _mm_storeu_pd(ptrB+8, _t11); \
        _mm_storeu_pd(ptrB+10, _t12); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_decrement_4rvec_2ptr_swizzle_pd(double * gmx_restrict ptrA, double * gmx_restrict ptrB,
                                       __m128d x1, __m128d y1, __m128d z1,
                                       __m128d x2, __m128d y2, __m128d z2,
                                       __m128d x3, __m128d y3, __m128d z3,
                                       __m128d x4, __m128d y4, __m128d z4)
{
    __m128d t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
    __m128d tA, tB, tC, tD, tE, tF, tG, tH, tI, tJ, tK, tL;

    t1          = _mm_loadu_pd(ptrA);
    t2          = _mm_loadu_pd(ptrA+2);
    t3          = _mm_loadu_pd(ptrA+4);
    t4          = _mm_loadu_pd(ptrA+6);
    t5          = _mm_loadu_pd(ptrA+8);
    t6          = _mm_loadu_pd(ptrA+10);
    t7          = _mm_loadu_pd(ptrB);
    t8          = _mm_loadu_pd(ptrB+2);
    t9          = _mm_loadu_pd(ptrB+4);
    t10         = _mm_loadu_pd(ptrB+6);
    t11         = _mm_loadu_pd(ptrB+8);
    t12         = _mm_loadu_pd(ptrB+10);

    tA          = _mm_unpacklo_pd(x1, y1);
    tB          = _mm_unpackhi_pd(x1, y1);
    tC          = _mm_unpacklo_pd(z1, x2);
    tD          = _mm_unpackhi_pd(z1, x2);
    tE          = _mm_unpacklo_pd(y2, z2);
    tF          = _mm_unpackhi_pd(y2, z2);
    tG          = _mm_unpacklo_pd(x3, y3);
    tH          = _mm_unpackhi_pd(x3, y3);
    tI          = _mm_unpacklo_pd(z3, x4);
    tJ          = _mm_unpackhi_pd(z3, x4);
    tK          = _mm_unpacklo_pd(y4, z4);
    tL          = _mm_unpackhi_pd(y4, z4);

    t1          = _mm_sub_pd(t1, tA);
    t2          = _mm_sub_pd(t2, tC);
    t3          = _mm_sub_pd(t3, tE);
    t4          = _mm_sub_pd(t4, tG);
    t5          = _mm_sub_pd(t5, tI);
    t6          = _mm_sub_pd(t6, tK);

    t7          = _mm_sub_pd(t7, tB);
    t8          = _mm_sub_pd(t8, tD);
    t9          = _mm_sub_pd(t9, tF);
    t10         = _mm_sub_pd(t10, tH);
    t11         = _mm_sub_pd(t11, tJ);
    t12         = _mm_sub_pd(t12, tL);

    _mm_storeu_pd(ptrA,  t1);
    _mm_storeu_pd(ptrA+2, t2);
    _mm_storeu_pd(ptrA+4, t3);
    _mm_storeu_pd(ptrA+6, t4);
    _mm_storeu_pd(ptrA+8, t5);
    _mm_storeu_pd(ptrA+10, t6);
    _mm_storeu_pd(ptrB,  t7);
    _mm_storeu_pd(ptrB+2, t8);
    _mm_storeu_pd(ptrB+4, t9);
    _mm_storeu_pd(ptrB+6, t10);
    _mm_storeu_pd(ptrB+8, t11);
    _mm_storeu_pd(ptrB+10, t12);
}
#endif




static gmx_inline void
gmx_mm_update_iforce_1atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    fix1 = _mm_hadd_pd(fix1, fiy1);
    fiz1 = _mm_hadd_pd(fiz1, fiz1);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_store_sd( fptr+2, _mm_add_sd( _mm_load_sd(fptr+2), fiz1 ));

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}

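/*
 * Note added for clarity (annotation, not original documentation):
 * _mm_hadd_pd(a, b) returns { a[0]+a[1], b[0]+b[1] }, so in
 * gmx_mm_update_iforce_1atom_swizzle_pd() above the first hadd yields
 * { sum(fix1), sum(fiy1) } and the second { sum(fiz1), sum(fiz1) }; those
 * lane sums are then accumulated into both the force and shift-force arrays.
 * The 3- and 4-atom variants below apply the same trick pairwise across
 * components before reducing the shift force.
 */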

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_3atom_swizzle_pd(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, \
                                              fptr, fshiftptr) \
    { \
        __m128d _t1, _t2; \
        fix1 = _mm_hadd_pd(fix1, fiy1); \
        fiz1 = _mm_hadd_pd(fiz1, fix2); \
        fiy2 = _mm_hadd_pd(fiy2, fiz2); \
        fix3 = _mm_hadd_pd(fix3, fiy3); \
        fiz3 = _mm_hadd_pd(fiz3, fiz3); \
        _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 )); \
        _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 )); \
        _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 )); \
        _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 )); \
        _mm_store_sd( fptr+8, _mm_add_sd( _mm_load_sd(fptr+8), fiz3 )); \
        fix1  = _mm_add_pd(fix1, fix3); \
        _t1   = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1)); \
        fix1  = _mm_add_pd(fix1, _t1); \
        _t2   = _mm_shuffle_pd(fiy2, fiy2, _MM_SHUFFLE2(1, 1)); \
        fiz1  = _mm_add_sd(fiz1, fiz3); \
        fiz1  = _mm_add_sd(fiz1, _t2); \
        _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 )); \
        _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 )); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_3atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1, t2;

    fix1 = _mm_hadd_pd(fix1, fiy1);
    fiz1 = _mm_hadd_pd(fiz1, fix2);
    fiy2 = _mm_hadd_pd(fiy2, fiz2);
    fix3 = _mm_hadd_pd(fix3, fiy3);
    fiz3 = _mm_hadd_pd(fiz3, fiz3);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr), fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2), fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4), fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6), fix3 ));
    _mm_store_sd( fptr+8, _mm_add_sd( _mm_load_sd(fptr+8), fiz3 ));

    fix1 = _mm_add_pd(fix1, fix3);
    t1   = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1));
    fix1 = _mm_add_pd(fix1, t1); /* x and y sums */

    t2   = _mm_shuffle_pd(fiy2, fiy2, _MM_SHUFFLE2(1, 1));
    fiz1 = _mm_add_sd(fiz1, fiz3);
    fiz1 = _mm_add_sd(fiz1, t2); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}
#endif

#if defined (_MSC_VER) && defined(_M_IX86)
/* Macro work-around since 32-bit MSVC cannot handle >3 xmm/ymm parameters */
#define gmx_mm_update_iforce_4atom_swizzle_pd(fix1, fiy1, fiz1, fix2, fiy2, fiz2, fix3, fiy3, fiz3, fix4, fiy4, fiz4, \
                                              fptr, fshiftptr) \
    { \
        __m128d _t1, _t2; \
        fix1 = _mm_hadd_pd(fix1, fiy1); \
        fiz1 = _mm_hadd_pd(fiz1, fix2); \
        fiy2 = _mm_hadd_pd(fiy2, fiz2); \
        fix3 = _mm_hadd_pd(fix3, fiy3); \
        fiz3 = _mm_hadd_pd(fiz3, fix4); \
        fiy4 = _mm_hadd_pd(fiy4, fiz4); \
        _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr),       fix1 )); \
        _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2),   fiz1 )); \
        _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4),   fiy2 )); \
        _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6),   fix3 )); \
        _mm_storeu_pd( fptr+8, _mm_add_pd( _mm_loadu_pd(fptr+8),   fiz3 )); \
        _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 )); \
        _t1  = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1)); \
        fix1 = _mm_add_pd(fix1, _t1); \
        _t2  = _mm_shuffle_pd(fiz3, fiy4, _MM_SHUFFLE2(0, 1)); \
        fix3 = _mm_add_pd(fix3, _t2); \
        fix1 = _mm_add_pd(fix1, fix3); \
        fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2, fiy2)); \
        fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4, fiy4)); \
        fiz1 = _mm_add_sd(fiz1, fiz3); \
        _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 )); \
        _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 )); \
    }
#else
/* Real function for sane compilers */
static gmx_inline void
gmx_mm_update_iforce_4atom_swizzle_pd(__m128d fix1, __m128d fiy1, __m128d fiz1,
                                      __m128d fix2, __m128d fiy2, __m128d fiz2,
                                      __m128d fix3, __m128d fiy3, __m128d fiz3,
                                      __m128d fix4, __m128d fiy4, __m128d fiz4,
                                      double * gmx_restrict fptr,
                                      double * gmx_restrict fshiftptr)
{
    __m128d t1, t2;

    fix1 = _mm_hadd_pd(fix1, fiy1);
    fiz1 = _mm_hadd_pd(fiz1, fix2);
    fiy2 = _mm_hadd_pd(fiy2, fiz2);
    fix3 = _mm_hadd_pd(fix3, fiy3);
    fiz3 = _mm_hadd_pd(fiz3, fix4);
    fiy4 = _mm_hadd_pd(fiy4, fiz4);

    _mm_storeu_pd( fptr, _mm_add_pd( _mm_loadu_pd(fptr),       fix1 ));
    _mm_storeu_pd( fptr+2, _mm_add_pd( _mm_loadu_pd(fptr+2),   fiz1 ));
    _mm_storeu_pd( fptr+4, _mm_add_pd( _mm_loadu_pd(fptr+4),   fiy2 ));
    _mm_storeu_pd( fptr+6, _mm_add_pd( _mm_loadu_pd(fptr+6),   fix3 ));
    _mm_storeu_pd( fptr+8, _mm_add_pd( _mm_loadu_pd(fptr+8),   fiz3 ));
    _mm_storeu_pd( fptr+10, _mm_add_pd( _mm_loadu_pd(fptr+10), fiy4 ));

    t1   = _mm_shuffle_pd(fiz1, fiy2, _MM_SHUFFLE2(0, 1));
    fix1 = _mm_add_pd(fix1, t1);
    t2   = _mm_shuffle_pd(fiz3, fiy4, _MM_SHUFFLE2(0, 1));
    fix3 = _mm_add_pd(fix3, t2);
    fix1 = _mm_add_pd(fix1, fix3); /* x and y sums */

    fiz1 = _mm_add_sd(fiz1, _mm_unpackhi_pd(fiy2, fiy2));
    fiz3 = _mm_add_sd(fiz3, _mm_unpackhi_pd(fiy4, fiy4));
    fiz1 = _mm_add_sd(fiz1, fiz3); /* z sum */

    _mm_storeu_pd( fshiftptr, _mm_add_pd( _mm_loadu_pd(fshiftptr), fix1 ));
    _mm_store_sd( fshiftptr+2, _mm_add_sd( _mm_load_sd(fshiftptr+2), fiz1 ));
}
#endif

static gmx_inline void
gmx_mm_update_1pot_pd(__m128d pot1, double * gmx_restrict ptrA)
{
    pot1 = _mm_hadd_pd(pot1, pot1);
    _mm_store_sd(ptrA, _mm_add_sd(pot1, _mm_load_sd(ptrA)));
}

static gmx_inline void
gmx_mm_update_2pot_pd(__m128d pot1, double * gmx_restrict ptrA,
                      __m128d pot2, double * gmx_restrict ptrB)
{
    pot1 = _mm_hadd_pd(pot1, pot2);
    pot2 = _mm_unpackhi_pd(pot1, pot1);

    _mm_store_sd(ptrA, _mm_add_sd(pot1, _mm_load_sd(ptrA)));
    _mm_store_sd(ptrB, _mm_add_sd(pot2, _mm_load_sd(ptrB)));
}


#endif /* _kernelutil_x86_sse4_1_double_h_ */